Bases: BaseThinkingReasoningParser
Reasoning parser for the Qwen3/Qwen3.5 model family.
The Qwen3 model family uses <think>...</think> tokens to denote reasoning text. Starting with Qwen3.5, the chat template places <think> in the prompt so only </think> appears in the generated output. The model provides a strict switch to disable reasoning output via the 'enable_thinking=False' parameter.
When thinking is disabled, the template places <think>\n\n</think>\n\n in the prompt. The serving layer detects this via prompt_is_reasoning_end and routes deltas as content without calling the streaming parser.
NOTE: Models up to the 2507 release (e.g., Qwen/Qwen3-235B-A22B-Instruct-2507) use an older chat template where the model generates <think> itself. This parser handles both styles: if <think> appears in the generated output it is stripped before extraction (non-streaming) or skipped (streaming).
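To make the two styles concrete, here is a minimal, self-contained sketch of the non-streaming split described above. The helper split_reasoning and the sample strings are illustrative stand-ins written for this example, not part of the vLLM API; the full source follows below.

```python
# Illustrative sketch only: `split_reasoning` is a stand-in for this example,
# not the vLLM API. It mirrors the partition-based split shown below.
START, END = "<think>", "</think>"

def split_reasoning(model_output: str) -> tuple[str | None, str | None]:
    # Old-style templates may leave <think> in the output; drop it if present.
    before, sep, after = model_output.partition(START)
    model_output = after if sep else before
    # No </think> means thinking was disabled or no reasoning was produced.
    if END not in model_output:
        return None, model_output
    reasoning, _, content = model_output.partition(END)
    return reasoning, content or None

# New-style template: only </think> is generated.
print(split_reasoning("Check the premise first.</think>Yes, it holds."))
# ('Check the premise first.', 'Yes, it holds.')

# Old-style template: the model emits <think> itself.
print(split_reasoning("<think>Check the premise first.</think>Yes, it holds."))
# ('Check the premise first.', 'Yes, it holds.')

# Thinking disabled: no think tokens, everything is content.
print(split_reasoning("Yes, it holds."))
# (None, 'Yes, it holds.')
```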
Source code in vllm/reasoning/qwen3_reasoning_parser.py
```python
class Qwen3ReasoningParser(BaseThinkingReasoningParser):
    """
    Reasoning parser for the Qwen3/Qwen3.5 model family.

    The Qwen3 model family uses <think>...</think> tokens to denote reasoning
    text. Starting with Qwen3.5, the chat template places <think> in the
    prompt so only </think> appears in the generated output. The model
    provides a strict switch to disable reasoning output via the
    'enable_thinking=False' parameter.

    When thinking is disabled, the template places <think>\\n\\n</think>\\n\\n
    in the prompt. The serving layer detects this via prompt_is_reasoning_end
    and routes deltas as content without calling the streaming parser.

    NOTE: Models up to the 2507 release (e.g., Qwen/Qwen3-235B-A22B-Instruct-2507)
    use an older chat template where the model generates <think> itself.
    This parser handles both styles: if <think> appears in the generated output
    it is stripped before extraction (non-streaming) or skipped (streaming).
    """

    @property
    def start_token(self) -> str:
        """The token that starts reasoning content."""
        return "<think>"

    @property
    def end_token(self) -> str:
        """The token that ends reasoning content."""
        return "</think>"

    def extract_reasoning(
        self, model_output: str, request: ChatCompletionRequest | ResponsesRequest
    ) -> tuple[str | None, str | None]:
        """
        Extract reasoning content from the model output.

        The <think> token is placed in the prompt by the chat template,
        so typically only </think> appears in the generated output.
        If <think> is present (e.g. from a different template), it is
        stripped before extraction.

        When thinking is disabled (no </think> in output), returns
        (None, model_output) to indicate all output is content.

        Returns:
            tuple[Optional[str], Optional[str]]: reasoning content and content
        """
        # Strip <think> if present in the generated output.
        model_output_parts = model_output.partition(self.start_token)
        model_output = (
            model_output_parts[2] if model_output_parts[1] else model_output_parts[0]
        )

        if self.end_token not in model_output:
            # No end token means thinking is disabled or the model
            # did not produce reasoning. Treat everything as content.
            return None, model_output

        # Extract reasoning content from the model output.
        reasoning, _, content = model_output.partition(self.end_token)
        final_content = content or None
        return reasoning, final_content

    def extract_reasoning_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
    ) -> DeltaMessage | None:
        """
        Extract reasoning content from a streaming delta.

        Since <think> is placed in the prompt by the chat template, all
        generated tokens before </think> are reasoning and tokens after
        are content.

        NOTE: When thinking is disabled, no think tokens appear in the
        generated output. The serving layer detects this via
        prompt_is_reasoning_end and routes deltas as content without
        calling this method.
        """
        # Strip <think> from delta if present (old template / edge case
        # where the model generates <think> itself).
        if self.start_token_id in delta_token_ids:
            start_idx = delta_text.find(self.start_token)
            if start_idx >= 0:
                delta_text = delta_text[start_idx + len(self.start_token) :]

        if self.end_token_id in delta_token_ids:
            # End token in this delta: split reasoning from content.
            end_index = delta_text.find(self.end_token)
            if end_index >= 0:
                reasoning = delta_text[:end_index]
                content = delta_text[end_index + len(self.end_token) :]
                if not reasoning and not content:
                    return None
                return DeltaMessage(
                    reasoning=reasoning if reasoning else None,
                    content=content if content else None,
                )
            # end_token_id in IDs but not in text (already stripped)
            return None

        # No end token in this delta.
        if not delta_text:
            # Nothing left after stripping start token.
            return None
        elif self.end_token_id in previous_token_ids:
            # End token already passed: everything is content now.
            return DeltaMessage(content=delta_text)
        else:
            # No end token yet: still in reasoning phase.
            return DeltaMessage(reasoning=delta_text)
```
end_token property
The token that ends reasoning content.
start_token property
The token that starts reasoning content.
extract_reasoning(
    model_output: str,
    request: ChatCompletionRequest | ResponsesRequest,
) -> tuple[str | None, str | None]
Extract reasoning content from the model output.
The <think> token is placed in the prompt by the chat template, so typically only </think> appears in the generated output. If <think> is present (e.g. from a different template), it is stripped before extraction.
When thinking is disabled (no </think> in output), returns (None, model_output) to indicate all output is content.
Returns:
| Type | Description |
| --- | --- |
| tuple[str \| None, str \| None] | reasoning content and content |
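For illustration, a hedged usage sketch of the return contract. It assumes parser is an already-constructed Qwen3ReasoningParser and request is the originating ChatCompletionRequest; how those objects are obtained is not shown here. The expected values follow directly from the source below.

```python
# Hedged usage sketch: `parser` and `request` are assumed to already exist;
# constructing them (tokenizer, request fields) is intentionally omitted.

# New-style template: only </think> appears in the generated output.
reasoning, content = parser.extract_reasoning(
    "Compare both options first.</think>Option B fits better.", request
)
# reasoning == "Compare both options first."
# content == "Option B fits better."

# Thinking disabled or no reasoning produced: no </think> in the output.
reasoning, content = parser.extract_reasoning("Option B fits better.", request)
# reasoning is None and content == "Option B fits better."

# Nothing after </think>: content is normalized to None rather than "".
reasoning, content = parser.extract_reasoning("Only internal notes here.</think>", request)
# reasoning == "Only internal notes here." and content is None
```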
Source code in vllm/reasoning/qwen3_reasoning_parser.py
```python
def extract_reasoning(
    self, model_output: str, request: ChatCompletionRequest | ResponsesRequest
) -> tuple[str | None, str | None]:
    """
    Extract reasoning content from the model output.

    The <think> token is placed in the prompt by the chat template,
    so typically only </think> appears in the generated output.
    If <think> is present (e.g. from a different template), it is
    stripped before extraction.

    When thinking is disabled (no </think> in output), returns
    (None, model_output) to indicate all output is content.

    Returns:
        tuple[Optional[str], Optional[str]]: reasoning content and content
    """
    # Strip <think> if present in the generated output.
    model_output_parts = model_output.partition(self.start_token)
    model_output = (
        model_output_parts[2] if model_output_parts[1] else model_output_parts[0]
    )

    if self.end_token not in model_output:
        # No end token means thinking is disabled or the model
        # did not produce reasoning. Treat everything as content.
        return None, model_output

    # Extract reasoning content from the model output.
    reasoning, _, content = model_output.partition(self.end_token)
    final_content = content or None
    return reasoning, final_content
```
extract_reasoning_streaming(
previous_text: str,
current_text: str,
delta_text: str,
previous_token_ids: Sequence[int],
current_token_ids: Sequence[int],
delta_token_ids: Sequence[int],
) -> DeltaMessage | None
Extract reasoning content from a streaming delta.
Since <think> is placed in the prompt by the chat template, all generated tokens before </think> are reasoning and tokens after are content.
NOTE: When thinking is disabled, no think tokens appear in the generated output. The serving layer detects this via prompt_is_reasoning_end and routes deltas as content without calling this method.
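To show how deltas are routed, here is a simplified, self-contained simulation of the per-delta classification. The real method matches start_token_id/end_token_id against the delta's token ids; this sketch tracks only the </think> string, and classify_deltas plus the sample deltas are illustrative stand-ins, not the vLLM API.

```python
# Simplified simulation of the streaming classification described above.
# The real parser works on token ids; this sketch only tracks "</think>".
END = "</think>"

def classify_deltas(deltas: list[str]) -> list[dict[str, str | None]]:
    seen_end = False
    messages: list[dict[str, str | None]] = []
    for delta in deltas:
        if END in delta:
            # End marker in this delta: split reasoning from content.
            reasoning, _, content = delta.partition(END)
            seen_end = True
            if not reasoning and not content:
                # A bare "</think>" delta yields no message,
                # mirroring the parser's `return None`.
                continue
            messages.append(
                {"reasoning": reasoning or None, "content": content or None}
            )
        elif seen_end:
            # End marker already passed: everything is content now.
            messages.append({"reasoning": None, "content": delta})
        else:
            # No end marker yet: still in the reasoning phase.
            messages.append({"reasoning": delta, "content": None})
    return messages

# Hypothetical deltas as they might arrive from the detokenizer.
for message in classify_deltas(["Check the units", " first.", "</think>", "It is 3 m."]):
    print(message)
# {'reasoning': 'Check the units', 'content': None}
# {'reasoning': ' first.', 'content': None}
# {'reasoning': None, 'content': 'It is 3 m.'}
```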
Source code in vllm/reasoning/qwen3_reasoning_parser.py
```python
def extract_reasoning_streaming(
    self,
    previous_text: str,
    current_text: str,
    delta_text: str,
    previous_token_ids: Sequence[int],
    current_token_ids: Sequence[int],
    delta_token_ids: Sequence[int],
) -> DeltaMessage | None:
    """
    Extract reasoning content from a streaming delta.

    Since <think> is placed in the prompt by the chat template, all
    generated tokens before </think> are reasoning and tokens after
    are content.

    NOTE: When thinking is disabled, no think tokens appear in the
    generated output. The serving layer detects this via
    prompt_is_reasoning_end and routes deltas as content without
    calling this method.
    """
    # Strip <think> from delta if present (old template / edge case
    # where the model generates <think> itself).
    if self.start_token_id in delta_token_ids:
        start_idx = delta_text.find(self.start_token)
        if start_idx >= 0:
            delta_text = delta_text[start_idx + len(self.start_token) :]

    if self.end_token_id in delta_token_ids:
        # End token in this delta: split reasoning from content.
        end_index = delta_text.find(self.end_token)
        if end_index >= 0:
            reasoning = delta_text[:end_index]
            content = delta_text[end_index + len(self.end_token) :]
            if not reasoning and not content:
                return None
            return DeltaMessage(
                reasoning=reasoning if reasoning else None,
                content=content if content else None,
            )
        # end_token_id in IDs but not in text (already stripped)
        return None

    # No end token in this delta.
    if not delta_text:
        # Nothing left after stripping start token.
        return None
    elif self.end_token_id in previous_token_ids:
        # End token already passed: everything is content now.
        return DeltaMessage(content=delta_text)
    else:
        # No end token yet: still in reasoning phase.
        return DeltaMessage(reasoning=delta_text)
```