Skip to content

vllm.reasoning.identity_reasoning_parser

logger module-attribute

logger = init_logger(__name__)

IdentityReasoningParser

Bases: ReasoningParser

Identity reasoning parser.

This parser does not attempt to parse or strip out reasoning tokens. It treats the entire model output as content and ignores reasoning.

Source code in vllm/reasoning/identity_reasoning_parser.py
class IdentityReasoningParser(ReasoningParser):
    """Pass-through reasoning parser.

    Never detects or strips a reasoning section: every token the model
    produces is reported verbatim as content, and reasoning is always
    considered absent/finished.
    """

    def __init__(self, tokenizer: PreTrainedTokenizerBase, *args, **kwargs):
        super().__init__(tokenizer, *args, **kwargs)
        # Even a parser that does nothing still requires a tokenizer,
        # because the base-class contract demands one.
        if self.model_tokenizer:
            return
        raise ValueError(
            "The model tokenizer must be passed to the ReasoningParser "
            "constructor during construction."
        )

    def is_reasoning_end(self, input_ids: list[int]) -> bool:
        """Reasoning is treated as already over, regardless of the tokens."""
        return True

    def extract_content_ids(self, input_ids: list[int]) -> list[int]:
        """Hand back the full token sequence: all tokens are content."""
        content_ids = input_ids
        return content_ids

    def extract_reasoning_content_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
    ) -> DeltaMessage | None:
        """Forward the streamed delta as plain content; skip empty deltas."""
        if not delta_text:
            return None
        return DeltaMessage(content=delta_text)

    def extract_reasoning_content(
        self, model_output: str, request: ChatCompletionRequest
    ) -> tuple[str | None, str | None]:
        """Return (reasoning, content) with no reasoning ever separated."""
        reasoning_content = None
        content = model_output
        return reasoning_content, content

__init__

__init__(
    tokenizer: PreTrainedTokenizerBase, *args, **kwargs
)
Source code in vllm/reasoning/identity_reasoning_parser.py
def __init__(self, tokenizer: PreTrainedTokenizerBase, *args, **kwargs):
    """Initialize the parser, insisting that a tokenizer was supplied."""
    super().__init__(tokenizer, *args, **kwargs)
    # Guard clause: bail out early when the base class stored a tokenizer.
    if self.model_tokenizer:
        return
    raise ValueError(
        "The model tokenizer must be passed to the ReasoningParser "
        "constructor during construction."
    )

extract_content_ids

extract_content_ids(input_ids: list[int]) -> list[int]
Source code in vllm/reasoning/identity_reasoning_parser.py
def extract_content_ids(self, input_ids: list[int]) -> list[int]:
    """Treat every token id as content and return the sequence untouched."""
    content_ids = input_ids
    return content_ids

extract_reasoning_content

extract_reasoning_content(
    model_output: str, request: ChatCompletionRequest
) -> tuple[str | None, str | None]
Source code in vllm/reasoning/identity_reasoning_parser.py
def extract_reasoning_content(
    self, model_output: str, request: ChatCompletionRequest
) -> tuple[str | None, str | None]:
    """Split nothing: reasoning is always None, content is the raw output."""
    reasoning_content = None
    content = model_output
    return reasoning_content, content

extract_reasoning_content_streaming

extract_reasoning_content_streaming(
    previous_text: str,
    current_text: str,
    delta_text: str,
    previous_token_ids: Sequence[int],
    current_token_ids: Sequence[int],
    delta_token_ids: Sequence[int],
) -> DeltaMessage | None
Source code in vllm/reasoning/identity_reasoning_parser.py
def extract_reasoning_content_streaming(
    self,
    previous_text: str,
    current_text: str,
    delta_text: str,
    previous_token_ids: Sequence[int],
    current_token_ids: Sequence[int],
    delta_token_ids: Sequence[int],
) -> DeltaMessage | None:
    """Stream the delta through as plain content; emit nothing when empty."""
    if not delta_text:
        return None
    return DeltaMessage(content=delta_text)

is_reasoning_end

is_reasoning_end(input_ids: list[int]) -> bool
Source code in vllm/reasoning/identity_reasoning_parser.py
def is_reasoning_end(self, input_ids: list[int]) -> bool:
    """Report reasoning as finished no matter what tokens were seen."""
    del input_ids  # intentionally unused: there are no reasoning markers
    return True