vllm.reasoning.deepseek_v3_reasoning_parser

logger module-attribute

logger = init_logger(__name__)

DeepSeekV3ReasoningParser

Bases: ReasoningParser

V3 parser that delegates to either DeepSeekR1ReasoningParser or IdentityReasoningParser based on thinking and separate_reasoning.

Source code in vllm/reasoning/deepseek_v3_reasoning_parser.py
@ReasoningParserManager.register_module("deepseek_v3")
class DeepSeekV3ReasoningParser(ReasoningParser):
    """
    V3 parser that delegates to either DeepSeekR1ReasoningParser or
    IdentityReasoningParser based on `thinking` and `separate_reasoning`.
    """

    def __init__(self, tokenizer: PreTrainedTokenizerBase, *args, **kwargs):
        super().__init__(tokenizer, *args, **kwargs)

        chat_kwargs = kwargs.pop("chat_template_kwargs", {}) or {}
        thinking = bool(chat_kwargs.pop("thinking", False))

        if thinking:
            self._parser = DeepSeekR1ReasoningParser(tokenizer, *args, **kwargs)
        else:
            self._parser = IdentityReasoningParser(tokenizer, *args, **kwargs)

    def is_reasoning_end(self, input_ids: Sequence[int]) -> bool:
        return self._parser.is_reasoning_end(input_ids)

    def extract_content_ids(self, input_ids: list[int]) -> list[int]:
        return self._parser.extract_content_ids(input_ids)

    def extract_reasoning_content(
        self, model_output: str, request: ChatCompletionRequest
    ) -> tuple[str | None, str | None]:
        return self._parser.extract_reasoning_content(model_output, request)

    def extract_reasoning_content_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
    ) -> DeltaMessage | None:
        return self._parser.extract_reasoning_content_streaming(
            previous_text,
            current_text,
            delta_text,
            previous_token_ids,
            current_token_ids,
            delta_token_ids,
        )
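
For illustration, a minimal usage sketch (not part of the module): the constructor pops chat_template_kwargs from its keyword arguments and selects the delegate from its thinking entry, so thinking=True yields a DeepSeekR1ReasoningParser and anything else falls back to IdentityReasoningParser. The checkpoint name below is only an example, the sketch assumes its tokenizer defines the think tokens the R1 delegate expects, and the private _parser attribute is inspected only to show which delegate was chosen.

from transformers import AutoTokenizer

from vllm.reasoning.deepseek_v3_reasoning_parser import DeepSeekV3ReasoningParser

# Example checkpoint; any tokenizer carrying the DeepSeek think tokens should work.
tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-V3.1")

# chat_template_kwargs with thinking=True selects the DeepSeekR1ReasoningParser delegate.
thinking_parser = DeepSeekV3ReasoningParser(
    tokenizer, chat_template_kwargs={"thinking": True}
)
print(type(thinking_parser._parser).__name__)  # DeepSeekR1ReasoningParser

# Without thinking (or with thinking=False) the IdentityReasoningParser delegate is
# used, so model output is treated as plain content.
plain_parser = DeepSeekV3ReasoningParser(tokenizer)
print(type(plain_parser._parser).__name__)  # IdentityReasoningParser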

_parser instance-attribute

_parser = DeepSeekR1ReasoningParser(
    tokenizer, *args, **kwargs
)

__init__

__init__(
    tokenizer: PreTrainedTokenizerBase, *args, **kwargs
)
Source code in vllm/reasoning/deepseek_v3_reasoning_parser.py
def __init__(self, tokenizer: PreTrainedTokenizerBase, *args, **kwargs):
    super().__init__(tokenizer, *args, **kwargs)

    chat_kwargs = kwargs.pop("chat_template_kwargs", {}) or {}
    thinking = bool(chat_kwargs.pop("thinking", False))

    if thinking:
        self._parser = DeepSeekR1ReasoningParser(tokenizer, *args, **kwargs)
    else:
        self._parser = IdentityReasoningParser(tokenizer, *args, **kwargs)

extract_content_ids

extract_content_ids(input_ids: list[int]) -> list[int]
Source code in vllm/reasoning/deepseek_v3_reasoning_parser.py
def extract_content_ids(self, input_ids: list[int]) -> list[int]:
    return self._parser.extract_content_ids(input_ids)

extract_reasoning_content

extract_reasoning_content(
    model_output: str, request: ChatCompletionRequest
) -> tuple[str | None, str | None]
Source code in vllm/reasoning/deepseek_v3_reasoning_parser.py
def extract_reasoning_content(
    self, model_output: str, request: ChatCompletionRequest
) -> tuple[str | None, str | None]:
    return self._parser.extract_reasoning_content(model_output, request)
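
A hedged illustration of the non-streaming path, reusing thinking_parser from the sketch above and assuming the DeepSeekR1ReasoningParser delegate splits the output on the </think> marker; the request below is a hypothetical minimal one.

from vllm.entrypoints.openai.protocol import ChatCompletionRequest

request = ChatCompletionRequest(
    model="deepseek-ai/DeepSeek-V3.1",
    messages=[{"role": "user", "content": "What is 2 + 2?"}],
)

model_output = "The user asks for a simple sum.</think>2 + 2 = 4."
reasoning, content = thinking_parser.extract_reasoning_content(model_output, request)
# With the R1 delegate, reasoning is expected to hold the text before </think> and
# content the text after it; the identity delegate would instead pass the whole
# output through as content with no reasoning.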

extract_reasoning_content_streaming

extract_reasoning_content_streaming(
    previous_text: str,
    current_text: str,
    delta_text: str,
    previous_token_ids: Sequence[int],
    current_token_ids: Sequence[int],
    delta_token_ids: Sequence[int],
) -> DeltaMessage | None
Source code in vllm/reasoning/deepseek_v3_reasoning_parser.py
def extract_reasoning_content_streaming(
    self,
    previous_text: str,
    current_text: str,
    delta_text: str,
    previous_token_ids: Sequence[int],
    current_token_ids: Sequence[int],
    delta_token_ids: Sequence[int],
) -> DeltaMessage | None:
    return self._parser.extract_reasoning_content_streaming(
        previous_text,
        current_text,
        delta_text,
        previous_token_ids,
        current_token_ids,
        delta_token_ids,
    )
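
A hedged sketch of how a serving loop might drive the streaming hook: each step passes the text and token ids accumulated so far plus the newly decoded delta, and any returned DeltaMessage carries the reasoning_content and/or content fragment for that step. generated_chunks below is a hypothetical iterable of per-step token-id lists from the engine.

previous_text, previous_token_ids = "", []
for delta_token_ids in generated_chunks:  # hypothetical per-step token-id lists
    delta_text = tokenizer.decode(delta_token_ids)
    current_text = previous_text + delta_text
    current_token_ids = previous_token_ids + delta_token_ids

    delta_message = thinking_parser.extract_reasoning_content_streaming(
        previous_text,
        current_text,
        delta_text,
        previous_token_ids,
        current_token_ids,
        delta_token_ids,
    )
    if delta_message is not None:
        print(delta_message.reasoning_content, delta_message.content)

    previous_text, previous_token_ids = current_text, current_token_ids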

is_reasoning_end

is_reasoning_end(input_ids: Sequence[int]) -> bool
Source code in vllm/reasoning/deepseek_v3_reasoning_parser.py
def is_reasoning_end(self, input_ids: Sequence[int]) -> bool:
    return self._parser.is_reasoning_end(input_ids)
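
A small hedged check of the delegated predicate, assuming the R1 delegate reports the reasoning section as finished once the </think> token id appears among the generated ids (the token name is an assumption here).

end_id = tokenizer.convert_tokens_to_ids("</think>")  # assumed token name
print(thinking_parser.is_reasoning_end([end_id]))  # expected True: end token seen
print(thinking_parser.is_reasoning_end([]))        # expected False: nothing generated yet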