Add back support for older GPT versions by accepting max_tokens (#1860)

This commit is contained in:
Shuchang Zheng
2025-03-02 00:16:00 -05:00
committed by GitHub
parent 8da1a9babf
commit 179c12db8c
2 changed files with 9 additions and 3 deletions

View File

@@ -36,7 +36,8 @@ class LLMConfigBase:
@dataclass(frozen=True)
class LLMConfig(LLMConfigBase):
litellm_params: Optional[LiteLLMParams] = field(default=None)
max_completion_tokens: int = SettingsManager.get_settings().LLM_CONFIG_MAX_TOKENS
max_tokens: int | None = SettingsManager.get_settings().LLM_CONFIG_MAX_TOKENS
max_completion_tokens: int | None = None
temperature: float | None = SettingsManager.get_settings().LLM_CONFIG_TEMPERATURE
reasoning_effort: str | None = None
@@ -74,7 +75,8 @@ class LLMRouterConfig(LLMConfigBase):
allowed_fails: int | None = None
allowed_fails_policy: AllowedFailsPolicy | None = None
cooldown_time: float | None = None
max_completion_tokens: int = SettingsManager.get_settings().LLM_CONFIG_MAX_TOKENS
max_tokens: int | None = SettingsManager.get_settings().LLM_CONFIG_MAX_TOKENS
max_completion_tokens: int | None = None
reasoning_effort: str | None = None
temperature: float | None = SettingsManager.get_settings().LLM_CONFIG_TEMPERATURE