From eefddb41051736e5496c139490d43b977e58d841 Mon Sep 17 00:00:00 2001
From: Suchintan
Date: Wed, 16 Apr 2025 21:34:00 -0400
Subject: [PATCH] GPT-o4-mini and GPT-o3 support is here (#2170)

---
 setup.sh                                     |  2 ++
 skyvern/cli/commands.py                      |  2 ++
 skyvern/forge/sdk/api/llm/config_registry.py | 32 +++++++++++++++++++-
 3 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/setup.sh b/setup.sh
index 48c50e63..8321f730 100755
--- a/setup.sh
+++ b/setup.sh
@@ -57,6 +57,8 @@ setup_llm_providers() {
             "OPENAI_GPT4_1_MINI"
             "OPENAI_GPT4_1_NANO"
             "OPENAI_GPT4O"
+            "OPENAI_O4_MINI"
+            "OPENAI_O3"
         )
     fi
 else
diff --git a/skyvern/cli/commands.py b/skyvern/cli/commands.py
index 46348246..1e8bca78 100644
--- a/skyvern/cli/commands.py
+++ b/skyvern/cli/commands.py
@@ -227,6 +227,8 @@ def setup_llm_providers() -> None:
                 "OPENAI_GPT4_1_MINI",
                 "OPENAI_GPT4_1_NANO",
                 "OPENAI_GPT4O",
+                "OPENAI_O4_MINI",
+                "OPENAI_O3",
             ]
         )
     else:
diff --git a/skyvern/forge/sdk/api/llm/config_registry.py b/skyvern/forge/sdk/api/llm/config_registry.py
index 500465d9..6536f6cf 100644
--- a/skyvern/forge/sdk/api/llm/config_registry.py
+++ b/skyvern/forge/sdk/api/llm/config_registry.py
@@ -148,6 +148,36 @@ if settings.ENABLE_OPENAI:
             max_completion_tokens=16384,
         ),
     )
+    LLMConfigRegistry.register_config(
+        "OPENAI_O4_MINI",
+        LLMConfig(
+            "o4-mini",
+            ["OPENAI_API_KEY"],
+            supports_vision=False,
+            add_assistant_prefix=False,
+            max_completion_tokens=16384,
+            temperature=None,  # Temperature isn't supported in the O-model series
+            reasoning_effort="high",
+            litellm_params=LiteLLMParams(
+                drop_params=True,  # type: ignore
+            ),
+        ),
+    )
+    LLMConfigRegistry.register_config(
+        "OPENAI_O3",
+        LLMConfig(
+            "o3",
+            ["OPENAI_API_KEY"],
+            supports_vision=False,
+            add_assistant_prefix=False,
+            max_completion_tokens=16384,
+            temperature=None,  # Temperature isn't supported in the O-model series
+            reasoning_effort="high",
+            litellm_params=LiteLLMParams(
+                drop_params=True,  # type: ignore
+            ),
+        ),
+    )


 if settings.ENABLE_ANTHROPIC:
@@ -343,7 +373,7 @@ if settings.ENABLE_AZURE_O3_MINI:
             add_assistant_prefix=False,
             max_completion_tokens=16384,
             temperature=None,  # Temperature isn't supported in the O-model series
-            reasoning_effort="low",
+            reasoning_effort="high",
         ),
     )
