update latest openai model configs for azure & openai (#2202)

This commit is contained in:
Shuchang Zheng
2025-04-21 14:44:15 +08:00
committed by GitHub
parent f238c991f7
commit 7a15ea7405
2 changed files with 161 additions and 9 deletions

View File

@@ -164,6 +164,41 @@ class Settings(BaseSettings):
AZURE_O3_MINI_API_BASE: str | None = None
AZURE_O3_MINI_API_VERSION: str | None = None
# AZURE gpt-4.1 — off by default; deployment name defaults to the model id
ENABLE_AZURE_GPT4_1: bool = False
AZURE_GPT4_1_DEPLOYMENT: str = "gpt-4.1"
AZURE_GPT4_1_API_KEY: str | None = None
AZURE_GPT4_1_API_BASE: str | None = None
AZURE_GPT4_1_API_VERSION: str = "2025-01-01-preview"
# AZURE gpt-4.1 mini — same pattern as gpt-4.1 above
ENABLE_AZURE_GPT4_1_MINI: bool = False
AZURE_GPT4_1_MINI_DEPLOYMENT: str = "gpt-4.1-mini"
AZURE_GPT4_1_MINI_API_KEY: str | None = None
AZURE_GPT4_1_MINI_API_BASE: str | None = None
AZURE_GPT4_1_MINI_API_VERSION: str = "2025-01-01-preview"
# AZURE gpt-4.1 nano — same pattern as gpt-4.1 above
ENABLE_AZURE_GPT4_1_NANO: bool = False
AZURE_GPT4_1_NANO_DEPLOYMENT: str = "gpt-4.1-nano"
AZURE_GPT4_1_NANO_API_KEY: str | None = None
AZURE_GPT4_1_NANO_API_BASE: str | None = None
AZURE_GPT4_1_NANO_API_VERSION: str = "2025-01-01-preview"
# AZURE o4-mini — reasoning model; API version shared with the gpt-4.1 family
ENABLE_AZURE_O4_MINI: bool = False
AZURE_O4_MINI_DEPLOYMENT: str = "o4-mini"
AZURE_O4_MINI_API_KEY: str | None = None
AZURE_O4_MINI_API_BASE: str | None = None
AZURE_O4_MINI_API_VERSION: str = "2025-01-01-preview"
# AZURE o3 — reasoning model; API version shared with the gpt-4.1 family
ENABLE_AZURE_O3: bool = False
AZURE_O3_DEPLOYMENT: str = "o3"
AZURE_O3_API_KEY: str | None = None
AZURE_O3_API_BASE: str | None = None
AZURE_O3_API_VERSION: str = "2025-01-01-preview"
# GEMINI
GEMINI_API_KEY: str | None = None

View File

@@ -69,7 +69,7 @@ if settings.ENABLE_OPENAI:
["OPENAI_API_KEY"],
supports_vision=True,
add_assistant_prefix=False,
max_completion_tokens=16384,
max_completion_tokens=32768,
),
)
LLMConfigRegistry.register_config(
@@ -79,7 +79,7 @@ if settings.ENABLE_OPENAI:
["OPENAI_API_KEY"],
supports_vision=True,
add_assistant_prefix=False,
max_completion_tokens=16384,
max_completion_tokens=32768,
),
)
LLMConfigRegistry.register_config(
@@ -89,7 +89,7 @@ if settings.ENABLE_OPENAI:
["OPENAI_API_KEY"],
supports_vision=True,
add_assistant_prefix=False,
max_completion_tokens=16384,
max_completion_tokens=32768,
),
)
LLMConfigRegistry.register_config(
@@ -153,9 +153,9 @@ if settings.ENABLE_OPENAI:
LLMConfig(
"o4-mini",
["OPENAI_API_KEY"],
supports_vision=False,
supports_vision=True,
add_assistant_prefix=False,
max_completion_tokens=16384,
max_completion_tokens=100000,
temperature=None, # Temperature isn't supported in the O-model series
reasoning_effort="high",
litellm_params=LiteLLMParams(
@@ -168,9 +168,9 @@ if settings.ENABLE_OPENAI:
LLMConfig(
"o3",
["OPENAI_API_KEY"],
supports_vision=False,
supports_vision=True,
add_assistant_prefix=False,
max_completion_tokens=16384,
max_completion_tokens=100000,
temperature=None, # Temperature isn't supported in the O-model series
reasoning_effort="high",
litellm_params=LiteLLMParams(
@@ -377,6 +377,123 @@ if settings.ENABLE_AZURE_O3_MINI:
),
)
if settings.ENABLE_AZURE_GPT4_1:
    # Azure-hosted GPT-4.1: build the config first, then register it under
    # its registry key. The required-settings list names the env vars that
    # must be present for this config to be usable.
    azure_gpt4_1_config = LLMConfig(
        f"azure/{settings.AZURE_GPT4_1_DEPLOYMENT}",
        [
            "AZURE_GPT4_1_DEPLOYMENT",
            "AZURE_GPT4_1_API_KEY",
            "AZURE_GPT4_1_API_BASE",
            "AZURE_GPT4_1_API_VERSION",
        ],
        supports_vision=True,
        add_assistant_prefix=False,
        max_completion_tokens=32768,
        litellm_params=LiteLLMParams(
            api_base=settings.AZURE_GPT4_1_API_BASE,
            api_key=settings.AZURE_GPT4_1_API_KEY,
            api_version=settings.AZURE_GPT4_1_API_VERSION,
            model_info={"model_name": "azure/gpt-4.1"},
        ),
    )
    LLMConfigRegistry.register_config("AZURE_OPENAI_GPT4_1", azure_gpt4_1_config)
if settings.ENABLE_AZURE_GPT4_1_MINI:
    # Azure-hosted GPT-4.1 mini: same shape as the full GPT-4.1 registration,
    # pointing at the mini deployment/credentials.
    azure_gpt4_1_mini_config = LLMConfig(
        f"azure/{settings.AZURE_GPT4_1_MINI_DEPLOYMENT}",
        [
            "AZURE_GPT4_1_MINI_DEPLOYMENT",
            "AZURE_GPT4_1_MINI_API_KEY",
            "AZURE_GPT4_1_MINI_API_BASE",
            "AZURE_GPT4_1_MINI_API_VERSION",
        ],
        supports_vision=True,
        add_assistant_prefix=False,
        max_completion_tokens=32768,
        litellm_params=LiteLLMParams(
            api_base=settings.AZURE_GPT4_1_MINI_API_BASE,
            api_key=settings.AZURE_GPT4_1_MINI_API_KEY,
            api_version=settings.AZURE_GPT4_1_MINI_API_VERSION,
            model_info={"model_name": "azure/gpt-4.1-mini"},
        ),
    )
    LLMConfigRegistry.register_config("AZURE_OPENAI_GPT4_1_MINI", azure_gpt4_1_mini_config)
if settings.ENABLE_AZURE_GPT4_1_NANO:
    # Azure-hosted GPT-4.1 nano: same shape as the full GPT-4.1 registration,
    # pointing at the nano deployment/credentials.
    azure_gpt4_1_nano_config = LLMConfig(
        f"azure/{settings.AZURE_GPT4_1_NANO_DEPLOYMENT}",
        [
            "AZURE_GPT4_1_NANO_DEPLOYMENT",
            "AZURE_GPT4_1_NANO_API_KEY",
            "AZURE_GPT4_1_NANO_API_BASE",
            "AZURE_GPT4_1_NANO_API_VERSION",
        ],
        supports_vision=True,
        add_assistant_prefix=False,
        max_completion_tokens=32768,
        litellm_params=LiteLLMParams(
            api_base=settings.AZURE_GPT4_1_NANO_API_BASE,
            api_key=settings.AZURE_GPT4_1_NANO_API_KEY,
            api_version=settings.AZURE_GPT4_1_NANO_API_VERSION,
            model_info={"model_name": "azure/gpt-4.1-nano"},
        ),
    )
    LLMConfigRegistry.register_config("AZURE_OPENAI_GPT4_1_NANO", azure_gpt4_1_nano_config)
if settings.ENABLE_AZURE_O4_MINI:
    # Azure-hosted o4-mini: larger completion budget (100k tokens) than the
    # gpt-4.1 family registrations above.
    azure_o4_mini_config = LLMConfig(
        f"azure/{settings.AZURE_O4_MINI_DEPLOYMENT}",
        [
            "AZURE_O4_MINI_DEPLOYMENT",
            "AZURE_O4_MINI_API_KEY",
            "AZURE_O4_MINI_API_BASE",
            "AZURE_O4_MINI_API_VERSION",
        ],
        supports_vision=True,
        add_assistant_prefix=False,
        max_completion_tokens=100000,
        litellm_params=LiteLLMParams(
            api_base=settings.AZURE_O4_MINI_API_BASE,
            api_key=settings.AZURE_O4_MINI_API_KEY,
            api_version=settings.AZURE_O4_MINI_API_VERSION,
            model_info={"model_name": "azure/o4-mini"},
        ),
    )
    LLMConfigRegistry.register_config("AZURE_OPENAI_O4_MINI", azure_o4_mini_config)
if settings.ENABLE_AZURE_O3:
    # Azure-hosted o3: mirrors the o4-mini registration, including the
    # 100k-token completion budget.
    azure_o3_config = LLMConfig(
        f"azure/{settings.AZURE_O3_DEPLOYMENT}",
        [
            "AZURE_O3_DEPLOYMENT",
            "AZURE_O3_API_KEY",
            "AZURE_O3_API_BASE",
            "AZURE_O3_API_VERSION",
        ],
        supports_vision=True,
        add_assistant_prefix=False,
        max_completion_tokens=100000,
        litellm_params=LiteLLMParams(
            api_base=settings.AZURE_O3_API_BASE,
            api_key=settings.AZURE_O3_API_KEY,
            api_version=settings.AZURE_O3_API_VERSION,
            model_info={"model_name": "azure/o3"},
        ),
    )
    LLMConfigRegistry.register_config("AZURE_OPENAI_O3", azure_o3_config)
if settings.ENABLE_GEMINI:
LLMConfigRegistry.register_config(
"GEMINI_FLASH_2_0",
@@ -425,7 +542,7 @@ if settings.ENABLE_GEMINI:
["GEMINI_API_KEY"],
supports_vision=True,
add_assistant_prefix=False,
max_completion_tokens=1048576,
max_completion_tokens=65536,
),
)
LLMConfigRegistry.register_config(
@@ -435,7 +552,7 @@ if settings.ENABLE_GEMINI:
["GEMINI_API_KEY"],
supports_vision=True,
add_assistant_prefix=False,
max_completion_tokens=1048576,
max_completion_tokens=65536,
),
)