Add GEMINI_3.0_FLASH support and update docker-compose.yml (#4687)

Co-authored-by: Claude <noreply@anthropic.com>
This commit is contained in:
Suchintan
2026-02-11 22:30:40 -05:00
committed by GitHub
parent dbca5f7094
commit fd241dba54
3 changed files with 20 additions and 7 deletions

View File

@@ -561,7 +561,7 @@ Recommended `LLM_KEY`: `BEDROCK_ANTHROPIC_CLAUDE4.5_OPUS_INFERENCE_PROFILE`, `BE
| `ENABLE_GEMINI` | Register Gemini models| Boolean | `true`, `false` |
| `GEMINI_API_KEY` | Gemini API Key| String | `your_google_gemini_api_key`|
Recommended `LLM_KEY`: `GEMINI_2.5_PRO`, `GEMINI_2.5_FLASH`, `GEMINI_2.5_PRO_PREVIEW`, `GEMINI_2.5_FLASH_PREVIEW`
Recommended `LLM_KEY`: `GEMINI_3.0_FLASH`, `GEMINI_2.5_PRO`, `GEMINI_2.5_FLASH`, `GEMINI_2.5_PRO_PREVIEW`, `GEMINI_2.5_FLASH_PREVIEW`
##### Ollama
| Variable | Description| Type | Sample Value|

View File

@@ -52,12 +52,12 @@ services:
# - ENABLE_OPENAI=true
# - LLM_KEY=OPENAI_GPT4O
# - OPENAI_API_KEY=<your_openai_key>
# Gemini Support (via Vertex AI):
# - ENABLE_VERTEX_AI=true
# - LLM_KEY=VERTEX_GEMINI_3.0_FLASH
# - GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account.json
# - GCP_PROJECT_ID=your-gcp-project-id
# - GCP_REGION=us-central1
# Gemini Support:
# Gemini is a new LLM provider that is currently in beta. You can use it by uncommenting the following lines and filling in your Gemini API key.
# - LLM_KEY=GEMINI
# - ENABLE_GEMINI=true
# - GEMINI_API_KEY=YOUR_GEMINI_KEY
# - LLM_KEY=GEMINI_3.0_FLASH
# If you want to use another LLM provider, such as Azure or Anthropic:
# - ENABLE_ANTHROPIC=true
# - LLM_KEY=ANTHROPIC_CLAUDE3.5_SONNET

View File

@@ -995,6 +995,19 @@ if settings.ENABLE_GEMINI:
),
),
)
# Register the Gemini 3.0 Flash (preview) model so it can be selected with
# LLM_KEY=GEMINI_3.0_FLASH. Per the hunk header, this runs inside the
# `if settings.ENABLE_GEMINI:` block, i.e. only when Gemini support is enabled.
LLMConfigRegistry.register_config(
    "GEMINI_3.0_FLASH",
    LLMConfig(
        # Model identifier — presumably a litellm "provider/model" route string; confirm
        "gemini/gemini-3-flash-preview",
        # Environment variables this model requires to be configured.
        ["GEMINI_API_KEY"],
        supports_vision=True,
        add_assistant_prefix=False,
        # NOTE(review): 65536 looks like the model's output-token ceiling — confirm
        # against the provider's published limits.
        max_completion_tokens=65536,
        litellm_params=LiteLLMParams(
            # Request richer reasoning output only when thought inclusion is enabled
            # via settings; otherwise keep thinking minimal.
            thinking_level="medium" if settings.GEMINI_INCLUDE_THOUGHT else "minimal",
        ),
    ),
)
if settings.ENABLE_NOVITA: