fix linter errors for config registry & cli (#2292)

This commit is contained in:
Shuchang Zheng
2025-05-05 00:12:46 -07:00
committed by GitHub
parent c3072d7572
commit e2a82a75d9
4 changed files with 41 additions and 36 deletions

View File

@@ -7,9 +7,9 @@ import time
 import uuid
 from pathlib import Path
 from typing import Optional
-import requests
 from urllib.parse import urlparse
+import requests
 import typer
 import uvicorn
 from dotenv import load_dotenv, set_key
@@ -474,23 +474,23 @@ def setup_browser_config() -> tuple[str, Optional[str], Optional[str]]:
 print("\nTo use CDP connection, Chrome must be running with remote debugging enabled.")
 print("Example: chrome --remote-debugging-port=9222")
 print("Default debugging URL: http://localhost:9222")
 default_port = "9222"
 if remote_debugging_url is None:
 remote_debugging_url = "http://localhost:9222"
 elif ":" in remote_debugging_url.split("/")[-1]:
 default_port = remote_debugging_url.split(":")[-1].split("/")[0]
 parsed_url = urlparse(remote_debugging_url)
 version_url = f"{parsed_url.scheme}://{parsed_url.netloc}/json/version"
 print(f"\nChecking if Chrome is already running with remote debugging on port {default_port}...")
 try:
 response = requests.get(version_url, timeout=2)
 if response.status_code == 200:
 try:
 browser_info = response.json()
-print(f"Chrome is already running with remote debugging!")
+print("Chrome is already running with remote debugging!")
 if "Browser" in browser_info:
 print(f"Browser: {browser_info['Browser']}")
 if "webSocketDebuggerUrl" in browser_info:
@@ -501,9 +501,9 @@ def setup_browser_config() -> tuple[str, Optional[str], Optional[str]]:
 print("Port is in use, but doesn't appear to be Chrome with remote debugging.")
 except requests.RequestException:
 print(f"No Chrome instance detected on {remote_debugging_url}")
 print("\nExecuting Chrome with remote debugging enabled:")
 if host_system == "darwin" or host_system == "linux":
 chrome_cmd = f'{browser_location} --remote-debugging-port={default_port} --user-data-dir="$HOME/chrome-cdp-profile" --no-first-run --no-default-browser-check'
 print(f" {chrome_cmd}")
@@ -512,9 +512,11 @@ def setup_browser_config() -> tuple[str, Optional[str], Optional[str]]:
 print(f" {chrome_cmd}")
 else:
 print("Unsupported OS for Chrome configuration. Please set it up manually.")
 # Ask user if they want to execute the command
-execute_browser = input("\nWould you like to start Chrome with remote debugging now? (y/n) [y]: ").strip().lower()
+execute_browser = (
+    input("\nWould you like to start Chrome with remote debugging now? (y/n) [y]: ").strip().lower()
+)
 if not execute_browser or execute_browser == "y":
 print(f"Starting Chrome with remote debugging on port {default_port}...")
 try:
@@ -525,12 +527,12 @@ def setup_browser_config() -> tuple[str, Optional[str], Optional[str]]:
 subprocess.Popen(f"start {chrome_cmd}", shell=True)
 elif host_system == "wsl":
 subprocess.Popen(f"cmd.exe /c start {chrome_cmd}", shell=True)
 print(f"Chrome started successfully. Connecting to {remote_debugging_url}")
 print("Waiting for Chrome to initialize...")
 time.sleep(2)
 try:
 verification_response = requests.get(version_url, timeout=5)
 if verification_response.status_code == 200:
@@ -549,7 +551,7 @@ def setup_browser_config() -> tuple[str, Optional[str], Optional[str]]:
 except Exception as e:
 print(f"Error starting Chrome: {e}")
 print("Please start Chrome manually using the command above.")
 remote_debugging_url = input("Enter remote debugging URL (press Enter for default): ").strip()
 if not remote_debugging_url:
 remote_debugging_url = "http://localhost:9222"

View File

@@ -218,7 +218,7 @@ class Settings(BaseSettings):
 ENABLE_OLLAMA: bool = False
 OLLAMA_SERVER_URL: str | None = None
 OLLAMA_MODEL: str | None = None
 # OPENROUTER
 ENABLE_OPENROUTER: bool = False
 OPENROUTER_API_KEY: str | None = None

View File

@@ -807,20 +807,20 @@ if settings.ENABLE_VERTEX_AI:
 if settings.ENABLE_OLLAMA:
 # Register Ollama model configured in settings
 if settings.OLLAMA_MODEL:
-model_name = settings.OLLAMA_MODEL
+ollama_model_name = settings.OLLAMA_MODEL
 LLMConfigRegistry.register_config(
 "OLLAMA",
 LLMConfig(
-f"ollama/{model_name}",
+f"ollama/{ollama_model_name}",
 ["OLLAMA_SERVER_URL", "OLLAMA_MODEL"],
 supports_vision=False,  # Ollama does not support vision yet
 add_assistant_prefix=False,
 max_completion_tokens=settings.LLM_CONFIG_MAX_TOKENS,
 litellm_params=LiteLLMParams(
 api_base=settings.OLLAMA_SERVER_URL,
 api_key=None,
 api_version=None,
-model_info={"model_name": f"ollama/{model_name}"},
+model_info={"model_name": f"ollama/{ollama_model_name}"},
 ),
 ),
 )
@@ -828,31 +828,31 @@ if settings.ENABLE_OLLAMA:
 if settings.ENABLE_OPENROUTER:
 # Register OpenRouter model configured in settings
 if settings.OPENROUTER_MODEL:
-model_name = settings.OPENROUTER_MODEL
+openrouter_model_name = settings.OPENROUTER_MODEL
 LLMConfigRegistry.register_config(
 "OPENROUTER",
 LLMConfig(
-f"openrouter/{model_name}",
+f"openrouter/{openrouter_model_name}",
 ["OPENROUTER_API_KEY", "OPENROUTER_MODEL"],
 supports_vision=settings.LLM_CONFIG_SUPPORT_VISION,
 add_assistant_prefix=False,
 max_completion_tokens=settings.LLM_CONFIG_MAX_TOKENS,
 litellm_params=LiteLLMParams(
 api_key=settings.OPENROUTER_API_KEY,
 api_base=settings.OPENROUTER_API_BASE,
 api_version=None,
-model_info={"model_name": f"openrouter/{model_name}"},
+model_info={"model_name": f"openrouter/{openrouter_model_name}"},
 ),
 ),
 )
 if settings.ENABLE_GROQ:
 # Register Groq model configured in settings
 if settings.GROQ_MODEL:
-model_name = settings.GROQ_MODEL
+groq_model_name = settings.GROQ_MODEL
 LLMConfigRegistry.register_config(
 "GROQ",
 LLMConfig(
-f"groq/{model_name}",
+f"groq/{groq_model_name}",
 ["GROQ_API_KEY", "GROQ_MODEL"],
 supports_vision=settings.LLM_CONFIG_SUPPORT_VISION,
 add_assistant_prefix=False,
@@ -861,7 +861,7 @@ if settings.ENABLE_GROQ:
 api_key=settings.GROQ_API_KEY,
 api_version=None,
 api_base=settings.GROQ_API_BASE,
-model_info={"model_name": f"groq/{model_name}"},
+model_info={"model_name": f"groq/{groq_model_name}"},
 ),
 ),
 )
@@ -870,10 +870,10 @@ if settings.ENABLE_GROQ:
 # See documentation: https://docs.litellm.ai/docs/providers/openai_compatible
 if settings.ENABLE_OPENAI_COMPATIBLE:
 # Check for required model name
-model_key = settings.OPENAI_COMPATIBLE_MODEL_KEY
-model_name = settings.OPENAI_COMPATIBLE_MODEL_NAME
-if not model_name:
+openai_compatible_model_key = settings.OPENAI_COMPATIBLE_MODEL_KEY
+openai_compatible_model_name = settings.OPENAI_COMPATIBLE_MODEL_NAME
+if not openai_compatible_model_name:
 raise InvalidLLMConfigError(
 "OPENAI_COMPATIBLE_MODEL_NAME is required but not set. OpenAI-compatible model will not be registered."
 )
@@ -886,14 +886,14 @@ if settings.ENABLE_OPENAI_COMPATIBLE:
 api_key=settings.OPENAI_COMPATIBLE_API_KEY,
 api_base=settings.OPENAI_COMPATIBLE_API_BASE,
 api_version=settings.OPENAI_COMPATIBLE_API_VERSION,
-model_info={"model_name": f"openai/{model_name}"},
+model_info={"model_name": f"openai/{openai_compatible_model_name}"},
 )
 # Configure LLMConfig
 LLMConfigRegistry.register_config(
-model_key,
+openai_compatible_model_key,
 LLMConfig(
-f"openai/{model_name}",  # Add openai/ prefix for liteLLM
+f"openai/{openai_compatible_model_name}",  # Add openai/ prefix for liteLLM
 required_env_vars,
 supports_vision=settings.OPENAI_COMPATIBLE_SUPPORTS_VISION,
 add_assistant_prefix=settings.OPENAI_COMPATIBLE_ADD_ASSISTANT_PREFIX,
@@ -905,4 +905,7 @@ if settings.ENABLE_OPENAI_COMPATIBLE:
 reasoning_effort=settings.OPENAI_COMPATIBLE_REASONING_EFFORT,
 ),
 )
-LOG.info(f"Registered OpenAI-compatible model with key {model_key}", model_name=model_name)
+LOG.info(
+    f"Registered OpenAI-compatible model with key {openai_compatible_model_key}",
+    model_name=openai_compatible_model_name,
+)

View File

@@ -471,9 +471,9 @@ async def _create_cdp_connection_browser(
 "--no-first-run",
 "--no-default-browser-check",
 "--remote-debugging-address=0.0.0.0",
 ],
 stdout=subprocess.PIPE,
-stderr=subprocess.PIPE
+stderr=subprocess.PIPE,
 )
 # Add small delay to allow browser to start
 time.sleep(2)