format code

liangxinbing 2025-03-16 12:57:06 +08:00
parent 4783f8a0d6
commit 5e35f01ea8
2 changed files with 16 additions and 12 deletions

View File

@@ -1,8 +1,9 @@
 import asyncio
 from typing import List
 
-from app.config import config
 from tenacity import retry, stop_after_attempt, wait_exponential
+
+from app.config import config
 from app.tool.base import BaseTool
 from app.tool.search import (
     BaiduSearchEngine,
@@ -11,6 +12,7 @@ from app.tool.search import (
     WebSearchEngine,
 )
 
+
 class WebSearch(BaseTool):
     name: str = "web_search"
     description: str = """Perform a web search and return a list of relevant links.
@@ -52,7 +54,9 @@ class WebSearch(BaseTool):
         for engine_name in engine_order:
             engine = self._search_engine[engine_name]
             try:
-                links = await self._perform_search_with_engine(engine, query, num_results)
+                links = await self._perform_search_with_engine(
+                    engine, query, num_results
+                )
                 if links:
                     return links
             except Exception as e:
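The wrapped call above lives in the tool's engine-fallback loop, and the tenacity imports reordered at the top of the file suggest _perform_search_with_engine carries a retry policy; the diff itself only reflows the long await call to fit the formatter's line length, so behavior is unchanged. A minimal sketch of how those pieces could fit together, assuming the execute() signature, the engine registry contents, each engine's perform_search() method, and the retry parameters (none of which appear in this diff):

# Sketch only: everything except the fallback loop and the
# _perform_search_with_engine call is an assumption inferred from the diff.
from typing import List

from tenacity import retry, stop_after_attempt, wait_exponential


class WebSearchSketch:
    def __init__(self, engines: dict):
        # e.g. {"google": GoogleSearchEngine(), "baidu": BaiduSearchEngine(), ...}
        self._search_engine = engines

    @retry(
        stop=stop_after_attempt(3),  # give a flaky engine a few attempts
        wait=wait_exponential(multiplier=1, min=1, max=10),  # back off between tries
    )
    async def _perform_search_with_engine(
        self, engine, query: str, num_results: int
    ) -> List[str]:
        # Each engine object is assumed to expose a perform_search() method.
        return list(engine.perform_search(query, num_results=num_results))

    async def execute(self, query: str, num_results: int = 10) -> List[str]:
        # Try each configured engine in order; on failure or empty results,
        # fall through to the next one instead of failing the tool call.
        for engine_name in list(self._search_engine):
            engine = self._search_engine[engine_name]
            try:
                links = await self._perform_search_with_engine(
                    engine, query, num_results
                )
                if links:
                    return links
            except Exception as e:
                print(f"{engine_name} search failed: {e}")
        return []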

View File

@@ -1,6 +1,6 @@
 # Global LLM configuration
 [llm]
-model = "claude-3-7-sonnet" # The LLM model to use
+model = "gpt-4o" # The LLM model to use
 base_url = "https://api.openai.com/v1" # API endpoint URL
 api_key = "sk-..." # Your API key
 max_tokens = 8192 # Maximum number of tokens in the response
@@ -26,7 +26,7 @@ temperature = 0.0 # Controls randomness
 
 # Optional configuration for specific LLM models
 [llm.vision]
-model = "claude-3-7-sonnet" # The vision model to use
+model = "gpt-4o" # The vision model to use
 base_url = "https://api.openai.com/v1" # API endpoint URL for vision model
 api_key = "sk-..." # Your API key for vision model
 max_tokens = 8192 # Maximum number of tokens in the response
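These values are consumed on the Python side through app.config (imported in the file above). As a rough sketch of what reading this TOML amounts to, assuming the file is saved as config/config.toml and using the standard-library parser rather than the project's actual loader:

# Sketch only: the project reads this file via app.config; the path and the
# helper below are illustrative assumptions, not its real API.
import tomllib  # standard-library TOML parser, Python 3.11+


def load_llm_config(path: str = "config/config.toml") -> dict:
    """Return the [llm] table; [llm.vision] appears as a nested dict inside it."""
    with open(path, "rb") as f:
        cfg = tomllib.load(f)
    return cfg["llm"]


if __name__ == "__main__":
    llm = load_llm_config()
    print(llm["model"], llm["base_url"], llm["max_tokens"])
    # The [llm.vision] overrides are parsed as a sub-table of [llm]:
    vision = llm.get("vision", {})
    print(vision.get("model", llm["model"]))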