format code

This commit is contained in:
liangxinbing 2025-03-16 12:57:06 +08:00
parent 4783f8a0d6
commit 5e35f01ea8
2 changed files with 16 additions and 12 deletions

View File

@@ -1,8 +1,9 @@
import asyncio import asyncio
from typing import List from typing import List
from app.config import config
from tenacity import retry, stop_after_attempt, wait_exponential from tenacity import retry, stop_after_attempt, wait_exponential
from app.config import config
from app.tool.base import BaseTool from app.tool.base import BaseTool
from app.tool.search import ( from app.tool.search import (
BaiduSearchEngine, BaiduSearchEngine,
@@ -11,10 +12,11 @@ from app.tool.search import (
WebSearchEngine, WebSearchEngine,
) )
class WebSearch(BaseTool): class WebSearch(BaseTool):
name: str = "web_search" name: str = "web_search"
description: str = """Perform a web search and return a list of relevant links. description: str = """Perform a web search and return a list of relevant links.
This function attempts to use the primary search engine API to get up-to-date results. This function attempts to use the primary search engine API to get up-to-date results.
If an error occurs, it falls back to an alternative search engine.""" If an error occurs, it falls back to an alternative search engine."""
parameters: dict = { parameters: dict = {
"type": "object", "type": "object",
@@ -52,13 +54,15 @@ class WebSearch(BaseTool):
for engine_name in engine_order: for engine_name in engine_order:
engine = self._search_engine[engine_name] engine = self._search_engine[engine_name]
try: try:
links = await self._perform_search_with_engine(engine, query, num_results) links = await self._perform_search_with_engine(
engine, query, num_results
)
if links: if links:
return links return links
except Exception as e: except Exception as e:
print(f"Search engine '{engine_name}' failed with error: {e}") print(f"Search engine '{engine_name}' failed with error: {e}")
return [] return []
def _get_engine_order(self) -> List[str]: def _get_engine_order(self) -> List[str]:
""" """
Determines the order in which to try search engines. Determines the order in which to try search engines.
@@ -78,18 +82,18 @@ class WebSearch(BaseTool):
if key not in engine_order: if key not in engine_order:
engine_order.append(key) engine_order.append(key)
return engine_order return engine_order
@retry( @retry(
stop=stop_after_attempt(3), stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=1, max=10), wait=wait_exponential(multiplier=1, min=1, max=10),
) )
async def _perform_search_with_engine( async def _perform_search_with_engine(
self, self,
engine: WebSearchEngine, engine: WebSearchEngine,
query: str, query: str,
num_results: int, num_results: int,
) -> List[str]: ) -> List[str]:
loop = asyncio.get_event_loop() loop = asyncio.get_event_loop()
return await loop.run_in_executor( return await loop.run_in_executor(
None, lambda: list(engine.perform_search(query, num_results=num_results)) None, lambda: list(engine.perform_search(query, num_results=num_results))
) )

View File

@@ -1,6 +1,6 @@
# Global LLM configuration # Global LLM configuration
[llm] [llm]
model = "claude-3-7-sonnet" # The LLM model to use model = "gpt-4o" # The LLM model to use
base_url = "https://api.openai.com/v1" # API endpoint URL base_url = "https://api.openai.com/v1" # API endpoint URL
api_key = "sk-..." # Your API key api_key = "sk-..." # Your API key
max_tokens = 8192 # Maximum number of tokens in the response max_tokens = 8192 # Maximum number of tokens in the response
@@ -26,7 +26,7 @@ temperature = 0.0 # Controls randomness
# Optional configuration for specific LLM models # Optional configuration for specific LLM models
[llm.vision] [llm.vision]
model = "claude-3-7-sonnet" # The vision model to use model = "gpt-4o" # The vision model to use
base_url = "https://api.openai.com/v1" # API endpoint URL for vision model base_url = "https://api.openai.com/v1" # API endpoint URL for vision model
api_key = "sk-..." # Your API key for vision model api_key = "sk-..." # Your API key for vision model
max_tokens = 8192 # Maximum number of tokens in the response max_tokens = 8192 # Maximum number of tokens in the response