format code

parent 4783f8a0d6
commit 5e35f01ea8
@@ -1,8 +1,9 @@
 import asyncio
 from typing import List

-from app.config import config
 from tenacity import retry, stop_after_attempt, wait_exponential
+
+from app.config import config
 from app.tool.base import BaseTool
 from app.tool.search import (
     BaiduSearchEngine,
@@ -11,10 +12,11 @@ from app.tool.search import (
     WebSearchEngine,
 )

+
 class WebSearch(BaseTool):
     name: str = "web_search"
-    description: str = """Perform a web search and return a list of relevant links.
-This function attempts to use the primary search engine API to get up-to-date results.
+    description: str = """Perform a web search and return a list of relevant links.
+This function attempts to use the primary search engine API to get up-to-date results.
 If an error occurs, it falls back to an alternative search engine."""
     parameters: dict = {
         "type": "object",
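Note: the name / description / parameters trio above is the usual shape of an LLM function-calling tool declaration. The following is a minimal, hypothetical sketch (not the project's BaseTool, which is outside this diff) of how such a declaration could be packaged for a chat-completions style API; the property names query and num_results mirror the parameters used later in this diff, and the exact schema is an assumption.

from typing import Any, Dict


def to_function_schema(name: str, description: str, parameters: Dict[str, Any]) -> Dict[str, Any]:
    """Wrap a tool declaration in a generic {"type": "function", ...} envelope."""
    return {
        "type": "function",
        "function": {"name": name, "description": description, "parameters": parameters},
    }


schema = to_function_schema(
    name="web_search",
    description="Perform a web search and return a list of relevant links.",
    parameters={
        "type": "object",
        "properties": {
            "query": {"type": "string", "description": "The search query."},
            "num_results": {"type": "integer", "default": 10},
        },
        "required": ["query"],
    },
)
print(schema["function"]["name"])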
@@ -52,13 +54,15 @@ class WebSearch(BaseTool):
         for engine_name in engine_order:
             engine = self._search_engine[engine_name]
             try:
-                links = await self._perform_search_with_engine(engine, query, num_results)
+                links = await self._perform_search_with_engine(
+                    engine, query, num_results
+                )
                 if links:
                     return links
             except Exception as e:
                 print(f"Search engine '{engine_name}' failed with error: {e}")
         return []


     def _get_engine_order(self) -> List[str]:
         """
         Determines the order in which to try search engines.
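Note: the loop above implements a simple ordered-fallback strategy: try each engine in the configured order, return the first non-empty result, and swallow per-engine failures. A self-contained sketch of the same pattern, assuming illustrative async callables rather than the project's real engine classes:

import asyncio
from typing import Awaitable, Callable, Dict, List

SearchFn = Callable[[str, int], Awaitable[List[str]]]


async def search_with_fallback(
    engines: Dict[str, SearchFn], order: List[str], query: str, num_results: int = 10
) -> List[str]:
    # Try each engine in the configured order; the first non-empty result wins.
    for name in order:
        try:
            links = await engines[name](query, num_results)
            if links:
                return links
        except Exception as e:  # broad catch, mirroring the diff above
            print(f"Search engine '{name}' failed with error: {e}")
    return []  # every engine failed or returned nothing


async def _demo() -> None:
    async def failing(query: str, n: int) -> List[str]:
        raise RuntimeError("rate limited")

    async def working(query: str, n: int) -> List[str]:
        return [f"https://example.com/{query}/{i}" for i in range(n)]

    order = ["google", "duckduckgo"]
    print(await search_with_fallback({"google": failing, "duckduckgo": working}, order, "test", 3))


asyncio.run(_demo())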
@@ -78,18 +82,18 @@ class WebSearch(BaseTool):
             if key not in engine_order:
                 engine_order.append(key)
         return engine_order


     @retry(
         stop=stop_after_attempt(3),
         wait=wait_exponential(multiplier=1, min=1, max=10),
     )
     async def _perform_search_with_engine(
-        self,
-        engine: WebSearchEngine,
-        query: str,
+        self,
+        engine: WebSearchEngine,
+        query: str,
         num_results: int,
     ) -> List[str]:
         loop = asyncio.get_event_loop()
         return await loop.run_in_executor(
             None, lambda: list(engine.perform_search(query, num_results=num_results))
-        )
+        )
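Note: _perform_search_with_engine combines two mechanisms: tenacity retries the coroutine up to three times with exponential backoff, and run_in_executor moves the blocking perform_search call onto a worker thread so the event loop stays responsive. A runnable sketch of the same combination, with a fake blocking search function standing in for a real engine (requires pip install tenacity):

import asyncio
import time
from typing import List

from tenacity import retry, stop_after_attempt, wait_exponential

calls = 0


def blocking_search(query: str, num_results: int) -> List[str]:
    """Stand-in for a synchronous search client; fails once, then succeeds."""
    global calls
    calls += 1
    if calls < 2:
        raise ConnectionError("temporary network error")
    time.sleep(0.1)  # pretend to do network I/O
    return [f"https://example.com/{query}/{i}" for i in range(num_results)]


@retry(
    stop=stop_after_attempt(3),  # give up after three attempts
    wait=wait_exponential(multiplier=1, min=1, max=10),  # exponential backoff, bounded to 1-10 s
)
async def search(query: str, num_results: int) -> List[str]:
    loop = asyncio.get_running_loop()
    # Run the blocking call in the default thread pool so the event loop is not blocked.
    return await loop.run_in_executor(
        None, lambda: blocking_search(query, num_results=num_results)
    )


print(asyncio.run(search("test", 3)))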
@@ -1,6 +1,6 @@
 # Global LLM configuration
 [llm]
-model = "claude-3-7-sonnet" # The LLM model to use
+model = "gpt-4o" # The LLM model to use
 base_url = "https://api.openai.com/v1" # API endpoint URL
 api_key = "sk-..." # Your API key
 max_tokens = 8192 # Maximum number of tokens in the response
@@ -26,7 +26,7 @@ temperature = 0.0 # Controls randomness

 # Optional configuration for specific LLM models
 [llm.vision]
-model = "claude-3-7-sonnet" # The vision model to use
+model = "gpt-4o" # The vision model to use
 base_url = "https://api.openai.com/v1" # API endpoint URL for vision model
 api_key = "sk-..." # Your API key for vision model
 max_tokens = 8192 # Maximum number of tokens in the response
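Note: [llm] and [llm.vision] are plain TOML tables, with llm.vision nested under llm. A minimal sketch of reading them with the standard library's tomllib (Python 3.11+); the path config/config.toml is an assumption, and the real project may load and validate its configuration differently.

import tomllib
from pathlib import Path

cfg = tomllib.loads(Path("config/config.toml").read_text(encoding="utf-8"))

llm = cfg["llm"]                # the [llm] table
vision = llm.get("vision", {})  # the nested [llm.vision] table, if present

print(llm["model"], llm["base_url"], llm["max_tokens"])
print(vision.get("model", llm["model"]))  # fall back to the main model if unset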