format code
This commit is contained in:
parent 4783f8a0d6
commit 5e35f01ea8
@@ -1,8 +1,9 @@
 import asyncio
 from typing import List
 
-from app.config import config
 from tenacity import retry, stop_after_attempt, wait_exponential
+
+from app.config import config
 from app.tool.base import BaseTool
 from app.tool.search import (
     BaiduSearchEngine,
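The import shuffle above is consistent with the grouping that formatters such as isort enforce (the commit message only says "format code", so treating it as an isort/black pass is an assumption): standard-library modules first, third-party packages second, first-party project code last, each group separated by a blank line. Annotated, the resulting module header reads:

# Group 1: standard library
import asyncio
from typing import List

# Group 2: third-party packages installed from PyPI
from tenacity import retry, stop_after_attempt, wait_exponential

# Group 3: first-party code (this repository's app package)
from app.config import config
from app.tool.base import BaseTool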
@@ -11,6 +12,7 @@ from app.tool.search import (
     WebSearchEngine,
 )
 
+
 class WebSearch(BaseTool):
     name: str = "web_search"
     description: str = """Perform a web search and return a list of relevant links.
@@ -52,7 +54,9 @@ class WebSearch(BaseTool):
         for engine_name in engine_order:
             engine = self._search_engine[engine_name]
             try:
-                links = await self._perform_search_with_engine(engine, query, num_results)
+                links = await self._perform_search_with_engine(
+                    engine, query, num_results
+                )
                 if links:
                     return links
             except Exception as e:
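For context, the reflowed call in the last hunk sits inside a fallback loop: each configured engine is tried in order, the per-engine call is retried via tenacity, and the first non-empty result list is returned. A minimal, self-contained sketch of that pattern follows; the helper names (web_search, perform_search_with_engine, the fake engines) and the retry parameters are illustrative assumptions, not the repository's exact code.

import asyncio
from typing import Awaitable, Callable, Dict, List

from tenacity import retry, stop_after_attempt, wait_exponential

# Signature assumed for an engine callable: (query, num_results) -> list of links
SearchFn = Callable[[str, int], Awaitable[List[str]]]


async def fake_baidu(query: str, num_results: int) -> List[str]:
    raise RuntimeError("engine unavailable")  # forces the fallback path


async def fake_google(query: str, num_results: int) -> List[str]:
    return [f"https://example.com/search/{i}?q={query}" for i in range(num_results)]


@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10))
async def perform_search_with_engine(
    engine: SearchFn, query: str, num_results: int
) -> List[str]:
    # tenacity re-invokes this coroutine on failure, backing off exponentially
    return await engine(query, num_results)


async def web_search(query: str, num_results: int = 10) -> List[str]:
    engines: Dict[str, SearchFn] = {"baidu": fake_baidu, "google": fake_google}
    for name, engine in engines.items():  # try engines in configured order
        try:
            links = await perform_search_with_engine(engine, query, num_results)
            if links:
                return links
        except Exception as e:
            print(f"Search engine '{name}' failed: {e}")
    return []


if __name__ == "__main__":
    print(asyncio.run(web_search("open source agents", 3)))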
@@ -1,6 +1,6 @@
 # Global LLM configuration
 [llm]
-model = "claude-3-7-sonnet"  # The LLM model to use
+model = "gpt-4o"  # The LLM model to use
 base_url = "https://api.openai.com/v1"  # API endpoint URL
 api_key = "sk-..."  # Your API key
 max_tokens = 8192  # Maximum number of tokens in the response
@@ -26,7 +26,7 @@ temperature = 0.0 # Controls randomness
 
 # Optional configuration for specific LLM models
 [llm.vision]
-model = "claude-3-7-sonnet"  # The vision model to use
+model = "gpt-4o"  # The vision model to use
 base_url = "https://api.openai.com/v1"  # API endpoint URL for vision model
 api_key = "sk-..."  # Your API key for vision model
 max_tokens = 8192  # Maximum number of tokens in the response
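The two TOML hunks only touch the example model names, but they show the config layout the tool reads through app.config: a global [llm] table plus optional overrides such as [llm.vision]. The repository's actual loader is not part of this diff; the sketch below is a hypothetical reader using the standard-library tomllib (Python 3.11+), with the path, class, and function names invented for illustration.

import tomllib
from dataclasses import dataclass

FIELDS = ("model", "base_url", "api_key", "max_tokens")


@dataclass
class LLMSettings:
    model: str
    base_url: str
    api_key: str
    max_tokens: int


def load_llm_settings(path: str = "config/config.toml") -> dict[str, LLMSettings]:
    # Parse the TOML file; the top-level [llm] table supplies the defaults.
    with open(path, "rb") as f:
        raw = tomllib.load(f)

    llm = raw["llm"]
    base = {k: v for k, v in llm.items() if not isinstance(v, dict)}
    settings = {"default": LLMSettings(**{k: base[k] for k in FIELDS})}

    # Sub-tables such as [llm.vision] override only the keys they redefine.
    for name, override in llm.items():
        if isinstance(override, dict):
            merged = {**base, **override}
            settings[name] = LLMSettings(**{k: merged[k] for k in FIELDS})
    return settings


if __name__ == "__main__":
    cfg = load_llm_settings()
    print(cfg["default"].model, cfg.get("vision", cfg["default"]).max_tokens)

Run against the example file above, cfg["vision"] would pick up the [llm.vision] values and fall back to the global [llm] entries for any key it does not set.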