update config.example.toml and format file_saver.py
parent 95f4ce1e81
commit 114bd46720
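This commit makes two changes: it sorts the first-party imports in file_saver.py into alphabetical order (app.config before app.tool), and it switches the example LLM configuration from OpenAI defaults (gpt-4o) to Anthropic's claude-3-7-sonnet-20250219, dropping the commented-out max_input_tokens example line along the way.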
file_saver.py
@@ -2,8 +2,8 @@ import os
 
 import aiofiles
 
-from app.tool.base import BaseTool
 from app.config import WORKSPACE_ROOT
+from app.tool.base import BaseTool
 
 
 class FileSaver(BaseTool):
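For context, the reordered imports back an asynchronous file-writing tool. The sketch below shows the write pattern those imports imply; it is a hypothetical, self-contained simplification (the save_file function and the WORKSPACE_ROOT stand-in are illustrative, not the repository's actual FileSaver implementation).

# Hypothetical, self-contained sketch of the async write pattern suggested
# by the imports above; not the repository's actual FileSaver code.
import asyncio
import os

import aiofiles

WORKSPACE_ROOT = "./workspace"  # stand-in for app.config.WORKSPACE_ROOT


async def save_file(file_path: str, content: str) -> str:
    # Resolve the target inside the workspace and create parent directories.
    full_path = os.path.join(WORKSPACE_ROOT, file_path)
    os.makedirs(os.path.dirname(full_path) or ".", exist_ok=True)
    # aiofiles performs the write off the event loop, so other tool calls
    # are not blocked by disk I/O.
    async with aiofiles.open(full_path, "w", encoding="utf-8") as f:
        await f.write(content)
    return f"Content saved to {full_path}"


if __name__ == "__main__":
    print(asyncio.run(save_file("notes/hello.txt", "hello")))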
config.example.toml
@@ -1,11 +1,10 @@
 # Global LLM configuration
 [llm]
-model = "gpt-4o"                            # The LLM model to use
-base_url = "https://api.openai.com/v1"      # API endpoint URL
-api_key = "sk-..."                          # Your API key
+model = "claude-3-7-sonnet-20250219"        # The LLM model to use
+base_url = "https://api.anthropic.com/v1/"  # API endpoint URL
+api_key = "YOUR_API_KEY"                    # Your API key
 max_tokens = 8192                           # Maximum number of tokens in the response
 temperature = 0.0                           # Controls randomness
-# max_input_tokens = 100000 # Maximum input tokens to use across all requests (set to null or delete this line for unlimited)
 
 # [llm] #AZURE OPENAI:
 # api_type= 'azure'
@@ -26,11 +25,11 @@ temperature = 0.0  # Controls randomness
 
 # Optional configuration for specific LLM models
 [llm.vision]
-model = "gpt-4o"                            # The vision model to use
-base_url = "https://api.openai.com/v1"      # API endpoint URL for vision model
-api_key = "sk-..."                          # Your API key for vision model
+model = "claude-3-7-sonnet-20250219"        # The vision model to use
+base_url = "https://api.anthropic.com/v1/"  # API endpoint URL for vision model
+api_key = "YOUR_API_KEY"                    # Your API key for vision model
 max_tokens = 8192                           # Maximum number of tokens in the response
 temperature = 0.0                           # Controls randomness for vision model
 
 # [llm.vision] #OLLAMA VISION:
 # api_type = 'ollama'
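To show how the new defaults read at runtime, here is a minimal sketch that parses the example file with Python's standard-library tomllib (3.11+). The file path and the use of tomllib are assumptions for illustration; the project may load its config differently.

# Minimal sketch: reading the [llm] and [llm.vision] tables with tomllib
# (Python 3.11+). The path below is assumed for illustration.
import tomllib

with open("config.example.toml", "rb") as f:  # tomllib requires binary mode
    config = tomllib.load(f)

llm = config["llm"]
print(llm["model"])       # claude-3-7-sonnet-20250219
print(llm["base_url"])    # https://api.anthropic.com/v1/
print(llm["max_tokens"])  # 8192

# [llm.vision] is a nested table under [llm]; it carries the settings used
# for vision-capable requests.
vision = llm["vision"]
print(vision["model"])    # claude-3-7-sonnet-20250219

Note that after this change the commented-out max_input_tokens key is gone from the example, so per the old comment's own wording, input-token usage across requests stays unlimited unless the user adds the key back.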