Merge pull request #648 from fred913/main

chore: several optimizations
Committed by mannaandpoem on 2025-03-15 19:03:22 +08:00 via GitHub
Commit 564a9fd88c
5 changed files with 63 additions and 20 deletions

.gitignore (vendored, 31 changes)

@@ -1,3 +1,14 @@
+### Project-specific ###
+# Logs
+logs/
+
+# Data
+data/
+
+# Workspace
+workspace/
+
+### Python ###
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
@@ -170,14 +181,16 @@ cython_debug/
 # PyPI configuration file
 .pypirc
 
-# Logs
-logs/
+### Visual Studio Code ###
+.vscode/*
+!.vscode/settings.json
+!.vscode/tasks.json
+!.vscode/launch.json
+!.vscode/extensions.json
+!.vscode/*.code-snippets
 
-# Data
-data/
+# Local History for Visual Studio Code
+.history/
 
-# Workspace
-workspace/
-
-# sensitive information
-config/config.toml
+# Built Visual Studio Code Extensions
+*.vsix

.vscode/extensions.json (vendored, new file, 8 additions)

@@ -0,0 +1,8 @@
+{
+    "recommendations": [
+        "tamasfe.even-better-toml",
+        "ms-python.black-formatter",
+        "ms-python.isort"
+    ],
+    "unwantedRecommendations": []
+}

.vscode/settings.json (vendored, new file, 20 additions)

@@ -0,0 +1,20 @@
+{
+    "[python]": {
+        "editor.defaultFormatter": "ms-python.black-formatter",
+        "editor.codeActionsOnSave": {
+            "source.organizeImports": "always"
+        }
+    },
+    "[toml]": {
+        "editor.defaultFormatter": "tamasfe.even-better-toml",
+    },
+    "pre-commit-helper.runOnSave": "none",
+    "pre-commit-helper.config": ".pre-commit-config.yaml",
+    "evenBetterToml.schema.enabled": true,
+    "evenBetterToml.schema.associations": {
+        "^.+config[/\\\\].+\\.toml$": "../config/schema.config.json"
+    },
+    "files.insertFinalNewline": true,
+    "files.trimTrailingWhitespace": true,
+    "editor.formatOnSave": true
+}
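
The `evenBetterToml.schema.associations` entry maps any TOML file under `config/` (the character class `[/\\]` covers both `/` and `\` path separators) to `../config/schema.config.json`, resolved relative to the `.vscode/` directory, so Even Better TOML can validate and autocomplete config keys while editing. The same check can be reproduced outside the editor; a minimal sketch in Python, assuming the schema file exists at that path and using the third-party jsonschema package:

import json
import tomllib  # Python 3.11+; use the tomli package on older versions

from jsonschema import validate  # pip install jsonschema

# Load the schema that the VS Code setting points at.
with open("config/schema.config.json", "rb") as f:
    schema = json.load(f)

# Parse the config the same way the editor validates it.
with open("config/config.toml", "rb") as f:
    config = tomllib.load(f)

# Raises jsonschema.exceptions.ValidationError on a bad key or type.
validate(instance=config, schema=schema)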

config/.gitignore (vendored, new file, 2 additions)

@@ -0,0 +1,2 @@
+# prevent the local config file from being uploaded to the remote repository
+config.toml
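
With `config/config.toml` ignored, each clone keeps its API keys in a local, untracked copy. A minimal bootstrap sketch in Python, assuming the tracked template is named `config/config.example.toml` (a conventional name; the diff below edits that template):

import shutil
from pathlib import Path

# Create an untracked local config from the tracked example, one time.
local = Path("config/config.toml")
example = Path("config/config.example.toml")  # assumed template name
if not local.exists():
    shutil.copy(example, local)
    print("Created config/config.toml -- now fill in a real api_key.")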

config/config.example.toml (22 changes)

@@ -1,11 +1,11 @@
 # Global LLM configuration
 [llm]
-model = "claude-3-7-sonnet" # The LLM model to use
-base_url = "https://api.openai.com/v1" # API endpoint URL
-api_key = "sk-..." # Your API key
-max_tokens = 8192 # Maximum number of tokens in the response
-temperature = 0.0 # Controls randomness
-#max_input_tokens = 100000 # Maximum input tokens to use across all requests (set to null or delete this line for unlimited)
+model = "claude-3-7-sonnet" # The LLM model to use
+base_url = "https://api.openai.com/v1" # API endpoint URL
+api_key = "sk-..." # Your API key
+max_tokens = 8192 # Maximum number of tokens in the response
+temperature = 0.0 # Controls randomness
+# max_input_tokens = 100000 # Maximum input tokens to use across all requests (set to null or delete this line for unlimited)
 
 # [llm] #AZURE OPENAI:
 # api_type= 'azure'
@@ -26,11 +26,11 @@ temperature = 0.0 # Controls randomness
 
 # Optional configuration for specific LLM models
 [llm.vision]
-model = "claude-3-7-sonnet" # The vision model to use
-base_url = "https://api.openai.com/v1" # API endpoint URL for vision model
-api_key = "sk-..." # Your API key for vision model
-max_tokens = 8192 # Maximum number of tokens in the response
-temperature = 0.0 # Controls randomness for vision model
+model = "claude-3-7-sonnet" # The vision model to use
+base_url = "https://api.openai.com/v1" # API endpoint URL for vision model
+api_key = "sk-..." # Your API key for vision model
+max_tokens = 8192 # Maximum number of tokens in the response
+temperature = 0.0 # Controls randomness for vision model
 
 # [llm.vision] #OLLAMA VISION:
 # api_type = 'ollama'
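
The `[llm.vision]` table repeats the `[llm]` keys, which suggests per-model sections override the global defaults. A minimal sketch of that merge in Python 3.11+, reading a local copy of this file; the override semantics are an assumption, not something this diff specifies:

import tomllib

# Parse the local config (the untracked config/config.toml).
with open("config/config.toml", "rb") as f:
    cfg = tomllib.load(f)

base = cfg["llm"]
# TOML nests [llm.vision] inside the "llm" table; start from the global
# scalar settings and let the vision table override them.
vision = {k: v for k, v in base.items() if not isinstance(v, dict)}
vision.update(base.get("vision", {}))

print(vision["model"], vision["max_tokens"])  # e.g. claude-3-7-sonnet 8192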