chore: ensure TOML configuration files are formatted well

Author: Sheng Fan, 2025-03-15 12:58:18 +08:00
Parent: 3671e1d866
Commit: b6f8f825e0
4 changed files with 37 additions and 10 deletions

.vscode/extensions.json (new file, +8)
@@ -0,0 +1,8 @@
+{
+  "recommendations": [
+    "tamasfe.even-better-toml",
+    "ms-python.black-formatter",
+    "ms-python.isort"
+  ],
+  "unwantedRecommendations": []
+}

.vscode/settings.json (new file, +17)
@@ -0,0 +1,17 @@
+{
+  "[python]": {
+    "editor.defaultFormatter": "ms-python.black-formatter",
+    "editor.codeActionsOnSave": {
+      "source.organizeImports": "always"
+    }
+  },
+  "[toml]": {
+    "editor.defaultFormatter": "tamasfe.even-better-toml"
+  },
+  "pre-commit-helper.runOnSave": "none",
+  "pre-commit-helper.config": ".pre-commit-config.yaml",
+  "evenBetterToml.schema.enabled": true,
+  "evenBetterToml.schema.associations": {
+    "^.+config[/\\\\].+\\.toml$": "../config/schema.config.json"
+  }
+}
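
Note: even-better-toml is a front end for the taplo TOML toolkit, so the same formatting rules can also be pinned for CLI and CI use with a repository-level .taplo.toml. A minimal sketch, not part of this commit; the option names follow taplo's formatter settings, and the values here are illustrative:

# .taplo.toml (illustrative, not part of this commit)
include = ["config/**/*.toml"]

[formatting]
align_comments = true # line up trailing comments, as in the TOML diff below
align_entries = false
indent_string = "    "
reorder_keys = false

With such a file in place, running "taplo fmt" should reproduce the editor's formatting outside VS Code.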

config/.gitignore (new file, +2)
@@ -0,0 +1,2 @@
+# prevent the local config file from being uploaded to the remote repository
+config.toml
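
The ignore rule above implies the usual copy-the-example workflow: the tracked example file (diffed below) is copied to an untracked config/config.toml that holds real credentials. A hypothetical local file, assuming it simply mirrors the example's [llm] table:

# config/config.toml (hypothetical local copy, kept out of the repository by the rule above)
[llm]
model = "claude-3-7-sonnet"
base_url = "https://api.openai.com/v1"
api_key = "sk-your-real-key" # the real secret lives only in this untracked file
max_tokens = 8192
temperature = 0.0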

config/config.example.toml (+10, -10)
@@ -1,10 +1,10 @@
 # Global LLM configuration
 [llm]
-model = "claude-3-7-sonnet" # The LLM model to use
-base_url = "https://api.openai.com/v1" # API endpoint URL
-api_key = "sk-..." # Your API key
-max_tokens = 8192 # Maximum number of tokens in the response
-temperature = 0.0 # Controls randomness
+model = "claude-3-7-sonnet"            # The LLM model to use
+base_url = "https://api.openai.com/v1" # API endpoint URL
+api_key = "sk-..."                     # Your API key
+max_tokens = 8192                      # Maximum number of tokens in the response
+temperature = 0.0                      # Controls randomness
 # [llm] #AZURE OPENAI:
 # api_type= 'azure'
@@ -25,11 +25,11 @@ temperature = 0.0 # Controls randomness
 # Optional configuration for specific LLM models
 [llm.vision]
-model = "claude-3-7-sonnet" # The vision model to use
-base_url = "https://api.openai.com/v1" # API endpoint URL for vision model
-api_key = "sk-..." # Your API key for vision model
-max_tokens = 8192 # Maximum number of tokens in the response
-temperature = 0.0 # Controls randomness for vision model
+model = "claude-3-7-sonnet"            # The vision model to use
+base_url = "https://api.openai.com/v1" # API endpoint URL for vision model
+api_key = "sk-..."                     # Your API key for vision model
+max_tokens = 8192                      # Maximum number of tokens in the response
+temperature = 0.0                      # Controls randomness for vision model
 # [llm.vision] #OLLAMA VISION:
 # api_type = 'ollama'
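
The commented-out context lines sketch alternative providers (Azure OpenAI, Ollama). As an illustration, a hypothetical [llm] table pointed at a local Ollama server, following the api_type hint above; the model name and endpoint are assumptions, not values from this commit:

# Illustrative Ollama variant of the [llm] table
[llm]
api_type = "ollama"                    # as hinted by the commented template above
model = "llama3.2"                     # assumed local model name
base_url = "http://localhost:11434/v1" # Ollama's OpenAI-compatible endpoint
api_key = "ollama"                     # placeholder; a local Ollama server does not check it
max_tokens = 4096
temperature = 0.0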