From b6f8f825e0b7a53b5826b2b2baf5cf0fcccb9cc6 Mon Sep 17 00:00:00 2001
From: Sheng Fan
Date: Sat, 15 Mar 2025 12:58:18 +0800
Subject: [PATCH] chore: ensure TOML configuration files are formatted well

---
 .vscode/extensions.json    |  8 ++++++++
 .vscode/settings.json      | 17 +++++++++++++++++
 config/.gitignore          |  2 ++
 config/config.example.toml | 20 ++++++++++----------
 4 files changed, 37 insertions(+), 10 deletions(-)
 create mode 100644 .vscode/extensions.json
 create mode 100644 .vscode/settings.json
 create mode 100644 config/.gitignore

diff --git a/.vscode/extensions.json b/.vscode/extensions.json
new file mode 100644
index 0000000..e518685
--- /dev/null
+++ b/.vscode/extensions.json
@@ -0,0 +1,8 @@
+{
+    "recommendations": [
+        "tamasfe.even-better-toml",
+        "ms-python.black-formatter",
+        "ms-python.isort"
+    ],
+    "unwantedRecommendations": []
+}
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..84c0e9d
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,17 @@
+{
+    "[python]": {
+        "editor.defaultFormatter": "ms-python.black-formatter",
+        "editor.codeActionsOnSave": {
+            "source.organizeImports": "always"
+        }
+    },
+    "[toml]": {
+        "editor.defaultFormatter": "tamasfe.even-better-toml",
+    },
+    "pre-commit-helper.runOnSave": "none",
+    "pre-commit-helper.config": ".pre-commit-config.yaml",
+    "evenBetterToml.schema.enabled": true,
+    "evenBetterToml.schema.associations": {
+        "^.+config[/\\\\].+\\.toml$": "../config/schema.config.json"
+    }
+}
\ No newline at end of file
diff --git a/config/.gitignore b/config/.gitignore
new file mode 100644
index 0000000..eaff182
--- /dev/null
+++ b/config/.gitignore
@@ -0,0 +1,2 @@
+# prevent the local config file from being uploaded to the remote repository
+config.toml
diff --git a/config/config.example.toml b/config/config.example.toml
index e9a9620..aae395b 100644
--- a/config/config.example.toml
+++ b/config/config.example.toml
@@ -1,10 +1,10 @@
 # Global LLM configuration
 [llm]
-model = "claude-3-7-sonnet" # The LLM model to use
-base_url = "https://api.openai.com/v1" # API endpoint URL
-api_key = "sk-..." # Your API key
-max_tokens = 8192 # Maximum number of tokens in the response
-temperature = 0.0 # Controls randomness
+model = "claude-3-7-sonnet"             # The LLM model to use
+base_url = "https://api.openai.com/v1"  # API endpoint URL
+api_key = "sk-..."                      # Your API key
+max_tokens = 8192                       # Maximum number of tokens in the response
+temperature = 0.0                       # Controls randomness
 
 # [llm] #AZURE OPENAI:
 # api_type= 'azure'
@@ -25,11 +25,11 @@ temperature = 0.0 # Controls randomness
 
 # Optional configuration for specific LLM models
 [llm.vision]
-model = "claude-3-7-sonnet" # The vision model to use
-base_url = "https://api.openai.com/v1" # API endpoint URL for vision model
-api_key = "sk-..." # Your API key for vision model
-max_tokens = 8192 # Maximum number of tokens in the response
-temperature = 0.0 # Controls randomness for vision model
+model = "claude-3-7-sonnet"             # The vision model to use
+base_url = "https://api.openai.com/v1"  # API endpoint URL for vision model
+api_key = "sk-..."                      # Your API key for vision model
+max_tokens = 8192                       # Maximum number of tokens in the response
+temperature = 0.0                       # Controls randomness for vision model
 
 # [llm.vision] #OLLAMA VISION:
 # api_type = 'ollama'