diff --git a/.gitignore b/.gitignore
index c319d64..ff8e80d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,14 @@
+### Project-specific ###
+# Logs
+logs/
+
+# Data
+data/
+
+# Workspace
+workspace/
+
+### Python ###
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
@@ -170,14 +181,16 @@ cython_debug/
 # PyPI configuration file
 .pypirc
 
-# Logs
-logs/
+### Visual Studio Code ###
+.vscode/*
+!.vscode/settings.json
+!.vscode/tasks.json
+!.vscode/launch.json
+!.vscode/extensions.json
+!.vscode/*.code-snippets
 
-# Data
-data/
+# Local History for Visual Studio Code
+.history/
 
-# Workspace
-workspace/
-
-# sensitive information
-config/config.toml
+# Built Visual Studio Code Extensions
+*.vsix
diff --git a/.vscode/extensions.json b/.vscode/extensions.json
new file mode 100644
index 0000000..f2c6cd0
--- /dev/null
+++ b/.vscode/extensions.json
@@ -0,0 +1,8 @@
+{
+    "recommendations": [
+        "tamasfe.even-better-toml",
+        "ms-python.black-formatter",
+        "ms-python.isort"
+    ],
+    "unwantedRecommendations": []
+}
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..d3aa302
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,20 @@
+{
+    "[python]": {
+        "editor.defaultFormatter": "ms-python.black-formatter",
+        "editor.codeActionsOnSave": {
+            "source.organizeImports": "always"
+        }
+    },
+    "[toml]": {
+        "editor.defaultFormatter": "tamasfe.even-better-toml",
+    },
+    "pre-commit-helper.runOnSave": "none",
+    "pre-commit-helper.config": ".pre-commit-config.yaml",
+    "evenBetterToml.schema.enabled": true,
+    "evenBetterToml.schema.associations": {
+        "^.+config[/\\\\].+\\.toml$": "../config/schema.config.json"
+    },
+    "files.insertFinalNewline": true,
+    "files.trimTrailingWhitespace": true,
+    "editor.formatOnSave": true
+}
diff --git a/config/.gitignore b/config/.gitignore
new file mode 100644
index 0000000..eaff182
--- /dev/null
+++ b/config/.gitignore
@@ -0,0 +1,2 @@
+# prevent the local config file from being uploaded to the remote repository
+config.toml
diff --git a/config/config.example.toml b/config/config.example.toml
index ad36bec..de02853 100644
--- a/config/config.example.toml
+++ b/config/config.example.toml
@@ -1,11 +1,11 @@
 # Global LLM configuration
 [llm]
-model = "claude-3-7-sonnet" # The LLM model to use
-base_url = "https://api.openai.com/v1" # API endpoint URL
-api_key = "sk-..." # Your API key
-max_tokens = 8192 # Maximum number of tokens in the response
-temperature = 0.0 # Controls randomness
-#max_input_tokens = 100000 # Maximum input tokens to use across all requests (set to null or delete this line for unlimited)
+model = "claude-3-7-sonnet"             # The LLM model to use
+base_url = "https://api.openai.com/v1"  # API endpoint URL
+api_key = "sk-..."                      # Your API key
+max_tokens = 8192                       # Maximum number of tokens in the response
+temperature = 0.0                       # Controls randomness
+# max_input_tokens = 100000             # Maximum input tokens to use across all requests (set to null or delete this line for unlimited)
 
 # [llm] #AZURE OPENAI:
 # api_type= 'azure'
@@ -26,11 +26,11 @@ temperature = 0.0 # Controls randomness
 
 # Optional configuration for specific LLM models
 [llm.vision]
-model = "claude-3-7-sonnet" # The vision model to use
-base_url = "https://api.openai.com/v1" # API endpoint URL for vision model
-api_key = "sk-..." # Your API key for vision model
-max_tokens = 8192 # Maximum number of tokens in the response
-temperature = 0.0 # Controls randomness for vision model
+model = "claude-3-7-sonnet"             # The vision model to use
+base_url = "https://api.openai.com/v1"  # API endpoint URL for vision model
+api_key = "sk-..."                      # Your API key for vision model
+max_tokens = 8192                       # Maximum number of tokens in the response
+temperature = 0.0                       # Controls randomness for vision model
 
 # [llm.vision] #OLLAMA VISION:
 # api_type = 'ollama'