feat: enable the AI Ollama code-editing assistant

Yuan Chiu 2025-05-23 07:58:26 +08:00
parent 479540f0b2
commit eba0ff804e
Signed by: yuan
GPG Key ID: 50FBE4156404B98D
5 changed files with 241 additions and 3 deletions

View File: .chezmoi.toml.tmpl

@@ -1,6 +1,8 @@
 {{- $name := promptStringOnce . "name" "Enter your name" -}}
 {{- $email := promptStringOnce . "email" "Enter your email address" -}}
-{{- $signingkey := promptStringOnce . "signingkey" "Enter your GPG key ID (you can find it with gpg --list-secret-keys --keyid-format=long)" -}}
+{{- $signingkey := promptStringOnce . "signingkey" "Enter your GPG key ID (leave blank if unused; you can find it with gpg --list-secret-keys --keyid-format=long)" -}}
+{{- $ollamaUrl := promptStringOnce . "ollamaUrl" "Enter your Ollama URL (leave blank if unused)" -}}
+{{- $ollamaKey := promptStringOnce . "ollamaKey" "Enter your Ollama API key (leave blank if unused)" -}}
 {{- $hosttype_choices := list "desktop" "server" "unroot_server" "manual" -}}
 {{- $hosttype := promptChoice "What type of host are you on" $hosttype_choices -}}
@@ -39,3 +41,7 @@ sourceDir = "~/.local/share/chezmoi"
 enableSixel = {{ $enableSixel }} {{/* TODO: packages.toml not handled yet */}}
 noGUI = {{ not $enableGUI }}
 enableYcm = {{ $enableYcm }}
+enableOllama = {{ not (not $ollamaUrl) }}
+ollamaUrl = {{ $ollamaUrl | quote }}
+ollamaKey = {{ $ollamaKey | quote }}
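A note on the `enableOllama` line above: in chezmoi's (Go) template language an empty string is falsy, so `not (not $ollamaUrl)` coerces the prompt answer to a boolean, yielding true exactly when a non-empty URL was entered. A minimal Lua sketch of the same coercion (illustrative only; the URL is an assumed example value):

-- Illustrative only: the truthiness coercion that `not (not $ollamaUrl)`
-- performs in the template, expressed as Lua.
local function enable_flag(url)
  return url ~= nil and url ~= "" -- empty answer -> false
end
assert(enable_flag("http://localhost:11434") == true) -- assumed example URL
assert(enable_flag("") == false)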

View File: .chezmoiignore

@@ -8,6 +8,13 @@ dconf.ini
 .config/nvim/lua/plugins/components/image.lua
 {{ end }}
+{{/* ---- If the AI Ollama server is not enabled ---------------------------- */ -}}
+{{ if not .enableOllama }}
+.config/nvim/lua/plugins/components/ai.lua
+.continue/config.yaml
+{{ end }}
 {{/* ---- No GUI ------------------------------------------------------------ */ -}}
 {{ if .noGUI }}
 .config/iterm2/
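When `enableOllama` is false, the two new entries above make chezmoi leave `ai.lua` and `.continue/config.yaml` unmanaged, so the plugin config below never gets deployed. The commented-out first line of `ai.lua` (next file) is the in-file equivalent of this switch; a minimal sketch of that idiom, with a hypothetical flag:

-- Sketch: disabling a lazy.nvim plugin spec from inside the file,
-- the same idiom as the commented-out guard at the top of ai.lua.
local ollama_enabled = true -- hypothetical flag
if not ollama_enabled then
  return {} -- returning an empty spec table tells lazy.nvim to load nothing
end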

View File: .config/nvim/lua/plugins/components/ai.lua (new file)

@@ -0,0 +1,132 @@
-- if true then return {} end -- temporarily disabled
return {
{
"yetone/avante.nvim",
-- Key Bindings
-- <Leader>aa show sidebar
-- <Leader>at toggle sidebar visibility
-- <Leader>ar refresh sidebar
-- <Leader>af switch sidebar focus
-- <Leader>a? select model
-- <Leader>ae edit selected blocks
-- <Leader>aS stop current AI request
-- <Leader>ah select between chat histories
-- co choose ours
-- ct choose theirs
-- ca choose all theirs
-- c0 choose none
-- cb choose both
-- cc choose cursor
-- [x move to previous conflict
-- ]x move to next conflict
-- [[ jump to previous codeblock (results window)
-- ]] jump to next codeblock (results window)
event = "VeryLazy",
version = false, -- Never set this value to "*"! Never!
keys = {
{ "<C-A-i>", "<cmd>AvanteToggle<cr>", desc = "Avante Chat Toggle" },
},
opts = {
provider = "openai",
openai = {
endpoint = {{ print .ollamaUrl "/v1" | quote }},
model = "qwen2.5-coder:7b", -- your desired model (or use gpt-4o, etc.)
timeout = 30000, -- Timeout in milliseconds, increase this for reasoning models
-- temperature = 0,
-- max_completion_tokens = 8192, -- Increase this to include reasoning tokens (for reasoning models)
-- reasoning_effort = "medium", -- low|medium|high, only used for reasoning models
disable_tools = true, -- Open-source models often do not support tools.
},
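-- Descriptive note: chat requests use the OpenAI-compatible endpoint
-- configured above (the Ollama URL plus "/v1"), while the inline
-- auto-suggestions below go through Ollama's native API directly.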
auto_suggestions_provider = "ollama",
ollama = {
api_key_name = "OLLAMA_API_KEY",
endpoint = {{ .ollamaUrl | quote }},
model = "qwen2.5-coder:1.5b",
disable_tools = true, -- Open-source models often do not support tools.
},
behaviour = {
auto_suggestions = false, -- Experimental stage
auto_set_highlight_group = true,
auto_set_keymaps = true,
auto_apply_diff_after_generation = false,
support_paste_from_clipboard = false,
minimize_diff = true, -- Whether to remove unchanged lines when applying a code block
enable_token_counting = true, -- Whether to enable token counting. Default to true.
},
},
-- if you want to build from source then do `make BUILD_FROM_SOURCE=true`
build = "make",
-- build = "powershell -ExecutionPolicy Bypass -File Build.ps1 -BuildFromSource false" -- for windows
dependencies = {
"nvim-treesitter/nvim-treesitter",
"stevearc/dressing.nvim",
"nvim-lua/plenary.nvim",
"MunifTanjim/nui.nvim",
--- The below dependencies are optional,
"echasnovski/mini.pick", -- for file_selector provider mini.pick
"nvim-telescope/telescope.nvim", -- for file_selector provider telescope
"hrsh7th/nvim-cmp", -- autocompletion for avante commands and mentions
"ibhagwan/fzf-lua", -- for file_selector provider fzf
"nvim-tree/nvim-web-devicons", -- or echasnovski/mini.icons
"zbirenbaum/copilot.lua", -- for providers='copilot'
{
-- support for image pasting
"HakonHarnes/img-clip.nvim",
event = "VeryLazy",
opts = {
-- recommended settings
default = {
embed_image_as_base64 = false,
prompt_for_file_name = false,
drag_and_drop = {
insert_mode = true,
},
-- required for Windows users
use_absolute_path = true,
},
},
},
{
-- Make sure to set this up properly if you have lazy=true
'MeanderingProgrammer/render-markdown.nvim',
opts = {
file_types = { "markdown", "Avante" },
},
ft = { "markdown", "Avante" },
},
},
config = function(_, opts)
-- Dynamically set Neovim's environment variables here (they are not passed to the shell)
vim.env.OPENAI_API_KEY = {{ .ollamaKey | quote }}
vim.env.OLLAMA_API_KEY = {{ .ollamaKey | quote }}
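-- Both variables carry the same key: avante's "openai" provider reads
-- OPENAI_API_KEY by default, while the "ollama" provider is pointed at
-- OLLAMA_API_KEY via api_key_name above.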
require("avante").setup(opts)
-- -- Configure Copilot
-- require("copilot").setup()
end,
},
{
'huggingface/llm.nvim',
dependencies = { 'kevinhwang91/nvim-ufo' }, -- make sure ufo is loaded first
event = 'VeryLazy', -- load automatically after startup
opts = {
model = "qwen2.5-coder:1.5b",
backend = "openai",
url = {{ .ollamaUrl | quote }}, -- llm-ls uses "/api/generate"
api_token = {{ .ollamaKey | quote }},
-- -- cf https://github.com/ollama/ollama/blob/main/docs/api.md#parameters
request_body = {
-- Modelfile options for the model you use
options = {
temperature = 0.2,
top_p = 0.95,
}
}
}
},
}
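For orientation, the `{{ ... }}` placeholders in this file are chezmoi template expressions, substituted when the dotfile is rendered. Assuming the prompts were answered with the hypothetical values below, the templated fields would come out roughly like this (a sketch, not the actual rendered file):

-- Hypothetical rendering, assuming the chezmoi prompts were answered with
-- ollamaUrl = "http://localhost:11434" and ollamaKey = "example-token":
local rendered = {
  openai_endpoint = "http://localhost:11434/v1", -- {{ print .ollamaUrl "/v1" | quote }}
  ollama_endpoint = "http://localhost:11434",    -- {{ .ollamaUrl | quote }}
  api_key         = "example-token",             -- {{ .ollamaKey | quote }}
}
print(rendered.openai_endpoint, rendered.api_key)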

View File: lualine statusline configuration

@@ -46,9 +46,15 @@ return {
     component_separators = { left = '', right = ''},
     section_separators = { left = '', right = ''},
     disabled_filetypes = {
-      statusline = {},
+      statusline = {
+        'Avante',
+        'AvanteSelectedFiles',
+      },
       winbar = {},
-      {'undotree'}
+      {
+        'undotree',
+        'AvanteInput',
+      }
     },
     ignore_focus = {},
     always_divide_middle = true,
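For context, lualine's documented `disabled_filetypes` shape takes plain filetype strings, either positionally (disable lualine everywhere for that filetype) or under the `statusline`/`winbar` keys (disable only that bar). A minimal sketch of that shape; the filetypes here are illustrative examples:

-- Documented shape of lualine's disabled_filetypes (a sketch; the
-- filetypes are illustrative examples):
local opts = {
  options = {
    disabled_filetypes = {
      'undotree',                -- positional entry: disabled everywhere
      statusline = { 'Avante' }, -- hides only the statusline
      winbar = {},
    },
  },
}
require('lualine').setup(opts)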

View File: .continue/config.yaml (new file)

@@ -0,0 +1,87 @@
name: Local Assistant
version: 1.0.0
schema: v1
models:
- name: qwen2.5-coder:7b
provider: ollama
model: qwen2.5-coder:7b
apiBase: {{ .ollamaUrl }}
apiKey: {{ .ollamaKey }}
roles:
- chat
- edit
- apply
- name: qwen2.5-coder:1.5b
provider: ollama
model: qwen2.5-coder:1.5b
apiBase: {{ .ollamaUrl }}
apiKey: {{ .ollamaKey }}
roles:
- autocomplete
- name: mxbai-embed-large
provider: ollama
model: mxbai-embed-large
apiBase: {{ .ollamaUrl }}
apiKey: {{ .ollamaKey }}
roles:
- embed
- name: codellama:13b
provider: ollama
model: codellama:13b
apiBase: {{ .ollamaUrl }}
apiKey: {{ .ollamaKey }}
- name: llama3.1-claude
provider: ollama
model: incept5/llama3.1-claude
apiBase: {{ .ollamaUrl }}/
apiKey: {{ .ollamaKey }}
# - name: deepseek-coder
# provider: ollama
# model: deepseek-coder
# apiBase: {{ .ollamaUrl }}
# apiKey: {{ .ollamaKey }}
# tabAutocompleteModel:
# title: qwen2.5-coder:1.5b
# provider: ollama
# model: qwen2.5-coder:1.5b
# embeddingsProvider:
# provider: ollama
# model: mxbai-embed-large
context:
- provider: code
- provider: docs
- provider: diff
- provider: terminal
- provider: problems
- provider: folder
- provider: codebase
# models:
# - name: Llama 3.1 8B
# provider: ollama
# model: llama3.1:8b
# roles:
# - chat
# - edit
# - apply
# - name: Qwen2.5-Coder 1.5B
# provider: ollama
# model: qwen2.5-coder:1.5b-base
# roles:
# - autocomplete
# - name: Nomic Embed
# provider: ollama
# model: nomic-embed-text:latest
# roles:
# - embed
# context:
# - provider: code
# - provider: docs
# - provider: diff
# - provider: terminal
# - provider: problems
# - provider: folder
# - provider: codebase