add chatgpt, norg and gen plugins

Gustavo Silva (es00679121_prosegur) 2024-01-21 20:35:43 +01:00
parent da79e8d990
commit 793143c6d2
7 changed files with 291 additions and 8 deletions

View File

@@ -58,3 +58,6 @@ opt.foldmethod = 'indent'
 opt.foldenable = false
 opt.foldlevel = 99
 global.markdown_folding = 1
+global.mkdp_markdown_css = '~/markdown.css'
+global.mkdp_highlight_css = ''

View File

@@ -690,11 +690,11 @@ local servers = {
     }
   },
   grammarly = {
-    filetypes = { 'markdown' },
+    filetypes = { 'markdown', 'norg' },
   },
+  tsserver = {},
   -- pyright = {},
   -- rust_analyzer = {},
-  -- tsserver = {},
   -- html = { filetypes = { 'html', 'twig', 'hbs'} },
   lua_ls = {

View File

@@ -1,4 +1,5 @@
 {
+  "ChatGPT.nvim": { "branch": "main", "commit": "48c59167beeb6ee0caa501c46cecc97b9be8571d" },
   "Comment.nvim": { "branch": "master", "commit": "0236521ea582747b58869cb72f70ccfa967d2e89" },
   "LuaSnip": { "branch": "master", "commit": "80a8528f084a97b624ae443a6f50ff8074ba486b" },
   "cmp-nvim-lsp": { "branch": "main", "commit": "44b16d11215dce86f253ce0c30949813c0a90765" },
@@ -6,6 +7,7 @@
   "copilot.vim": { "branch": "release", "commit": "309b3c803d1862d5e84c7c9c5749ae04010123b8" },
   "fidget.nvim": { "branch": "main", "commit": "0ba1e16d07627532b6cae915cc992ecac249fb97" },
   "friendly-snippets": { "branch": "main", "commit": "43727c2ff84240e55d4069ec3e6158d74cb534b6" },
+  "gen.nvim": { "branch": "main", "commit": "41ad952c8269fa7aa3a4b8a5abb44541cb628313" },
   "gitsigns.nvim": { "branch": "main", "commit": "af0f583cd35286dd6f0e3ed52622728703237e50" },
   "go.nvim": { "branch": "master", "commit": "c4819d1625c59747d4a6c3de8c86e62bc2ca84ab" },
   "goto-preview": { "branch": "main", "commit": "b428db4d2a5b7c06e149a020e31b2121fbf57a67" },
@@ -20,6 +22,7 @@
   "mason.nvim": { "branch": "main", "commit": "cd7835b15f5a4204fc37e0aa739347472121a54c" },
   "neo-tree.nvim": { "branch": "main", "commit": "1236db954ce502eb5b340bcdb69aa057cc372e8d" },
   "neodev.nvim": { "branch": "main", "commit": "0491a9c7a1775fa5380ecf38d79de1a32f68cc52" },
+  "neorg": { "branch": "main", "commit": "0ccc3bba3f67f6f8740b86a50aa5e1428327a741" },
   "noice.nvim": { "branch": "main", "commit": "92433164e2f7118d4122c7674c3834d9511722ba" },
   "nui.nvim": { "branch": "main", "commit": "c0c8e347ceac53030f5c1ece1c5a5b6a17a25b32" },
   "null-ls.nvim": { "branch": "main", "commit": "0010ea927ab7c09ef0ce9bf28c2b573fc302f5a7" },

View File

@@ -0,0 +1,185 @@
return {
"jackMort/ChatGPT.nvim",
event = "VeryLazy",
config = function()
require("chatgpt").setup({
api_key_cmd = "pass chatgpt",
yank_register = "+",
edit_with_instructions = {
diff = false,
keymaps = {
close = "<C-c>",
accept = "<C-y>",
toggle_diff = "<C-d>",
toggle_settings = "<C-o>",
toggle_help = "<C-h>",
cycle_windows = "<Tab>",
use_output_as_input = "<C-i>",
},
},
chat = {
welcome_message = WELCOME_MESSAGE, -- WELCOME_MESSAGE is not defined in this file; a nil value here means the plugin's default message is kept
loading_text = "Loading, please wait ...",
question_sign = "", -- 🙂
answer_sign = "", -- 🤖
border_left_sign = "",
border_right_sign = "",
max_line_length = 120,
sessions_window = {
active_sign = "",
inactive_sign = "",
current_line_sign = "",
border = {
style = "rounded",
text = {
top = " Sessions ",
},
},
win_options = {
winhighlight = "Normal:Normal,FloatBorder:FloatBorder",
},
},
keymaps = {
close = "<C-c>",
yank_last = "<C-y>",
yank_last_code = "<C-k>",
scroll_up = "<C-u>",
scroll_down = "<C-d>",
new_session = "<C-n>",
cycle_windows = "<Tab>",
cycle_modes = "<C-f>",
next_message = "<C-j>",
prev_message = "<C-k>",
select_session = "<Space>",
rename_session = "r",
delete_session = "d",
draft_message = "<C-r>",
edit_message = "e",
delete_message = "d",
toggle_settings = "<C-o>",
toggle_sessions = "<C-p>",
toggle_help = "<C-h>",
toggle_message_role = "<C-r>",
toggle_system_role_open = "<C-s>",
stop_generating = "<C-x>",
},
},
popup_layout = {
default = "center",
center = {
width = "80%",
height = "80%",
},
right = {
width = "30%",
width_settings_open = "50%",
},
},
popup_window = {
border = {
highlight = "FloatBorder",
style = "rounded",
text = {
top = " ChatGPT ",
},
},
win_options = {
wrap = true,
linebreak = true,
foldcolumn = "1",
winhighlight = "Normal:Normal,FloatBorder:FloatBorder",
},
buf_options = {
filetype = "markdown",
},
},
system_window = {
border = {
highlight = "FloatBorder",
style = "rounded",
text = {
top = " SYSTEM ",
},
},
win_options = {
wrap = true,
linebreak = true,
foldcolumn = "2",
winhighlight = "Normal:Normal,FloatBorder:FloatBorder",
},
},
popup_input = {
prompt = "",
border = {
highlight = "FloatBorder",
style = "rounded",
text = {
top_align = "center",
top = " Prompt ",
},
},
win_options = {
winhighlight = "Normal:Normal,FloatBorder:FloatBorder",
},
submit = "<C-Enter>",
submit_n = "<Enter>",
max_visible_lines = 20,
},
settings_window = {
setting_sign = "",
border = {
style = "rounded",
text = {
top = " Settings ",
},
},
win_options = {
winhighlight = "Normal:Normal,FloatBorder:FloatBorder",
},
},
help_window = {
setting_sign = "",
border = {
style = "rounded",
text = {
top = " Help ",
},
},
win_options = {
winhighlight = "Normal:Normal,FloatBorder:FloatBorder",
},
},
openai_params = {
model = "gpt-3.5-turbo",
-- model = "gpt-4",
frequency_penalty = 0,
presence_penalty = 0,
max_tokens = 300,
temperature = 0,
top_p = 1,
n = 1,
},
openai_edit_params = {
model = "gpt-3.5-turbo",
frequency_penalty = 0,
presence_penalty = 0,
temperature = 0,
top_p = 1,
n = 1,
},
use_openai_functions_for_edits = false,
actions_paths = {},
show_quickfixes_cmd = "Trouble quickfix",
predefined_chat_gpt_prompts = "https://raw.githubusercontent.com/f/awesome-chatgpt-prompts/main/prompts.csv",
highlights = {
help_key = "@symbol",
help_description = "@comment",
},
})
end,
dependencies = {
"MunifTanjim/nui.nvim",
"nvim-lua/plenary.nvim",
"nvim-telescope/telescope.nvim"
}
}
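
This spec configures ChatGPT.nvim's windows and in-UI keymaps but maps nothing to open the plugin. A minimal sketch of entry points (the <leader>c mappings are hypothetical additions, not part of this commit; :ChatGPT and :ChatGPTEditWithInstructions are commands the plugin itself provides):

-- Hypothetical entry-point mappings (not in this commit):
vim.keymap.set("n", "<leader>cg", "<CMD>ChatGPT<CR>", { desc = "Open ChatGPT chat" })
vim.keymap.set("v", "<leader>ce", "<CMD>ChatGPTEditWithInstructions<CR>", { desc = "Edit selection with ChatGPT" })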

View File

@@ -1,9 +1,27 @@
 return {
   "iamcco/markdown-preview.nvim",
   config = function()
     vim.fn["mkdp#util#install"]()
-    vim.keymap.set("n", "<leader>m", "<CMD>MarkdownPreview<CR>")
-    vim.keymap.set("n", "<leader>mn", "<CMD>MarkdownPreviewStop<CR>")
-  end,
+    vim.keymap.set("n", "<leader>m", "<CMD>MarkdownPreview<CR>")
+    vim.keymap.set("n", "<leader>mn", "<CMD>MarkdownPreviewStop<CR>")
+    vim.g.mkdp_markdown_css = '~/markdown.css'
+    vim.g.mkdp_highlight_css = ''
+    local mkdp_preview_options = {
+      mkit = {},
+      katex = {},
+      uml = {},
+      maid = {},
+      disable_sync_scroll = 0,
+      sync_scroll_type = 'middle',
+      hide_yaml_meta = 1,
+      sequence_diagrams = {},
+      flowchart_diagrams = {},
+      content_editable = true,
+      disable_filename = 1,
+      toc = {}
+    }
+    vim.g.mkdp_preview_options = mkdp_preview_options
+  end,
 }
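
One caveat on the CSS lines above: the markdown-preview.nvim README assigns this value through expand(), suggesting the preview server wants an absolute path. A safer variant of the same setting, under that assumption:

-- Assumption: '~' is not expanded by the preview server itself.
vim.g.mkdp_markdown_css = vim.fn.expand('~/markdown.css')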

View File

@@ -0,0 +1,35 @@
return {
"nvim-neorg/neorg",
build = ":Neorg sync-parsers",
dependencies = { "nvim-lua/plenary.nvim" },
config = function()
require("neorg").setup {
load = {
["core.export"] = {},
["core.export.markdown"] = {
config = {
extension = "md",
},
},
["core.defaults"] = {},
["core.concealer"] = {},
["core.dirman"] = {
config = {
workspaces = {
notes = "~/notes",
},
default_workspace = "notes",
},
},
["core.completion"] = {
config = {
engine = "nvim-cmp",
},
},
},
}
vim.wo.foldlevel = 99
vim.wo.conceallevel = 2
end,
}
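
With core.dirman set up this way, notes live in ~/notes and "notes" is the default workspace. A small usage sketch (the <leader>n mappings are hypothetical additions; :Neorg workspace and :Neorg index are commands Neorg itself provides):

-- Hypothetical convenience mappings (not in this commit):
vim.keymap.set("n", "<leader>nw", "<CMD>Neorg workspace notes<CR>", { desc = "Open notes workspace" })
vim.keymap.set("n", "<leader>ni", "<CMD>Neorg index<CR>", { desc = "Open workspace index" })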

View File

@@ -0,0 +1,39 @@
return {
"David-Kunz/gen.nvim",
config = function()
require('gen').prompts['Elaborate_Text'] = {
prompt = "Elaborate the following text:\n$text",
replace = true
}
require('gen').prompts['Golang'] = {
prompt = "You are a senior Golang engineer, acting as an assitant. You offer help with backend tecnologies: \
mongodb, gorilla/mux, algorithms, data structures, azure cloud, terraform. You answer with code examples when \
possible. $input:\n$text",
replace = true
}
require('gen').setup({
model = "codellama", -- The default model to use.
display_mode = 'split',
})
vim.keymap.set({ 'n', 'v' }, '<leader>]', ':Gen<CR>')
-- the visual-mode mapping below overrides the line above for 'v' mode
vim.keymap.set('v', '<leader>]', ':Gen Enhance_Grammar_Spelling<CR>')
end
-- opts = {
-- model = "mistral", -- The default model to use.
-- display_mode = "float", -- The display mode. Can be "float" or "split".
-- show_prompt = false, -- Shows the Prompt submitted to Ollama.
-- show_model = false, -- Displays which model you are using at the beginning of your chat session.
-- no_auto_close = false, -- Never closes the window automatically.
-- init = function(options) pcall(io.popen, "ollama serve > /dev/null 2>&1 &") end,
-- -- Function to initialize Ollama
-- command = "curl --silent --no-buffer -X POST http://localhost:11434/api/generate -d $body",
-- -- The command for the Ollama service. You can use placeholders $prompt, $model and $body (shellescaped).
-- -- This can also be a lua function returning a command string, with options as the input parameter.
-- -- The executed command must return a JSON object with { response, context }
-- -- (context property is optional).
-- -- list_models = '<omitted lua function>', -- Retrieves a list of model names
-- debug = false -- Prints errors and the command which is run.
-- }
}
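
Custom prompts follow the pattern shown in the gen.nvim spec: $text expands to the current selection, $input asks the user for extra input, and replace = true substitutes the selection with the model's answer. One more illustrative prompt in the same style (name and wording are hypothetical):

-- Hypothetical extra prompt, same API as the two defined above:
require('gen').prompts['Summarize'] = {
  prompt = "Summarize the following text:\n$text",
  replace = false, -- show the result in the Gen window instead of replacing the selection
}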