# LiNix/modules/nixosModules/ai/llama.nix
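#
# Defines flake.nixosModules.llama: a NixOS module that runs llama-swap as an
# on-demand proxy in front of CUDA builds of llama.cpp (chat models) and
# stable-diffusion.cpp (image models), with the weights kept under /var/AI/Models.
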
{ self, inputs, ... }: {
  flake.nixosModules.llama = { lib, pkgs, ... }: let
    # llama-swap substitutes ${PORT} per model at runtime; the backslash keeps
    # Nix from interpolating it here.
    llama_cmd = "${pkgs.llama-cpp-cuda}/bin/llama-server --port \${PORT} --mmap";
    # Note: stable-diffusion-cpp-cuda is not defined in this module; it is
    # assumed to come from another overlay or flake input.
    sd_cmd = "${pkgs.stable-diffusion-cpp-cuda}/bin/sd-server --listen-port \${PORT}";
    chat_models = "/var/AI/Models/Chat/ggufs";
    sd_models = "/var/AI/Models/Art";
  in {
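    # Build llama.cpp with CUDA (and BLAS) enabled and expose it as
    # pkgs.llama-cpp-cuda for the swap commands above and systemPackages below.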
    nixpkgs.overlays = [
      (final: prev: {
        llama-cpp-cuda = prev.llama-cpp.override {
          cudaSupport = true;
          blasSupport = true;
        };
      })
    ];
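
    # llama-swap fronts every model below on a single port and starts the
    # matching backend command on demand; ttl is the idle time in seconds
    # before a model is unloaded again (3600 = 1 hour).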
    services.llama-swap = {
      enable = true;
      port = 9001;
      openFirewall = true;
      listenAddress = "0.0.0.0";
      settings = {
        models = {
          # Language Models
          "angelic_eclipse" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 131072 --model ${chat_models}/Angelic_Eclipse_12B-Q4_K_M.gguf";
          };
          "cydonia" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 131072 --model ${chat_models}/TheDrummer_Cydonia-24B-v4.3-Q4_K_M.gguf";
          };
          "cydonia-redux" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 32768 --model ${chat_models}/Cydonia-Redux-22B-v1.1-Q4_K_M.gguf";
          };
          "gemma3-R1" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 131072 --model ${chat_models}/Gemma3-R1-27B-v1-Q4_K_M.gguf";
          };
          "hearthfire" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 131072 --model ${chat_models}/Hearthfire-24B-Q4_K_M.gguf";
          };
          "impish_bloodmoon" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 131072 --model ${chat_models}/Impish_Bloodmoon-Q4_K_M.gguf";
          };
          "llama_abliterated_dark_champion" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 131072 --model ${chat_models}/Llama-3.2-8X3B-18.4B-MOE-Dark-Champion-Instruct-uncensored-abliterated-Q4_K_M.gguf";
          };
          "loki" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 131072 --model ${chat_models}/M3.2-24B-Loki-V2-Q4_K_M.gguf";
          };
          "magidonia" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 131072 --model ${chat_models}/Magidonia-24B-v4.3-Q4_K_M.gguf";
          };
          "mars" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 131072 --model ${chat_models}/Mars_27B_V1-28B-Q4_K_M.gguf";
          };
          "omega_directive" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 131072 --model ${chat_models}/MS3.2-The-Omega-Directive-24B-Unslop-v2.0-Q4_K_M.gguf";
          };
          "qwen_14b" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 40960 --model ${chat_models}/Qwen3-14B-Q4_K_M.gguf";
          };
          "qwen_32b" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 40960 --model ${chat_models}/Qwen3-32B-Q4_K_M.gguf";
          };
          "snowpiercer" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 65536 --model ${chat_models}/Snowpiercer-15B-v1-Q4_K_M.gguf";
          };
          "valkyrie" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 131072 --model ${chat_models}/Valkyrie-49B-v2.1-Q4_K_M.gguf";
          };
          "wayfarer" = {
            ttl = 3600;
            cmd = "${llama_cmd} --ctx-size 131072 --model ${chat_models}/wayfarer-12B-Q4_K_M.gguf";
          };
          # Diffusion Models
          "cyberrealistic" = {
            ttl = 3600;
            cmd = "${sd_cmd} --model ${sd_models}/cyberrealisticPony_semiRealV45.safetensors";
          };
        };
      };
    };
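
    # Quick smoke test once the host is up (a sketch: assumes llama-swap's usual
    # OpenAI-compatible routing, where the "model" field selects one of the
    # entries above):
    #   curl http://<host>:9001/v1/chat/completions \
    #     -H 'Content-Type: application/json' \
    #     -d '{"model": "qwen_14b", "messages": [{"role": "user", "content": "Hello"}]}'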

    # Also install the CUDA builds system-wide so the CLIs are on PATH.
    environment.systemPackages = with pkgs; [
      llama-cpp-cuda
      stable-diffusion-cpp-cuda
    ];
  };
}