diff --git a/modules/hosts/desktop/desktop.nix b/modules/hosts/desktop/desktop.nix
index 1278779..7da0b55 100644
--- a/modules/hosts/desktop/desktop.nix
+++ b/modules/hosts/desktop/desktop.nix
@@ -7,6 +7,7 @@
       self.nixosModules.llama
       self.nixosModules.localization
       self.nixosModules.applications
+      self.nixosModules.utilities
     ];
   };
 }
diff --git a/modules/nixosModules/ai/llama.nix b/modules/nixosModules/ai/llama.nix
index 336f042..7edca8e 100644
--- a/modules/nixosModules/ai/llama.nix
+++ b/modules/nixosModules/ai/llama.nix
@@ -1,5 +1,10 @@
 {self, inputs, ...}: {
-  flake.nixosModules.llama = { lib, pkgs, ... }: {
+  flake.nixosModules.llama = { lib, pkgs, ... }: let
+    llama_cmd = "${pkgs.llama-cpp-cuda}/bin/llama-server --port \${PORT}";
+    sd_cmd = "${pkgs.stable-diffusion-cpp-cuda}/bin/sd-server --listen-port \${PORT}";
+    chat_models = "/var/AI/Models/Chat/ggufs";
+    sd_models = "/var/AI/Models/Art";
+  in {
     nixpkgs.overlays = [
       (final: prev: {
         llama-cpp-cuda = prev.llama-cpp.override {
@@ -17,13 +22,18 @@
       models = {
         "magidonia" = {
           ttl = 3600;
-          cmd = "${pkgs.llama-cpp-cuda}/bin/llama-server --port \${PORT} -m /var/AI/Models/Chat/ggufs/Magidonia-24B-v4.3-Q4_K_M.gguf";
+          cmd = "${llama_cmd} -m ${chat_models}/Magidonia-24B-v4.3-Q4_K_M.gguf";
+        };
+        "cyberrealistic" = {
+          ttl = 3600;
+          cmd = "${sd_cmd} --model ${sd_models}/cyberrealisticPony_semiRealV45.safetensors";
         };
       };
     };

     environment.systemPackages = with pkgs; [
       llama-cpp-cuda
+      stable-diffusion-cpp-cuda
     ];
   };
 }