feat(home): add prismlauncher and ida apps, enable ollama service

feat(nixos): enable ollama service with cuda support
fix(nixos): update llama service to use fixed port and remove commented code
chore(home): reorganize app imports and remove unused packages
commit 8bd6da8f75 (parent f0bb05678c)
Author: uttarayan21
Date: 2025-12-09 17:45:27 +05:30

6 changed files with 41 additions and 50 deletions


@@ -16,6 +16,7 @@ lib.optionalAttrs device.hasGui {
     ./gimp.nix
     # ./guitarix.nix
     ./hyprpicker.nix
+    ./ida.nix
     # ./jellyflix.nix
     # ./kicad.nix
     ./kitty.nix
@@ -27,19 +28,19 @@ lib.optionalAttrs device.hasGui {
     # ./openscad.nix
     ./orcaslicer.nix
     # ./pcsx2.nix
+    ./prismlauncher.nix
     # ./rpcs3.nix
     # ./shadps4.nix
     ./slack.nix
     # ./thunderbird.nix
     # ./tsukimi.nix
     # ./vial.nix
+    ./vicinae.nix
     ./vlc.nix
     ./vscode.nix
     ./wezterm.nix
     ./zathura.nix
     ./zed.nix
     ./zen.nix
-    ./vicinae.nix
-    ./ida.nix
   ];
 }
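
For context: the hunk headers show this import list lives under lib.optionalAttrs device.hasGui, so the whole set of GUI app modules evaluates to {} on headless hosts. A minimal sketch of the pattern, assuming device.hasGui is this repo's own flag (the surrounding file is not shown in full here):

    {
      lib,
      device,
      ...
    }:
    lib.optionalAttrs device.hasGui {
      # Merged into the home configuration only when the host has a GUI;
      # otherwise this whole attrset collapses to {} and nothing is imported.
      imports = [
        ./ida.nix
        ./prismlauncher.nix
      ];
    }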

prismlauncher.nix (new file)

@@ -0,0 +1,13 @@
+{pkgs, ...}: {
+  home.packages = with pkgs; [
+    (prismlauncher.override {
+      additionalPrograms = [ffmpeg zenity];
+      jdks = [
+        # graalvm-ce
+        zulu8
+        zulu17
+        zulu
+      ];
+    })
+  ];
+}
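
A note on the override, hedged against the nixpkgs prismlauncher wrapper as I read it: additionalPrograms puts ffmpeg and zenity on the launcher wrapper's PATH (ffmpeg for media handling, zenity for native dialogs), while jdks controls which Java runtimes the launcher can detect — zulu8 and zulu17 cover older and newer Minecraft versions, with zulu as the current release; graalvm-ce stays commented out as a possible alternative.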


@@ -32,14 +32,5 @@
     spotify
     steam-run
     wl-clipboard
-    # (prismlauncher.override {
-    #   additionalPrograms = [ffmpeg zenity];
-    #   jdks = [
-    #     # graalvm-ce
-    #     zulu8
-    #     zulu17
-    #     zulu
-    #   ];
-    # })
   ];
 }


@@ -4,8 +4,8 @@
     # ./sunshine.nix
     # ./zerotier.nix
     # ./dnscrypt.nix
-    # ./ollama.nix
     ./llama.nix
+    ./ollama.nix
     ./tailscale.nix
     ./samba.nix
     ./mullvad.nix

llama.nix

@@ -4,35 +4,32 @@
   inputs,
   ...
 }: {
-  # llama-cpp = {
-  #   enable = false;
-  #   port = 11345;
-  #   # model = "/nix/store/ch6z9di3l0k54ad29pzv8k3zv47q30d1-Qwen3-Coder-30B-A3B-Instruct-Q4_K_M.gguf";
-  #   model = pkgs.fetchurl {
-  #     # url = "https://huggingface.co/lmstudio-community/gpt-oss-20b-GGUF/resolve/main/gpt-oss-20b-MXFP4.gguf";
-  #     # sha256 = "65d06d31a3977d553cb3af137b5c26b5f1e9297a6aaa29ae7caa98788cde53ab";
-  #     url = "https://huggingface.co/lmstudio-community/Qwen3-Coder-30B-A3B-Instruct-GGUF/resolve/main/Qwen3-Coder-30B-A3B-Instruct-Q4_K_M.gguf";
-  #     sha256 = "79ad15a5ee3caddc3f4ff0db33a14454a5a3eb503d7fa1c1e35feafc579de486";
-  #   };
-  #   extraFlags = [
-  #     "-c"
-  #     "98304"
-  #     "--jinja"
-  #     "--chat-template-file"
-  #     "${../../../assets/chat.hbs}"
-  #     # "/nix/store/4zk1p50hrzghp3jzzysz96pa64i2kmjl-promp.hbs"
-  #   ];
-  #   # package = inputs.llama-cpp.packages.${pkgs.system}.cuda;
-  # };
   services = {
+    llama-cpp = {
+      enable = false;
+      port = 11345;
+      # model = "/nix/store/ch6z9di3l0k54ad29pzv8k3zv47q30d1-Qwen3-Coder-30B-A3B-Instruct-Q4_K_M.gguf";
+      model = pkgs.fetchurl {
+        # url = "https://huggingface.co/lmstudio-community/gpt-oss-20b-GGUF/resolve/main/gpt-oss-20b-MXFP4.gguf";
+        # sha256 = "65d06d31a3977d553cb3af137b5c26b5f1e9297a6aaa29ae7caa98788cde53ab";
+        url = "https://huggingface.co/lmstudio-community/Qwen3-Coder-30B-A3B-Instruct-GGUF/resolve/main/Qwen3-Coder-30B-A3B-Instruct-Q4_K_M.gguf";
+        sha256 = "79ad15a5ee3caddc3f4ff0db33a14454a5a3eb503d7fa1c1e35feafc579de486";
+      };
+      extraFlags = [
+        "-c"
+        "98304"
+        "--jinja"
+        "--chat-template-file"
+        "${../../../assets/chat.hbs}"
+        # "/nix/store/4zk1p50hrzghp3jzzysz96pa64i2kmjl-promp.hbs"
+      ];
+      # package = inputs.llama-cpp.packages.${pkgs.system}.cuda;
+    };
     caddy = {
       virtualHosts."llama.ryu.darksailor.dev".extraConfig = ''
         import cloudflare
-        reverse_proxy localhost:${builtins.toString config.services.llama-cpp.port}
+        reverse_proxy localhost:11345
       '';
     };
   };
   environment.systemPackages = with pkgs; [
     llama-cpp
   ];
 }
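
On the port change: the removed line derived the upstream port from config.services.llama-cpp.port, while the commit hardcodes 11345 in the Caddyfile. Both render the same proxy target as long as the option stays at 11345; the dropped dynamic form, sketched below for comparison, keeps Caddy in sync if the port ever moves, at the cost of tying the vhost to the llama-cpp option (which is currently enable = false):

    caddy.virtualHosts."llama.ryu.darksailor.dev".extraConfig = ''
      import cloudflare
      # Derive the upstream port from the service option instead of a literal.
      reverse_proxy localhost:${toString config.services.llama-cpp.port}
    '';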

ollama.nix

@@ -6,30 +6,19 @@
 }: {
   services = {
     ollama = {
-      enable = false;
+      enable = true;
       host = "0.0.0.0";
       # loadModels = ["deepseek-r1:7b" "deepseek-r1:14b" "RobinBially/nomic-embed-text-8k" "qwen3:8b" "qwen3:14b"];
       port = 11434;
-      acceleration = "cuda";
+      # acceleration = "cuda";
       environmentVariables = {
         OLLAMA_ORIGINS = "*";
         OLLAMA_LLM_LIBRARY = "cuda";
         LD_LIBRARY_PATH = "run/opengl-driver/lib";
         HTTP_PROXY = "https://ollama.ryu.darksailor.dev";
       };
-      # package = pkgs.ollama.overrideAttrs {
-      #   version = "0.11.0";
-      #   src = pkgs.fetchFromGitHub {
-      #     owner = "ollama";
-      #     repo = "ollama";
-      #     tag = "v0.11.0";
-      #     hash = "sha256-po7BxJAj9eOpOaXsLDmw6/1RyjXPtXza0YUv0pVojZ0=";
-      #     fetchSubmodules = true;
-      #   };
-      #   doCheck = false;
-      #   vendorHash = "sha256-SlaDsu001TUW+t9WRp7LqxUSQSGDF1Lqu9M1bgILoX4=";
-      # };
+      package = pkgs.ollama-cuda;
     };
     # open-webui = {
     #   enable = false;
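
Net effect of this hunk, condensed (a sketch of the effective settings; environment variables as in the diff above):

    services.ollama = {
      enable = true;
      host = "0.0.0.0";           # listen on all interfaces
      port = 11434;               # ollama's default port
      package = pkgs.ollama-cuda; # explicit CUDA build
    };

The commit swaps the module-level acceleration = "cuda" (now commented out) for pinning pkgs.ollama-cuda directly; as I understand the NixOS module, both routes end in a CUDA-enabled build, but the explicit package makes the choice independent of the module's selection logic. One likely typo worth flagging in the unchanged context: LD_LIBRARY_PATH = "run/opengl-driver/lib" is missing its leading slash; the conventional NixOS driver path is /run/opengl-driver/lib.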