feat: Disable ollama-model-loader
@@ -78,6 +78,7 @@
    "nvidia"
    "nvidia_modeset"
    "nvidia_drm"
    "dm-snapshot"
  ];
  boot.kernelParams = [
    "intel_iommu=on"
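For context, the first hunk sits inside the host's kernel configuration. A minimal sketch of the surrounding block, assuming the module list shown above belongs to boot.kernelModules (the attribute name is cut off above the hunk) and that the kernelParams list closes right after:

{
  boot.kernelModules = [
    "nvidia"          # NVIDIA kernel driver, needed for CUDA acceleration
    "nvidia_modeset"
    "nvidia_drm"
    "dm-snapshot"     # device-mapper snapshot support
  ];
  boot.kernelParams = [
    "intel_iommu=on"  # enable the Intel IOMMU (e.g. for PCI passthrough)
  ];
}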
@@ -8,7 +8,7 @@
  ollama = {
    enable = true;
    host = "0.0.0.0";
-   loadModels = ["deepseek-r1:7b" "deepseek-r1:14b" "RobinBially/nomic-embed-text-8k" "qwen3:8b" "qwen3:14b"];
+   # loadModels = ["deepseek-r1:7b" "deepseek-r1:14b" "RobinBially/nomic-embed-text-8k" "qwen3:8b" "qwen3:14b"];
    port = 11434;
    acceleration = "cuda";
    environmentVariables = {
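Put together, a sketch of the resulting Ollama service definition after this change, assuming the attrset above sits under services in a standard NixOS module (the closing braces and the body of environmentVariables are not part of the hunk):

{
  services.ollama = {
    enable = true;
    host = "0.0.0.0";       # listen on all interfaces
    port = 11434;           # default Ollama port
    acceleration = "cuda";  # use the NVIDIA GPU
    # Model preloading disabled by this commit; models can still be pulled
    # manually, e.g. `ollama pull deepseek-r1:7b`.
    # loadModels = ["deepseek-r1:7b" "deepseek-r1:14b" "RobinBially/nomic-embed-text-8k" "qwen3:8b" "qwen3:14b"];
    environmentVariables = {
      # ...
    };
  };
}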