feat(nixos): enable immich ml; bind localhost; add ollama env
@@ -1,6 +1,6 @@
 {device, ...}: {
   imports = [
-    # ./immich-machine-learning.nix
+    ./immich-machine-learning.nix
   ];
   virtualisation = {
     docker.enable = true;
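The first hunk is the host module that wires the service in: the previously commented import of ./immich-machine-learning.nix is enabled. For reference, a minimal sketch of the file after the change, assuming nothing beyond the context lines shown above (the closing braces are implied):

# Host module after this commit (sketch; only the visible lines are certain).
{device, ...}: {
  imports = [
    ./immich-machine-learning.nix # enabled by this commit
  ];

  virtualisation = {
    docker.enable = true; # backend required by oci-containers below
  };
}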
@@ -2,12 +2,11 @@
   port = 3003;
 in {
   virtualisation.oci-containers = {
     backend = "docker";
     containers = {
       immich-machine-learning = {
         image = "ghcr.io/immich-app/immich-machine-learning:v${pkgs.immich.version}-cuda";
         ports = [
-          "0.0.0.0:${toString port}:3003"
+          "127.0.0.1:${toString port}:3003"
         ];
         volumes = [
           "model-cache:/cache"
@@ -20,7 +19,4 @@ in {
     nvidia-docker
     nvidia-container-toolkit
   ];
-  # services.caddy.virtualHosts."ml.ryu.darksailor.dev".extraConfig = ''
-  #   reverse_proxy localhost:${toString port}
-  # '';
 }
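The second and third hunks edit immich-machine-learning.nix itself: the published port moves from a wildcard 0.0.0.0 bind to loopback, and a dead, commented-out Caddy block is deleted. Assembled from the visible lines, the module plausibly reads as the sketch below; the `pkgs` module argument and the `environment.systemPackages` attribute around the NVIDIA packages are assumptions, since the diff shows the package names but not the option they belong to:

# immich-machine-learning.nix after this commit (sketch from the visible hunks).
{pkgs, ...}: let
  port = 3003;
in {
  virtualisation.oci-containers = {
    backend = "docker";
    containers = {
      immich-machine-learning = {
        # Container tag tracks the nixpkgs immich version; CUDA variant for GPU inference.
        image = "ghcr.io/immich-app/immich-machine-learning:v${pkgs.immich.version}-cuda";
        ports = [
          # Loopback-only bind: other hosts can no longer reach the port directly.
          "127.0.0.1:${toString port}:3003"
        ];
        volumes = [
          "model-cache:/cache" # persist downloaded models across container restarts
        ];
      };
    };
  };

  # Assumed placement; the diff shows only the package names and a closing "];".
  environment.systemPackages = with pkgs; [
    nvidia-docker
    nvidia-container-toolkit
  ];
}

If the service should stay reachable from outside after the loopback bind, the comment block this commit deletes suggests the intended pattern, a Caddy virtual host proxying to the local port:

# Verbatim from the deleted comment; uncommented here for illustration.
services.caddy.virtualHosts."ml.ryu.darksailor.dev".extraConfig = ''
  reverse_proxy localhost:${toString port}
'';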
@@ -25,6 +25,8 @@
     OLLAMA_LLM_LIBRARY = "cuda";
     LD_LIBRARY_PATH = "run/opengl-driver/lib";
     HTTP_PROXY = "https://ollama.darksailor.dev";
+    OLLAMA_CONTEXT_LENGTH = "32000";
+    OLLAMA_KEEP_ALIVE = "30m";
   };
   package = pkgs.ollama-cuda;
 };
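The last hunk adds two environment variables to the Ollama service, matching the "add ollama env" part of the commit message. A sketch of the surrounding block, assuming the standard services.ollama NixOS module; only the environment entries and the package line are visible in the diff, the rest is assumed:

# Ollama service after this commit (sketch; enclosing options are assumed).
services.ollama = {
  enable = true; # assumed
  environmentVariables = {
    OLLAMA_LLM_LIBRARY = "cuda";
    # NB: committed without a leading slash; NixOS exposes GPU userspace
    # libraries at /run/opengl-driver/lib.
    LD_LIBRARY_PATH = "run/opengl-driver/lib";
    HTTP_PROXY = "https://ollama.darksailor.dev";
    OLLAMA_CONTEXT_LENGTH = "32000"; # default context window of ~32k tokens
    OLLAMA_KEEP_ALIVE = "30m"; # keep models loaded 30 min after the last request
  };
  package = pkgs.ollama-cuda; # CUDA-enabled ollama build
};

OLLAMA_KEEP_ALIVE trades VRAM for latency: a model stays resident between requests instead of being reloaded each time, which matters for large CUDA models.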