Compare commits

...

6 Commits

Author SHA1 Message Date
15853f918a chore(services): delete command-runner.nix and its entry in default.nix
Some checks failed
Flake checker / Build Nix targets (push) Has been cancelled
2026-02-01 22:01:37 +05:30
22d619b3ce refactor(monitoring): dashboard provisioning, sops secret, exportarr off 2026-02-01 21:09:52 +05:30
d5396917c3 chore: fmt 2026-02-01 19:52:48 +05:30
a7d4a88741 feat: Update ironbar 2026-02-01 19:52:06 +05:30
2f901052f1 feat: Added grafana 2026-02-01 19:51:51 +05:30
56f1ba896c feat: Added opencode.nvim 2026-02-01 18:59:00 +05:30
13 changed files with 761 additions and 64 deletions

View File

@@ -13,13 +13,14 @@
"${device.monitors.secondary}" = { "${device.monitors.secondary}" = {
position = "bottom"; position = "bottom";
start = [ start = [
{ {type = "tray";}
type = "launcher"; # {
favourites = ["firefox" "discord"]; # type = "launcher";
show_names = false; # favourites = ["firefox" "discord"];
show_icons = true; # show_names = false;
} # show_icons = true;
{type = "focused";} # }
# {type = "focused";}
]; ];
end = [ end = [
{ {
@@ -53,25 +54,24 @@
# networks= 3; # networks= 3;
}; };
} }
{type = "tray";}
]; ];
start = [ start = [
{ {
type = "workspaces"; type = "workspaces";
name_map = { name_map = {
"1" = "icon:foot"; "1" = "icon:kitty";
"2" = "icon:code"; "2" = "icon:code";
"3" = "icon:firefox"; "3" = "icon:firefox";
"4" = "icon:slack"; "4" = "icon:slack";
"5" = "icon:steam"; # "5" = "icon:steam";
"6" = "icon:foot"; # "6" = "icon:foot";
"7" = "icon:foot"; # "7" = "icon:foot";
"8" = "icon:firefox"; # "8" = "icon:firefox";
"9" = "icon:discord"; # "9" = "icon:discord";
"10" = "icon:spotify"; # "10" = "icon:spotify";
}; };
favorites = ["1" "2" "3" "4" "5" "6" "7" "8" "9" "10"]; favorites = ["1" "2" "3" "4" "5" "6" "7" "8" "9" "10"];
all_monitors = true; all_monitors = false;
} }
]; ];
}; };

View File

@@ -20,6 +20,7 @@
}; };
in { in {
opts = { opts = {
autoread = true;
completeopt = "menu,menuone,popup,noselect"; completeopt = "menu,menuone,popup,noselect";
expandtab = true; expandtab = true;
foldenable = true; foldenable = true;
@@ -106,7 +107,7 @@ in {
"<C-q>x" = "[[<cmd>tabclose<cr>]]"; "<C-q>x" = "[[<cmd>tabclose<cr>]]";
"<C-q>n" = "[[<cmd>tabnext<cr>]]"; "<C-q>n" = "[[<cmd>tabnext<cr>]]";
"<C-q>p" = "[[<cmd>tabprevious<cr>]]"; "<C-q>p" = "[[<cmd>tabprevious<cr>]]";
"<c-.>" = "require('sidekick.cli').toggle"; "<C-.>" = "require('opencode').toggle";
}; };
terminal = { terminal = {
"<C-\\>" = "require('FTerm').toggle"; "<C-\\>" = "require('FTerm').toggle";
@@ -177,16 +178,19 @@ in {
trouble.enable = true; trouble.enable = true;
ts-context-commentstring.enable = true; ts-context-commentstring.enable = true;
which-key.enable = true; which-key.enable = true;
opencode = {
sidekick = {
enable = true; enable = true;
settings = {
nes = {
enabled = false;
};
};
}; };
# sidekick = {
# enable = true;
# settings = {
# nes = {
# enabled = false;
# };
# };
# };
conform-nvim = { conform-nvim = {
enable = true; enable = true;
settings = { settings = {
@@ -857,5 +861,6 @@ in {
pkgs.lua pkgs.lua
pkgs.ripgrep pkgs.ripgrep
pkgs.nodejs-slim pkgs.nodejs-slim
pkgs.lsof
]; ];
} }

View File

@@ -14,6 +14,11 @@
services = { services = {
caddy = { caddy = {
enable = true; enable = true;
globalConfig = ''
servers {
metrics
}
'';
extraConfig = '' extraConfig = ''
(cloudflare) { (cloudflare) {
tls { tls {

View File

@@ -1,27 +0,0 @@
{
config,
lib,
pkgs,
...
}: {
# services.command-runner = {
# enable = false;
# port = 5599;
# user = "servius";
# commands = let
# hyprctl = "${pkgs.hyprland}/bin/hyprctl";
# in
# {
# "display_on" = [hyprctl "-i" "{instance}" "dispatch" "dpms" "on"];
# "display_off" = [hyprctl "-i" "{instance}" "dispatch" "dpms" "off"];
# "display_toggle" = [hyprctl "-i" "{instance}" "dispatch" "dpms" "toggle"];
# "display_status" = [hyprctl "-i" "{instance}" "-j" "monitors"];
# "hyprland_instance" = [hyprctl "-j" "instances"];
# }
# // (builtins.foldl' (acc: elem: acc // elem) {} (lib.map (name: {
# "display_on_${name}" = [hyprctl "-i" "{instance}" "dispatch" "dpms" "on" name];
# "display_off_${name}" = [hyprctl "-i" "{instance}" "dispatch" "dpms" "off" name];
# "display_toggle_${name}" = [hyprctl "-i" "{instance}" "dispatch" "dpms" "toggle" name];
# }) ["HDMI-A-1" "DP-3" "DP-1"]));
# };
}

View File

@@ -4,7 +4,6 @@
# ./zerotier.nix # ./zerotier.nix
# ./dnscrypt.nix # ./dnscrypt.nix
./caddy.nix ./caddy.nix
./command-runner.nix
./dualsense.nix ./dualsense.nix
./fprintd.nix ./fprintd.nix
./fwupd.nix ./fwupd.nix

View File

@@ -1,11 +1,28 @@
{...}: { {pkgs, ...}: let
# Port configurations
ports = {
# System exporters
node = 9100;
systemd = 9558;
process = 9256;
nvidiagpu = 9835;
# Infrastructure exporters
cadvisor = 8080;
caddy = 2019;
};
in {
services = { services = {
prometheus = { prometheus = {
exporters = { exporters = {
systemd = { systemd = {
enable = true; enable = true;
port = ports.systemd;
};
nvidia-gpu = {
enable = true;
port = ports.nvidiagpu;
}; };
nvidia-gpu.enable = true;
node = { node = {
enable = true; enable = true;
enabledCollectors = [ enabledCollectors = [
@@ -19,7 +36,10 @@
"time" "time"
"uname" "uname"
"vmstat" "vmstat"
"diskstats"
"cpu"
]; ];
port = ports.node;
}; };
process = { process = {
enable = true; enable = true;
@@ -33,4 +53,34 @@
}; };
}; };
}; };
# Docker cAdvisor for container metrics
virtualisation.oci-containers.containers.cadvisor = {
image = "gcr.io/cadvisor/cadvisor:v0.49.1";
ports = ["${toString ports.cadvisor}:8080"];
volumes = [
"/:/rootfs:ro"
"/var/run:/var/run:ro"
"/sys:/sys:ro"
"/var/lib/docker/:/var/lib/docker:ro"
"/dev/disk/:/dev/disk:ro"
];
extraOptions = [
"--privileged"
"--device=/dev/kmsg"
];
};
# Open firewall ports for Prometheus exporters
networking.firewall = {
# Allow from Tailscale network
interfaces."tailscale0".allowedTCPPorts = [
ports.node
ports.systemd
ports.process
ports.nvidiagpu
ports.cadvisor
ports.caddy
];
};
} }

View File

@@ -2,6 +2,11 @@
services = { services = {
caddy = { caddy = {
enable = true; enable = true;
globalConfig = ''
servers {
metrics
}
'';
extraConfig = '' extraConfig = ''
(auth) { (auth) {
forward_auth localhost:5555 { forward_auth localhost:5555 {

View File

@@ -3,7 +3,7 @@
./games ./games
# ./headscale.nix # ./headscale.nix
./llms.nix ./llms.nix
# ./monitoring.nix ./monitoring.nix
# ./paperless.nix # ./paperless.nix
./navidrome.nix ./navidrome.nix
./shitpost.nix ./shitpost.nix

View File

@@ -63,6 +63,10 @@
# LFS_START_SERVER = true; # LFS_START_SERVER = true;
LFS_ALLOW_PURE_SSH = true; LFS_ALLOW_PURE_SSH = true;
}; };
metrics = {
ENABLED = true;
TOKEN = "";
};
oauth2_client = { oauth2_client = {
ENABLE_AUTO_REGISTRATION = true; ENABLE_AUTO_REGISTRATION = true;
ACCOUNT_LINKING = "auto"; ACCOUNT_LINKING = "auto";

View File

@@ -87,6 +87,14 @@
siteMonitor = "https://git.darksailor.dev"; siteMonitor = "https://git.darksailor.dev";
}; };
} }
{
"Grafana" = {
icon = "grafana.png";
description = "Grafana Monitoring & Metrics";
href = "https://grafana.darksailor.dev";
siteMonitor = "https://grafana.darksailor.dev";
};
}
{ {
"Nextcloud" = { "Nextcloud" = {
icon = "nextcloud.png"; icon = "nextcloud.png";

View File

@@ -0,0 +1,483 @@
{
config,
pkgs,
lib,
...
}: let
# Port configurations
ports = {
grafana = 3001; # Changed from 3000 to avoid clash with Gitea
prometheus = 9090;
# System exporters
node = 9100;
systemd = 9558;
process = 9256;
# Infrastructure exporters
postgres = 9187;
redis = 9121;
cadvisor = 8080;
# Application exporters
caddy = 2019;
};
in {
  # Grafana configuration with Authelia integration: Grafana's own login is
  # disabled and the user identity is taken from the REMOTE-USER header that
  # the authenticating reverse proxy injects.
  services.grafana = {
    enable = true;
    settings = {
      server = {
        # Listen on loopback only; the Caddy vhost below proxies to this port.
        http_addr = "127.0.0.1";
        http_port = ports.grafana;
        domain = "grafana.darksailor.dev";
        root_url = "https://grafana.darksailor.dev";
      };
      # Disable Grafana's own auth since we use Authelia
      auth.disable_login_form = true;
      "auth.basic".enabled = false;
      "auth.anonymous".enabled = false;
      # Trust the proxy-supplied header as the authenticated username and
      # create accounts on first sight.
      "auth.proxy" = {
        enabled = true;
        header_name = "REMOTE-USER";
        header_property = "username";
        auto_sign_up = true;
      };
      users = {
        allow_sign_up = false;
        auto_assign_org = true;
        # NOTE(review): every proxy-authenticated user is auto-created as an
        # org Admin — confirm this is intended for all Authelia accounts.
        auto_assign_org_role = "Admin";
      };
      security = {
        disable_gravatar = true;
        cookie_secure = true;
      };
      # Opt out of phone-home telemetry and update checks.
      analytics = {
        reporting_enabled = false;
        check_for_updates = false;
      };
    };
    provision = {
      enable = true;
      # Single local Prometheus datasource, matching the scrape interval used
      # by the Prometheus config in this module.
      datasources.settings.datasources = [
        {
          name = "Prometheus";
          type = "prometheus";
          access = "proxy";
          url = "http://localhost:${toString ports.prometheus}";
          isDefault = true;
          jsonData = {
            timeInterval = "30s";
          };
        }
      ];
      # Provision popular community dashboards
      dashboards = {
        settings = {
          apiVersion = 1;
          providers = [
            {
              name = "default";
              orgId = 1;
              folder = "";
              type = "file";
              disableDeletion = false;
              updateIntervalSeconds = 10;
              allowUiUpdates = true;
              # Directory of dashboard JSON files; this module populates it
              # via systemd.tmpfiles symlinks into the Nix store.
              options.path = "/var/lib/grafana/dashboards";
            }
          ];
        };
      };
    };
  };
  # Caddy virtual host for Grafana with Authelia.
  # `import auth` pulls in the forward_auth snippet defined in the shared
  # Caddy config; authenticated traffic is proxied to the local Grafana port.
  services.caddy.virtualHosts."grafana.darksailor.dev".extraConfig = ''
    import auth
    reverse_proxy localhost:${toString ports.grafana}
  '';
  # Central Prometheus server: scrapes the local (tako) exporters defined
  # below plus remote machines (tsuba, ryu) addressed by tailnet hostname.
  services.prometheus = {
    enable = true;
    port = ports.prometheus;
    # Retention settings (90 days)
    retentionTime = "90d";
    # Global scrape config
    globalConfig = {
      scrape_interval = "30s";
      evaluation_interval = "30s";
    };
    # System exporters for tako
    exporters = {
      node = {
        enable = true;
        port = ports.node;
        enabledCollectors = [
          "systemd"
          "textfile"
          "filesystem"
          "loadavg"
          "meminfo"
          "netdev"
          "netstat"
          "stat"
          "time"
          "uname"
          "vmstat"
          "diskstats"
          "cpu"
        ];
      };
      systemd = {
        enable = true;
        port = ports.systemd;
      };
      # NOTE(review): no explicit port set here; the scrape target below uses
      # ports.process, so this relies on the module default matching it —
      # confirm.
      process = {
        enable = true;
        settings.process_names = [
          {
            # One metric group per command name, matching every process.
            name = "{{.Comm}}";
            cmdline = [".*"];
          }
        ];
      };
      postgres = {
        enable = true;
        port = ports.postgres;
        runAsLocalSuperUser = true;
      };
      redis = {
        enable = true;
        port = ports.redis;
      };
    };
    # Scrape configurations for all targets
    scrapeConfigs = [
      # System metrics - tako (local)
      {
        job_name = "tako-system";
        static_configs = [
          {
            targets = [
              "localhost:${toString ports.node}"
              "localhost:${toString ports.systemd}"
              "localhost:${toString ports.process}"
            ];
            labels = {
              instance = "tako";
              machine = "tako";
              role = "server";
            };
          }
        ];
      }
      # Infrastructure - tako
      {
        job_name = "tako-infrastructure";
        static_configs = [
          {
            targets = [
              "localhost:${toString ports.postgres}"
              "localhost:${toString ports.redis}"
              "localhost:${toString ports.cadvisor}"
            ];
            labels = {
              instance = "tako";
              machine = "tako";
            };
          }
        ];
      }
      # Caddy metrics - tako
      {
        job_name = "tako-caddy";
        static_configs = [
          {
            targets = ["localhost:${toString ports.caddy}"];
            labels = {
              instance = "tako";
              machine = "tako";
              service = "caddy";
            };
          }
        ];
      }
      # Application metrics - tako
      {
        job_name = "tako-applications";
        static_configs = [
          {
            targets = [
              "localhost:3000" # gitea
              "localhost:5555" # authelia (if metrics enabled)
            ];
            labels = {
              instance = "tako";
              machine = "tako";
            };
          }
        ];
      }
      # System metrics - tsuba (remote via Tailscale)
      {
        job_name = "tsuba-system";
        static_configs = [
          {
            targets = [
              "tsuba:9100"
              "tsuba:9558"
              "tsuba:9256"
            ];
            labels = {
              instance = "tsuba";
              machine = "tsuba";
              role = "server";
            };
          }
        ];
      }
      # Infrastructure - tsuba
      {
        job_name = "tsuba-infrastructure";
        static_configs = [
          {
            targets = [
              "tsuba:8080" # cadvisor
              "tsuba:2019" # caddy
            ];
            labels = {
              instance = "tsuba";
              machine = "tsuba";
            };
          }
        ];
      }
      # Media services - tsuba
      {
        job_name = "tsuba-media";
        static_configs = [
          {
            targets = [
              "tsuba:8096" # jellyfin (built-in /metrics endpoint)
              "tsuba:8123" # homeassistant (configure prometheus integration)
              "tsuba:9617" # pihole-exporter
            ];
            labels = {
              instance = "tsuba";
              machine = "tsuba";
            };
          }
        ];
        metrics_path = "/metrics";
        # Per-target path overrides: Home Assistant exposes its metrics at
        # /api/prometheus instead of the default /metrics.
        relabel_configs = [
          {
            source_labels = ["__address__"];
            regex = "tsuba:8096";
            target_label = "__metrics_path__";
            replacement = "/metrics";
          }
          {
            source_labels = ["__address__"];
            regex = "tsuba:8123";
            target_label = "__metrics_path__";
            replacement = "/api/prometheus";
          }
        ];
      }
      # Servarr stack - tsuba (exportarr)
      {
        job_name = "tsuba-servarr";
        static_configs = [
          {
            targets = [
              "tsuba:9707" # sonarr
              "tsuba:9708" # radarr
              "tsuba:9709" # lidarr
              "tsuba:9710" # bazarr
            ];
            labels = {
              instance = "tsuba";
              machine = "tsuba";
              stack = "servarr";
            };
          }
        ];
      }
      # Deluge - tsuba
      {
        job_name = "tsuba-deluge";
        static_configs = [
          {
            targets = ["tsuba:9354"];
            labels = {
              instance = "tsuba";
              machine = "tsuba";
              service = "deluge";
            };
          }
        ];
      }
      # System metrics - ryu (remote via Tailscale)
      {
        job_name = "ryu-system";
        static_configs = [
          {
            targets = [
              "ryu:9100"
              "ryu:9558"
              "ryu:9256"
              "ryu:9835" # nvidia-gpu
            ];
            labels = {
              instance = "ryu";
              machine = "ryu";
              role = "desktop";
            };
          }
        ];
      }
      # Infrastructure - ryu
      {
        job_name = "ryu-infrastructure";
        static_configs = [
          {
            targets = [
              "ryu:8080" # cadvisor
              "ryu:2019" # caddy
            ];
            labels = {
              instance = "ryu";
              machine = "ryu";
            };
          }
        ];
      }
    ];
  };
  # Docker cAdvisor for container metrics.
  # Runs as an OCI container; the published port is bound to 127.0.0.1, so
  # only the local Prometheus instance can scrape it.
  virtualisation.oci-containers.containers.cadvisor = {
    image = "gcr.io/cadvisor/cadvisor:v0.49.1";
    ports = ["127.0.0.1:${toString ports.cadvisor}:8080"];
    # Read-only host mounts cAdvisor uses to inspect the system and Docker
    # state.
    volumes = [
      "/:/rootfs:ro"
      "/var/run:/var/run:ro"
      "/sys:/sys:ro"
      "/var/lib/docker/:/var/lib/docker:ro"
      "/dev/disk/:/dev/disk:ro"
    ];
    extraOptions = [
      "--privileged"
      "--device=/dev/kmsg"
    ];
  };
  # Link dashboard files from Nix store to Grafana's expected location.
  # Dashboard JSON is fetched pinned-by-hash from grafana.com; several are
  # post-processed with sed to replace the DS_PROMETHEUS template variable
  # with the provisioned "Prometheus" datasource name.
  systemd.tmpfiles.rules = let
    # Define dashboard files with proper hashes
    nodeExporterFull = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/1860/revisions/37/download";
      sha256 = "0qza4j8lywrj08bqbww52dgh2p2b9rkhq5p313g72i57lrlkacfl";
    };
    nvidiaDashboardRaw = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/14574/revisions/9/download";
      sha256 = "170ijap5i99sapkxlf3k0lnvwmb6g9jkk7q66nwjwswkj2a7rqbr";
    };
    # Fix NVIDIA dashboard to use our Prometheus datasource
    nvidiaDashboard = pkgs.runCommand "nvidia-gpu-fixed.json" {} ''
      ${pkgs.gnused}/bin/sed 's/\''${DS_PROMETHEUS}/Prometheus/g' ${nvidiaDashboardRaw} > $out
    '';
    postgresqlDashboardRaw = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/9628/revisions/7/download";
      sha256 = "0xmk68kqb9b8aspjj2f8wxv2mxiqk9k3xs0yal4szmzbv65c6k66";
    };
    # Fix PostgreSQL dashboard to use our Prometheus datasource
    postgresqlDashboard = pkgs.runCommand "postgresql-fixed.json" {} ''
      ${pkgs.gnused}/bin/sed 's/\''${DS_PROMETHEUS}/Prometheus/g' ${postgresqlDashboardRaw} > $out
    '';
    redisDashboard = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/11835/revisions/1/download";
      sha256 = "15lbn4i8j5hiypl4dsg0d72jgrgjwpagkf5kcwx66gyps17jcrxx";
    };
    dockerDashboardRaw = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/193/revisions/1/download";
      sha256 = "1lxbbl91fh0yfh8x53205b7nw5ivghlpfb0m308z2p6fzvz2iq2m";
    };
    # Fix Docker dashboard to use our Prometheus datasource
    dockerDashboard = pkgs.runCommand "docker-cadvisor-fixed.json" {} ''
      ${pkgs.gnused}/bin/sed 's/\''${DS_PROMETHEUS}/Prometheus/g' ${dockerDashboardRaw} > $out
    '';
    caddyDashboardRaw = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/14280/revisions/1/download";
      sha256 = "0j3q68cq1nj8gcxkqz5h1kn1ds5kgq4jlkw73xp6yc88mbm5nyh4";
    };
    # Fix Caddy dashboard to use our Prometheus datasource
    caddyDashboard = pkgs.runCommand "caddy-fixed.json" {} ''
      ${pkgs.gnused}/bin/sed 's/\''${DS_PROMETHEUS}/Prometheus/g' ${caddyDashboardRaw} > $out
    '';
    piholeDashboardRaw = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/10176/revisions/3/download";
      sha256 = "18f8w3l5k178agipfbimg29lkf2i32xynin1g1v5abiac3ahj7ih";
    };
    # Fix Pi-hole dashboard to use our Prometheus datasource
    piholeDashboard = pkgs.runCommand "pihole-fixed.json" {} ''
      ${pkgs.gnused}/bin/sed 's/\''${DS_PROMETHEUS}/Prometheus/g' ${piholeDashboardRaw} > $out
    '';
  in [
    # Create the dashboards directory owned by grafana, then force-create
    # (L+) one symlink per dashboard pointing into the Nix store.
    "d /var/lib/grafana/dashboards 0755 grafana grafana -"
    "L+ /var/lib/grafana/dashboards/node-exporter-full.json - - - - ${nodeExporterFull}"
    "L+ /var/lib/grafana/dashboards/nvidia-gpu.json - - - - ${nvidiaDashboard}"
    "L+ /var/lib/grafana/dashboards/postgresql.json - - - - ${postgresqlDashboard}"
    "L+ /var/lib/grafana/dashboards/redis.json - - - - ${redisDashboard}"
    "L+ /var/lib/grafana/dashboards/docker-cadvisor.json - - - - ${dockerDashboard}"
    "L+ /var/lib/grafana/dashboards/caddy.json - - - - ${caddyDashboard}"
    "L+ /var/lib/grafana/dashboards/pihole.json - - - - ${piholeDashboard}"
  ];
  # Open firewall ports for Prometheus to scrape exporters
  networking.firewall = {
    # allowedTCPPorts = [
    #   ports.node
    #   ports.systemd
    #   ports.process
    # ];
    # Allow Prometheus and Grafana access from Tailscale network
    # NOTE(review): ports.cadvisor is opened here, but the cAdvisor container
    # in this file binds its published port to 127.0.0.1, so this rule cannot
    # expose it — confirm whether it is intentional or leftover.
    interfaces."tailscale0".allowedTCPPorts = [
      ports.prometheus
      ports.grafana
      ports.node
      ports.systemd
      ports.process
      ports.postgres
      ports.redis
      ports.cadvisor
    ];
  };
}

View File

@@ -14,6 +14,11 @@
services = { services = {
caddy = { caddy = {
enable = true; enable = true;
globalConfig = ''
servers {
metrics
}
'';
extraConfig = '' extraConfig = ''
(cloudflare) { (cloudflare) {
tls { tls {

View File

@@ -1,10 +1,40 @@
{...}: { {
pkgs,
config,
...
}: let
# Port configurations
ports = {
# System exporters
node = 9100;
systemd = 9558;
process = 9256;
# Infrastructure exporters
cadvisor = 8080;
caddy = 2019;
# Media exporters
jellyfin = 9220;
pihole = 9617;
# Servarr exporters (via exportarr)
sonarr = 9707;
radarr = 9708;
lidarr = 9709;
bazarr = 9710;
# Torrent
deluge = 9354;
};
in {
sops.secrets."pihole/password" = {};
services = { services = {
prometheus = { prometheus = {
exporters = { exporters = {
systemd = { systemd = {
enable = true; enable = true;
port = 9558; port = ports.systemd;
}; };
node = { node = {
enable = true; enable = true;
@@ -19,8 +49,10 @@
"time" "time"
"uname" "uname"
"vmstat" "vmstat"
"diskstats"
"cpu"
]; ];
port = 9100; port = ports.node;
}; };
process = { process = {
enable = true; enable = true;
@@ -35,14 +67,142 @@
}; };
}; };
# Docker cAdvisor for container metrics
virtualisation.oci-containers.containers.cadvisor = {
image = "gcr.io/cadvisor/cadvisor:v0.49.1";
ports = ["${toString ports.cadvisor}:8080"];
volumes = [
"/:/rootfs:ro"
"/var/run:/var/run:ro"
"/sys:/sys:ro"
"/var/lib/docker/:/var/lib/docker:ro"
"/dev/disk/:/dev/disk:ro"
];
extraOptions = [
"--privileged"
"--device=/dev/kmsg"
];
};
# Jellyfin - use built-in metrics endpoint at http://localhost:8096/metrics
# No separate exporter needed - Prometheus will scrape directly
# Home Assistant - has built-in Prometheus integration
# Configure in Home Assistant configuration.yaml:
# prometheus:
# namespace: homeassistant
# Pi-hole exporter
# Uses sops-managed API token for authentication with Pi-hole v6
# To set the token: edit secrets/secrets.yaml and replace the placeholder at pihole.api_token
systemd.services.pihole-exporter = {
description = "Pi-hole Prometheus Exporter";
wantedBy = ["multi-user.target"];
after = ["network.target" "sops-nix.service"];
serviceConfig = {
Type = "simple";
DynamicUser = true;
# Load API token from sops secret file
LoadCredential = "ppassword:${config.sops.secrets."pihole/password".path}";
ExecStart = ''
${pkgs.bash}/bin/bash -c '${pkgs.prometheus-pihole-exporter}/bin/pihole-exporter \
-pihole_hostname pihole.darksailor.dev \
-pihole_port 8053 \
-port ${toString ports.pihole} \
-pihole_password $(cat ''${CREDENTIALS_DIRECTORY}/ppassword)'
'';
Restart = "on-failure";
};
};
# Exportarr for Sonarr
# Disabled: needs API key configuration
# systemd.services.exportarr-sonarr = {
# description = "Exportarr Prometheus Exporter for Sonarr";
# wantedBy = ["multi-user.target"];
# after = ["network.target"];
# serviceConfig = {
# Type = "simple";
# DynamicUser = true;
# ExecStart = "${pkgs.exportarr}/bin/exportarr sonarr --port ${toString ports.sonarr} --url http://localhost:8989";
# Restart = "on-failure";
# };
# };
# Exportarr for Radarr
# Disabled: needs API key configuration
# systemd.services.exportarr-radarr = {
# description = "Exportarr Prometheus Exporter for Radarr";
# wantedBy = ["multi-user.target"];
# after = ["network.target"];
# serviceConfig = {
# Type = "simple";
# DynamicUser = true;
# ExecStart = "${pkgs.exportarr}/bin/exportarr radarr --port ${toString ports.radarr} --url http://localhost:7878";
# Restart = "on-failure";
# };
# };
# Exportarr for Lidarr
# Disabled: needs API key configuration
# systemd.services.exportarr-lidarr = {
# description = "Exportarr Prometheus Exporter for Lidarr";
# wantedBy = ["multi-user.target"];
# after = ["network.target"];
# serviceConfig = {
# Type = "simple";
# DynamicUser = true;
# ExecStart = "${pkgs.exportarr}/bin/exportarr lidarr --port ${toString ports.lidarr} --url http://localhost:8686";
# Restart = "on-failure";
# };
# };
# Exportarr for Bazarr
# Disabled: needs API key configuration
# systemd.services.exportarr-bazarr = {
# description = "Exportarr Prometheus Exporter for Bazarr";
# wantedBy = ["multi-user.target"];
# after = ["network.target"];
# serviceConfig = {
# Type = "simple";
# DynamicUser = true;
# ExecStart = "${pkgs.exportarr}/bin/exportarr bazarr --port ${toString ports.bazarr} --url http://localhost:6767";
# Restart = "on-failure";
# };
# };
# Deluge exporter
systemd.services.deluge-exporter = {
description = "Deluge Prometheus Exporter";
wantedBy = ["multi-user.target"];
after = ["network.target"];
serviceConfig = {
Type = "simple";
DynamicUser = true;
ExecStart = "${pkgs.prometheus-deluge-exporter}/bin/deluge-exporter localhost:58846 --addr :${toString ports.deluge}";
Restart = "on-failure";
};
};
# Samba exporter - using a simple script to expose smbstatus metrics
# For now, we'll skip this and can add later if needed
# Open firewall ports for Prometheus exporters # Open firewall ports for Prometheus exporters
networking.firewall = { networking.firewall = {
allowedTCPPorts = [ # Allow from Tailscale network
9100 # node exporter interfaces."tailscale0".allowedTCPPorts = [
9256 # process exporter ports.node
9558 # systemd exporter ports.systemd
9134 # zfs exporter ports.process
9633 # smartctl exporter ports.cadvisor
ports.caddy
ports.jellyfin
ports.pihole
# ports.sonarr # Disabled - needs API key
# ports.radarr # Disabled - needs API key
# ports.lidarr # Disabled - needs API key
# ports.bazarr # Disabled - needs API key
ports.deluge
]; ];
}; };
} }