Compare commits

6 Commits: 9607e8f3a4 ... 15853f918a

| Author | SHA1 | Date |
|---|---|---|
| | 15853f918a | |
| | 22d619b3ce | |
| | d5396917c3 | |
| | a7d4a88741 | |
| | 2f901052f1 | |
| | 56f1ba896c | |
@@ -13,13 +13,14 @@
      "${device.monitors.secondary}" = {
        position = "bottom";
        start = [
          {
            type = "launcher";
            favourites = ["firefox" "discord"];
            show_names = false;
            show_icons = true;
          }
          {type = "focused";}
          {type = "tray";}
          # {
          #   type = "launcher";
          #   favourites = ["firefox" "discord"];
          #   show_names = false;
          #   show_icons = true;
          # }
          # {type = "focused";}
        ];
        end = [
          {
@@ -53,25 +54,24 @@
            #   networks= 3;
            };
          }
          {type = "tray";}
        ];
        start = [
          {
            type = "workspaces";
            name_map = {
              "1" = "icon:foot";
              "1" = "icon:kitty";
              "2" = "icon:code";
              "3" = "icon:firefox";
              "4" = "icon:slack";
              "5" = "icon:steam";
              "6" = "icon:foot";
              "7" = "icon:foot";
              "8" = "icon:firefox";
              "9" = "icon:discord";
              "10" = "icon:spotify";
              # "5" = "icon:steam";
              # "6" = "icon:foot";
              # "7" = "icon:foot";
              # "8" = "icon:firefox";
              # "9" = "icon:discord";
              # "10" = "icon:spotify";
            };
            favorites = ["1" "2" "3" "4" "5" "6" "7" "8" "9" "10"];
            all_monitors = true;
            all_monitors = false;
          }
        ];
      };

@@ -20,6 +20,7 @@
  };
in {
  opts = {
    autoread = true;
    completeopt = "menu,menuone,popup,noselect";
    expandtab = true;
    foldenable = true;
@@ -106,7 +107,7 @@ in {
    "<C-q>x" = "[[<cmd>tabclose<cr>]]";
    "<C-q>n" = "[[<cmd>tabnext<cr>]]";
    "<C-q>p" = "[[<cmd>tabprevious<cr>]]";
    "<c-.>" = "require('sidekick.cli').toggle";
    "<C-.>" = "require('opencode').toggle";
  };
  terminal = {
    "<C-\\>" = "require('FTerm').toggle";
@@ -177,16 +178,19 @@ in {
    trouble.enable = true;
    ts-context-commentstring.enable = true;
    which-key.enable = true;

    sidekick = {
      opencode = {
        enable = true;
        settings = {
          nes = {
            enabled = false;
          };
        };
      };

    # sidekick = {
    #   enable = true;
    #   settings = {
    #     nes = {
    #       enabled = false;
    #     };
    #   };
    # };

    conform-nvim = {
      enable = true;
      settings = {
@@ -857,5 +861,6 @@ in {
    pkgs.lua
    pkgs.ripgrep
    pkgs.nodejs-slim
    pkgs.lsof
  ];
}

@@ -14,6 +14,11 @@
  services = {
    caddy = {
      enable = true;
      globalConfig = ''
        servers {
          metrics
        }
      '';
      extraConfig = ''
        (cloudflare) {
          tls {

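Aside: the `servers { metrics }` global option is what enables Caddy's per-server Prometheus metrics, and they are served from Caddy's admin endpoint, which defaults to localhost:2019 — hence the `caddy = 2019` entries in the exporter port sets later in this diff. A minimal sketch of a scrape job for it, assuming the default admin address:

    # Sketch only: Prometheus scrape job for Caddy's admin /metrics endpoint.
    # Assumes Caddy's default admin address, localhost:2019.
    services.prometheus.scrapeConfigs = [
      {
        job_name = "caddy";
        static_configs = [{targets = ["localhost:2019"];}];
      }
    ];
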
@@ -1,27 +0,0 @@
{
  config,
  lib,
  pkgs,
  ...
}: {
  # services.command-runner = {
  #   enable = false;
  #   port = 5599;
  #   user = "servius";
  #   commands = let
  #     hyprctl = "${pkgs.hyprland}/bin/hyprctl";
  #   in
  #     {
  #       "display_on" = [hyprctl "-i" "{instance}" "dispatch" "dpms" "on"];
  #       "display_off" = [hyprctl "-i" "{instance}" "dispatch" "dpms" "off"];
  #       "display_toggle" = [hyprctl "-i" "{instance}" "dispatch" "dpms" "toggle"];
  #       "display_status" = [hyprctl "-i" "{instance}" "-j" "monitors"];
  #       "hyprland_instance" = [hyprctl "-j" "instances"];
  #     }
  #     // (builtins.foldl' (acc: elem: acc // elem) {} (lib.map (name: {
  #       "display_on_${name}" = [hyprctl "-i" "{instance}" "dispatch" "dpms" "on" name];
  #       "display_off_${name}" = [hyprctl "-i" "{instance}" "dispatch" "dpms" "off" name];
  #       "display_toggle_${name}" = [hyprctl "-i" "{instance}" "dispatch" "dpms" "toggle" name];
  #     }) ["HDMI-A-1" "DP-3" "DP-1"]));
  #   };
}

@@ -4,7 +4,6 @@
    # ./zerotier.nix
    # ./dnscrypt.nix
    ./caddy.nix
    ./command-runner.nix
    ./dualsense.nix
    ./fprintd.nix
    ./fwupd.nix

@@ -1,11 +1,28 @@
{...}: {
{pkgs, ...}: let
  # Port configurations
  ports = {
    # System exporters
    node = 9100;
    systemd = 9558;
    process = 9256;
    nvidiagpu = 9835;

    # Infrastructure exporters
    cadvisor = 8080;
    caddy = 2019;
  };
in {
  services = {
    prometheus = {
      exporters = {
        systemd = {
          enable = true;
          port = ports.systemd;
        };
        nvidia-gpu = {
          enable = true;
          port = ports.nvidiagpu;
        };
        nvidia-gpu.enable = true;
        node = {
          enable = true;
          enabledCollectors = [
@@ -19,7 +36,10 @@
            "time"
            "uname"
            "vmstat"
            "diskstats"
            "cpu"
          ];
          port = ports.node;
        };
        process = {
          enable = true;
@@ -33,4 +53,34 @@
      };
    };
  };

  # Docker cAdvisor for container metrics
  virtualisation.oci-containers.containers.cadvisor = {
    image = "gcr.io/cadvisor/cadvisor:v0.49.1";
    ports = ["${toString ports.cadvisor}:8080"];
    volumes = [
      "/:/rootfs:ro"
      "/var/run:/var/run:ro"
      "/sys:/sys:ro"
      "/var/lib/docker/:/var/lib/docker:ro"
      "/dev/disk/:/dev/disk:ro"
    ];
    extraOptions = [
      "--privileged"
      "--device=/dev/kmsg"
    ];
  };

  # Open firewall ports for Prometheus exporters
  networking.firewall = {
    # Allow from Tailscale network
    interfaces."tailscale0".allowedTCPPorts = [
      ports.node
      ports.systemd
      ports.process
      ports.nvidiagpu
      ports.cadvisor
      ports.caddy
    ];
  };
}

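Aside on the pattern this hunk introduces: hoisting the port numbers into one `ports` attrset lets the exporter definitions and the firewall rules reference the same value, so the two can never drift apart. A minimal self-contained sketch of the idea (values illustrative):

    # Sketch only: one attrset drives both the exporter and the firewall rule.
    let
      ports = {node = 9100;};
    in {
      services.prometheus.exporters.node = {
        enable = true;
        port = ports.node; # exporter listens here
      };
      networking.firewall.interfaces."tailscale0".allowedTCPPorts = [ports.node];
    }
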
@@ -2,6 +2,11 @@
  services = {
    caddy = {
      enable = true;
      globalConfig = ''
        servers {
          metrics
        }
      '';
      extraConfig = ''
        (auth) {
          forward_auth localhost:5555 {

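The `(auth)` snippet is cut off at the hunk boundary. For orientation, an Authelia forward_auth snippet typically continues roughly as follows — a sketch only, with an assumed URI and header list, not the repository's exact lines:

    # Hypothetical continuation of the (auth) snippet; verify against the real file.
    services.caddy.extraConfig = ''
      (auth) {
        forward_auth localhost:5555 {
          uri /api/authz/forward-auth
          copy_headers Remote-User Remote-Groups Remote-Name Remote-Email
        }
      }
    '';

Whatever its exact body, this snippet is what would populate the `REMOTE-USER` header that Grafana's `auth.proxy` settings in monitoring.nix trust.
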
@@ -3,7 +3,7 @@
    ./games
    # ./headscale.nix
    ./llms.nix
    # ./monitoring.nix
    ./monitoring.nix
    # ./paperless.nix
    ./navidrome.nix
    ./shitpost.nix

@@ -63,6 +63,10 @@
      # LFS_START_SERVER = true;
      LFS_ALLOW_PURE_SSH = true;
    };
    metrics = {
      ENABLED = true;
      TOKEN = "";
    };
    oauth2_client = {
      ENABLE_AUTO_REGISTRATION = true;
      ACCOUNT_LINKING = "auto";

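Aside: Gitea's `[metrics]` section exposes Prometheus metrics at `/metrics`, and with `TOKEN = ""` the endpoint is unauthenticated — which is what lets the tako-applications job in monitoring.nix scrape `localhost:3000` directly. If a token were set, the scrape job would need the matching bearer credential; a sketch:

    # Sketch only: scrape job shape if Gitea's [metrics] TOKEN were non-empty.
    {
      job_name = "gitea";
      static_configs = [{targets = ["localhost:3000"];}];
      # With a token set, Prometheus must send it as "Authorization: Bearer ...", e.g.
      # bearer_token = "<the [metrics] TOKEN value>";
    }
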
@@ -87,6 +87,14 @@
          siteMonitor = "https://git.darksailor.dev";
        };
      }
      {
        "Grafana" = {
          icon = "grafana.png";
          description = "Grafana Monitoring & Metrics";
          href = "https://grafana.darksailor.dev";
          siteMonitor = "https://grafana.darksailor.dev";
        };
      }
      {
        "Nextcloud" = {
          icon = "nextcloud.png";

nixos/tako/services/monitoring.nix (new file, 483 lines)
@@ -0,0 +1,483 @@
{
  config,
  pkgs,
  lib,
  ...
}: let
  # Port configurations
  ports = {
    grafana = 3001; # Changed from 3000 to avoid clash with Gitea
    prometheus = 9090;

    # System exporters
    node = 9100;
    systemd = 9558;
    process = 9256;

    # Infrastructure exporters
    postgres = 9187;
    redis = 9121;
    cadvisor = 8080;

    # Application exporters
    caddy = 2019;
  };
in {
  # Grafana configuration with Authelia integration
  services.grafana = {
    enable = true;
    settings = {
      server = {
        http_addr = "127.0.0.1";
        http_port = ports.grafana;
        domain = "grafana.darksailor.dev";
        root_url = "https://grafana.darksailor.dev";
      };

      # Disable Grafana's own auth since we use Authelia
      auth.disable_login_form = true;
      "auth.basic".enabled = false;
      "auth.anonymous".enabled = false;
      "auth.proxy" = {
        enabled = true;
        header_name = "REMOTE-USER";
        header_property = "username";
        auto_sign_up = true;
      };

      users = {
        allow_sign_up = false;
        auto_assign_org = true;
        auto_assign_org_role = "Admin";
      };

      security = {
        disable_gravatar = true;
        cookie_secure = true;
      };

      analytics = {
        reporting_enabled = false;
        check_for_updates = false;
      };
    };

    provision = {
      enable = true;
      datasources.settings.datasources = [
        {
          name = "Prometheus";
          type = "prometheus";
          access = "proxy";
          url = "http://localhost:${toString ports.prometheus}";
          isDefault = true;
          jsonData = {
            timeInterval = "30s";
          };
        }
      ];

      # Provision popular community dashboards
      dashboards = {
        settings = {
          apiVersion = 1;
          providers = [
            {
              name = "default";
              orgId = 1;
              folder = "";
              type = "file";
              disableDeletion = false;
              updateIntervalSeconds = 10;
              allowUiUpdates = true;
              options.path = "/var/lib/grafana/dashboards";
            }
          ];
        };
      };
    };
  };

  # Caddy virtual host for Grafana with Authelia
  services.caddy.virtualHosts."grafana.darksailor.dev".extraConfig = ''
    import auth
    reverse_proxy localhost:${toString ports.grafana}
  '';

  # Central Prometheus server
  services.prometheus = {
    enable = true;
    port = ports.prometheus;

    # Retention settings (90 days)
    retentionTime = "90d";

    # Global scrape config
    globalConfig = {
      scrape_interval = "30s";
      evaluation_interval = "30s";
    };

    # System exporters for tako
    exporters = {
      node = {
        enable = true;
        port = ports.node;
        enabledCollectors = [
          "systemd"
          "textfile"
          "filesystem"
          "loadavg"
          "meminfo"
          "netdev"
          "netstat"
          "stat"
          "time"
          "uname"
          "vmstat"
          "diskstats"
          "cpu"
        ];
      };

      systemd = {
        enable = true;
        port = ports.systemd;
      };

      process = {
        enable = true;
        settings.process_names = [
          {
            name = "{{.Comm}}";
            cmdline = [".*"];
          }
        ];
      };

      postgres = {
        enable = true;
        port = ports.postgres;
        runAsLocalSuperUser = true;
      };

      redis = {
        enable = true;
        port = ports.redis;
      };
    };

    # Scrape configurations for all targets
    scrapeConfigs = [
      # System metrics - tako (local)
      {
        job_name = "tako-system";
        static_configs = [
          {
            targets = [
              "localhost:${toString ports.node}"
              "localhost:${toString ports.systemd}"
              "localhost:${toString ports.process}"
            ];
            labels = {
              instance = "tako";
              machine = "tako";
              role = "server";
            };
          }
        ];
      }

      # Infrastructure - tako
      {
        job_name = "tako-infrastructure";
        static_configs = [
          {
            targets = [
              "localhost:${toString ports.postgres}"
              "localhost:${toString ports.redis}"
              "localhost:${toString ports.cadvisor}"
            ];
            labels = {
              instance = "tako";
              machine = "tako";
            };
          }
        ];
      }

      # Caddy metrics - tako
      {
        job_name = "tako-caddy";
        static_configs = [
          {
            targets = ["localhost:${toString ports.caddy}"];
            labels = {
              instance = "tako";
              machine = "tako";
              service = "caddy";
            };
          }
        ];
      }

      # Application metrics - tako
      {
        job_name = "tako-applications";
        static_configs = [
          {
            targets = [
              "localhost:3000" # gitea
              "localhost:5555" # authelia (if metrics enabled)
            ];
            labels = {
              instance = "tako";
              machine = "tako";
            };
          }
        ];
      }

      # System metrics - tsuba (remote via Tailscale)
      {
        job_name = "tsuba-system";
        static_configs = [
          {
            targets = [
              "tsuba:9100"
              "tsuba:9558"
              "tsuba:9256"
            ];
            labels = {
              instance = "tsuba";
              machine = "tsuba";
              role = "server";
            };
          }
        ];
      }

      # Infrastructure - tsuba
      {
        job_name = "tsuba-infrastructure";
        static_configs = [
          {
            targets = [
              "tsuba:8080" # cadvisor
              "tsuba:2019" # caddy
            ];
            labels = {
              instance = "tsuba";
              machine = "tsuba";
            };
          }
        ];
      }

      # Media services - tsuba
      {
        job_name = "tsuba-media";
        static_configs = [
          {
            targets = [
              "tsuba:8096" # jellyfin (built-in /metrics endpoint)
              "tsuba:8123" # homeassistant (configure prometheus integration)
              "tsuba:9617" # pihole-exporter
            ];
            labels = {
              instance = "tsuba";
              machine = "tsuba";
            };
          }
        ];
        metrics_path = "/metrics";
        relabel_configs = [
          {
            source_labels = ["__address__"];
            regex = "tsuba:8096";
            target_label = "__metrics_path__";
            replacement = "/metrics";
          }
          {
            source_labels = ["__address__"];
            regex = "tsuba:8123";
            target_label = "__metrics_path__";
            replacement = "/api/prometheus";
          }
        ];
      }

      # Servarr stack - tsuba (exportarr)
      {
        job_name = "tsuba-servarr";
        static_configs = [
          {
            targets = [
              "tsuba:9707" # sonarr
              "tsuba:9708" # radarr
              "tsuba:9709" # lidarr
              "tsuba:9710" # bazarr
            ];
            labels = {
              instance = "tsuba";
              machine = "tsuba";
              stack = "servarr";
            };
          }
        ];
      }

      # Deluge - tsuba
      {
        job_name = "tsuba-deluge";
        static_configs = [
          {
            targets = ["tsuba:9354"];
            labels = {
              instance = "tsuba";
              machine = "tsuba";
              service = "deluge";
            };
          }
        ];
      }

      # System metrics - ryu (remote via Tailscale)
      {
        job_name = "ryu-system";
        static_configs = [
          {
            targets = [
              "ryu:9100"
              "ryu:9558"
              "ryu:9256"
              "ryu:9835" # nvidia-gpu
            ];
            labels = {
              instance = "ryu";
              machine = "ryu";
              role = "desktop";
            };
          }
        ];
      }

      # Infrastructure - ryu
      {
        job_name = "ryu-infrastructure";
        static_configs = [
          {
            targets = [
              "ryu:8080" # cadvisor
              "ryu:2019" # caddy
            ];
            labels = {
              instance = "ryu";
              machine = "ryu";
            };
          }
        ];
      }
    ];
  };

  # Docker cAdvisor for container metrics
  virtualisation.oci-containers.containers.cadvisor = {
    image = "gcr.io/cadvisor/cadvisor:v0.49.1";
    ports = ["127.0.0.1:${toString ports.cadvisor}:8080"];
    volumes = [
      "/:/rootfs:ro"
      "/var/run:/var/run:ro"
      "/sys:/sys:ro"
      "/var/lib/docker/:/var/lib/docker:ro"
      "/dev/disk/:/dev/disk:ro"
    ];
    extraOptions = [
      "--privileged"
      "--device=/dev/kmsg"
    ];
  };

  # Link dashboard files from Nix store to Grafana's expected location
  systemd.tmpfiles.rules = let
    # Define dashboard files with proper hashes
    nodeExporterFull = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/1860/revisions/37/download";
      sha256 = "0qza4j8lywrj08bqbww52dgh2p2b9rkhq5p313g72i57lrlkacfl";
    };
    nvidiaDashboardRaw = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/14574/revisions/9/download";
      sha256 = "170ijap5i99sapkxlf3k0lnvwmb6g9jkk7q66nwjwswkj2a7rqbr";
    };
    # Fix NVIDIA dashboard to use our Prometheus datasource
    nvidiaDashboard = pkgs.runCommand "nvidia-gpu-fixed.json" {} ''
      ${pkgs.gnused}/bin/sed 's/\''${DS_PROMETHEUS}/Prometheus/g' ${nvidiaDashboardRaw} > $out
    '';
    postgresqlDashboardRaw = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/9628/revisions/7/download";
      sha256 = "0xmk68kqb9b8aspjj2f8wxv2mxiqk9k3xs0yal4szmzbv65c6k66";
    };
    # Fix PostgreSQL dashboard to use our Prometheus datasource
    postgresqlDashboard = pkgs.runCommand "postgresql-fixed.json" {} ''
      ${pkgs.gnused}/bin/sed 's/\''${DS_PROMETHEUS}/Prometheus/g' ${postgresqlDashboardRaw} > $out
    '';
    redisDashboard = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/11835/revisions/1/download";
      sha256 = "15lbn4i8j5hiypl4dsg0d72jgrgjwpagkf5kcwx66gyps17jcrxx";
    };
    dockerDashboardRaw = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/193/revisions/1/download";
      sha256 = "1lxbbl91fh0yfh8x53205b7nw5ivghlpfb0m308z2p6fzvz2iq2m";
    };
    # Fix Docker dashboard to use our Prometheus datasource
    dockerDashboard = pkgs.runCommand "docker-cadvisor-fixed.json" {} ''
      ${pkgs.gnused}/bin/sed 's/\''${DS_PROMETHEUS}/Prometheus/g' ${dockerDashboardRaw} > $out
    '';
    caddyDashboardRaw = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/14280/revisions/1/download";
      sha256 = "0j3q68cq1nj8gcxkqz5h1kn1ds5kgq4jlkw73xp6yc88mbm5nyh4";
    };
    # Fix Caddy dashboard to use our Prometheus datasource
    caddyDashboard = pkgs.runCommand "caddy-fixed.json" {} ''
      ${pkgs.gnused}/bin/sed 's/\''${DS_PROMETHEUS}/Prometheus/g' ${caddyDashboardRaw} > $out
    '';
    piholeDashboardRaw = pkgs.fetchurl {
      url = "https://grafana.com/api/dashboards/10176/revisions/3/download";
      sha256 = "18f8w3l5k178agipfbimg29lkf2i32xynin1g1v5abiac3ahj7ih";
    };
    # Fix Pi-hole dashboard to use our Prometheus datasource
    piholeDashboard = pkgs.runCommand "pihole-fixed.json" {} ''
      ${pkgs.gnused}/bin/sed 's/\''${DS_PROMETHEUS}/Prometheus/g' ${piholeDashboardRaw} > $out
    '';
  in [
    "d /var/lib/grafana/dashboards 0755 grafana grafana -"
    "L+ /var/lib/grafana/dashboards/node-exporter-full.json - - - - ${nodeExporterFull}"
    "L+ /var/lib/grafana/dashboards/nvidia-gpu.json - - - - ${nvidiaDashboard}"
    "L+ /var/lib/grafana/dashboards/postgresql.json - - - - ${postgresqlDashboard}"
    "L+ /var/lib/grafana/dashboards/redis.json - - - - ${redisDashboard}"
    "L+ /var/lib/grafana/dashboards/docker-cadvisor.json - - - - ${dockerDashboard}"
    "L+ /var/lib/grafana/dashboards/caddy.json - - - - ${caddyDashboard}"
    "L+ /var/lib/grafana/dashboards/pihole.json - - - - ${piholeDashboard}"
  ];

  # Open firewall ports for Prometheus to scrape exporters
  networking.firewall = {
    # allowedTCPPorts = [
    #   ports.node
    #   ports.systemd
    #   ports.process
    # ];

    # Allow Prometheus and Grafana access from Tailscale network
    interfaces."tailscale0".allowedTCPPorts = [
      ports.prometheus
      ports.grafana
      ports.node
      ports.systemd
      ports.process
      ports.postgres
      ports.redis
      ports.cadvisor
    ];
  };
}
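Usage note: extending this setup to another machine takes two steps — run the exporters on the new host (as the tsuba/ryu files in this diff do) and append a scrape job here. A minimal sketch, with a hypothetical host named `kame`:

    # Sketch only; "kame" is a made-up hostname for illustration.
    {
      job_name = "kame-system";
      static_configs = [
        {
          targets = ["kame:9100" "kame:9558" "kame:9256"];
          labels = {
            instance = "kame";
            machine = "kame";
          };
        }
      ];
    }
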
@@ -14,6 +14,11 @@
  services = {
    caddy = {
      enable = true;
      globalConfig = ''
        servers {
          metrics
        }
      '';
      extraConfig = ''
        (cloudflare) {
          tls {

@@ -1,10 +1,40 @@
{...}: {
{
  pkgs,
  config,
  ...
}: let
  # Port configurations
  ports = {
    # System exporters
    node = 9100;
    systemd = 9558;
    process = 9256;

    # Infrastructure exporters
    cadvisor = 8080;
    caddy = 2019;

    # Media exporters
    jellyfin = 9220;
    pihole = 9617;

    # Servarr exporters (via exportarr)
    sonarr = 9707;
    radarr = 9708;
    lidarr = 9709;
    bazarr = 9710;

    # Torrent
    deluge = 9354;
  };
in {
  sops.secrets."pihole/password" = {};
  services = {
    prometheus = {
      exporters = {
        systemd = {
          enable = true;
          port = 9558;
          port = ports.systemd;
        };
        node = {
          enable = true;
@@ -19,8 +49,10 @@
            "time"
            "uname"
            "vmstat"
            "diskstats"
            "cpu"
          ];
          port = 9100;
          port = ports.node;
        };
        process = {
          enable = true;
@@ -35,14 +67,142 @@
    };
  };

  # Docker cAdvisor for container metrics
  virtualisation.oci-containers.containers.cadvisor = {
    image = "gcr.io/cadvisor/cadvisor:v0.49.1";
    ports = ["${toString ports.cadvisor}:8080"];
    volumes = [
      "/:/rootfs:ro"
      "/var/run:/var/run:ro"
      "/sys:/sys:ro"
      "/var/lib/docker/:/var/lib/docker:ro"
      "/dev/disk/:/dev/disk:ro"
    ];
    extraOptions = [
      "--privileged"
      "--device=/dev/kmsg"
    ];
  };

  # Jellyfin - use built-in metrics endpoint at http://localhost:8096/metrics
  # No separate exporter needed - Prometheus will scrape directly

  # Home Assistant - has built-in Prometheus integration
  # Configure in Home Assistant configuration.yaml:
  # prometheus:
  #   namespace: homeassistant

  # Pi-hole exporter
  # Uses sops-managed API token for authentication with Pi-hole v6
  # To set the token: edit secrets/secrets.yaml and replace the placeholder at pihole.api_token
  systemd.services.pihole-exporter = {
    description = "Pi-hole Prometheus Exporter";
    wantedBy = ["multi-user.target"];
    after = ["network.target" "sops-nix.service"];
    serviceConfig = {
      Type = "simple";
      DynamicUser = true;
      # Load API token from sops secret file
      LoadCredential = "ppassword:${config.sops.secrets."pihole/password".path}";
      ExecStart = ''
        ${pkgs.bash}/bin/bash -c '${pkgs.prometheus-pihole-exporter}/bin/pihole-exporter \
          -pihole_hostname pihole.darksailor.dev \
          -pihole_port 8053 \
          -port ${toString ports.pihole} \
          -pihole_password $(cat ''${CREDENTIALS_DIRECTORY}/ppassword)'
      '';
      Restart = "on-failure";
    };
  };

  # Exportarr for Sonarr
  # Disabled: needs API key configuration
  # systemd.services.exportarr-sonarr = {
  #   description = "Exportarr Prometheus Exporter for Sonarr";
  #   wantedBy = ["multi-user.target"];
  #   after = ["network.target"];
  #   serviceConfig = {
  #     Type = "simple";
  #     DynamicUser = true;
  #     ExecStart = "${pkgs.exportarr}/bin/exportarr sonarr --port ${toString ports.sonarr} --url http://localhost:8989";
  #     Restart = "on-failure";
  #   };
  # };

  # Exportarr for Radarr
  # Disabled: needs API key configuration
  # systemd.services.exportarr-radarr = {
  #   description = "Exportarr Prometheus Exporter for Radarr";
  #   wantedBy = ["multi-user.target"];
  #   after = ["network.target"];
  #   serviceConfig = {
  #     Type = "simple";
  #     DynamicUser = true;
  #     ExecStart = "${pkgs.exportarr}/bin/exportarr radarr --port ${toString ports.radarr} --url http://localhost:7878";
  #     Restart = "on-failure";
  #   };
  # };

  # Exportarr for Lidarr
  # Disabled: needs API key configuration
  # systemd.services.exportarr-lidarr = {
  #   description = "Exportarr Prometheus Exporter for Lidarr";
  #   wantedBy = ["multi-user.target"];
  #   after = ["network.target"];
  #   serviceConfig = {
  #     Type = "simple";
  #     DynamicUser = true;
  #     ExecStart = "${pkgs.exportarr}/bin/exportarr lidarr --port ${toString ports.lidarr} --url http://localhost:8686";
  #     Restart = "on-failure";
  #   };
  # };

  # Exportarr for Bazarr
  # Disabled: needs API key configuration
  # systemd.services.exportarr-bazarr = {
  #   description = "Exportarr Prometheus Exporter for Bazarr";
  #   wantedBy = ["multi-user.target"];
  #   after = ["network.target"];
  #   serviceConfig = {
  #     Type = "simple";
  #     DynamicUser = true;
  #     ExecStart = "${pkgs.exportarr}/bin/exportarr bazarr --port ${toString ports.bazarr} --url http://localhost:6767";
  #     Restart = "on-failure";
  #   };
  # };

  # Deluge exporter
  systemd.services.deluge-exporter = {
    description = "Deluge Prometheus Exporter";
    wantedBy = ["multi-user.target"];
    after = ["network.target"];
    serviceConfig = {
      Type = "simple";
      DynamicUser = true;
      ExecStart = "${pkgs.prometheus-deluge-exporter}/bin/deluge-exporter localhost:58846 --addr :${toString ports.deluge}";
      Restart = "on-failure";
    };
  };

  # Samba exporter - using a simple script to expose smbstatus metrics
  # For now, we'll skip this and can add later if needed

  # Open firewall ports for Prometheus exporters
  networking.firewall = {
    allowedTCPPorts = [
      9100 # node exporter
      9256 # process exporter
      9558 # systemd exporter
      9134 # zfs exporter
      9633 # smartctl exporter
    # Allow from Tailscale network
    interfaces."tailscale0".allowedTCPPorts = [
      ports.node
      ports.systemd
      ports.process
      ports.cadvisor
      ports.caddy
      ports.jellyfin
      ports.pihole
      # ports.sonarr # Disabled - needs API key
      # ports.radarr # Disabled - needs API key
      # ports.lidarr # Disabled - needs API key
      # ports.bazarr # Disabled - needs API key
      ports.deluge
    ];
  };
}
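One pattern above is worth calling out: `DynamicUser = true` makes file-based secrets awkward, and `LoadCredential` is the systemd-native answer — the unit receives a private copy of the sops-managed file under `$CREDENTIALS_DIRECTORY`, readable no matter which UID the service is assigned. Stripped to its essentials (the service name, secret path, and `some-exporter` binary are illustrative):

    # Sketch only: the LoadCredential + DynamicUser pattern used by pihole-exporter.
    systemd.services.some-exporter = {
      wantedBy = ["multi-user.target"];
      serviceConfig = {
        DynamicUser = true;
        # "token" appears as $CREDENTIALS_DIRECTORY/token inside the unit.
        LoadCredential = "token:${config.sops.secrets."example/token".path}";
        ExecStart = "${pkgs.bash}/bin/bash -c 'exec some-exporter --token $(cat $CREDENTIALS_DIRECTORY/token)'";
      };
    };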