Merged Config History to Remove Leaked (No-Longer-Secret) Secrets

This is a combination of 129 commits:

Initial Server Configuration

Add Caddy

Add Jelly Bucket to Minio

Remove Podman DNS

Initialize Server Configuration Directory

Also replace Minio Pod w/ Nix Derivation

Remove Neko/WatchThingz User Configuration (Broken, See Issue)

Disable WatchThingz

Add cockpit

TODO: Add Cockpit Plugins

TODO: Add Performance Metrics

https://github.com/performancecopilot/pcp

Start adding Gitea

TODO: Gitea specific postgres config, determine global postgres

Add Second Mass Storage Drive

Add Gitea in Full

Mount Both Data Dirs for Minio

Add CUDA to Nvidia

Add OCI Based Servers

TODO: Organize into server architecture

Add Secrets

Add some nice to have packages

Massive Server Upgrade

Jelly s3fs mount

Stats for things like Minio Usage, Logs etc.

VirtualHost & Pod Cleanup

Move pod imports into the oci services that use them

Have services define what virtualhost of caddy they belong to

Migrate homeassistant and jellyfin to new dir structure

Headscale and static files

Directory Reorganization

New Module Structure

Headscale is public facing

Headscale User Generation Module

Finish HeadScale PreAuth Module

TODO: Activation Script

sketch: (Tailscale & Container) Headscale integration

Add Local DNS Resolver & Local Domains

Add Path to Output of ensureUsers

Fix Path Setting

Add Services Dir

Local Join to Tailnet w/ Auth Gen

Togers Uses .tv ...

Move networking config

Add networking to configuration.nix

Update to Bridged Networking Requirement for nspawn

Fix unit definitions

Cleanup defs for container support

Add Minio Containers to tailnet

Disable PostGresql, seems to break things

Migrate to LVM Disk

Fix not Using Headscale Containers

Re-add Nextcloud

Re Auth Prometheus for Minio

Pretty Graphs

Init: pre-office servers

Init: pre Pterodactyl server

Fix Jelly VPN

Disable Grafana for Now

Add VaultWarden

Add Anki

Add GC and Store Optimization

Correct Gitea's connection to postgresql

Add Vaultwarden, Remove Anki

Cleanup User Deps for Recognize

Pterodactyl: Add Nspawn Service

Change to Flake System

Fix flake path bugs

Add Hydra

Add Build Machine

Wings: Migrate to Nix Directly... or do tun tap. Might do latter

Try to get Anki to Work

It passes args properly now, but not environment variables

Add NAT Passthrough on Ports

Disable for now, interferes b/c of NAT

Tried to enable actions

Nix Serve Cache

Hydra DynRun

Increase port range

Stop Using Pod

Patch Hydra

Video Group & Patches

libnvidia-container ldconfig patch

More patching

nvidia-podman fix && jellyfin nvidia

Nix cache domain

Update Flake

Container Deployment User & Script

Add Handy Helper Deploy-scheme

Forgotten Flake Update

2023-03-12 -> 2023-03-21

Update Flake

Update Nextcloud 25 -> 26

Update Flake & Nvidia-Podman

Update of flake broke nvidia podman, this fixes it, hopefully

Latest working version

Update Time!

Use new Gitea Config

Use new Gitea Config, properly

Currently borked, need to wait, or go back to earlier working version

Working now

Updates

Change Hydra Port

Whoops, Keyboard bad

Convert to String

Update Time

NodeJS InSecure for Now

OpenSSL1.1.1t InSecure

Disable Hydra Tests

More insecure

Update and Ethan

Basic AudioBookshelf impl

Add AudioBookShelf

Fix Group

Test Env Var

Environment Wrong Location

Remove TMP Env

Config Dir SystemDir: Audiobookshelf

Audiobook: getopt

ExecStart Args for Env

Correct Port

Add Domain: AudioBooks

Git LFS

Hauk Location Tracking

TODO: Change domain to whereis.chris.crompton.cc

Enable Hauk

Correct Hauk Port

Flake Update

Docker-compat

Disable Recognize Setup

Nextcloud 26 -> 27

Disable Podman-Nvidia

Environment is clouded for some reason™️ (nvidia-container-tools
makes a "docker" command visible)

OctoPrint & Prusa

Samba server

Reorganize for Config Merge

Move Nvidia Fix to File

Migrate to sops-nix

servers -> server

Remove Old Key Things for Agenix
This commit is contained in:
2023-08-14 20:13:41 -04:00
parent b84e86154a
commit 5ae9e2e777
56 changed files with 2506 additions and 1 deletions

View File

@@ -0,0 +1,41 @@
# Mounts a MinIO S3 bucket onto the local filesystem with s3fs.
# `s3fs` builds the whole NixOS fragment (agenix secret + systemd unit) for
# one bucket; it is applied once at the bottom for the "jellyfin" bucket.
{ pkgs, lib, config, ... }:
let
s3fs = { mount, bucket }: {
# agenix-managed credentials file consumed by s3fs via passwd_file.
age.secrets.jellyMount = {
file = /etc/nixos/secrets/jellyMountPass.age;
owner = "root";
group = "root";
mode = "0600";
};
systemd.services."s3fs-${bucket}" = {
description = "Jellyfin Bucket Storage";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStartPre = [
"${pkgs.coreutils}/bin/mkdir -m 0500 -pv ${mount}"
"${pkgs.e2fsprogs}/bin/chattr +i ${mount}" # Stop files being accidentally written to unmounted directory
];
# Run s3fs in the foreground (-f) against the local MinIO endpoint on :7500.
ExecStart = let
options = [
"passwd_file=${config.age.secrets.jellyMount.path}"
"use_path_request_style"
"allow_other"
"url=http://localhost:7500"
"umask=0077"
];
in
"${pkgs.s3fs}/bin/s3fs ${bucket} ${mount} -f "
+ lib.concatMapStringsSep " " (opt: "-o ${opt}") options;
# Leading "-": ignore unmount failures when the mount is already gone.
ExecStopPost = "-${pkgs.fuse}/bin/fusermount -u ${mount}";
KillMode = "process";
Restart = "on-failure";
};
};
};
in
# NOTE(review): the secret is registered under the fixed name `jellyMount`,
# so instantiating `s3fs` for a second bucket would collide on that secret.
s3fs {
mount = "/jelly";
bucket = "jellyfin";
}

View File

@@ -0,0 +1,27 @@
# Aggregates the LAN-only (non-internet-facing) service modules.
# Each entry is a self-contained NixOS module under ./private/.
{
imports = [
# Local Network DNS
./private/unbound.nix
# System Stats and Monitoring (web console)
./private/cockpit.nix
# Track Stats of system
./private/prometheus.nix
# Pretty Visuals (currently disabled)
# ./private/grafana.nix
# Home Monitoring and Control
./private/homeassistant.nix
# Minio S3 Object Storage
# NOTE(review): spelled "miniio" — confirm it matches the file on disk.
./private/miniio.nix
# OctoPrint
./private/octoprint.nix
# Samba Share
./private/samba.nix
];
}

View File

@@ -0,0 +1,25 @@
# Cockpit web-based system administration console (LAN only).
# FIX: `fetchFromGitHub` was removed from the argument set — the NixOS
# module system does not supply it as a module argument, so requesting it
# made evaluation fail. It was only referenced from the commented-out
# packaging sketch below, which now goes through `pkgs.fetchFromGitHub`.
{ pkgs
, config
, ...}:
{
  services.cockpit = {
    enable = true;
    # Cockpit's default port.
    port = 9090;
  };
  # TODO: Performance Metrics:
  # https://github.com/performancecopilot/pcp
  # environment.systemPackages = let
  #   cockpit-machines = pkgs.stdenv.mkDerivation {
  #     pname = "cockpit-machines";
  #     version = "283";
  #     src = pkgs.fetchFromGitHub { /* ... */ };
  #   };
  # in [
  #   cockpit-machines
  #   cockpit-containers
  # ];
}

View File

@@ -0,0 +1,10 @@
# Grafana dashboard server (module currently commented out of the
# private service list).
{config, pkgs, ...}:
{
  services.grafana = {
    enable = true;
    # FIX: `http_addr`/`http_port` are not top-level options of
    # services.grafana; since NixOS 22.11 they live under
    # `settings.server`, so the old form failed evaluation.
    settings.server = {
      # Listen on all interfaces, port 9998.
      http_addr = "0.0.0.0";
      http_port = 9998;
    };
  };
}

View File

@@ -0,0 +1,24 @@
# Home Assistant: the server itself runs as an OCI container; this module
# adds the LAN DNS name and the Caddy reverse proxy in front of it.
{pkgs, config, ...}:
{
  imports = [
    ../../oci/homeassistant.nix
  ];
  # LAN-only DNS so http://home.assistant resolves to this host.
  services.unbound.settings.server = {
    local-zone = [
      "assistant. static"
    ];
    local-data = [
      "'home.assistant. IN A 192.168.1.20'"
    ];
  };
  # Proxy the container's web UI (port 8123) behind Caddy.
  services.caddy.virtualHosts."http://home.assistant".extraConfig = ''
    reverse_proxy 127.0.0.1:8123
  '';
}

View File

@@ -0,0 +1,43 @@
# LAN DNS records and Caddy reverse proxies for the media-management
# stack: every service gets a `<name>.tv` hostname that resolves to this
# host and is proxied to its local HTTP port.
let
  # Explicit list fixes the DNS record order; attrset maps name -> port.
  names = [ "radarr" "sonarr" "prowlarr" "deluge" "bazarr" ];
  ports = {
    radarr = 7878;
    sonarr = 8989;
    prowlarr = 9696;
    deluge = 8112;
    bazarr = 6767;
  };
in
{
  services.unbound.settings.server = {
    local-zone = [
      "tv. transparent"
    ];
    local-data = map (name: "'${name}.tv. IN A 192.168.1.20'") names;
  };
  services.caddy.virtualHosts = builtins.listToAttrs (map (name: {
    name = "http://${name}.tv";
    value.extraConfig = "reverse_proxy 127.0.0.1:${toString ports.${name}}\n";
  }) names);
}

View File

@@ -0,0 +1,88 @@
# Runs MinIO inside a NixOS container attached to the br0 host bridge and
# joined to the tailnet (via ../../modules/containerHeadscale.nix).
# `mkLocalMinio` builds one container definition per data path.
{ pkgs, config, lib, ...}: let
mkLocalMinio = {
path, n
}: {
autoStart = true;
privateNetwork = true;
hostBridge = "br0";
# Static LAN address per instance: 10.0.0.11 for n = 1, and so on.
localAddress = "10.0.0.${toString (10+n)}/24";
# If true it registers a new node every time;
# need to find where it stores the state
ephemeral = false;
bindMounts = {
# Bucket data lives on the host; mounted read-write into the container.
"/mnt/disk1/minio" = {
hostPath = path;
isReadOnly = false;
};
# Root credentials are provisioned on the host by sops-nix (see below).
"/rootCreds" = {
hostPath = config.sops.secrets.minioRoot.path;
isReadOnly = true;
};
};
config = {pkgs, config, ...}: {
system.stateVersion = "22.11";
networking.defaultGateway = "10.0.0.1";
networking.firewall = {
# 9000 = S3 API, 7501 = web console.
allowedTCPPorts = [
9000
7501
];
};
environment.systemPackages = with pkgs; [
minio
minio-client
];
services.minio = {
enable = true;
listenAddress = ":9000";
consoleAddress = ":7501";
dataDir = [
];
rootCredentialsFile = "/rootCreds";
};
# Start only after the container has joined the tailnet; the short
# sleep gives the interface a moment to come up.
systemd.services.minio.after = ["tailscale_autologin.service"];
systemd.services.minio.preStart = ''
sleep 2s
'';
systemd.services.minio.environment = {
MINIO_VOLUMES = "/mnt/disk1/minio";
# Expandable later, but each pool must have more than 1 disk.
# https://github.com/minio/minio/issues/16711
MINIO_SERVER_URL = "http://minio1.minio1.tailnet:9000";
MINIO_PROMETHEUS_URL = "http://100.64.0.5:9999";
MINIO_PROMETHEUS_JOB_ID = "minio-job";
};
};
};
in {
imports = [
../../modules/containerHeadscale.nix
];
# World-readable on the host so the container bind mount can read it.
sops.secrets.minioRoot = {
owner = "root";
mode = "0444";
};
containers = {
minio1 = mkLocalMinio {
path = "/mass/minio";
n = 1;
};
};
# Register the container with the headscale container module (imported above).
services.headscale.containers = {
minio1 = {
};
};
}

View File

@@ -0,0 +1,10 @@
# OctoPrint 3D-printer controller, LAN-only on port 7550.
{ pkgs, config, ... }:
{
  services.octoprint.enable = true;
  services.octoprint.port = 7550;
}

View File

@@ -0,0 +1,24 @@
# Prometheus time-series collector on port 9999; currently scrapes the
# MinIO cluster metrics over the tailnet.
{config, pkgs, ...}:
{
services.prometheus = {
enable = true;
port = 9999;
scrapeConfigs = [
{
job_name = "minio-job";
# MinIO's cluster-wide metrics endpoint.
metrics_path = "/minio/v2/metrics/cluster";
scheme = "http";
# Turn into secret with bearer_token_file
# NOTE(review): this JWT is committed in plain text — rotate it and
# move it to bearer_token_file (sops) before publishing the repo.
bearer_token = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJwcm9tZXRoZXVzIiwic3ViIjoiaGlwcG9jYW1wdXMiLCJleHAiOjQ4MzA5ODA0MjB9.C-Y5lCDcpcHPWu87CXcqFdQF3nZ55neNVL-QVhf2NxGaqGQ1GL5AW7svbFZVjLJy1yMzgNn7wlAXB23d7q0GYA";
static_configs = [
{
# Presumably the MinIO container's tailnet address — verify.
targets = [
"100.64.0.4:9000"
];
}
];
}
];
};
}

View File

@@ -0,0 +1,28 @@
# Samba file sharing: exposes the Jellyfin media library read-only to the
# local network, with WS-Discovery so Windows clients can find the host.
{ config, lib, pkgs, ... }: {
services.samba-wsdd.enable = true;
services.samba = {
enable = true;
securityType = "user";
extraConfig = ''
workgroup = WORKGROUP
server string = smbnix
netbios name = smbnix
security = user
#use sendfile = yes
#max protocol = smb2
# note: localhost is the ipv6 localhost ::1
hosts allow = 192.168.0. 127.0.0.1 localhost
hosts deny = 0.0.0.0/0
guest account = nobody
map to guest = bad user
'';
shares = {
# Read-only guest share over the media library.
public = {
path = "/mass/jelly/media";
browseable = "yes";
"read only" = "yes";
"guest ok" = "yes";
};
};
};
}

View File

@@ -0,0 +1,31 @@
# Local recursive DNS resolver for the LAN.
# Currently switched off (enable = false); other modules still merge
# their local-zone / local-data records into these settings.
{ config, pkgs, lib, ... }:
let
  # Upstream resolvers queries are forwarded to (Cloudflare, then Google).
  upstreams = [
    "1.1.1.1"
    "1.0.0.1"
    "8.8.8.8"
    "8.8.4.4"
  ];
in
{
  services.unbound = {
    enable = false;
    settings = {
      server = {
        # Listen on every IPv4 and IPv6 interface.
        interface = [ "0.0.0.0" "::" ];
        private-address = "192.168.1.0/24";
        # Only loopback and the local subnet may query us.
        access-control = [
          "127.0.0.0/8 allow"
          "192.168.1.0/24 allow"
        ];
      };
      forward-zone = [{
        name = ".";
        forward-addr = upstreams;
      }];
    };
  };
}

View File

@@ -0,0 +1,42 @@
# Aggregates every public (internet-facing) service module.
# Each entry is a self-contained NixOS module under ./public/.
{
imports = [
# Reverse Proxy
./public/caddy.nix
# Entrance to the control plane of the private network
./public/headscale.nix
# Location tracking of my Dad in Saskatchewan
./public/hauk.nix
# Self Hosted Git Server
./public/gitea.nix
# Hydra Build Server
./public/hydra.nix
# Self Hosted Netflix
./public/jellyfin.nix
# Audio Books
./public/audiobookshelf.nix
# Static Website
./public/syzygial.nix
# Self Hosted Cloud Storage & Services
./public/nextcloud.nix
# Rabb.it at home
./public/watchthingz.nix
# Pterodactyl Game Server
./public/pterodactyl.nix
# Vaultwarden
./public/vaultwarden.nix
# Anki Sync Server
./public/anki.nix
];
}

View File

@@ -0,0 +1,25 @@
# Anki's built-in sync server, run as a hand-written systemd service.
# Currently disabled (enable = false) — per the history, environment
# variables were not reaching the service.
{config, pkgs, ...}:
{
systemd.services.ankisync = {
enable = false;
# NOTE(review): services are normally pulled in by multi-user.target and
# only ordered after network-online.target; wantedBy on
# network-online.target looks unintended — confirm before re-enabling.
wantedBy = ["network-online.target"];
script = ''
${pkgs.anki-bin}/bin/anki --syncserver
'';
serviceConfig = {
Type = "simple";
# Ephemeral UID with its own private state directory.
DynamicUser = true;
PrivateTmp = true;
StateDirectory = "foo";
StateDirectoryMode = "0750";
};
};
services.caddy.virtualHosts = {
# Proxy the sync server's local port 4000 to the public name.
"anki.syzygial.cc" = {
extraConfig = ''
reverse_proxy 127.0.0.1:4000
'';
};
};
}

View File

@@ -0,0 +1,41 @@
# Audiobookshelf audiobook/podcast server, published at books.syzygial.cc.
{ config, pkgs, ... }: let
  stateDir = "/var/lib/audiobookshelf";
  # Local HTTP port, used by both ExecStart and the Caddy proxy below.
  port = 7991;
in {
  # Dedicated unprivileged account for the service.
  users.users.audiobookshelf = {
    group = config.users.groups.audiobookshelf.name;
    isSystemUser = true;
  };
  users.groups.audiobookshelf = { };
  systemd.services.audiobookshelf = {
    after = [ "network.target" ];
    environment = {
    };
    # util-linux supplies getopt, needed by audiobookshelf's launcher.
    path = with pkgs; [
      util-linux
    ];
    serviceConfig = {
      # FIX: systemd directives are case-sensitive; the previous lowercase
      # `user`/`group` keys were invalid, so the unit did not drop
      # privileges as intended.
      User = config.users.users.audiobookshelf.name;
      Group = config.users.groups.audiobookshelf.name;
      ExecStart = "${pkgs.audiobookshelf}/bin/audiobookshelf --port ${toString port}";
      WorkingDirectory = stateDir;
      # Sandboxing: private /tmp and devices, no access to /home,
      # read-only system with only the state directory writable.
      PrivateTmp = "true";
      PrivateDevices = "true";
      ProtectHome = "true";
      ProtectSystem = "strict";
      AmbientCapabilities = "CAP_NET_BIND_SERVICE";
      StateDirectory = "audiobookshelf";
      StateDirectoryMode = "0700";
      Restart = "always";
    };
    wantedBy = [ "multi-user.target" ];
  };
  services.caddy.virtualHosts = {
    "books.syzygial.cc" = {
      extraConfig = ''
        reverse_proxy 127.0.0.1:${toString port}
      '';
    };
  };
}

View File

@@ -0,0 +1,16 @@
# Caddy reverse proxy: the single public entry point for every service;
# ACME certificates are obtained automatically per virtual host.
{ config, pkgs, ... }:
{
  services.caddy = {
    enable = true;
    # Use the Let's Encrypt staging CA while testing:
    # acmeCA = "https://acme-staging-v02.api.letsencrypt.org/directory";
    email = "davidcrompton1192@gmail.com";
    # Forwarded to an external machine rather than a local service.
    virtualHosts."star.zlinger.syzygial.cc".extraConfig = ''
      reverse_proxy 3.145.117.46:4000
    '';
  };
}

View File

@@ -0,0 +1,41 @@
# Gitea self-hosted Git service backed by the shared PostgreSQL instance,
# published at git.syzygial.cc through Caddy.
{ pkgs, config, ...}: let
davesDomain = "syzygial.cc";
in {
services.gitea = {
enable = true;
database = {
type = "postgres";
# Connect over the local UNIX socket.
socket = "/run/postgresql";
};
settings = {
server = {
HTTP_PORT = 5000;
ROOT_URL = "https://git.${davesDomain}";
};
actions = {
# Gitea Actions CI (a runner must be registered separately).
ENABLED = true;
};
};
};
services.postgresql = {
enable = true;
port = 5432;
# NOTE(review): "gitea" is not listed in ensureDatabases (unlike the
# other services); presumably the createdb clause below lets the gitea
# role create its own database on first start — confirm.
ensureUsers = [{
name = "gitea";
ensurePermissions = {
"DATABASE \"gitea\"" = "ALL PRIVILEGES";
};
ensureClauses = {
createdb = true;
};
}];
};
services.caddy.virtualHosts = {
"git.${davesDomain}" = {
extraConfig = ''
reverse_proxy 127.0.0.1:${toString config.services.gitea.settings.server.HTTP_PORT}
'';
};
};
}

View File

@@ -0,0 +1,14 @@
# Hauk live-location-sharing server; the service itself runs as an OCI
# container, this module only wires up the public domain.
{ pkgs, config, ... }:
{
  imports = [ ../../oci/hauk.nix ];
  services.caddy.virtualHosts."crompton.cc".extraConfig = ''
    reverse_proxy 127.0.0.1:7888
  '';
}

View File

@@ -0,0 +1,57 @@
# Headscale: self-hosted Tailscale control server, public at
# headscale.syzygial.cc so remote nodes can join the tailnet.
{config, pkgs, ...}: {
imports = [
# Local module (presumably user/pre-auth-key provisioning — see history).
../../modules/headscale.nix
];
services.headscale = {
enable = true;
# 7000 port addresses are for internal network
port = 7000;
settings = {
server_url = "https://headscale.syzygial.cc";
# TODO: Generate keys??
# Postgres seems to be broken
# db_type = "postgres";
# db_host = "/var/run/postgresql";
# db_name = "headscale";
# db_user = "headscale";
# Tailscale IP Base:
ip_prefixes = [
"100.64.0.0/10"
];
# Give a name to each device (<host>.tailnet via MagicDNS)
dns_config = {
base_domain = "tailnet";
magic_dns = true;
};
};
};
# Temporary until systemd units are made
# TODO: Create automatic systemd units for provisioning auth keys
environment.systemPackages = with pkgs; [
headscale
];
services.caddy.virtualHosts = {
"headscale.syzygial.cc" = {
extraConfig = ''
reverse_proxy localhost:7000
'';
};
};
# services.postgresql = {
# enable = true;
# port = 5432;
# ensureDatabases = [
# "headscale"
# ];
# ensureUsers = [{
# name = "headscale";
# ensurePermissions = {
# "DATABASE \"headscale\"" = "ALL PRIVILEGES";
# };
# }];
# };
}

View File

@@ -0,0 +1,89 @@
# Hydra CI server at hydra.syzygial.cc, plus a restricted deployment user
# that Hydra's DynamicRunCommand hook can use to update NixOS containers.
{config, pkgs, ...}: let
# Usage: deploy-nixos-container <container> <flake-dir> <flake-attr>.
# After updating the container it aggressively garbage-collects the git
# checkout so stale objects do not accumulate.
deploy-container = pkgs.writeScriptBin "deploy-nixos-container" ''
pushd $2
nixos-container update $1 --flake $2#$3
git reset --hard HEAD
git clean -fdx
git reflog expire --expire=now --all
git repack -ad # Remove dangling objects from packfiles
git prune # Remove dangling loose objects
popd
'';
in {
imports = [
# Local binary cache served alongside Hydra.
./nix-serve.nix
];
services.hydra = {
enable = true;
hydraURL = "https://hydra.syzygial.cc";
port = 3500;
notificationSender = "hydra@localhost";
buildMachinesFiles = [];
useSubstitutes = true;
# Allow jobs to declare RunCommand hooks dynamically.
extraConfig = ''
<dynamicruncommand>
enable = 1
</dynamicruncommand>
'';
};
systemd.services.hydra = {
serviceConfig = {
# Back off between restarts.
RestartSec = "20s";
};
};
users.users."hydra" = {
openssh.authorizedKeys.keys = [
];
packages = [
];
};
# Deployment User
users.users.hydra-deploy = {
isNormalUser = true;
home = "/var/lib/hydra/deploy";
description = "Hydra Deployment User";
extraGroups = [ "hydra" ];
packages = [
deploy-container
];
};
# TODO: Configure authorizedKeys between
# hydra-queue-runner and hydra-deploy
# hydra-deploy may run only the deploy script as root, without a password.
security.sudo.extraRules = [
{
users = ["hydra-deploy"];
commands = [
{
command = "${deploy-container}/bin/deploy-nixos-container *";
options = ["NOPASSWD"];
}
];
}
];
# NAT for the deployed containers' veth interfaces.
networking.nat = {
enable = true;
internalInterfaces = [
"ve-newalan"
"ve-handyhelper"
];
externalInterface = "enp0s25";
enableIPv6 = true;
};
# Build everything on this machine.
nix.buildMachines = [
{ hostName = "localhost";
system = "x86_64-linux";
supportedFeatures = ["kvm" "nixos-test" "big-parallel" "benchmark"];
maxJobs = 8;
}
];
services.caddy.virtualHosts = {
"hydra.syzygial.cc" = {
extraConfig = ''
reverse_proxy localhost:${toString config.services.hydra.port}
'';
};
};
}

View File

@@ -0,0 +1,30 @@
# Public entry points for the Jellyfin media server (container-based)
# and its companion request front-end on port 5055.
{ pkgs, config, ...}:
{
imports = [
# ./jelly-mount.nix
# Server component is container based
../../oci/jelly.nix
# Load local network DNS resolution
../private/jellyfin.nix
];
services.caddy.virtualHosts = {
# Jellyfin web UI / API on local port 8096, under either domain.
"jelly.syzygial.cc" = {
serverAliases = [
"jelly.crompton.cc"
];
extraConfig = ''
reverse_proxy 127.0.0.1:8096
'';
};
# Media-request front-end on local port 5055.
"add.jelly.crompton.cc" = {
serverAliases = [
# "add.jelly.syzygial.cc"
];
extraConfig = ''
reverse_proxy 127.0.0.1:5055
'';
};
};
}

View File

@@ -0,0 +1,114 @@
# Nextcloud behind Caddy (TLS) -> nginx on 127.0.0.1:8000, with file data
# in the MinIO S3 bucket (over the tailnet) and metadata in PostgreSQL.
{ pkgs, config, ...}: let
# Secrets readable only by the nextcloud user/group.
nxperm = {
owner = "nextcloud";
group = "nextcloud";
mode = "0440";
};
in {
imports = [
# NOTE(review): spelled "collobara" — confirm the filename on disk
# (presumably the Collabora Online office integration).
./nextcloud/collobara.nix
];
sops.secrets."nextcloud/adminPass" = nxperm;
sops.secrets."nextcloud/s3secret" = nxperm;
services.nextcloud = {
enable = true;
package = pkgs.nextcloud27;
# nginx serves on localhost only; Caddy fronts the public domains below.
hostName = "localhost";
config = {
adminuser = "CromptonAdmin";
adminpassFile = config.sops.secrets."nextcloud/adminPass".path;
extraTrustedDomains = [
"cloud.crompton.cc"
"nextcloud.syzygial.cc"
];
trustedProxies = [
"cloud.crompton.cc"
"nextcloud.syzygial.cc"
];
dbtype = "pgsql";
dbname = "nextcloud";
dbuser = "nextcloud";
dbhost = "/run/postgresql";
# Caddy terminates TLS, so Nextcloud must generate https links.
overwriteProtocol = "https";
# Primary object storage on the MinIO container (tailnet address).
objectstore.s3 = {
enable = true;
bucket = "nextcloud";
autocreate = false;
key = "nextcloud";
secretFile = config.sops.secrets."nextcloud/s3secret".path;
region = "us-east-1";
hostname = "100.64.0.4";
port = 9000;
useSsl = false;
usePathStyle = true;
};
};
};
# Disabled workaround that rebuilt the Recognize app's tensorflow native
# module after install (see "Disable Recognize Setup" in the history).
# systemd.services.nextcloud-setup = {
# requires = [ "postgresql.service" ];
# after = [ "postgresql.service" ];
# path = config.users.users.nextcloud.packages;
# script = ''
# if [[ ! -e /var/lib/nextcloud/store-apps/recognize/node_modules/@tensorflow/tfjs-node/lib/napi-v8/tfjs_binding.node ]]; then
# if [[ -d /var/lib/nextcloud/store-apps/recognize/node_modules/ ]]; then
# cd /var/lib/nextcloud/store-apps/recognize/node_modules/
# npm rebuild @tensorflow/tfjs-node --build-addon-from-source
# fi
# fi
# '';
# };
systemd.services.phpfpm-nextcloud = {
# Make ffmpeg/node (listed below) visible to PHP.
path = config.users.users.nextcloud.packages;
};
users.users.nextcloud = {
shell = pkgs.bashInteractive;
packages = with pkgs; [
# generate video thumbnails with preview generator
ffmpeg_5-headless
# required for recognize app
nodejs-14_x # runtime and installation requirement
nodejs-14_x.pkgs.node-pre-gyp # installation requirement
util-linux # runtime requirement for taskset
];
};
# Pin the nginx vhost to loopback:8000 so only Caddy can reach it.
services.nginx.virtualHosts."localhost".listen = [ { addr = "127.0.0.1"; port = 8000; } ];
services.caddy.virtualHosts = {
"cloud.crompton.cc" = {
serverAliases = [
"nextcloud.syzygial.cc"
];
extraConfig = ''
reverse_proxy 127.0.0.1:8000
'';
};
};
services.postgresql = {
enable = true;
port = 5432;
ensureDatabases = [
"nextcloud"
];
ensureUsers = [{
name = "nextcloud";
ensurePermissions = {
"DATABASE \"nextcloud\"" = "ALL PRIVILEGES";
};
ensureClauses = {
createdb = true;
};
}];
};
}

View File

@@ -0,0 +1,5 @@
# Placeholder module — intentionally contributes no configuration yet.
{config, pkgs, ...}:
{
}

View File

@@ -0,0 +1,50 @@
# OnlyOffice document server, LAN-only at https://only.office
# (internal TLS via Caddy, resolved by the local unbound records below).
{config, pkgs, ...}:
{
services.onlyoffice = {
enable = true;
port = 7001;
hostname = "only.office";
# Shares the host's PostgreSQL over the local socket.
postgresHost = "/run/postgresql";
postgresName = "onlyoffice";
postgresUser = "onlyoffice";
};
# Move the module's nginx vhost off the port Caddy proxies to.
# NOTE(review): Caddy targets 7001 while nginx listens on 7002 —
# confirm which port actually serves the document server.
services.nginx.virtualHosts."${config.services.onlyoffice.hostname}".listen = [ { addr = "127.0.0.1"; port = 7002; } ];
services.unbound.settings.server = let
RECORD = ".office. IN A 192.168.1.20";
in {
local-zone = [
"office. transparent"
];
local-data = [
"'only${RECORD}'"
];
};
services.caddy.virtualHosts = {
# "tls internal": Caddy self-signs since only.office is not public.
"https://only.office" = {
extraConfig = ''
tls internal
reverse_proxy 127.0.0.1:7001
'';
};
};
services.postgresql = {
enable = true;
port = 5432;
ensureDatabases = [
"onlyoffice"
];
ensureUsers = [{
name = "onlyoffice";
ensurePermissions = {
"DATABASE \"onlyoffice\"" = "ALL PRIVILEGES";
};
}];
};
}

View File

@@ -0,0 +1,16 @@
# nix-serve: exposes this machine's /nix/store as a signed binary cache
# at nixcache.syzygial.cc (imported by hydra.nix).
{config, pkgs, ...}:
{
services.nix-serve = {
enable = true;
port = 5050;
# Private signing key for the cache.
# NOTE(review): other secrets were migrated to sops-nix; this one still
# lives under /etc/nixos/secrets — consider moving it too.
secretKeyFile = "/etc/nixos/secrets/cache-priv-key.pem";
};
services.caddy.virtualHosts = {
"nixcache.syzygial.cc" = {
extraConfig = ''
reverse_proxy 127.0.0.1:${toString config.services.nix-serve.port}
'';
};
};
}

View File

@@ -0,0 +1,75 @@
# Pterodactyl game-server panel in a hand-rolled systemd-nspawn unit
# (rather than the stock NixOS container wrapper), so that the extra
# syscall-filter and /dev/fuse options can be passed to nspawn.
{config, pkgs, ...}:
{
systemd.targets.machines.enable = true;
systemd.services."pterodactyl-container" = {
enable = true;
wantedBy = ["machines.target"];
environment = {
# SYSTEMD_NSPAWN_USE_CGNS = "0";
};
# Boot (-b) the pre-built image from /var/lib/machines with a private
# veth pair (-n); add_key/keyctl/bpf syscalls and /dev/fuse are
# allowed through for the software running inside.
script = ''
exec ${config.systemd.package}/bin/systemd-nspawn --hostname pterodactyl \
--resolv-conf=off --system-call-filter="add_key keyctl bpf" --bind /dev/fuse \
-nbD /var/lib/machines/pterodactyl --machine pterodactyl
'';
# Manually bring up the host side of the veth pair; `|| true` keeps
# restarts idempotent when the link/addresses already exist.
postStart = ''
${pkgs.iproute2}/bin/ip link set ve-pterodactyl up || true
${pkgs.iproute2}/bin/ip addr add 10.1.0.0 dev ve-pterodactyl || true
${pkgs.iproute2}/bin/ip route add 10.1.0.1 dev ve-pterodactyl || true
'';
serviceConfig = {
Type = "notify";
Slice = "machine.slice";
Delegate = true;
DeviceAllow = "/dev/fuse rwm";
};
};
networking.nat = {
enable = true;
# Check for hostBridge use vb instead of ve
internalInterfaces = ["ve-pterodactyl"];
externalInterface = "enp0s25";
enableIPv6 = true;
# Game traffic plus SFTP (2022) forwarded into the container.
# NOTE(review): source range 25565:28000 maps onto destination range
# 25565-25600 — the two ranges have different sizes; confirm intent.
forwardPorts = [
{ sourcePort = "25565:28000";
destination = "10.1.0.1:25565-25600";
proto = "tcp";
}
{ sourcePort = "25565:28000";
destination = "10.1.0.1:25565-25600";
proto = "udp";
}
{ sourcePort = 2022;
destination = "10.1.0.1:2022";
proto = "tcp";
}
{ sourcePort = 2022;
destination = "10.1.0.1:2022";
proto = "udp";
}
];
};
# Panel web UI on 443 and the node daemon API on 9000; both public
# names point at the same container.
services.caddy.virtualHosts = {
"games.syzygial.cc:443" = {
extraConfig = ''
reverse_proxy 10.1.0.1:80
'';
};
"games.syzygial.cc:9000" = {
extraConfig = ''
reverse_proxy 10.1.0.1:9000
'';
};
"pnode.syzygial.cc:443" = {
extraConfig = ''
reverse_proxy 10.1.0.1:9000
'';
};
"pnode.syzygial.cc:9000" = {
extraConfig = ''
reverse_proxy 10.1.0.1:9000
'';
};
};
}

View File

@@ -0,0 +1,14 @@
# Static personal site: Caddy serves /srv/www/syzygial directly, with
# directory listings ("browse") enabled.
{ config, pkgs, ... }:
{
  services.caddy.virtualHosts."syzygial.cc".extraConfig = ''
    file_server {
      root /srv/www/syzygial
      browse
    }
  '';
}

View File

@@ -0,0 +1,37 @@
# Vaultwarden (Bitwarden-compatible password manager) with PostgreSQL
# storage, public at vault.crompton.cc.
{config, pkgs, ...}:
{
# sops-managed environment file, readable by the service's user.
# NOTE(review): the DB connection string is presumably supplied through
# this environment file — confirm.
sops.secrets.vaultenv = {
owner = config.systemd.services.vaultwarden.serviceConfig.User;
};
services.vaultwarden = {
enable = true;
dbBackend = "postgresql";
environmentFile = config.sops.secrets.vaultenv.path;
config = {
DOMAIN = "https://vault.crompton.cc";
# Loopback only; reachable solely through the Caddy proxy below.
ROCKET_ADDRESS = "127.0.0.1";
ROCKET_PORT = 8222;
};
};
services.postgresql = {
enable = true;
port = 5432;
ensureDatabases = [
"vaultwarden"
];
ensureUsers = [{
name = "vaultwarden";
ensurePermissions = {
"DATABASE \"vaultwarden\"" = "ALL PRIVILEGES";
};
}];
};
services.caddy.virtualHosts = {
"vault.crompton.cc" = {
extraConfig = ''
reverse_proxy 127.0.0.1:${toString config.services.vaultwarden.config.ROCKET_PORT}
'';
};
};
}

View File

@@ -0,0 +1,19 @@
# WatchThingz shared-watching service ("Rabb.it at home" per the
# history); the server itself runs as an OCI container.
{pkgs, config, ...}:
{
imports = [
../../oci/watchthingz.nix
];
services.caddy.virtualHosts = {
# Forward the original host, client IP, and scheme to the app.
"watchthingz.syzygial.cc" = {
extraConfig = ''
reverse_proxy 127.0.0.1:8080 {
header_up Host {host}
header_up X-Real-IP {remote_host}
header_up X-Forwarded-For {remote_host}
header_up X-Forwarded-Proto {scheme}
}
'';
};
};
}