Compare commits

247 Commits

SHA1 Message Date
3251639b39 coblemmon
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Cleanup Docker / cleanup-docker (push) Successful in 0s
Cleanup Docker / notify-on-failure (push) Has been skipped
Cleanup NixOS Generations / cleanup-generations (push) Successful in 2s
Cleanup NixOS Generations / notify-on-failure (push) Has been skipped
ZFS Backup / update-infrastructure (push) Successful in 50s
ZFS Backup / notify-on-failure (push) Has been skipped
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 5s
Libation / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 1s
Manage Jellyfin Playlists / run-python (push) Successful in 47s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-03-11 19:08:13 -06:00
1b21f2c962 coblemmon
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-11 19:07:19 -06:00
04509ceade Merge branch 'main' of github.com:alexmickelson/infrastructure
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 1s
Manage Jellyfin Playlists / run-python (push) Successful in 53s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
ZFS Backup / update-infrastructure (push) Successful in 57s
ZFS Backup / notify-on-failure (push) Has been skipped
2026-03-11 18:58:08 -06:00
8c2143c3b2 coblemmon 2026-03-11 18:58:07 -06:00
5a3a8e053d lazydocker fixed now 2026-03-10 10:06:29 -06:00
5fb34c7188 fish updates 2026-03-10 09:29:17 -06:00
75d1bcf15f minecraft
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
ZFS Backup / notify-on-failure (push) Has been skipped
ZFS Backup / update-infrastructure (push) Successful in 50s
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 5s
Libation / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / run-python (push) Successful in 48s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-03-07 20:03:16 -07:00
a62d07ca6c minecraft
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-07 20:01:46 -07:00
3f5c9b24a4 minecraft
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-07 20:01:12 -07:00
4f26431fcb minecraft
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Manage Jellyfin Playlists / run-python (push) Successful in 51s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-03-07 19:59:07 -07:00
d9083651c2 minecraft
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-07 19:55:30 -07:00
5fc9da84d3 minecraft
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-07 19:54:55 -07:00
0ca2ab2401 minecraft
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-07 19:53:46 -07:00
d24a905516 more elixir dev tools 2026-03-05 12:31:49 -07:00
9bf0cabd8d landing updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Libation / update-repo (push) Successful in 0s
ZFS Backup / update-infrastructure (push) Successful in 1m17s
ZFS Backup / notify-on-failure (push) Has been skipped
Libation / sync-audiobooks (push) Successful in 5s
Libation / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 1s
Cleanup NixOS Generations / cleanup-generations (push) Successful in 8s
Cleanup NixOS Generations / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / run-python (push) Successful in 47s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-03-05 11:00:58 -07:00
ceb89d92fe landing updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 1s
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 5s
Libation / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / run-python (push) Successful in 47s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-03-05 10:59:46 -07:00
ee5966306a landing updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:56:38 -07:00
01b92733c3 landing updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:53:05 -07:00
e52ae3f451 landing updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:51:01 -07:00
567a59f9b1 landing updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:47:55 -07:00
4d6357cc74 landing page updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:45:09 -07:00
ab27bb1183 landing page updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:36:32 -07:00
a5e2ce944e landing page updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:33:56 -07:00
9f9a2fdc2c more trying to change comfigmap
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:27:27 -07:00
169dc7e2bf landing page updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:24:54 -07:00
7e1ed7cf54 color vars
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:20:12 -07:00
dad37e8015 create configmap from files 2026-03-05 10:19:56 -07:00
e32f08391b new page
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:17:15 -07:00
60c633a1db new page
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:15:40 -07:00
61fa5e4e33 new page
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:13:57 -07:00
a4f49c42f7 new page
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:08:51 -07:00
1611df4ec8 trying landing page
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:06:45 -07:00
641c6bd5c3 gitea stuff 2026-03-05 09:41:01 -07:00
95beb54b32 ntfy
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 1s
Manage Jellyfin Playlists / run-python (push) Successful in 47s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-03-05 09:39:59 -07:00
b6a8d96585 ntfy 2026-03-05 09:36:24 -07:00
d37726fcc9 cloudflare and ntfy
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 09:35:13 -07:00
cce76cdbc2 no healthcheck
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 09:33:18 -07:00
7242f64b0c cloudflare tunnel ingress
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 09:28:52 -07:00
b074a02edf Merge branch 'main' of github.com:alexmickelson/infrastructure 2026-03-04 10:02:54 -07:00
d36486c935 more packages 2026-03-04 10:02:53 -07:00
906b6d6c0d back in time 2026-03-03 21:48:53 -07:00
e08252dc17 no wayland again 2026-03-03 21:46:41 -07:00
695a6723ce no wayland again 2026-03-03 21:43:33 -07:00
b2fdc5a3c4 other kernel 2026-03-03 21:41:16 -07:00
7ec08abcb2 other kernel 2026-03-03 21:41:09 -07:00
f0b6b7b08f try without wayland 2026-03-03 21:38:44 -07:00
768a7cf235 try without wayland 2026-03-03 21:37:54 -07:00
b0f36e989c mesa 2026-03-03 21:34:44 -07:00
bfc60bf27c node path 2026-03-03 15:39:02 -07:00
6301d82dff node path 2026-03-03 15:37:33 -07:00
fe10f7615c node path 2026-03-03 15:35:57 -07:00
b6b19a3950 node path 2026-03-03 15:34:24 -07:00
a79f524b6c Merge branch 'main' of github.com:alexmickelson/infrastructure
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
ZFS Backup / update-infrastructure (push) Successful in 2m9s
ZFS Backup / notify-on-failure (push) Has been skipped
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 4s
Libation / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / run-python (push) Successful in 46s
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-03-03 15:32:19 -07:00
2e9b40fba0 node 2026-03-03 15:32:18 -07:00
6eeaed33a4 Update tv-computer.nix 2026-03-02 18:00:50 -07:00
6e6c1dc530 Update tv-computer.nix 2026-03-02 17:56:44 -07:00
c9fc909727 merging 2026-03-02 16:57:15 -07:00
ffc69352fa tv-computer 2026-03-02 16:55:25 -07:00
660be9736b no fprint 2026-03-02 15:33:47 -07:00
5daa737dab Merge branch 'main' of github.com:alexmickelson/infrastructure 2026-02-28 15:02:58 -07:00
80b48ca458 fprintd 2026-02-28 15:02:57 -07:00
096cf1cc2d watchman 2026-02-26 11:16:07 -07:00
5d2f7b5ce0 inotify 2026-02-26 11:14:36 -07:00
4470db7960 updates 2026-02-24 19:16:44 -07:00
f7accecaae elixir at work 2026-02-23 16:04:30 -07:00
758e0fb3ba cleanup docker
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Cleanup NixOS Generations / cleanup-generations (push) Successful in 2s
Cleanup NixOS Generations / notify-on-failure (push) Has been skipped
Cleanup Docker / cleanup-docker (push) Successful in 1s
Cleanup Docker / notify-on-failure (push) Has been skipped
ZFS Backup / notify-on-failure (push) Has been skipped
ZFS Backup / update-infrastructure (push) Successful in 1m43s
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 5s
Libation / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 1s
Manage Jellyfin Playlists / run-python (push) Successful in 52s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-02-18 21:11:55 -07:00
e1fb378cd3 updates 2026-02-18 21:09:09 -07:00
cedd54f901 updates 2026-02-18 21:08:00 -07:00
49156df8b4 updates 2026-02-18 21:06:47 -07:00
f7990beee6 updates 2026-02-18 21:05:55 -07:00
6deeb3d2a7 updates 2026-02-18 21:04:54 -07:00
e1673c5f10 updates 2026-02-18 21:04:19 -07:00
e42a65cc6e updates 2026-02-18 21:01:50 -07:00
f72966f229 automated garbage collection
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-02-18 21:00:32 -07:00
b64dd151ff more notifications
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Manage Jellyfin Playlists / run-python (push) Successful in 48s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-02-18 20:48:17 -07:00
cb1cfa5c78 removed artificial failure
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-02-18 20:46:56 -07:00
870e26f0e7 new notification updates
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Successful in 0s
2026-02-18 20:45:24 -07:00
02bbb0e425 new notification updates
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Successful in 0s
2026-02-18 20:43:20 -07:00
0fe208cce1 secrets
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Successful in 0s
2026-02-18 20:40:54 -07:00
206f2671a6 secrets
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Successful in 0s
2026-02-18 20:38:54 -07:00
36db78a8bd real fix
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Failing after 0s
2026-02-18 20:38:27 -07:00
fafdcae679 real fix
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Failing after 0s
2026-02-18 20:37:14 -07:00
893f20663a real fix
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Failing after 0s
2026-02-18 20:36:20 -07:00
8a6ec2fe5e real fix
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Failing after 0s
2026-02-18 20:35:34 -07:00
6fb8c6c6f6 fix fail
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Failing after 0s
2026-02-18 20:35:05 -07:00
2657217d93 force fail
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Successful in 0s
2026-02-18 20:31:26 -07:00
ffda56c3e9 curl 2026-02-18 20:29:15 -07:00
fab75ba547 try again
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 3s
Apply Kuberentes Configs / notify-on-failure (push) Successful in 0s
2026-02-18 20:26:00 -07:00
3b397600fe force fail
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 3s
Apply Kuberentes Configs / notify-on-failure (push) Failing after 1s
2026-02-18 20:24:44 -07:00
659849a652 notify ntfy on fail
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-02-18 20:24:02 -07:00
a12f1dd9fe updates 2026-02-16 21:17:37 -07:00
2443e4383c trying again
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 4s
ZFS Backup / update-infrastructure (push) Successful in 52s
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Manage Jellyfin Playlists / run-python (push) Successful in 47s
2026-02-15 19:09:09 -07:00
72734bd734 homepage migration
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
2026-02-15 19:07:46 -07:00
b768860289 cleaning up
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Manage Jellyfin Playlists / run-python (push) Successful in 49s
2026-02-15 18:23:18 -07:00
b8a80d9290 no proxy
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
2026-02-15 18:22:12 -07:00
183ae6f91f kube
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 0s
2026-02-15 18:21:20 -07:00
46b710252e jellyfin 2026-02-15 18:20:55 -07:00
76708b98da no proxy
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Manage Jellyfin Playlists / run-python (push) Successful in 49s
ZFS Backup / update-infrastructure (push) Successful in 1m12s
2026-02-15 17:55:27 -07:00
ea983af9a4 copilot
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
2026-02-15 17:54:51 -07:00
e6d53e33df more kubernetes apply
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-15 17:49:48 -07:00
7f7309d2a3 kubeconfig
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-15 17:48:37 -07:00
c4273f5e63 kubernetes apply
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 1s
2026-02-15 17:47:54 -07:00
f8006a4595 audiobook 2026-02-15 17:40:00 -07:00
108cfa79b7 musicassistant 2026-02-15 17:30:12 -07:00
7b0148696c remove zwave 2026-02-15 17:18:55 -07:00
d531f8c44a zwave 2026-02-15 17:18:35 -07:00
6e83dea4a3 home assistant in kube
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Manage Jellyfin Playlists / run-python (push) Successful in 50s
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 5s
2026-02-15 10:42:27 -07:00
90076edfac updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-10 14:34:24 -07:00
721ae13de2 creds
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
ZFS Backup / update-infrastructure (push) Successful in 48s
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 5s
Manage Jellyfin Playlists / update-repo (push) Successful in 1s
Manage Jellyfin Playlists / run-python (push) Successful in 51s
2026-02-07 15:26:55 -07:00
41876c6347 playlists in gitea now
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:25:57 -07:00
4268297107 playlists in gitea now
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:25:26 -07:00
b6d48e8f3c playlists in gitea now
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:25:04 -07:00
92e3915d94 playlists in gitea now
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:24:32 -07:00
d121a5f179 no servie
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:21:46 -07:00
8dfc29071e libation sync
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:20:47 -07:00
faf0ac890a runs-on
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
2026-02-07 15:14:18 -07:00
b9ec61015b runs-on
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 2s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:13:31 -07:00
040a7e50ce runs-on
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 2s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
2026-02-07 15:12:47 -07:00
8bf14fbdf6 capacity
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:11:40 -07:00
54a0a804a9 capacity 2026-02-07 15:10:41 -07:00
0dc1f3e7aa zfs permissions 2026-02-07 15:00:14 -07:00
2df709af93 zfs permissions 2026-02-07 15:00:00 -07:00
cae3fdf479 zfs backup via gitea
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 2s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 14:54:31 -07:00
3ce79c4d5b gitea updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 14:49:27 -07:00
cbd2f12189 split out repo
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 1m7s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 14:44:52 -07:00
ac3c221711 simplify 2026-02-07 14:42:07 -07:00
7e30a419db kubectl
All checks were successful
Apply Kuberentes Configs / test-environment (push) Successful in 0s
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 14:26:02 -07:00
7951a86cb3 environment
Some checks failed
Apply Kuberentes Configs / test-environment (push) Successful in 0s
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 0s
2026-02-07 14:15:00 -07:00
ef3002e328 environment
Some checks failed
Apply Kuberentes Configs / test-environment (push) Failing after 1s
Apply Kuberentes Configs / update-repo (push) Failing after 1s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
2026-02-07 14:12:24 -07:00
6e9d586b9c gitea runner 2026-02-07 14:10:20 -07:00
78bf6e2cce gitea runner 2026-02-07 14:08:10 -07:00
91e94da379 gitea runner 2026-02-07 14:07:16 -07:00
3d9a162b1c gitea runner 2026-02-07 14:05:25 -07:00
2f176f9474 gitea runner 2026-02-07 14:03:42 -07:00
bc9d243c28 workflow
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 0s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
2026-02-07 14:02:23 -07:00
5484553a87 environment 2026-02-07 14:02:00 -07:00
5f55fe11a3 environment 2026-02-07 13:59:39 -07:00
eb111fb5f8 environment 2026-02-07 13:54:49 -07:00
0ce24bad4e environment 2026-02-07 13:46:35 -07:00
d8ab3f161f environment 2026-02-07 13:45:35 -07:00
4117e4d46e environment 2026-02-07 13:44:09 -07:00
576a404aaf environment 2026-02-07 13:43:57 -07:00
cbf2241895 environment 2026-02-07 13:43:15 -07:00
cb8bff7c0a environment 2026-02-07 13:41:21 -07:00
a308b23380 environment 2026-02-07 13:39:59 -07:00
d39ab4b5f2 environment 2026-02-07 13:38:57 -07:00
5b50211103 environment 2026-02-07 13:37:39 -07:00
1f92a821fb environment 2026-02-07 13:37:27 -07:00
0ab0e939f3 environment 2026-02-07 13:36:49 -07:00
1a32a3d826 environment 2026-02-07 13:36:01 -07:00
474b0ac5ad environment 2026-02-07 13:34:47 -07:00
01f0524153 only gitea apply kube
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 0s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
2026-02-07 13:32:27 -07:00
109d31e210 Update tv-computer.nix 2026-01-31 09:33:19 -07:00
36bafd2602 Update tv-computer.nix 2026-01-31 09:32:49 -07:00
c9646e20ae Update tv-computer.nix 2026-01-31 09:32:15 -07:00
438c4dcb2d test 2026-01-31 09:31:02 -07:00
a29d1d15a6 Merge branch 'main' of github.com:alexmickelson/infrastructure 2026-01-31 09:30:29 -07:00
62158a3cdb test 2026-01-31 09:30:08 -07:00
4fe32a1600 Update tv-computer.nix 2026-01-31 09:27:40 -07:00
6d638e6fed trying again 2026-01-27 22:57:38 -07:00
8aab928228 trying again 2026-01-27 22:57:20 -07:00
af23a4089a trying again 2026-01-27 22:55:26 -07:00
1c3e28612f trying again 2026-01-27 22:54:30 -07:00
8948970733 restore esential github workflows
Some checks failed
Update home server containers / update-repo (push) Failing after 0s
Update home server containers / update-infrastructure (push) Has been skipped
Apply Kuberentes Configs / update-repo (push) Failing after 0s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
Libation / sync-audiobooks (push) Failing after 0s
Manage Jellyfin Playlists / run-python (push) Failing after 0s
ZFS Backup / update-infrastructure (push) Successful in 1m10s
2026-01-27 22:51:11 -07:00
2596129600 more stuff 2026-01-27 22:49:25 -07:00
13b2351075 more stuff 2026-01-27 22:46:17 -07:00
565572c869 more stuff
Some checks failed
Update home server containers / update-repo (push) Failing after 0s
Update home server containers / update-infrastructure (push) Has been skipped
Apply Kuberentes Configs / update-repo (push) Failing after 0s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
2026-01-27 22:44:40 -07:00
dcd8a8590d more stuff 2026-01-27 22:42:43 -07:00
7575c9c974 more stuff 2026-01-27 22:40:07 -07:00
90df48ccee more stuff 2026-01-27 22:38:09 -07:00
6b516697e2 more stuff 2026-01-27 22:37:01 -07:00
e7c403e35c labels
Some checks failed
Update home server containers / update-repo (push) Failing after 0s
Update home server containers / update-infrastructure (push) Has been skipped
Apply Kuberentes Configs / update-repo (push) Failing after 0s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
2026-01-27 22:35:41 -07:00
b6a6a7ebe1 labels 2026-01-27 22:34:16 -07:00
ada066dbec host stuff i guess
Some checks failed
Update home server containers / update-repo (push) Failing after 0s
Update home server containers / update-infrastructure (push) Has been skipped
Apply Kuberentes Configs / update-repo (push) Has been cancelled
Apply Kuberentes Configs / update-infrastructure (push) Has been cancelled
2026-01-27 22:31:53 -07:00
758371ff22 updates 2026-01-27 22:28:54 -07:00
05e2c6f95d updates
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 0s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
Update home server containers / update-repo (push) Failing after 0s
Update home server containers / update-infrastructure (push) Has been skipped
2026-01-27 22:24:40 -07:00
05c5d1a1e5 host
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 0s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
Update home server containers / update-repo (push) Failing after 0s
Update home server containers / update-infrastructure (push) Has been skipped
Manage Jellyfin Playlists / run-python (push) Has been cancelled
2026-01-27 21:48:40 -07:00
b8ac104002 updates to paths
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 0s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
Update home server containers / update-repo (push) Failing after 0s
Update home server containers / update-infrastructure (push) Has been skipped
2026-01-27 21:46:44 -07:00
bc5921c3a1 url
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 0s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
Update home server containers / update-repo (push) Failing after 0s
Update home server containers / update-infrastructure (push) Has been skipped
2026-01-27 21:41:33 -07:00
db4ca56cae user stuff 2026-01-27 21:34:34 -07:00
859f2f653e labels 2026-01-27 21:29:30 -07:00
e35c43027a labels 2026-01-27 21:28:55 -07:00
776e3dcc3b labels 2026-01-27 21:24:48 -07:00
22da6ac0c3 token 2026-01-27 21:24:20 -07:00
1337ce06ab adding gitea runner 2026-01-27 21:22:42 -07:00
726edef18b adding gitea runner 2026-01-27 21:21:42 -07:00
cefe7caa1c adding gitea runner 2026-01-27 21:17:34 -07:00
fb0376e0b9 home videos
Some checks failed
Apply Kuberentes Configs / update-repo (push) Has been cancelled
Apply Kuberentes Configs / update-infrastructure (push) Has been cancelled
Update home server containers / update-repo (push) Failing after 0s
Update home server containers / update-infrastructure (push) Has been skipped
2026-01-24 15:36:48 -07:00
5a7679d53d gitea with a runner 2026-01-24 15:35:39 -07:00
653200201f updates 2026-01-24 14:54:10 -07:00
a8668c325d kubectl 2026-01-24 14:22:55 -07:00
eb6c9e7b10 firewall chanegs, kube changes 2026-01-24 14:18:13 -07:00
adc40a0ab3 url change 2026-01-24 13:07:27 -07:00
4ebc3e93c2 Merge branch 'main' of github.com:alexmickelson/infrastructure 2026-01-24 12:47:43 -07:00
20358f4e3b updates 2026-01-24 12:47:39 -07:00
d58661ebd4 trying 2026-01-23 14:34:13 -07:00
fdd7420fdb Merge branch 'main' of github.com:alexmickelson/infrastructure 2026-01-22 14:10:58 -07:00
929d32724f lfs 2026-01-22 14:07:56 -07:00
4d49f57aa2 amd 2026-01-16 14:50:23 -07:00
86bf7971b2 updates 2026-01-10 16:20:16 -07:00
dadabdb1bb adb stuff 2026-01-10 14:44:40 -07:00
293ec63b75 testing 2026-01-08 10:05:48 -07:00
88c1b9eb68 ghostty configs 2026-01-07 20:10:20 -07:00
f8f793fea3 ghostty home 2026-01-07 19:57:58 -07:00
7766fd10b9 zen stuff 2026-01-07 15:03:37 -07:00
409074f3bf k3s 2026-01-07 11:56:57 -07:00
ec0b25779f ghostty 2026-01-07 11:49:25 -07:00
096d8c7a3e updates 2026-01-07 09:17:19 -07:00
909c72a310 ghostty config 2026-01-06 14:25:41 -07:00
ee632f7ea9 updates 2026-01-06 12:32:17 -07:00
b527582b9d zip 2026-01-05 22:53:52 -07:00
7e5ff0be42 yubal 2026-01-05 19:51:45 -07:00
8019972d36 no metatube 2026-01-05 19:50:29 -07:00
2bfe7ddbc2 Merge branch 'main' of github.com:alexmickelson/infrastructure 2026-01-05 19:43:36 -07:00
ccad019fdc metatube 2026-01-05 19:43:31 -07:00
7d901d47da updates 2026-01-05 16:04:41 -07:00
2dd792206b nexusmods 2026-01-04 18:44:05 -07:00
7afbdaa5d9 fwupd 2026-01-04 16:50:04 -07:00
9c13eaf3b3 Merge branch 'main' of github.com:alexmickelson/infrastructure 2026-01-03 19:34:33 -07:00
d12f4f87f8 updates 2026-01-03 19:34:23 -07:00
64fd6707d5 updates 2026-01-03 12:59:00 -07:00
661d781e78 updated note 2026-01-02 17:54:22 -07:00
0b798efb68 working helm config 2026-01-02 17:50:38 -07:00
97ac6d224b refactoring proxy ingress to use endpointslice 2026-01-02 16:29:49 -07:00
dae82f8971 helm ingress noted 2026-01-02 16:23:03 -07:00
84340e86cd send to jellyfin 2026-01-02 16:19:36 -07:00
bd04e3a2d1 send to jellyfin 2026-01-02 16:18:22 -07:00
b765566f94 helm ingress noted 2026-01-02 16:13:45 -07:00
34d9be2c20 helm stuff 2026-01-02 16:03:20 -07:00
52718cc43b helm stuff 2026-01-02 16:02:05 -07:00
b882fe4a20 helm stuff 2026-01-02 16:00:31 -07:00
3b8e6410ef copilot ingress 2026-01-02 15:36:40 -07:00
e5d7725ced ingressclass 2026-01-02 14:34:19 -07:00
bc803bd624 paths 2026-01-02 14:31:32 -07:00
de71f8ec2a env 2026-01-02 14:09:50 -07:00
5197568e43 amature mispell 2026-01-02 14:06:18 -07:00
6c88dd243d pathing 2026-01-02 14:05:29 -07:00
00ffb6dfbc zfs 2026-01-02 14:03:02 -07:00
d29c5edf47 kubeclt for runner 2026-01-02 14:00:48 -07:00
094aa7efd2 changes 2026-01-02 13:57:00 -07:00
e0093b0e53 changes 2025-12-31 10:02:57 -07:00
2ab9f380ae vars 2025-12-19 16:23:16 -07:00
385a18445b ovmf 2025-12-19 16:20:03 -07:00
508e1c8a11 ovmf 2025-12-19 16:19:19 -07:00
1d8d287a1e ovmf 2025-12-19 16:09:03 -07:00
c9ecf78f73 Merge branch 'main' of github.com:alexmickelson/infrastructure 2025-12-19 15:46:14 -07:00
a5855d61c9 bluetui 2025-12-19 15:46:08 -07:00
70 changed files with 2695 additions and 1788 deletions

@@ -0,0 +1,84 @@
name: Apply Kuberentes Configs
on: [push, workflow_dispatch]
jobs:
  update-repo:
    uses: ./.gitea/workflows/update-repo.yml
    runs-on: home-server
  update-infrastructure:
    runs-on: home-server
    needs: update-repo-folder
    env:
      KUBECONFIG: /home/gitea-runner/.kube/config
    defaults:
      run:
        working-directory: /home/gitea-runner/infrastructure
    steps:
      - name: update home server containers
        run: |
          kubectl apply -f kubernetes/proxy-ingress
          kubectl annotate ingressclass nginx \
            ingressclass.kubernetes.io/is-default-class="true" --overwrite
      - name: audiobookshelf
        run: |
          kubectl apply -f kubernetes/audiobookshelf/
      - name: home assistant
        run: |
          kubectl apply -f kubernetes/homeassistant/
      - name: copilot
        run: |
          kubectl create secret generic copilot-secret \
            -n copilot \
            --from-literal=token=${{ secrets.COPILOT_SECRET }} \
            --dry-run=client -o yaml | kubectl apply -f -
          kubectl apply -f kubernetes/copilot/
      - name: jellyfin
        run: |
          kubectl apply -f kubernetes/jellyfin/
      - name: minecraft
        run: |
          kubectl apply -f kubernetes/minecraft/
      - name: homepage
        run: |
          kubectl apply -f kubernetes/homepage/
          kubectl rollout restart deployment/homepage -n homepage
      - name: gitea
        env:
          CLOUDFLARED_GITEA_TOKEN: ${{ secrets.CLOUDFLARED_GITEA_TOKEN }}
        run: |
          kubectl apply -f kubernetes/gitea/namespace.yml
          kubectl create configmap gitea-landing-page \
            -n gitea \
            --from-file=home.tmpl=kubernetes/gitea/landingpage.html \
            --from-file=custom-landing.css=kubernetes/gitea/landingpage.css \
            --from-file=custom-landing.js=kubernetes/gitea/landingpage.js \
            --dry-run=client -o yaml | kubectl apply -f -
          for file in kubernetes/gitea/*.yml; do
            cat "$file" | envsubst | kubectl apply -f -
          done
          kubectl rollout restart deployment/gitea-web -n gitea
  notify-on-failure:
    runs-on: home-server
    needs: update-infrastructure
    if: failure()
    uses: ./.gitea/workflows/notify-ntfy.yml
    secrets:
      NTFY_CHANNEL: ${{ secrets.NTFY_CHANNEL }}
    with:
      title: "Kubernetes Apply Failed"
      message: |
        Failed to apply kubernetes configs
      action_url: "https://git.alexmickelson.guru/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}"
      priority: "high"
      tags: "rotating_light,kubernetes"

@@ -0,0 +1,41 @@
name: ZFS Backup
on:
  schedule:
    - cron: 0 1 * * *
  workflow_dispatch:
jobs:
  update-infrastructure:
    runs-on: [home-server]
    steps:
      - name: run syncoid
        run: |
          zpool status
          echo ""
          zfs list
          echo ""
          syncoid \
            --recursive \
            --no-privilege-elevation \
            data-ssd/data \
            backup/data
          syncoid \
            --recursive \
            --no-privilege-elevation \
            data-ssd/media \
            backup/media
  notify-on-failure:
    runs-on: home-server
    needs: update-infrastructure
    if: failure()
    uses: ./.gitea/workflows/notify-ntfy.yml
    secrets:
      NTFY_CHANNEL: ${{ secrets.NTFY_CHANNEL }}
    with:
      title: "ZFS Backup Failed"
      message: |
        Failed to backup ZFS datasets
      action_url: "https://git.alexmickelson.guru/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}"
      priority: "high"
      tags: "rotating_light,backup"

@@ -0,0 +1,31 @@
name: Cleanup Docker
on:
  schedule:
    - cron: '0 3 1,15 * *' # 1st and 15th of every month at 3am
  workflow_dispatch:
jobs:
  cleanup-docker:
    runs-on: [home-server]
    steps:
      - name: Cleanup Docker resources
        run: |
          echo ""
          echo "Removing unused images..."
          docker image prune -a -f --filter "until=336h"
  notify-on-failure:
    runs-on: home-server
    needs: cleanup-docker
    if: failure()
    uses: ./.gitea/workflows/notify-ntfy.yml
    secrets:
      NTFY_CHANNEL: ${{ secrets.NTFY_CHANNEL }}
    with:
      title: "Docker Cleanup Failed"
      message: |
        Failed to cleanup Docker resources
      action_url: "https://git.alexmickelson.guru/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}"
      priority: "high"
      tags: "rotating_light,docker"

@@ -0,0 +1,29 @@
name: Cleanup NixOS Generations
on:
  schedule:
    - cron: '0 2 * * 0' # Every Sunday at 2am
  workflow_dispatch:
jobs:
  cleanup-generations:
    runs-on: [home-server]
    steps:
      - name: Cleanup old NixOS generations
        run: |
          echo "Deleting generations older than 7 days..."
          sudo nix-collect-garbage --delete-older-than 7d
  notify-on-failure:
    runs-on: home-server
    needs: cleanup-generations
    if: failure()
    uses: ./.gitea/workflows/notify-ntfy.yml
    secrets:
      NTFY_CHANNEL: ${{ secrets.NTFY_CHANNEL }}
    with:
      title: "NixOS Cleanup Failed"
      message: |
        Failed to cleanup old NixOS generations
      action_url: "https://git.alexmickelson.guru/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}"
      priority: "high"
      tags: "rotating_light,nixos"

@@ -0,0 +1,35 @@
name: Libation
on:
  schedule:
    - cron: '0 6,12,18,0 * * *'
  workflow_dispatch:
jobs:
  update-repo:
    uses: ./.gitea/workflows/update-repo.yml
    runs-on: home-server
  sync-audiobooks:
    runs-on: [home-server]
    steps:
      - name: Run Libation sync
        working-directory: /home/gitea-runner/infrastructure/home-server/libation
        run: |
          echo "Starting Libation audiobook sync at $(date)"
          docker compose pull -q
          docker compose run --rm libation
          echo "Libation sync completed at $(date)"
  notify-on-failure:
    runs-on: home-server
    needs: sync-audiobooks
    if: failure()
    uses: ./.gitea/workflows/notify-ntfy.yml
    secrets:
      NTFY_CHANNEL: ${{ secrets.NTFY_CHANNEL }}
    with:
      title: "Libation Sync Failed"
      message: |
        Failed to sync audiobooks with Libation
      action_url: "https://git.alexmickelson.guru/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}"
      priority: "high"
      tags: "rotating_light,audiobooks"

@@ -0,0 +1,29 @@
name: deploy minecraft
on: [workflow_dispatch]
jobs:
  minecraft:
    runs-on: home-server
    env:
      KUBECONFIG: /home/gitea-runner/.kube/config
    defaults:
      run:
        working-directory: /home/gitea-runner/infrastructure
    steps:
      - name: checkout repo
        working-directory: /home/gitea-runner
        run: |
          if [ -d "infrastructure" ]; then
            cd infrastructure
            echo "Infrastructure folder exists. Resetting to the most recent commit."
            git reset --hard HEAD
            git pull https://x-access-token:${{ secrets.GITEA_TOKEN }}@git.alexmickelson.guru/${{ gitea.repository }} $(git rev-parse --abbrev-ref HEAD)
          else
            git clone https://x-access-token:${{ secrets.GITEA_TOKEN }}@git.alexmickelson.guru/${{ gitea.repository }}.git
          fi
      - name: deploy minecraft
        env:
          CF_API_KEY: ${{ secrets.CF_API_KEY }}
        run: |
          for file in kubernetes/minecraft/*.yml; do
            cat "$file" | envsubst | kubectl apply -f -
          done
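
The envsubst loop substitutes environment variables like $CF_API_KEY into each manifest before it reaches kubectl, so the secret value never has to be committed. A sketch of the substitution step in isolation; the manifest fragment, secret name, and token value are all hypothetical:

export CF_API_KEY=example-token   # placeholder value
cat <<'EOF' > /tmp/minecraft-secret.yml
apiVersion: v1
kind: Secret
metadata:
  name: cloudflare-credentials    # hypothetical name
stringData:
  api-key: $CF_API_KEY
EOF
# the quoted 'EOF' keeps $CF_API_KEY literal in the file; envsubst fills it in here
envsubst < /tmp/minecraft-secret.yml | kubectl apply --dry-run=client -f -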

@@ -0,0 +1,51 @@
name: Notify NTFY
on:
  workflow_dispatch:
    inputs:
      title:
        required: true
        type: string
      message:
        required: true
        type: string
      priority:
        required: false
        type: string
        default: "default"
      tags:
        required: false
        type: string
        default: "warning"
      action_url:
        required: false
        type: string
        default: ""
jobs:
  send-notification:
    runs-on: [home-server]
    env:
      NTFY_CHANNEL: ${{ secrets.NTFY_CHANNEL }}
    steps:
      - name: Send ntfy notification
        working-directory: /home/gitea-runner
        run: |
          set -e
          if [ -n "${{ inputs.action_url }}" ]; then
            cat <<EOF | curl -f -H "Title: ${{ inputs.title }}" \
              -H "Priority: ${{ inputs.priority }}" \
              -H "Tags: ${{ inputs.tags }}" \
              -H "Actions: view, View Logs, ${{ inputs.action_url }}" \
              --data-binary "@-" \
              "https://ntfy.sh/$NTFY_CHANNEL"
          ${{ inputs.message }}
          EOF
          else
            cat <<EOF | curl -f -H "Title: ${{ inputs.title }}" \
              -H "Priority: ${{ inputs.priority }}" \
              -H "Tags: ${{ inputs.tags }}" \
              --data-binary "@-" \
              "https://ntfy.sh/$NTFY_CHANNEL"
          ${{ inputs.message }}
          EOF
          fi
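
The notification step is a plain HTTP publish to ntfy.sh: Title, Priority, Tags, and Actions are standard ntfy headers, and the message body is streamed in via the heredoc. A quick way to test a channel by hand, using a hypothetical channel name:

NTFY_CHANNEL=my-test-channel   # placeholder channel
curl -f \
  -H "Title: Test notification" \
  -H "Priority: high" \
  -H "Tags: rotating_light" \
  -H "Actions: view, View Logs, https://example.com/run/1" \
  -d "Hello from the runner" \
  "https://ntfy.sh/$NTFY_CHANNEL"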

@@ -0,0 +1,45 @@
name: Manage Jellyfin Playlists
on:
  workflow_dispatch:
  schedule:
    - cron: "0 * * * *"
jobs:
  update-repo:
    uses: ./.gitea/workflows/update-repo.yml
    runs-on: home-server
  run-python:
    runs-on: home-server
    steps:
      - name: Run Python script
        env:
          JELLYFIN_USER: ${{ secrets.JELLYFIN_USER }}
          JELLYFIN_PASSWORD: ${{ secrets.JELLYFIN_PASSWORD }}
        working-directory: /home/gitea-runner/infrastructure
        run: |
          echo "$JELLYFIN_USER $JELLYFIN_PASSWORD" > /home/gitea-runner/jellyfin_credentials.txt
          docker build -t jellyfin_management -f jellyfin/Dockerfile .
          docker run --rm \
            -e JELLYFIN_USER=$JELLYFIN_USER \
            -e JELLYFIN_PASSWORD=$JELLYFIN_PASSWORD \
            jellyfin_management \
            -m jellyfin.update_all_songs_playlist
          docker run --rm \
            -e JELLYFIN_USER=$JELLYFIN_USER \
            -e JELLYFIN_PASSWORD=$JELLYFIN_PASSWORD \
            jellyfin_management \
            -m jellyfin.update_unindexed
  notify-on-failure:
    runs-on: home-server
    needs: run-python
    if: failure()
    uses: ./.gitea/workflows/notify-ntfy.yml
    secrets:
      NTFY_CHANNEL: ${{ secrets.NTFY_CHANNEL }}
    with:
      title: "Jellyfin Playlist Update Failed"
      message: |
        Failed to update Jellyfin playlists
      action_url: "https://git.alexmickelson.guru/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}"
      priority: "high"
      tags: "rotating_light,jellyfin"

@@ -0,0 +1,18 @@
name: Update Repository
on:
  workflow_call:
jobs:
  update-repo-folder:
    runs-on: [home-server]
    steps:
      - name: checkout repo
        working-directory: /home/gitea-runner
        run: |
          if [ -d "infrastructure" ]; then
            cd infrastructure
            echo "Infrastructure folder exists. Resetting to the most recent commit."
            git reset --hard HEAD
            git pull https://x-access-token:${{ secrets.GITEA_TOKEN }}@git.alexmickelson.guru/${{ gitea.repository }} $(git rev-parse --abbrev-ref HEAD)
          else
            git clone https://x-access-token:${{ secrets.GITEA_TOKEN }}@git.alexmickelson.guru/${{ gitea.repository }}.git
          fi
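
The reset-then-pull sequence keeps one persistent working copy on the runner in sync without recloning on every run. A fetch-and-reset equivalent under the same assumptions (token auth, tracking the current branch); the token variable and repository path are placeholders:

cd /home/gitea-runner/infrastructure
git fetch "https://x-access-token:$GITEA_TOKEN@git.alexmickelson.guru/owner/infrastructure" \
  "$(git rev-parse --abbrev-ref HEAD)"
# FETCH_HEAD points at the just-fetched branch tip
git reset --hard FETCH_HEAD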

@@ -1,46 +0,0 @@
name: ZFS Backup
on:
  schedule:
    - cron: 0 1 * * *
  workflow_dispatch:
jobs:
  update-infrastructure:
    runs-on: [self-hosted, home-server]
    steps:
      - name: run syncoid
        run: |
          zpool status
          echo ""
          zfs list
          echo ""
          syncoid \
            --recursive \
            --no-privilege-elevation \
            data-ssd/data \
            backup/data
          syncoid \
            --recursive \
            --no-privilege-elevation \
            data-ssd/media \
            backup/media
    # steps:
    #   - name: run syncoid
    #     run: |
    #       zpool status
    #       echo ""
    #       zfs list
    #       echo ""
    #       syncoid \
    #         --recursive \
    #         --no-privilege-elevation \
    #         --no-rollback \
    #         data-ssd/data \
    #         backup/data
    #       syncoid \
    #         --recursive \
    #         --no-privilege-elevation \
    #         --no-rollback \
    #         data-ssd/media \
    #         backup/media

@@ -1,23 +0,0 @@
name: Beets
on:
  schedule:
    # Run 4 times a day: 6am, 12pm, 6pm, 12am UTC
    - cron: '0 6,12,18,0 * * *'
  workflow_dispatch: # Allow manual trigger
jobs:
  sync-beets:
    runs-on: [home-server]
    steps:
      - name: Run Beets sync
        working-directory: /home/github/infrastructure/infrastructure/home-server/beets
        run: |
          git pull
          docker compose pull -q
          docker compose up -d
          docker compose restart
          sleep 2
          docker exec -u 1000 beets bash -c 'beet -v import -i -q /managed/*' || true
          # Clean up empty directories after import (but not /managed itself)
          docker exec -u 1000 beets bash -c 'find /managed -mindepth 1 -type d -empty -delete' || true
          echo "Beets sync completed"

@@ -1,18 +0,0 @@
name: Libation
on:
  schedule:
    # Run 4 times a day: 6am, 12pm, 6pm, 12am UTC
    - cron: '0 6,12,18,0 * * *'
  workflow_dispatch: # Allow manual trigger
jobs:
  sync-audiobooks:
    runs-on: [home-server]
    steps:
      - name: Run Libation sync
        working-directory: /home/github/infrastructure/infrastructure/home-server/libation
        run: |
          echo "Starting Libation audiobook sync at $(date)"
          docker compose pull -q
          docker compose run --rm libation
          echo "Libation sync completed at $(date)"

@@ -1,37 +0,0 @@
name: Manage Jellyfin Playlists
on:
  workflow_dispatch:
  schedule:
    - cron: '0 * * * *'
jobs:
  run-python:
    runs-on: [self-hosted, home-server]
    steps:
      - name: checkout repo
        working-directory: /home/github/infrastructure
        run: |
          if [ -d "infrastructure" ]; then
            cd infrastructure
            echo "Infrastructure folder exists. Resetting to the most recent commit."
            git reset --hard HEAD
            git pull https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }} $(git rev-parse --abbrev-ref HEAD)
          else
            git clone https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git
          fi
      - name: Run Python script
        env:
          JELLYFIN_USER: ${{ secrets.JELLYFIN_USER }}
          JELLYFIN_PASSWORD: ${{ secrets.JELLYFIN_PASSWORD }}
        working-directory: /home/github/infrastructure/infrastructure
        run: |
          docker build -t jellyfin_management -f jellyfin/Dockerfile .
          docker run --rm \
            -e JELLYFIN_USER=$JELLYFIN_USER \
            -e JELLYFIN_PASSWORD=$JELLYFIN_PASSWORD \
            jellyfin_management \
            -m jellyfin.update_all_songs_playlist
          docker run --rm \
            -e JELLYFIN_USER=$JELLYFIN_USER \
            -e JELLYFIN_PASSWORD=$JELLYFIN_PASSWORD \
            jellyfin_management \
            -m jellyfin.update_unindexed

@@ -1,6 +1,10 @@
![home server update](https://github.com/alexmickelson/infrastructure/actions/workflows/update-home-server.yml/badge.svg)
[![ZFS Backup](https://github.com/alexmickelson/infrastructure/actions/workflows/backup-zfs.yml/badge.svg)](https://github.com/alexmickelson/infrastructure/actions/workflows/backup-zfs.yml)
[![Manage Jellyfin Playlists](https://github.com/alexmickelson/infrastructure/actions/workflows/update-playlist.yml/badge.svg)](https://github.com/alexmickelson/infrastructure/actions/workflows/update-playlist.yml)

gitea/.gitignore

@@ -1 +0,0 @@
data/

@@ -1,98 +0,0 @@
# Example configuration file, it's safe to copy this as the default config file without any modification.
# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.
log:
  # The level of logging, can be trace, debug, info, warn, error, fatal
  level: info
runner:
  # Where to store the registration result.
  file: .runner
  # Execute how many tasks concurrently at the same time.
  capacity: 1
  # Extra environment variables to run jobs.
  envs:
    A_TEST_ENV_NAME_1: a_test_env_value_1
    A_TEST_ENV_NAME_2: a_test_env_value_2
  # Extra environment variables to run jobs from a file.
  # It will be ignored if it's empty or the file doesn't exist.
  env_file: .env
  # The timeout for a job to be finished.
  # Please note that the Gitea instance also has a timeout (3h by default) for the job.
  # So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
  timeout: 3h
  # Whether skip verifying the TLS certificate of the Gitea instance.
  insecure: false
  # The timeout for fetching the job from the Gitea instance.
  fetch_timeout: 5s
  # The interval for fetching the job from the Gitea instance.
  fetch_interval: 2s
  # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
  # Like: "macos-arm64:host" or "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
  # Find more images provided by Gitea at https://gitea.com/gitea/runner-images .
  # If it's empty when registering, it will ask for inputting labels.
  # If it's empty when execute `daemon`, will use labels in `.runner` file.
  labels:
    - "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
    - "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
    - "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"
cache:
  # Enable cache server to use actions/cache.
  enabled: true
  # The directory to store the cache data.
  # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
  dir: ""
  # The host of the cache server.
  # It's not for the address to listen, but the address to connect from job containers.
  # So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
  host: ""
  # The port of the cache server.
  # 0 means to use a random available port.
  port: 0
  # The external cache server URL. Valid only when enable is true.
  # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
  # The URL should generally end with "/".
  external_server: ""
container:
  # Specifies the network to which the container will connect.
  # Could be host, bridge or the name of a custom network.
  # If it's empty, act_runner will create a network automatically.
  network: host
  # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
  privileged: false
  # And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
  options:
  # The parent directory of a job's working directory.
  # NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
  # If the path starts with '/', the '/' will be trimmed.
  # For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
  # If it's empty, /workspace will be used.
  workdir_parent:
  # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
  # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
  # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
  # valid_volumes:
  #   - data
  #   - /src/*.json
  # If you want to allow any volume, please use the following configuration:
  # valid_volumes:
  #   - '**'
  valid_volumes: []
  # overrides the docker client host with the specified one.
  # If it's empty, act_runner will find an available docker host automatically.
  # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
  # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
  docker_host: ""
  # Pull docker image(s) even if already present
  force_pull: true
  # Rebuild docker image(s) even if already present
  force_rebuild: false
host:
  # The parent directory of a job's working directory.
  # If it's empty, $HOME/.cache/act/ will be used.
  workdir_parent:

@@ -1,47 +0,0 @@
services:
  server:
    image: gitea/gitea:1.22.2
    container_name: gitea
    environment:
      - USER_UID=1000
      - USER_GID=1000
      - GITEA__database__DB_TYPE=postgres
      - GITEA__database__HOST=db:5432
      - GITEA__database__NAME=gitea
      - GITEA__database__USER=gitea
      - GITEA__database__PASSWD=gitea
    restart: always
    volumes:
      - ./data/gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - 0.0.0.0:3000:3000
      - 0.0.0.0:222:22
    depends_on:
      - db
  db:
    image: postgres:14
    restart: always
    environment:
      - POSTGRES_USER=gitea
      - POSTGRES_PASSWORD=gitea
      - POSTGRES_DB=gitea
    volumes:
      - ./data/postgres:/var/lib/postgresql/data
  runner:
    image: gitea/act_runner:nightly
    environment:
      CONFIG_FILE: /config.yaml
      GITEA_INSTANCE_URL: http://0.0.0.0:3000/
      GITEA_RUNNER_REGISTRATION_TOKEN: SMANpMfJk5G4fTFmuEZ9zleTBcdrj4M3k3eDCW6e
      GITEA_RUNNER_NAME: test-runner
      GITEA_RUNNER_LABELS: label1
    network_mode: host
    volumes:
      - ./config.yaml:/config.yaml
      - ./data/runner:/data
      - /var/run/docker.sock:/var/run/docker.sock
    depends_on:
      - server

@@ -1,22 +1,23 @@
services:
  jellyfin:
    image: jellyfin/jellyfin
    container_name: jellyfin
    user: 1000:1000
    network_mode: "host"
    volumes:
      - /data/jellyfin/config:/config
      - /data/jellyfin/cache:/cache
      - /data/media/music/tagged:/music
      - /data/media/movies:/movies
      - /data/media/tvshows:/tvshows
    restart: "unless-stopped"
    group_add:
      - "303" # getent group render | cut -d: -f3
    devices:
      - /dev/dri/renderD128:/dev/dri/renderD128
    environment:
      - JELLYFIN_PublishedServerUrl=https://jellyfin.alexmickelson.guru
  # jellyfin:
  #   image: jellyfin/jellyfin
  #   container_name: jellyfin
  #   user: 1000:1000
  #   network_mode: "host"
  #   volumes:
  #     - /data/jellyfin/config:/config
  #     - /data/jellyfin/cache:/cache
  #     - /data/media/music/tagged:/music
  #     - /data/media/movies:/movies
  #     - /data/media/tvshows:/tvshows
  #     - /data/nextcloud/html/data/alex/files/Documents/home-video:/home-videos:ro
  #   restart: "unless-stopped"
  #   group_add:
  #     - "303" # getent group render | cut -d: -f3
  #   devices:
  #     - /dev/dri/renderD128:/dev/dri/renderD128
  #   environment:
  #     - JELLYFIN_PublishedServerUrl=https://jellyfin.alexmickelson.guru
  nextcloud:
    build:
@@ -101,43 +102,6 @@ services:
    restart: always
    network_mode: host
  zwave-js-ui:
    container_name: zwave-js-ui
    image: zwavejs/zwave-js-ui:latest
    restart: always
    tty: true
    stop_signal: SIGINT
    environment:
      - SESSION_SECRET=iqpwoeinf9384bw3p48gbwer
      - TZ=America/Denver
    devices:
      # Do not use /dev/ttyUSBX serial devices, as those mappings can change over time.
      # Instead, use the /dev/serial/by-id/X serial device for your Z-Wave stick.
      # - '/dev/serial/by-id/insert_stick_reference_here:/dev/zwave'
      - /dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if00-port0:/dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if00-port0
      - /dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if01-port0:/dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if01-port0
    volumes:
      - /data/zwave:/usr/src/app/store
    ports:
      - '3050:8091'
      - '3051:3051'
  music-assistant-server:
    image: ghcr.io/music-assistant/server:2
    container_name: music-assistant-server
    restart: unless-stopped
    network_mode: host
    volumes:
      - /data/music-assistant-server/data:/data/
    # cap_add:
    #   - SYS_ADMIN
    #   - DAC_READ_SEARCH
    # security_opt:
    #   - apparmor:unconfined
    environment:
      - LOG_LEVEL=info
  prometheus:
    image: public.ecr.aws/bitnami/prometheus:2
    container_name: prometheus
@@ -172,71 +136,6 @@
    ports:
      - 3000:3000
  # acpupsd_exporter:
  #   image: sfudeus/apcupsd_exporter:master_1.19
  #   container_name: apcupsd_exporter
  #   restart: always
  #   extra_hosts:
  #     - host.docker.internal:host-gateway
  #   command: -apcupsd.addr host.docker.internal:3551
  #   ports:
  #     - 0.0.0.0:9162:9162
  # docker run -it --rm -p 9162:9162 --net=host sfudeus/apcupsd_exporter:master_1.19
  reverse-proxy:
    image: ghcr.io/linuxserver/swag
    container_name: reverse-proxy
    restart: unless-stopped
    cap_add:
      - NET_ADMIN
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Denver
      - URL=alexmickelson.guru
      - SUBDOMAINS=wildcard
      - VALIDATION=dns
      - DNSPLUGIN=cloudflare
    volumes:
      - ./nginx.conf:/config/nginx/site-confs/default.conf
      - /data/swag:/config
      - /data/cloudflare/cloudflare.ini:/config/dns-conf/cloudflare.ini
    ports:
      - 0.0.0.0:80:80
      - 0.0.0.0:443:443
      # - 0.0.0.0:7080:80
      # - 0.0.0.0:7443:443
    extra_hosts:
      - host.docker.internal:host-gateway
    networks:
      - proxy
  audiobookshelf:
    image: ghcr.io/advplyr/audiobookshelf:latest
    restart: unless-stopped
    ports:
      - 13378:80
    volumes:
      - /data/media/audiobooks:/audiobooks
      - /data/media/audiobooks-libation:/audiobooks-libation
      # - </path/to/podcasts>:/podcasts
      - /data/audiobookshelf/config:/config
      - /data/audiobookshelf/metadata:/metadata
    networks:
      - proxy
  copilot-api:
    image: node:latest
    working_dir: /app
    command: sh -c "npm cache clean --force && npx copilot-api@latest start --github-token $COPILOT_TOKEN --port 4444"
    environment:
      - COPILOT_TOKEN=${COPILOT_TOKEN}
    ports:
      - "4444:4444"
    restart: unless-stopped
    networks:
      - proxy
  esphome:
    container_name: esphome
@@ -262,6 +161,7 @@ services:
    environment:
      - SEARXNG_BASE_URL=http://server.alexmickelson.guru:4446/
    restart: unless-stopped
networks:
  proxy:
    name: proxy


@@ -1,5 +1,3 @@
version: "3.8"
services:
libation:
image: rmcrackan/libation:latest


@@ -6,24 +6,24 @@ server {
return 301 https://$host$request_uri;
}
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name ha.alexmickelson.guru;
include /config/nginx/ssl.conf;
include /config/nginx/proxy.conf;
include /config/nginx/resolver.conf;
# server {
# listen 443 ssl;
# listen [::]:443 ssl;
# server_name ha.alexmickelson.guru;
# include /config/nginx/ssl.conf;
# include /config/nginx/proxy.conf;
# include /config/nginx/resolver.conf;
location / {
proxy_pass http://host.docker.internal:8123;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_http_version 1.1;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
}
# location / {
# proxy_pass http://host.docker.internal:8123;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_http_version 1.1;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header Upgrade $http_upgrade;
# proxy_set_header Connection $connection_upgrade;
# }
# }
server {
listen 443 ssl;
@@ -55,18 +55,6 @@ server {
}
}
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name plex.alexmickelson.guru;
location / {
proxy_pass http://host.docker.internal:32400;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
server {
listen 443 ssl;
listen [::]:443 ssl;
@@ -106,23 +94,23 @@ server {
}
}
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name audiobook.alexmickelson.guru;
# server {
# listen 443 ssl;
# listen [::]:443 ssl;
# server_name audiobook.alexmickelson.guru;
location / {
proxy_pass http://audiobookshelf:80;
# location / {
# proxy_pass http://audiobookshelf:80;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto $scheme;
# proxy_set_header Host $host;
# proxy_set_header Upgrade $http_upgrade;
# proxy_set_header Connection "upgrade";
proxy_http_version 1.1;
}
}
# proxy_http_version 1.1;
# }
# }
# server {
# listen 443 ssl;
@@ -188,24 +176,24 @@ server {
proxy_pass http://immich_server:2283;
}
}
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name sound.alexmickelson.guru;
location / {
proxy_pass http://host.docker.internal:8095;
# server {
# listen 443 ssl;
# listen [::]:443 ssl;
# server_name sound.alexmickelson.guru;
# location / {
# proxy_pass http://host.docker.internal:8095;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Protocol $scheme;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto $scheme;
# proxy_set_header X-Forwarded-Protocol $scheme;
# proxy_set_header X-Forwarded-Host $http_host;
# proxy_set_header Host $host;
# proxy_set_header Upgrade $http_upgrade;
# proxy_set_header Connection "upgrade";
# }
# }
server {
listen 443 ssl;


@@ -0,0 +1,95 @@
apiVersion: v1
kind: Namespace
metadata:
name: audiobookshelf
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: audiobookshelf
namespace: audiobookshelf
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: audiobookshelf
template:
metadata:
labels:
app: audiobookshelf
spec:
containers:
- name: audiobookshelf
image: ghcr.io/advplyr/audiobookshelf:latest
imagePullPolicy: Always
ports:
- containerPort: 80
hostPort: 13378
volumeMounts:
- name: audiobooks
mountPath: /audiobooks
- name: audiobooks-libation
mountPath: /audiobooks-libation
- name: config
mountPath: /config
- name: metadata
mountPath: /metadata
volumes:
- name: audiobooks
hostPath:
path: /data/media/audiobooks
type: DirectoryOrCreate
- name: audiobooks-libation
hostPath:
path: /data/media/audiobooks-libation
type: DirectoryOrCreate
- name: config
hostPath:
path: /data/audiobookshelf/config
type: DirectoryOrCreate
- name: metadata
hostPath:
path: /data/audiobookshelf/metadata
type: DirectoryOrCreate
---
apiVersion: v1
kind: Service
metadata:
name: audiobookshelf
namespace: audiobookshelf
spec:
selector:
app: audiobookshelf
ports:
- name: http
protocol: TCP
port: 13378
targetPort: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: audiobookshelf-ingress
namespace: audiobookshelf
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- audiobook.alexmickelson.guru
secretName: audiobookshelf-tls-cert
rules:
- host: audiobook.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: audiobookshelf
port:
number: 13378
---


@@ -0,0 +1,77 @@
apiVersion: v1
kind: Namespace
metadata:
name: copilot
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: copilot-api
namespace: copilot
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: copilot-api
template:
metadata:
labels:
app: copilot-api
spec:
containers:
- name: copilot-api
image: node:latest
imagePullPolicy: Always
workingDir: /app
command: ["sh", "-c"]
args: ["npm cache clean --force && npx copilot-api@latest start --github-token $COPILOT_TOKEN --port 4444"]
env:
- name: COPILOT_TOKEN
valueFrom:
secretKeyRef:
name: copilot-secret
key: token
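            # Assumes copilot-secret is created out-of-band, e.g.:
            #   kubectl create secret generic copilot-secret -n copilot --from-literal=token=<GITHUB_TOKEN>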
ports:
- containerPort: 4444
---
apiVersion: v1
kind: Service
metadata:
name: copilot-api
namespace: copilot
spec:
selector:
app: copilot-api
ports:
- name: http
protocol: TCP
port: 4444
targetPort: 4444
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: copilot-api-ingress
namespace: copilot
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- copilot.alexmickelson.guru
secretName: copilot-api-tls-cert
rules:
- host: copilot.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: copilot-api
port:
number: 4444
---


@@ -0,0 +1,37 @@
apiVersion: v1
kind: Secret
metadata:
name: cloudflared-gitea-token
namespace: gitea
type: Opaque
stringData:
token: $CLOUDFLARED_GITEA_TOKEN
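  # NOTE: $CLOUDFLARED_GITEA_TOKEN is a shell-style placeholder; this assumes the
  # manifest is run through envsubst (or similar) before kubectl apply.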
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cloudflared-gitea
namespace: gitea
spec:
replicas: 1
selector:
matchLabels:
app: cloudflared-gitea
template:
metadata:
labels:
app: cloudflared-gitea
spec:
containers:
- name: cloudflared
image: cloudflare/cloudflared:latest
imagePullPolicy: Always
args:
- tunnel
- run
env:
- name: TUNNEL_TOKEN
valueFrom:
secretKeyRef:
name: cloudflared-gitea-token
key: token


@@ -1,7 +1,7 @@
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: projects
namespace: gitea
name: gitea-db
spec:
replicas: 1
@@ -38,7 +38,7 @@ apiVersion: v1
kind: Service
metadata:
name: gitea-db-svc
namespace: projects
namespace: gitea
labels:
app: gitea-db
spec:


@@ -0,0 +1,358 @@
:root {
--color-bg-page: #0d1117;
--color-bg-card: #161b22;
--color-bg-card-hover: #1c2128;
--color-border: #21262d;
--color-border-muted: #30363d;
--color-text: #e6edf3;
--color-text-muted: #8b949e;
--color-text-subtle: #6e7681;
--color-text-faint: #484f58;
--color-accent: #58a6ff;
--color-accent-subtle: #58a6ff11;
--color-accent-shadow: #58a6ff1a;
--color-success: #238636;
--color-success-hover: #2ea043;
--color-white: #fff;
/* Spacing */
--space-xl: 80px;
--space-lg: 24px;
--space-md: 20px;
--space-sm: 16px;
--space-xs: 12px;
--space-2xs: 10px;
/* Border radius */
--radius-lg: 12px;
--radius-md: 8px;
--radius-sm: 4px;
/* Font sizes */
--text-hero: 3rem;
--text-heading: 1.5rem;
--text-btn: 0.95rem;
--text-base: 0.875rem;
--text-sm: 0.8rem;
--text-xs: 0.75rem;
}
/* override gitea defaults */
.page-content > :first-child:not(.secondary-nav) {
margin: 0 !important;
}
#alex-landing {
min-height: 100vh;
background: var(--color-bg-page);
color: var(--color-text);
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
}
/* Hero */
.hero {
display: flex;
justify-content: center;
align-items: center;
padding-top: var(--space-xl);
padding-right: var(--space-lg);
padding-bottom: 60px;
padding-left: var(--space-lg);
text-align: center;
}
.hero-inner {
max-width: 640px;
}
.hero h1 {
font-size: var(--text-hero);
font-weight: 800;
margin: 0 0 var(--space-xs);
background: linear-gradient(135deg, var(--color-text) 0%, var(--color-accent) 100%);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
background-clip: text;
}
/* Projects section */
.projects-section {
max-width: 1100px;
margin: 0 auto;
padding: 0 var(--space-lg) var(--space-xl);
}
.section-header {
margin-bottom: var(--space-lg);
display: flex;
align-items: baseline;
gap: var(--space-xs);
flex-wrap: wrap;
}
.section-header h2 {
font-size: var(--text-heading);
font-weight: 700;
margin: 0;
color: var(--color-text);
}
/* Grid */
.repo-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(320px, 1fr));
gap: var(--space-sm);
}
/* Skeleton loaders */
.skeleton-card {
height: 160px;
border-radius: var(--radius-lg);
background: linear-gradient(90deg, var(--color-bg-card) 25%, var(--color-border) 50%, var(--color-bg-card) 75%);
background-size: 200% 100%;
animation: shimmer 1.4s infinite;
}
@keyframes shimmer {
0% { background-position: 200% 0; }
100% { background-position: -200% 0; }
}
/* Repo cards */
.repo-card {
background: var(--color-bg-card);
border: 1px solid var(--color-border);
border-radius: var(--radius-lg);
padding: var(--space-md);
text-decoration: none;
color: inherit;
display: flex;
flex-direction: column;
gap: var(--space-2xs);
transition: border-color 0.2s, transform 0.2s, box-shadow 0.2s;
cursor: pointer;
}
.repo-card:hover {
border-color: var(--color-accent);
transform: translateY(-2px);
box-shadow: 0 8px 24px var(--color-accent-shadow);
}
.repo-card-header {
display: flex;
align-items: center;
gap: var(--space-2xs);
}
.repo-icon {
font-size: 1.1rem;
flex-shrink: 0;
}
.repo-name {
font-size: 1rem;
font-weight: 600;
color: var(--color-accent);
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.repo-private {
font-size: 0.7rem;
background: var(--color-border);
border: 1px solid var(--color-border-muted);
border-radius: var(--radius-sm);
padding: 1px 6px;
color: var(--color-text-muted);
flex-shrink: 0;
}
.repo-desc {
font-size: var(--text-base);
color: var(--color-text-muted);
line-height: 1.5;
flex: 1;
overflow: hidden;
display: -webkit-box;
-webkit-line-clamp: 2;
-webkit-box-orient: vertical;
}
.repo-commit {
font-size: var(--text-sm);
color: var(--color-text-subtle);
border-top: 1px solid var(--color-border);
padding-top: var(--space-2xs);
display: flex;
align-items: center;
gap: 6px;
overflow: hidden;
}
.commit-dot {
width: 6px;
height: 6px;
border-radius: 50%;
background: var(--color-success);
flex-shrink: 0;
}
.commit-msg {
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
flex: 1;
}
.commit-time {
color: var(--color-text-faint);
flex-shrink: 0;
}
.repo-meta {
display: flex;
gap: 14px;
font-size: var(--text-sm);
color: var(--color-text-subtle);
}
.repo-meta span { display: flex; align-items: center; gap: 4px; }
/* Error state */
.error-msg {
grid-column: 1 / -1;
text-align: center;
padding: 40px;
color: var(--color-text-muted);
font-size: var(--text-btn);
}
/* Activity section */
.activity-section {
max-width: 1100px;
margin: 0 auto;
padding: 0 var(--space-lg) var(--space-xl);
}
.view-all-link {
font-size: 0.85rem;
color: var(--color-accent);
text-decoration: none;
margin-left: auto;
}
.view-all-link:hover { text-decoration: underline; }
.activity-feed {
display: flex;
flex-direction: column;
gap: 0;
border: 1px solid var(--color-border);
border-radius: var(--radius-lg);
overflow: hidden;
}
.skeleton-activity {
height: 52px;
background: linear-gradient(90deg, var(--color-bg-card) 25%, var(--color-border) 50%, var(--color-bg-card) 75%);
background-size: 200% 100%;
animation: shimmer 1.4s infinite;
border-top: 1px solid var(--color-bg-page);
}
.skeleton-activity:first-child { border-top: none; }
.activity-item {
display: flex;
align-items: flex-start;
gap: var(--space-xs);
padding: var(--space-sm) var(--space-sm);
background: var(--color-bg-card);
border-top: 1px solid var(--color-border);
font-size: var(--text-base);
transition: background 0.15s;
}
.activity-item:first-child { border-top: none; }
.activity-item:hover { background: var(--color-bg-card-hover); }
.activity-op-icon {
flex-shrink: 0;
width: 28px;
height: 28px;
border-radius: 50%;
background: var(--color-border);
display: flex;
align-items: center;
justify-content: center;
font-size: var(--text-sm);
margin-top: 2px;
}
.activity-body { flex: 1; min-width: 0; }
.activity-headline-row {
display: flex;
align-items: baseline;
gap: var(--space-xs);
min-width: 0;
}
.activity-headline {
color: var(--color-text);
line-height: 1.5;
flex: 1;
min-width: 0;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.activity-headline a {
color: var(--color-accent);
text-decoration: none;
font-weight: 500;
}
.activity-headline a:hover { text-decoration: underline; }
.activity-commits {
margin-top: 5px;
display: flex;
flex-direction: column;
gap: 3px;
padding-left: 2px;
}
.activity-commit-line {
display: flex;
align-items: center;
gap: 8px;
font-size: var(--text-sm);
color: var(--color-text-muted);
overflow: hidden;
}
.activity-commit-sha {
font-family: monospace;
font-size: var(--text-xs);
color: var(--color-text-subtle);
background: var(--color-border);
border-radius: var(--radius-sm);
padding: 1px 5px;
flex-shrink: 0;
text-decoration: none;
}
.activity-commit-sha:hover { color: var(--color-accent); text-decoration: none; }
.activity-commit-text {
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
color: var(--color-text-muted);
}
.activity-time {
flex-shrink: 0;
font-size: 0.75rem;
color: var(--color-text-faint);
white-space: nowrap;
padding-top: 3px;
}
/* ── Heatmap ─────────────────────────────────────────────── */
.heatmap-section {
max-width: 1100px;
margin: 0 auto;
padding: 0 var(--space-lg) var(--space-xl);
}
.activity-heatmap {
background: var(--color-bg-card);
border: 1px solid var(--color-border);
border-radius: var(--radius-lg);
padding: var(--space-md) var(--space-lg);
overflow-x: auto;
}
.heatmap-svg {
display: block;
}
.heatmap-month {
font-size: 9px;
fill: var(--color-text-muted, #8b949e);
font-family: inherit;
}
.heatmap-day {
font-size: 9px;
fill: var(--color-text-muted, #8b949e);
font-family: inherit;
}


@@ -0,0 +1,51 @@
{{template "base/head" .}}
<div class="page-content home" id="alex-landing">
<section class="hero">
<div class="hero-inner">
<h1>Alex Mickelson</h1>
</div>
</section>
<section class="projects-section">
<div class="section-header">
<h2>Recent Projects</h2>
</div>
<div id="repo-grid" class="repo-grid">
<div class="skeleton-card"></div>
<div class="skeleton-card"></div>
<div class="skeleton-card"></div>
<div class="skeleton-card"></div>
<div class="skeleton-card"></div>
<div class="skeleton-card"></div>
</div>
</section>
<section class="heatmap-section">
<div class="section-header">
<h2>Activity</h2>
</div>
<div id="activity-heatmap" class="activity-heatmap"></div>
</section>
<section class="activity-section">
<div class="section-header">
<h2>Recent Activity</h2>
<a href="/alex" class="view-all-link">View full profile →</a>
</div>
<div id="activity-feed" class="activity-feed">
<div class="skeleton-activity"></div>
<div class="skeleton-activity"></div>
<div class="skeleton-activity"></div>
<div class="skeleton-activity"></div>
<div class="skeleton-activity"></div>
</div>
</section>
</div>
<script>
window.GITEA_APP_URL = "{{AppUrl}}";
window.GITEA_SUB_URL = "{{AppSubUrl}}";
</script>
<!-- bump the ?v= query string when these files change to invalidate the Cloudflare cache -->
<script src="{{AppSubUrl}}/assets/js/custom-landing.js?v=9"></script>
<link href="{{AppSubUrl}}/assets/css/custom-landing.css?v=9" rel="stylesheet" />
{{template "base/footer" .}}


@@ -0,0 +1,396 @@
const baseUrl = window.GITEA_SUB_URL || "";
const httpService = {
async fetchRss() {
const resp = await fetch(`${baseUrl}/alex.rss`);
if (!resp.ok) throw new Error(`HTTP ${resp.status}`);
const text = await resp.text();
return new DOMParser().parseFromString(text, "application/xml");
},
async fetchHeatmap(username = "alex") {
const resp = await fetch(`${baseUrl}/api/v1/users/${username}/heatmap`);
if (!resp.ok) throw new Error(`HTTP ${resp.status}`);
return resp.json(); // [{timestamp: unix_seconds, contributions: number}]
},
};
const dataDomain = {
timeAgo(dateStr) {
const diff = (Date.now() - new Date(dateStr)) / 1000;
if (diff < 60) return "just now";
if (diff < 3600) return Math.floor(diff / 60) + "m ago";
if (diff < 86400) return Math.floor(diff / 3600) + "h ago";
if (diff < 2592000) return Math.floor(diff / 86400) + "d ago";
if (diff < 31536000) return Math.floor(diff / 2592000) + "mo ago";
return Math.floor(diff / 31536000) + "y ago";
},
esc(str) {
return (str || "")
.replace(/&/g, "&amp;")
.replace(/</g, "&lt;")
.replace(/>/g, "&gt;")
.replace(/"/g, "&quot;");
},
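  // Keep only <a> elements from the feed-provided title HTML; every other element
  // is flattened to its text content so arbitrary markup can't be injected.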
safeTitleHtml(rawTitleText) {
const doc = new DOMParser().parseFromString(rawTitleText, "text/html");
doc.body
.querySelectorAll("*:not(a)")
.forEach((el) => el.replaceWith(el.textContent));
return doc.body.innerHTML;
},
titlePlainText(rawTitleText) {
const doc = new DOMParser().parseFromString(rawTitleText, "text/html");
return doc.body.textContent || rawTitleText;
},
activityIcon(titleText) {
const t = titleText.toLowerCase();
if (t.includes("push") || t.includes("commit")) return "📤";
if (t.includes("creat") && t.includes("repo")) return "📁";
if (t.includes("fork")) return "🍴";
if (t.includes("open") && t.includes("issue")) return "🔴";
if (t.includes("clos") && t.includes("issue")) return "🟢";
if (t.includes("pull request") || t.includes("merge")) return "🔀";
if (t.includes("tag")) return "🏷️";
if (t.includes("branch")) return "🌿";
if (t.includes("comment")) return "💬";
if (t.includes("release")) return "🚀";
return "⚡";
},
parseCommits(descriptionText) {
const doc = new DOMParser().parseFromString(descriptionText, "text/html");
return Array.from(doc.querySelectorAll("a")).map((anchor) => {
const sha = anchor.textContent.trim().slice(0, 7);
const href = anchor.getAttribute("href") || "#";
let msg = "";
let node = anchor.nextSibling;
while (node) {
const t = (node.textContent || "").trim();
if (t) {
msg = t;
break;
}
node = node.nextSibling;
}
return { sha, href, msg };
});
},
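  // Build a de-duplicated repo list from the feed: the last anchor in each item
  // title links to the repository itself.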
parseRepos(xmlDoc) {
const items = Array.from(xmlDoc.querySelectorAll("channel > item"));
const seen = new Map();
for (const item of items) {
const titleHtml = item.querySelector("title")?.textContent || "";
const titleDoc = new DOMParser().parseFromString(titleHtml, "text/html");
const anchors = titleDoc.querySelectorAll("a");
if (anchors.length < 2) continue;
const repoAnchor = anchors[anchors.length - 1];
const repoName = repoAnchor.textContent.trim();
if (!repoName || seen.has(repoName)) continue;
seen.set(repoName, {
repoName,
repoUrl: repoAnchor.getAttribute("href") || "#",
shortName: repoName.includes("/")
? repoName.split("/").pop()
: repoName,
pubDate: item.querySelector("pubDate")?.textContent || "",
firstCommit:
dataDomain.parseCommits(
item.querySelector("description")?.textContent || "",
)[0] || null,
});
}
return Array.from(seen.values());
},
parseAllActivityDates(xmlDoc) {
const counts = new Map();
for (const item of Array.from(xmlDoc.querySelectorAll("channel > item"))) {
const pubDate = item.querySelector("pubDate")?.textContent || "";
if (!pubDate) continue;
const d = new Date(pubDate);
if (isNaN(d)) continue;
const key = d.toISOString().slice(0, 10);
counts.set(key, (counts.get(key) || 0) + 1);
}
return counts;
},
parseActivity(xmlDoc, limit = 20) {
return Array.from(xmlDoc.querySelectorAll("channel > item"))
.slice(0, limit)
.map((item) => {
const rawTitle = item.querySelector("title")?.textContent || "";
const titleText = dataDomain.titlePlainText(rawTitle);
return {
titleHtmlSafe: dataDomain.safeTitleHtml(rawTitle),
titleText,
link: item.querySelector("link")?.textContent || "#",
pubDate: item.querySelector("pubDate")?.textContent || "",
icon: dataDomain.activityIcon(titleText),
commits: dataDomain.parseCommits(
item.querySelector("description")?.textContent || "",
).slice(0, 3),
};
});
},
};
const uiRendering = {
async renderRepos(xmlDoc) {
const grid = document.getElementById("repo-grid");
if (!grid) return;
const repos = dataDomain.parseRepos(xmlDoc);
if (repos.length === 0) {
grid.innerHTML = `<div class="error-msg">No repositories found in feed.</div>`;
return;
}
grid.innerHTML = "";
for (const {
shortName,
repoName,
repoUrl,
pubDate,
firstCommit,
} of repos) {
const when = dataDomain.timeAgo(pubDate);
const commitMsg = firstCommit?.msg || firstCommit?.sha || "";
const card = document.createElement("a");
card.className = "repo-card";
card.href = dataDomain.esc(repoUrl);
card.innerHTML = `
<div class="repo-card-header">
<span class="repo-icon">📦</span>
<span class="repo-name">${dataDomain.esc(shortName)}</span>
</div>
<div class="repo-desc">${dataDomain.esc(repoName)}</div>
<div class="repo-commit">
<span class="commit-dot"></span>
<span class="commit-msg">${dataDomain.esc(commitMsg)}</span>
<span class="commit-time">${dataDomain.esc(when)}</span>
</div>
`.trim();
grid.appendChild(card);
}
},
async renderActivity(xmlDoc) {
const feed = document.getElementById("activity-feed");
if (!feed) return;
const items = dataDomain.parseActivity(xmlDoc);
if (items.length === 0) {
feed.innerHTML = `<div class="error-msg">No public activity yet.</div>`;
return;
}
feed.innerHTML = "";
for (const { titleHtmlSafe, icon, pubDate, commits } of items) {
const when = dataDomain.timeAgo(pubDate);
const commitsHtml =
commits.length === 0
? ""
: `<div class="activity-commits">` +
commits
.map(
({ sha, href, msg }) => `
<div class="activity-commit-line">
<a class="activity-commit-sha" href="${dataDomain.esc(href)}">${dataDomain.esc(sha)}</a>
<span class="activity-commit-text">${dataDomain.esc(msg)}</span>
</div>`,
)
.join("") +
`</div>`;
const el = document.createElement("div");
el.className = "activity-item";
el.innerHTML = `
<div class="activity-op-icon">${icon}</div>
<div class="activity-body">
<div class="activity-headline-row">
<div class="activity-headline">${titleHtmlSafe}</div>
<span class="activity-time">${when}</span>
</div>
${commitsHtml}
</div>
`;
feed.appendChild(el);
}
},
async activityMapRender() {
const container = document.getElementById("activity-heatmap");
if (!container) return;
let heatmapData;
try {
heatmapData = await httpService.fetchHeatmap();
} catch (e) {
container.innerHTML = `<div class="error-msg">Could not load heatmap (${e.message})</div>`;
return;
}
// Build counts map from API data
const counts = new Map();
for (const { timestamp, contributions } of heatmapData) {
const d = new Date(timestamp * 1000);
const key = d.toISOString().slice(0, 10);
counts.set(key, (counts.get(key) || 0) + (contributions || 1));
}
const today = new Date();
today.setHours(0, 0, 0, 0);
// Align start to Sunday 52 weeks ago
const startDate = new Date(today);
startDate.setDate(startDate.getDate() - 52 * 7);
startDate.setDate(startDate.getDate() - startDate.getDay());
const cellSize = 11;
const gap = 2;
const step = cellSize + gap;
const cols = 53;
const rows = 7;
const padLeft = 28;
const padTop = 20;
const svgW = padLeft + cols * step;
const svgH = padTop + rows * step;
const LEVELS = ["#2d333b", "#0e4429", "#006d32", "#26a641", "#39d353"];
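  // GitHub-style buckets: counts of 0, 1, 2-3, 4-6, and 7+ map to the five colors.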
const countToLevel = (n) =>
n === 0 ? 0 : n === 1 ? 1 : n <= 3 ? 2 : n <= 6 ? 3 : 4;
// Collect month labels (one per column where the month changes)
const monthLabels = new Map();
let lastMonth = -1;
for (let col = 0; col < cols; col++) {
const d = new Date(startDate);
d.setDate(d.getDate() + col * 7);
if (d.getMonth() !== lastMonth) {
lastMonth = d.getMonth();
monthLabels.set(col, d.toLocaleString("default", { month: "short" }));
}
}
const ns = "http://www.w3.org/2000/svg";
const svg = document.createElementNS(ns, "svg");
svg.setAttribute("width", svgW);
svg.setAttribute("height", svgH);
svg.setAttribute("class", "heatmap-svg");
svg.setAttribute("aria-label", "Activity heatmap");
// Month labels
for (const [col, name] of monthLabels) {
const t = document.createElementNS(ns, "text");
t.setAttribute("x", padLeft + col * step);
t.setAttribute("y", 12);
t.setAttribute("class", "heatmap-month");
t.textContent = name;
svg.appendChild(t);
}
// Day-of-week labels (Sun / Tue / Thu / Sat)
["Sun", "", "Tue", "", "Thu", "", "Sat"].forEach((label, i) => {
if (!label) return;
const t = document.createElementNS(ns, "text");
t.setAttribute("x", 0);
t.setAttribute("y", padTop + i * step + cellSize - 2);
t.setAttribute("class", "heatmap-day");
t.textContent = label;
svg.appendChild(t);
});
// Day cells
for (let col = 0; col < cols; col++) {
for (let row = 0; row < rows; row++) {
const d = new Date(startDate);
d.setDate(d.getDate() + col * 7 + row);
if (d > today) continue;
const key = d.toISOString().slice(0, 10);
const count = counts.get(key) || 0;
const rect = document.createElementNS(ns, "rect");
rect.setAttribute("x", padLeft + col * step);
rect.setAttribute("y", padTop + row * step);
rect.setAttribute("width", cellSize);
rect.setAttribute("height", cellSize);
rect.setAttribute("rx", 2);
rect.setAttribute("fill", LEVELS[countToLevel(count)]);
rect.setAttribute("data-date", key);
rect.setAttribute("data-count", count);
const title = document.createElementNS(ns, "title");
title.textContent = count > 0
? `${count} activit${count === 1 ? "y" : "ies"} on ${key}`
: `No activity on ${key}`;
rect.appendChild(title);
svg.appendChild(rect);
}
}
// Legend
const legendY = svgH + 6;
const legendG = document.createElementNS(ns, "g");
const legendLabel = document.createElementNS(ns, "text");
legendLabel.setAttribute("x", padLeft);
legendLabel.setAttribute("y", legendY + cellSize - 2);
legendLabel.setAttribute("class", "heatmap-day");
legendLabel.textContent = "Less";
legendG.appendChild(legendLabel);
LEVELS.forEach((color, i) => {
const r = document.createElementNS(ns, "rect");
r.setAttribute("x", padLeft + 32 + i * step);
r.setAttribute("y", legendY);
r.setAttribute("width", cellSize);
r.setAttribute("height", cellSize);
r.setAttribute("rx", 2);
r.setAttribute("fill", color);
legendG.appendChild(r);
});
const moreLabel = document.createElementNS(ns, "text");
moreLabel.setAttribute("x", padLeft + 32 + LEVELS.length * step + 4);
moreLabel.setAttribute("y", legendY + cellSize - 2);
moreLabel.setAttribute("class", "heatmap-day");
moreLabel.textContent = "More";
legendG.appendChild(moreLabel);
svg.setAttribute("height", svgH + cellSize + 12);
svg.appendChild(legendG);
container.innerHTML = "";
container.appendChild(svg);
},
async render() {
      // the module-level baseUrl (window.GITEA_SUB_URL) is used for fallback links below
try {
const xmlDoc = await httpService.fetchRss();
await Promise.all([
uiRendering.renderRepos(xmlDoc),
uiRendering.renderActivity(xmlDoc),
uiRendering.activityMapRender(),
]);
} catch (e) {
console.error("Gitea landing: RSS fetch failed", e);
const grid = document.getElementById("repo-grid");
const feed = document.getElementById("activity-feed");
if (grid)
grid.innerHTML = `<div class="error-msg">Could not load feed (${e.message}). <a href="${baseUrl}/explore/repos" style="color:#58a6ff">Browse manually →</a></div>`;
if (feed)
feed.innerHTML = `<div class="error-msg">Could not load activity (${e.message})</div>`;
return;
}
},
};
document.addEventListener("DOMContentLoaded", uiRendering.render);


@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: gitea
labels:
name: gitea


@@ -0,0 +1,59 @@
# apiVersion: v1
# kind: Secret
# metadata:
# name: gitea-runner-secret
# namespace: gitea
# type: Opaque
# stringData:
# RUNNER_TOKEN: "<REPLACE_WITH_GITEA_RUNNER_TOKEN>"
# kubectl create secret generic gitea-runner-secret \
# --namespace gitea \
# --from-literal=RUNNER_TOKEN=<REPLACE_WITH_GITEA_RUNNER_TOKEN>
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: gitea-actions-runner
namespace: gitea
spec:
replicas: 1
selector:
matchLabels:
app: gitea-actions-runner
template:
metadata:
labels:
app: gitea-actions-runner
spec:
containers:
- name: runner
image: gitea/act_runner:latest
env:
- name: GITEA_INSTANCE_URL
value: "https://git.alexmickelson.guru"
- name: GITEA_RUNNER_REGISTRATION_TOKEN
valueFrom:
secretKeyRef:
name: gitea-runner-secret
key: RUNNER_TOKEN
- name: GITEA_RUNNER_NAME
value: kubernetes-runner
- name: GITEA_RUNNER_LABELS
value: "docker,kubernetes"
- name: DOCKER_HOST
value: "unix:///var/run/docker.sock"
# - name: GITEA_RUNNER_EPHEMERAL
# value: "1"
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock
- name: runner-data
mountPath: /data
volumes:
- name: docker-sock
hostPath:
path: /var/run/docker.sock
- name: runner-data
emptyDir: {}


@@ -2,7 +2,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: gitea-web
namespace: projects
namespace: gitea
spec:
replicas: 1
selector:
@@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: gitea
image: docker.io/gitea/gitea:1.23
image: docker.io/gitea/gitea:1.25
ports:
- containerPort: 3000
- containerPort: 22
@@ -34,6 +34,33 @@ spec:
value: "gitea"
- name: GITEA__database__PASSWD
value: wauiofnasufnweaiufbsdklfjb23456
- name: GITEA__server__PROTOCOL
value: "http"
- name: GITEA__server__DOMAIN
value: "git.alexmickelson.guru"
- name: GITEA__server__PUBLIC_URL_DETECTION
value: "auto"
- name: GITEA__server__LOCAL_ROOT_URL
value: "http://gitea-web-svc.gitea.svc.cluster.local:3000/"
- name: GITEA__server__SSH_DOMAIN
value: "gitea-gitea-web-svc.beefalo-newton.ts.net"
- name: GITEA__server__SSH_PORT
value: "22"
# security
- name: GITEA__service__ENABLE_BASIC_AUTHENTICATION
value: "false"
- name: GITEA__service__DISABLE_REGISTRATION
value: "true"
- name: GITEA__service__ALLOW_ONLY_EXTERNAL_REGISTRATION
value: "false"
- name: GITEA__openid__ENABLE_OPENID_SIGNIN
value: "false"
- name: GITEA__openid__ENABLE_OPENID_SIGNUP
value: "false"
- name: GITEA__ui__DEFAULT_THEME
value: "gitea-dark"
- name: GITEA__ui__THEMES
value: "gitea-dark"
volumeMounts:
- name: gitea-data
mountPath: /data
@@ -43,6 +70,18 @@ spec:
- name: localtime
mountPath: /etc/localtime
readOnly: true
- name: landing-page
mountPath: /data/gitea/templates/home.tmpl
subPath: home.tmpl
readOnly: true
- name: landing-page
mountPath: /data/gitea/public/assets/css/custom-landing.css
subPath: custom-landing.css
readOnly: true
- name: landing-page
mountPath: /data/gitea/public/assets/js/custom-landing.js
subPath: custom-landing.js
readOnly: true
volumes:
- name: gitea-data
hostPath:
@@ -54,13 +93,16 @@ spec:
- name: localtime
hostPath:
path: /etc/localtime
- name: landing-page
configMap:
name: gitea-landing-page
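      # Assumes the gitea-landing-page ConfigMap is created from the template assets, e.g.:
      #   kubectl create configmap gitea-landing-page -n gitea \
      #     --from-file=home.tmpl --from-file=custom-landing.css --from-file=custom-landing.js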
---
apiVersion: v1
kind: Service
metadata:
name: gitea-web-svc
namespace: projects
namespace: gitea
annotations:
tailscale.com/expose: "true" # exposes IP directly
spec:
@@ -79,17 +121,18 @@ apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: gitea
namespace: projects
namespace: gitea
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer # not really working with tailscale
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: tailscale
ingressClassName: nginx
tls:
- hosts:
- gitea.alexmickelson.guru
secretName: gitea-tls-cert
- git.alexmickelson.guru
secretName: git-tls-cert2
rules:
- http:
- host: git.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix


@@ -0,0 +1,97 @@
apiVersion: v1
kind: Namespace
metadata:
name: homeassistant
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: homeassistant
namespace: homeassistant
spec:
replicas: 1
selector:
matchLabels:
app: homeassistant
template:
metadata:
labels:
app: homeassistant
spec:
hostNetwork: true
containers:
- name: homeassistant
image: homeassistant/home-assistant:stable
imagePullPolicy: Always
env:
- name: TZ
value: "America/Denver"
- name: OPENAI_BASE_URL
value: "http://openwebui.beefalo-newton.ts.net/v1"
volumeMounts:
- name: config
mountPath: /config
- name: localtime
mountPath: /etc/localtime
readOnly: true
- name: zigbee-dongle
mountPath: /dev/serial/by-id/usb-Itead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_V2_0cad0783fc73ef11b46be21e313510fd-if00-port0
securityContext:
privileged: true
capabilities:
add:
- NET_ADMIN
- NET_RAW
volumes:
- name: config
hostPath:
path: /data/homeAssistant/config
type: Directory
- name: localtime
hostPath:
path: /etc/localtime
type: File
- name: zigbee-dongle
hostPath:
path: /dev/serial/by-id/usb-Itead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_V2_0cad0783fc73ef11b46be21e313510fd-if00-port0
type: CharDevice
---
apiVersion: v1
kind: Service
metadata:
name: home-assistant
namespace: homeassistant
spec:
selector:
app: homeassistant
ports:
- name: http
protocol: TCP
port: 8123
targetPort: 8123
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: home-assistant-ingress
namespace: homeassistant
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- ha.alexmickelson.guru
secretName: ha-tls-cert
rules:
- host: ha.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: home-assistant
port:
number: 8123


@@ -0,0 +1,75 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: music-assistant-server
namespace: homeassistant
spec:
replicas: 1
selector:
matchLabels:
app: music-assistant-server
template:
metadata:
labels:
app: music-assistant-server
spec:
hostNetwork: true
containers:
- name: music-assistant-server
image: ghcr.io/music-assistant/server:2
imagePullPolicy: Always
env:
- name: LOG_LEVEL
value: "info"
- name: TZ
value: "America/Denver"
volumeMounts:
- name: data
mountPath: /data
volumes:
- name: data
hostPath:
path: /data/music-assistant-server/data
type: DirectoryOrCreate
---
apiVersion: v1
kind: Service
metadata:
name: music-assistant
namespace: homeassistant
# annotations:
# tailscale.com/expose: "true"
spec:
selector:
app: music-assistant-server
ports:
- name: http
protocol: TCP
port: 8095
targetPort: 8095
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: music-assistant-ingress
namespace: homeassistant
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- sound.alexmickelson.guru
secretName: music-assistant-tls-cert
rules:
- host: sound.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: music-assistant
port:
number: 8095
---


@@ -0,0 +1,74 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: zwave-js-ui
namespace: homeassistant
spec:
replicas: 1
selector:
matchLabels:
app: zwave-js-ui
template:
metadata:
labels:
app: zwave-js-ui
spec:
hostNetwork: true
containers:
- name: zwave-js-ui
image: zwavejs/zwave-js-ui:latest
imagePullPolicy: Always
tty: true
env:
- name: SESSION_SECRET
value: "iqpwoeinf9384bw3p48gbwer"
- name: TZ
value: "America/Denver"
volumeMounts:
- name: zwave-data
mountPath: /usr/src/app/store
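        # Use stable /dev/serial/by-id/ paths rather than /dev/ttyUSBX (those can
        # change between reboots); list them with: ls -l /dev/serial/by-id/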
- name: zwave-device-if00
mountPath: /dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if00-port0
- name: zwave-device-if01
mountPath: /dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if01-port0
securityContext:
privileged: true
ports:
- containerPort: 8091
name: http
- containerPort: 3051
name: websocket
volumes:
- name: zwave-data
hostPath:
path: /data/zwave
type: DirectoryOrCreate
- name: zwave-device-if00
hostPath:
path: /dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if00-port0
type: CharDevice
- name: zwave-device-if01
hostPath:
path: /dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if01-port0
type: CharDevice
---
apiVersion: v1
kind: Service
metadata:
name: zwave-js-ui
namespace: homeassistant
annotations:
tailscale.com/expose: "true"
spec:
selector:
app: zwave-js-ui
ports:
- name: http
protocol: TCP
port: 8091
targetPort: 8091
- name: websocket
protocol: TCP
port: 3051
targetPort: 3051
---


@@ -0,0 +1,79 @@
apiVersion: v1
kind: Namespace
metadata:
name: homepage
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: homepage
namespace: homepage
spec:
replicas: 1
selector:
matchLabels:
app: homepage
template:
metadata:
labels:
app: homepage
spec:
restartPolicy: Always
containers:
- name: homepage
image: ghcr.io/gethomepage/homepage:latest
imagePullPolicy: Always
ports:
- containerPort: 3000
name: http
env:
- name: HOMEPAGE_ALLOWED_HOSTS
value: "server.alexmickelson.guru:3001,home.alexmickelson.guru"
volumeMounts:
- name: host-configs
mountPath: /app/config/
volumes:
- name: host-configs
hostPath:
path: /data/homepage
type: DirectoryOrCreate
---
apiVersion: v1
kind: Service
metadata:
name: homepage
namespace: homepage
spec:
selector:
app: homepage
ports:
- name: http
protocol: TCP
port: 3000
targetPort: 3000
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: homepage
namespace: homepage
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- home.alexmickelson.guru
secretName: homepage-tls
rules:
- host: home.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: homepage
port:
number: 3000


@@ -1,782 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
name: ingress-nginx
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resourceNames:
- ingress-nginx-leader
resources:
- leases
verbs:
- get
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx-admission
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx-admission
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx-admission
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx-admission
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: v1
data:
allow-snippet-annotations: "false"
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx-controller
namespace: ingress-nginx
data:
allow-snippet-annotations: "true"
# http-snippet: |
# proxy_cache_path /tmp/nginx-cache levels=1:2 keys_zone=static-cache:2m max_size=100m inactive=7d use_temp_path=off;
# proxy_cache_key $scheme$proxy_host$request_uri;
# proxy_cache_lock on;
# proxy_cache_use_stale updating;
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- appProtocol: http
name: http
port: 80
protocol: TCP
targetPort: http
- appProtocol: https
name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
type: NodePort
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
ports:
- appProtocol: https
name: https-webhook
port: 443
targetPort: webhook
selector:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
type: ClusterIP
# ---
# apiVersion: apps/v1
# kind: Deployment
# metadata:
# labels:
# app.kubernetes.io/component: controller
# app.kubernetes.io/instance: ingress-nginx
# app.kubernetes.io/name: ingress-nginx
# app.kubernetes.io/part-of: ingress-nginx
# app.kubernetes.io/version: 1.10.0
# name: ingress-nginx-controller
# namespace: ingress-nginx
# spec:
# minReadySeconds: 0
# revisionHistoryLimit: 10
# selector:
# matchLabels:
# app.kubernetes.io/component: controller
# app.kubernetes.io/instance: ingress-nginx
# app.kubernetes.io/name: ingress-nginx
# strategy:
# rollingUpdate:
# maxUnavailable: 1
# type: RollingUpdate
# template:
# metadata:
# labels:
# app.kubernetes.io/component: controller
# app.kubernetes.io/instance: ingress-nginx
# app.kubernetes.io/name: ingress-nginx
# app.kubernetes.io/part-of: ingress-nginx
# app.kubernetes.io/version: 1.10.0
# spec:
# hostNetwork: true
# containers:
# - args:
# - /nginx-ingress-controller
# - --election-id=ingress-nginx-leader
# - --controller-class=k8s.io/ingress-nginx
# - --ingress-class=nginx
# - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
# - --validating-webhook=:8443
# - --validating-webhook-certificate=/usr/local/certificates/cert
# - --validating-webhook-key=/usr/local/certificates/key
# - --enable-metrics=false
# env:
# - name: POD_NAME
# valueFrom:
# fieldRef:
# fieldPath: metadata.name
# - name: POD_NAMESPACE
# valueFrom:
# fieldRef:
# fieldPath: metadata.namespace
# - name: LD_PRELOAD
# value: /usr/local/lib/libmimalloc.so
# image: registry.k8s.io/ingress-nginx/controller:v1.10.0@sha256:42b3f0e5d0846876b1791cd3afeb5f1cbbe4259d6f35651dcc1b5c980925379c
# imagePullPolicy: IfNotPresent
# lifecycle:
# preStop:
# exec:
# command:
# - /wait-shutdown
# livenessProbe:
# failureThreshold: 5
# httpGet:
# path: /healthz
# port: 10254
# scheme: HTTP
# initialDelaySeconds: 10
# periodSeconds: 10
# successThreshold: 1
# timeoutSeconds: 1
# name: controller
# ports:
# - containerPort: 80
# name: http
# protocol: TCP
# - containerPort: 443
# name: https
# protocol: TCP
# - containerPort: 8443
# name: webhook
# protocol: TCP
# readinessProbe:
# failureThreshold: 3
# httpGet:
# path: /healthz
# port: 10254
# scheme: HTTP
# initialDelaySeconds: 10
# periodSeconds: 10
# successThreshold: 1
# timeoutSeconds: 1
# resources:
# requests:
# cpu: 100m
# memory: 90Mi
# securityContext:
# allowPrivilegeEscalation: false
# capabilities:
# add:
# - NET_BIND_SERVICE
# drop:
# - ALL
# readOnlyRootFilesystem: false
# runAsNonRoot: true
# runAsUser: 101
# seccompProfile:
# type: RuntimeDefault
# volumeMounts:
# - mountPath: /usr/local/certificates/
# name: webhook-cert
# readOnly: true
# dnsPolicy: ClusterFirst
# nodeSelector:
# kubernetes.io/os: linux
# serviceAccountName: ingress-nginx
# terminationGracePeriodSeconds: 300
# volumes:
# - name: webhook-cert
# secret:
# secretName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx-admission-create
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx-admission-create
spec:
containers:
- args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.0@sha256:44d1d0e9f19c63f58b380c5fddaca7cf22c7cee564adeff365225a5df5ef3334
imagePullPolicy: IfNotPresent
name: create
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
template:
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
spec:
hostNetwork: true
containers:
- args:
- /nginx-ingress-controller
- --election-id=ingress-nginx-leader
- --controller-class=k8s.io/ingress-nginx
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
- --enable-metrics=false
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
image: registry.k8s.io/ingress-nginx/controller:v1.10.0@sha256:42b3f0e5d0846876b1791cd3afeb5f1cbbe4259d6f35651dcc1b5c980925379c
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
name: controller
ports:
- containerPort: 80
name: http
protocol: TCP
- containerPort: 443
name: https
protocol: TCP
- containerPort: 8443
name: webhook
protocol: TCP
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
resources:
requests:
cpu: 100m
memory: 90Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: false
runAsNonRoot: true
runAsUser: 101
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /usr/local/certificates/
name: webhook-cert
readOnly: true
dnsPolicy: ClusterFirst
nodeSelector:
# kubernetes.io/hostname: alex-office2
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
apiVersion: batch/v1
kind: Job
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx-admission-patch
namespace: ingress-nginx
spec:
template:
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx-admission-patch
spec:
containers:
- args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.4.0@sha256:44d1d0e9f19c63f58b380c5fddaca7cf22c7cee564adeff365225a5df5ef3334
imagePullPolicy: IfNotPresent
name: patch
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
nodeSelector:
kubernetes.io/os: linux
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
app.kubernetes.io/component: controller
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: nginx
spec:
controller: k8s.io/ingress-nginx
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
app.kubernetes.io/component: admission-webhook
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
app.kubernetes.io/version: 1.10.0
name: ingress-nginx-admission
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: ingress-nginx-controller-admission
namespace: ingress-nginx
path: /networking/v1/ingresses
failurePolicy: Fail
matchPolicy: Equivalent
name: validate.nginx.ingress.kubernetes.io
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
sideEffects: None

View File

@@ -1,8 +1,13 @@
apiVersion: v1
kind: Namespace
metadata:
name: jellyfin
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellyfin
namespace: projects
namespace: jellyfin
spec:
replicas: 1
selector:
@@ -13,6 +18,11 @@ spec:
labels:
app: jellyfin
spec:
hostNetwork: true
securityContext:
fsGroup: 1000
supplementalGroups:
- 303 # render group for GPU access
containers:
- name: jellyfin
image: jellyfin/jellyfin
@@ -20,6 +30,8 @@ spec:
runAsUser: 1000
runAsGroup: 1000
volumeMounts:
- name: dri-device
mountPath: /dev/dri/renderD128
- name: config-volume
mountPath: /config
- name: cache-volume
@@ -30,6 +42,9 @@ spec:
mountPath: /movies
- name: tvshows-volume
mountPath: /tvshows
- name: home-videos-volume
mountPath: /home-videos
readOnly: true
ports:
- containerPort: 8096
name: jellyfin
@@ -45,11 +60,58 @@ spec:
path: /data/jellyfin/cache
- name: music-volume
hostPath:
path: /data/jellyfin/music
path: /data/media/music/tagged
- name: movies-volume
hostPath:
path: /data/jellyfin/movies
path: /data/media/movies
- name: tvshows-volume
hostPath:
path: /data/jellyfin/tvshows
path: /data/media/tvshows
- name: home-videos-volume
hostPath:
path: /data/nextcloud/html/data/alex/files/Documents/home-video
- name: dri-device
hostPath:
path: /dev/dri/renderD128
type: CharDevice
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
name: jellyfin
namespace: jellyfin
spec:
selector:
app: jellyfin
ports:
- name: http
protocol: TCP
port: 8096
targetPort: 8096
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jellyfin
namespace: jellyfin
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- jellyfin.alexmickelson.guru
secretName: jellyfin-tls
rules:
- host: jellyfin.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jellyfin
port:
number: 8096

View File

@@ -1,14 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jellyfin-ingress
namespace: projects
spec:
rules:
- host: jellyfin.alexmickelson.guru
http:
paths:
- path: /
backend:
service: jellyfin
port: 8096

View File

@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: jellyfin
namespace: projects
spec:
selector:
app: jellyfin
ports:
- protocol: TCP
port: 8096
targetPort: 8096
type: ClusterIP

View File

@@ -0,0 +1,126 @@
apiVersion: v1
kind: Namespace
metadata:
name: minecraft
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: minecraft
namespace: minecraft
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: minecraft
template:
metadata:
labels:
app: minecraft
spec:
containers:
- name: tailscale
image: tailscale/tailscale:latest
env:
- name: TS_STATE_DIR
value: "/var/lib/tailscale"
- name: TS_KUBE_SECRET
value: ""
- name: TS_AUTHKEY
value: "tskey-auth-REPLACE_ME"
- name: TS_HOSTNAME
value: "minecraft"
volumeMounts:
- name: tailscale-data
mountPath: /var/lib/tailscale
- name: dev-tun
mountPath: /dev/net/tun
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_MODULE
- name: minecraft
image: itzg/minecraft-server:java21
stdin: true
tty: true
env:
- name: EULA
value: "true"
- name: MEMORY
value: "6G"
- name: CF_OVERRIDES_EXCLUSIONS
value: |
# Not applicable for server side
shaderpacks/**
volumeMounts:
- name: minecraft-data
mountPath: /data
- name: modpacks
mountPath: /modpacks
readOnly: true
resources:
requests:
memory: "6Gi"
limits:
memory: "8Gi"
- name: minecraft-cobblemon
image: itzg/minecraft-server:java21
stdin: true
tty: true
env:
- name: EULA
value: "true"
- name: TYPE
value: "AUTO_CURSEFORGE"
- name: CF_SLUG
value: "cobbleverse-cobblemon"
- name: CF_MODPACK_ZIP
value: "/modpacks/COBBLEVERSE-1.7.30-CF.zip"
- name: CF_API_KEY
value: "$CF_API_KEY"
- name: MEMORY
value: "4G"
- name: SERVER_PORT
value: "2222"
- name: RCON_PORT
value: "25576"
- name: CF_OVERRIDES_EXCLUSIONS
value: |
# Not applicable for server side
shaderpacks/**
resourcepacks/**
volumeMounts:
- name: cobblemon-data
mountPath: /data
- name: modpacks
mountPath: /modpacks
readOnly: true
resources:
requests:
memory: "4Gi"
limits:
memory: "6Gi"
volumes:
- name: minecraft-data
hostPath:
path: /data/minecraft/data
type: DirectoryOrCreate
- name: modpacks
hostPath:
path: /data/minecraft/modpacks
type: DirectoryOrCreate
- name: tailscale-data
hostPath:
path: /data/minecraft/tailscale
type: DirectoryOrCreate
- name: dev-tun
hostPath:
path: /dev/net/tun
type: CharDevice
- name: cobblemon-data
hostPath:
path: /data/minecraft/cobblemon-data
type: DirectoryOrCreate

View File

@@ -1 +0,0 @@
test/

View File

@@ -1,13 +0,0 @@
FROM alpine:latest
RUN apk add --no-cache nfs-utils bash
RUN mkdir -p /exports
COPY entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
EXPOSE 2049 20048
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]

View File

@@ -1,24 +0,0 @@
#!/bin/bash
set -e
ALLOWED_CLIENTS="${ALLOWED_CLIENTS:-*}"
echo "/exports $ALLOWED_CLIENTS(rw,sync,no_subtree_check,no_root_squash)" > /etc/exports
rpcbind || true
rpc.statd || true
echo "Starting NFS server..."
mount -t nfsd nfsd /proc/fs/nfsd
rpc.nfsd -N 3 -V 4 --grace-time 10 $nfsd_debug_opt &
rpc.mountd -N 2 -N 3 -V 4 --foreground $mountd_debug_opt &
wait
# rpc.mountd -N 2 -N 3 -V 4 --foreground
# wait

View File

@@ -1,19 +0,0 @@
<https://wiki.alpinelinux.org/wiki/Setting_up_an_NFS_server>
Example `docker run` invocation:
```bash
docker run --rm -it \
--name nfs-server \
--cap-add SYS_ADMIN \
-e ALLOWED_CLIENTS="127.0.0.0/24" \
-v "$(pwd)"/test:/exports \
--network host \
nfs-server
```
Currently not working. I like the idea of running the NFS server in a Docker container, but doing it as a NixOS module is probably better.
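Since the note points toward the NixOS-module route, here is a minimal sketch of that approach using the standard `services.nfs.server` options; the export path and client subnet simply mirror the container example above and are assumptions, not values from this repo:
```nix
{ ... }:
{
  # Kernel NFS server managed by NixOS instead of a container.
  services.nfs.server = {
    enable = true;
    # Same export semantics as the container's /etc/exports line;
    # the path and subnet here are illustrative.
    exports = ''
      /exports 192.168.0.0/24(rw,sync,no_subtree_check,no_root_squash)
    '';
  };
  # NFSv4 traffic only needs TCP 2049 open.
  networking.firewall.allowedTCPPorts = [ 2049 ];
}
```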

View File

@@ -1,33 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: audiobookshelf-ingress
namespace: projects
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- audiobook.alexmickelson.guru
secretName: audiobookshelf-tls-cert
rules:
- host: audiobook.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: audiobookshelf-service
port:
number: 13378
---
apiVersion: v1
kind: Service
metadata:
name: audiobookshelf-service
namespace: projects
spec:
type: ExternalName
externalName: 100.122.128.107

View File

@@ -19,15 +19,35 @@ spec:
pathType: Prefix
backend:
service:
name: grafana-service
name: grafana
port:
number: 3000
---
apiVersion: v1
kind: Service
metadata:
name: grafana-service
name: grafana
namespace: projects
spec:
type: ExternalName
externalName: 100.122.128.107
ports:
- port: 3000
targetPort: 3000
protocol: TCP
---
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
name: grafana
namespace: projects
labels:
kubernetes.io/service-name: grafana
addressType: IPv4
ports:
- name: http
port: 3000
protocol: TCP
endpoints:
- addresses:
- 100.122.128.107
conditions:
ready: true

View File

@@ -1,33 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: home-assistant-ingress
namespace: projects
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- ha.alexmickelson.guru
secretName: ha-tls-cert
rules:
- host: ha.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: home-assistant-service
port:
number: 8123
---
apiVersion: v1
kind: Service
metadata:
name: home-assistant-service
namespace: projects
spec:
type: ExternalName
externalName: 100.122.128.107

View File

@@ -1,33 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: homepage-ingress
namespace: projects
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- home.alexmickelson.guru
secretName: home-tls-cert
rules:
- host: home.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: homepage-service
port:
number: 3001
---
apiVersion: v1
kind: Service
metadata:
name: homepage-service
namespace: projects
spec:
type: ExternalName
externalName: 100.122.128.107

View File

@@ -23,15 +23,35 @@ spec:
pathType: Prefix
backend:
service:
name: immich-service
name: immich
port:
number: 2283
---
apiVersion: v1
kind: Service
metadata:
name: immich-service
name: immich
namespace: projects
spec:
type: ExternalName
externalName: 100.122.128.107
ports:
- port: 2283
targetPort: 2283
protocol: TCP
---
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
name: immich
namespace: projects
labels:
kubernetes.io/service-name: immich
addressType: IPv4
ports:
- name: http
port: 2283
protocol: TCP
endpoints:
- addresses:
- 100.122.128.107
conditions:
ready: true

View File

@@ -1,33 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jellyfin-proxy-ingress
namespace: projects
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- jellyfin.alexmickelson.guru
secretName: jellyfin-tls-cert
rules:
- host: jellyfin.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jellyfin-service
port:
number: 8096
---
apiVersion: v1
kind: Service
metadata:
name: jellyfin-service
namespace: projects
spec:
type: ExternalName
externalName: 100.122.128.107

View File

@@ -1,33 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: musicassistant-ingress
namespace: projects
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- sound.alexmickelson.guru
secretName: sound-tls-cert
rules:
- host: sound.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: musicassistant-service
port:
number: 8095
---
apiVersion: v1
kind: Service
metadata:
name: musicassistant-service
namespace: projects
spec:
type: ExternalName
externalName: 100.122.128.107

View File

@@ -13,9 +13,9 @@ metadata:
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/proxy-buffer-size: 225m
nginx.ingress.kubernetes.io/proxy-buffering: "on"
nginx.ingress.kubernetes.io/proxy-connect-timeout: 60s
nginx.ingress.kubernetes.io/proxy-read-timeout: "1800"
nginx.ingress.kubernetes.io/proxy-request-buffering: "on"
nginx.ingress.kubernetes.io/proxy-connect-timeout: "60"
nginx.ingress.kubernetes.io/proxy-read-timeout: "1800"
nginx.ingress.kubernetes.io/proxy-send-timeout: "1800"
spec:
ingressClassName: nginx
@@ -31,15 +31,35 @@ spec:
pathType: Prefix
backend:
service:
name: nextcloud-service
name: nextcloud
port:
number: 9001
---
apiVersion: v1
kind: Service
metadata:
name: nextcloud-service
name: nextcloud
namespace: projects
spec:
type: ExternalName
externalName: 100.122.128.107
ports:
- port: 9001
targetPort: 9001
protocol: TCP
---
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
name: nextcloud
namespace: projects
labels:
kubernetes.io/service-name: nextcloud
addressType: IPv4
ports:
- name: http
port: 9001
protocol: TCP
endpoints:
- addresses:
- 100.122.128.107
conditions:
ready: true

View File

@@ -19,15 +19,35 @@ spec:
pathType: Prefix
backend:
service:
name: prometheus-service
name: prometheus
port:
number: 9091
---
apiVersion: v1
kind: Service
metadata:
name: prometheus-service
name: prometheus
namespace: projects
spec:
type: ExternalName
externalName: 100.122.128.107
ports:
- port: 9091
targetPort: 9091
protocol: TCP
---
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
name: prometheus
namespace: projects
labels:
kubernetes.io/service-name: prometheus
addressType: IPv4
ports:
- name: http
port: 9091
protocol: TCP
endpoints:
- addresses:
- 100.122.128.107
conditions:
ready: true

View File

@@ -34,8 +34,33 @@ Currently Cloudflare domains cannot be CNAME'd to Tailscale domains:
## Kubernetes ingress controller
I had to modify the base ingress to allow for use on ports 80 and 443. There should be a way to do this with Helm, but I can never quite get it to work.
<!-- I had to modify the base ingress to allow for use on 80 and 443. There should be a way to do this with helm, but I can never quite get it to work
this is the original: https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.12.0/deploy/static/provider/baremetal/deploy.yaml
the `ingress-nginx-controller` was changed to a DaemonSet rather than a Deployment
-->
Ingress controller install via Helm:
```bash
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx \
--namespace ingress-nginx \
--create-namespace \
--set controller.kind=DaemonSet \
--set controller.hostPort.enabled=true \
--set controller.hostPort.ports.http=80 \
--set controller.hostPort.ports.https=443 \
--set controller.service.type=NodePort \
--set controller.allowSnippetAnnotations=true \
--set controller.config.annotations-risk-level=Critical \
--set controller.metrics.enabled=false \
--set controller.ingressClassResource.default=true
```
<!-- https://github.com/kubernetes/ingress-nginx/issues/12618 explains why the annotation risk level needs to be Critical -->
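For a quick sanity check that the Helm-managed controller actually came up as a DaemonSet bound to the host ports, something like the following works (the node IP is the one used elsewhere in this repo; substitute your own):
```bash
kubectl -n ingress-nginx get daemonset ingress-nginx-controller
kubectl get ingressclass
# with hostPort 80/443 bound, nginx should answer directly on the node
curl -sk -o /dev/null -w '%{http_code}\n' https://100.122.128.107/
```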

View File

@@ -122,7 +122,7 @@
dbus
# protontricks stuff?
freetype
# freetype.bin
fontconfig
@@ -131,6 +131,8 @@
zlib
quickemu
git-lfs
];
programs.nix-ld.enable = true;

View File

@@ -72,6 +72,7 @@
git
tmux
vscode
zip
];
};
home-manager.users.alex = { pkgs, ... }: {

View File

@@ -9,8 +9,7 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "alex-desktop"; # Define your hostname.
# networking.wireless.enable = true; # Enables wireless support via wpa_supplicant.
networking.hostName = "alex-desktop";
nix.settings.experimental-features = [ "nix-command" "flakes" ];
networking.networkmanager.enable = true;
@@ -50,7 +49,20 @@
alsa.enable = true;
alsa.support32Bit = true;
pulse.enable = true;
wireplumber = {
enable = true;
extraConfig = {
"disable-x11" = {
"wireplumber.settings" = {
"support.x11" = false;
};
};
};
};
};
users.users.alex = {
isNormalUser = true;
@@ -73,6 +85,7 @@
services.fwupd.enable = true;
hardware.enableAllFirmware = true;
hardware.firmware = with pkgs; [ linux-firmware ];
programs.nix-ld.enable = true;
nixpkgs.config.allowUnfree = true;
environment.systemPackages = with pkgs; [
@@ -90,29 +103,20 @@
iperf
mangohud
mlocate
kdePackages.kdeconnect-kde
# wineWowPackages.stable
# wine
# (wine.override { wineBuild = "wine64"; })
# wine64
# wineWowPackages.staging
# winetricks
# wineWowPackages.waylandFull
wineWowPackages.stable
wine
(wine.override { wineBuild = "wine64"; })
wine64
wineWowPackages.staging
winetricks
wineWowPackages.waylandFull
# woeusb ntfs3g
# (lutris.override {
# extraLibraries = pkgs: [
# # List library dependencies here
# ];
# extraPkgs = pkgs: [
# # List package dependencies here
# ];
# })
mesa-gl-headers
mesa
driversi686Linux.mesa
# mesa-gl-headers
# mesa
# driversi686Linux.mesa
# mesa-demos
];
services.tailscale.enable = true;
services.openssh.enable = true;
@@ -122,20 +126,6 @@
programs.fish.enable = true;
services.flatpak.enable = true;
hardware.steam-hardware.enable = true;
programs.adb.enable = true; # graphene
# programs.gamescope = {
# enable = true;
# capSysNice = true;
# };
# programs.gamemode.enable = true;
# programs.steam = {
# enable = true;
# gamescopeSession.enable = true;
# remotePlay.openFirewall = true; # Open ports in the firewall for Steam Remote Play
# dedicatedServer.openFirewall = true; # Open ports in the firewall for Source Dedicated Server
# localNetworkGameTransfers.openFirewall = true; # Open ports in the firewall for Steam Local Network Game Transfers
# };
networking.firewall.enable = false;
hardware.graphics = {
@@ -143,7 +133,6 @@
enable = true;
};
fileSystems."/steam-data" =
{
device = "/dev/disk/by-uuid/437358fd-b9e4-46e2-bd45-f6b368acaac1";
@@ -155,6 +144,40 @@
boot.zfs.extraPools = [ "data" "data2" ];
systemd.timers."nix-garbage-collect-weekly" = {
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = "weekly";
Persistent = true;
};
};
systemd.services."nix-garbage-collect-weekly" = {
serviceConfig = {
Type = "oneshot";
ExecStart = "/run/current-system/sw/bin/nix-collect-garbage --delete-older-than 7d";
};
};
# fingerprint
# services.fprintd = {
# enable = true;
# package = pkgs.fprintd.override {
# libfprint = pkgs.libfprint;
# };
# };
# services.gnome.gnome-keyring.enable = true;
# security.polkit.enable = true;
# security.pam.services.gdm.fprintAuth = true;
# security.pam.services.gdm.enableGnomeKeyring = true;
# security.pam.services.sudo.fprintAuth = true;
# services.udev.extraRules = ''
# ACTION=="add", SUBSYSTEM=="usb", ATTR{idVendor}=="04f3", ATTR{idProduct}=="0c3d", TEST=="power/control", ATTR{power/control}="on"
# '';
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave

View File

@@ -20,11 +20,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1759520764,
"narHash": "sha256-jERdfBm1rQc9qAdPi1lMEv9inEl7kvvnXCst//ZD2Yc=",
"lastModified": 1767726775,
"narHash": "sha256-mpA/pevxXJzu/5rbdb7u0BzgEJCDDQd1EZ3oyyOo8VI=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "bcbcd4e5a8cb24199859dd73e448494c8c7d55cb",
"rev": "f8ce89e3edbc488a5b17c559ad55f083282420e9",
"type": "github"
},
"original": {

View File

@@ -19,6 +19,7 @@
};
models = {
"gpt-oss-120b" = { };
"devstral-123b" = { };
};
};
home = {
@@ -30,6 +31,15 @@
"gpt-oss-20b" = { };
};
};
office = {
npm = "@ai-sdk/openai-compatible";
options = {
baseURL = "http://ai-office-server:8081/v1";
};
models = {
"gpt-oss-20b" = { };
};
};
};
mcp = {
playwright = {

View File

@@ -1,5 +1,12 @@
{ pkgs, ... }:
{
imports = [ ./fish.home.nix ];
customFish = {
dotnetPackage = pkgs.dotnetCorePackages.sdk_8_0;
bitwardenSshAgent = true;
};
home.packages = with pkgs; [
vscode-fhs
gnome-software
@@ -43,37 +50,7 @@
package = pkgs.gnome-themes-extra;
};
};
programs.fish = {
enable = true;
shellInit = ''
function commit
git add --all
git commit -m "$argv"
git push
end
# have ctrl+backspace delete previous word
bind \e\[3\;5~ kill-word
# have ctrl+delete delete following word
bind \b backward-kill-word
set -U fish_user_paths ~/.local/bin $fish_user_paths
#set -U fish_user_paths ~/.dotnet $fish_user_paths
#set -U fish_user_paths ~/.dotnet/tools $fish_user_paths
export VISUAL=vim
export EDITOR="$VISUAL"
export DOTNET_WATCH_RESTART_ON_RUDE_EDIT=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
export DOTNET_ROOT=${pkgs.dotnetCorePackages.sdk_8_0}
set -x LIBVIRT_DEFAULT_URI qemu:///system
set -x TERM xterm-256color # ghostty
export SSH_AUTH_SOCK=/home/alex/.bitwarden-ssh-agent.sock # ssh agent
'';
};
home.file = {
".config/lazydocker/config.yml".text = ''
gui:

View File

@@ -1,5 +1,7 @@
{ pkgs, ... }:
{
imports = [ ./fish.home.nix ];
home.packages = with pkgs; [
vscode-fhs
gnome-software
@@ -38,31 +40,7 @@
package = pkgs.gnome-themes-extra;
};
};
programs.fish = {
enable = true;
shellInit = ''
function commit
git add --all
git commit -m "$argv"
git push
end
# have ctrl+backspace delete previous word
bind \e\[3\;5~ kill-word
# have ctrl+delete delete following word
bind \b backward-kill-word
set -U fish_user_paths ~/.local/bin $fish_user_paths
#set -U fish_user_paths ~/.dotnet $fish_user_paths
#set -U fish_user_paths ~/.dotnet/tools $fish_user_paths
export VISUAL=vim
export EDITOR="$VISUAL"
export DOTNET_WATCH_RESTART_ON_RUDE_EDIT=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
set -x LIBVIRT_DEFAULT_URI qemu:///system
'';
};
home.file = {
".config/lazydocker/config.yml".text = ''
gui:

View File

@@ -1,5 +1,10 @@
{ pkgs, ... }:
{
imports = [ ./fish.home.nix ];
customFish = {
bluetuiAliases = true;
};
home.packages = with pkgs; [
k9s
jwt-cli
@@ -29,37 +34,14 @@
programs.direnv = {
enable = true;
};
programs.ghostty = {
enable = true;
enableFishIntegration = true;
};
home.sessionVariables = {
EDITOR = "vim";
};
programs.fish = {
enable = true;
shellInit = ''
alias blue="bluetui"
function commit
git add --all
git commit -m "$argv"
git push
end
# have ctrl+backspace delete previous word
bind \e\[3\;5~ kill-word
# have ctrl+delete delete following word
bind \b backward-kill-word
set -U fish_user_paths ~/.local/bin $fish_user_paths
#set -U fish_user_paths ~/.dotnet $fish_user_paths
#set -U fish_user_paths ~/.dotnet/tools $fish_user_paths
export VISUAL=vim
export EDITOR="$VISUAL"
export DOTNET_WATCH_RESTART_ON_RUDE_EDIT=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
set -x LIBVIRT_DEFAULT_URI qemu:///system
'';
};
home.file = {
".config/lazydocker/config.yml".text = ''
gui:

View File

@@ -4,27 +4,39 @@
vscode-fhs
gnome-software
gnome-tweaks
# nvtopPackages.nvidia
nerd-fonts.fira-code
nerd-fonts.droid-sans-mono
# fira-code
# (nerdfonts.override { fonts = [ "FiraCode" "DroidSansMono" ]; })
kubernetes-helm
busybox
ghostty
elixir_1_18
inotify-tools # needed for elixir hot-reloading
nodejs_24
pnpm
legcord
ffmpeg
gh
bitwarden-desktop
jellyfin-tui
bluetui
# bitwarden-desktop
lazydocker
elixir
elixir-ls
inotify-tools
watchman
];
programs.ghostty = {
enable = true;
enableFishIntegration = true;
settings = {
window-inherit-working-directory = "false";
theme = "Atom";
font-size = 14;
window-height = 30;
window-width = 100;
};
};
fonts.fontconfig.enable = true;

View File

@@ -0,0 +1,75 @@
{ pkgs, lib, config, ... }:
let
cfg = config.customFish;
in {
options.customFish = {
# Opt-in: only enable if the relevant tools are installed on this machine
bluetuiAliases = lib.mkEnableOption "bluetui/jellyfin-tui shell aliases";
dotnetPackage = lib.mkOption {
type = lib.types.nullOr lib.types.package;
default = null;
description = "Enable dotnet env vars and PATH entries. Set to the desired SDK package (e.g. pkgs.dotnetCorePackages.sdk_8_0).";
};
bitwardenSshAgent = lib.mkEnableOption "Bitwarden SSH agent (sets SSH_AUTH_SOCK)";
};
config = {
programs.fish = {
enable = true;
shellInit = lib.concatStringsSep "\n" (lib.filter (s: s != "") [
# https://gist.github.com/thomd/7667642
''
export LS_COLORS=':di=95'
function commit
git add --all
git commit -m "$argv"
for remote in (git remote)
git pull $remote
git push $remote
end
end
# have ctrl+backspace delete previous word
bind \e\[3\;5~ kill-word
# have ctrl+delete delete following word
bind \b backward-kill-word
set -U fish_user_paths ~/.local/bin ~/bin ~/.dotnet ~/.dotnet/tools $fish_user_paths
set fish_pager_color_selected_background --background='00399c'
export VISUAL=vim
export EDITOR="$VISUAL"
set -x LIBVIRT_DEFAULT_URI qemu:///system
set -x TERM xterm-256color
if test -f "$HOME/.cargo/env.fish"
source "$HOME/.cargo/env.fish"
end
''
(lib.optionalString cfg.bluetuiAliases ''
alias blue="bluetui"
alias jelly="jellyfin-tui"
'')
(lib.optionalString (cfg.dotnetPackage != null) ''
export DOTNET_WATCH_RESTART_ON_RUDE_EDIT=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
export DOTNET_ROOT=${cfg.dotnetPackage}
'')
(lib.optionalString cfg.bitwardenSshAgent ''
export SSH_AUTH_SOCK=$HOME/.bitwarden-ssh-agent.sock
'')
]);
};
};
}

View File

@@ -5,5 +5,6 @@
opencode
quickemu
tree
kubernetes-helm
];
}

View File

@@ -2,10 +2,20 @@
let
opencodeFlake = builtins.getFlake (toString ../flakes/opencode);
monitorTuiFlake = builtins.getFlake (toString ../../monitors/monitor-tui-rs);
zenBrowserFlake = builtins.getFlake "github:youwen5/zen-browser-flake";
nixgl = import
(fetchTarball "https://github.com/nix-community/nixGL/archive/main.tar.gz")
{ };
in {
imports = [ ./fish.home.nix ];
customFish = {
bluetuiAliases = true;
dotnetPackage = pkgs.dotnetCorePackages.sdk_8_0;
bitwardenSshAgent = true;
};
home.username = "alexm";
home.homeDirectory = "/home/alexm";
nixpkgs.config.allowUnfree = true;
@@ -14,16 +24,16 @@ in {
jwt-cli
fish
kubectl
(lazydocker.overrideAttrs (oldAttrs: rec {
version = "0.24.1";
src = pkgs.fetchFromGitHub {
owner = "jesseduffield";
repo = "lazydocker";
rev = "v${version}";
hash = "sha256-cVjDdrxmGt+hj/WWP9B3BT739k9SSr4ryye5qWb3XNM=";
};
}))
# lazydocker
# (lazydocker.overrideAttrs (oldAttrs: rec {
# version = "0.24.4";
# src = pkgs.fetchFromGitHub {
# owner = "jesseduffield";
# repo = "lazydocker";
# rev = "v${version}";
# hash = "sha256-cW90/yblSLBkcR4ZdtcSI9MXFjOUxyEectjRn9vZwvg=";
# };
# }))
lazydocker
traceroute
(with dotnetCorePackages; combinePackages [ sdk_8_0 sdk_9_0 ])
nodejs_22
@@ -39,7 +49,6 @@ in {
iperf
#makemkv
#elixir_1_18
#inotify-tools
# gnome-themes-extra
uv
ghostty
@@ -50,16 +59,25 @@ in {
firefoxpwa
bluetui
#nixfmt-classic
opencodeFlake.packages.${system}.opencode
opencodeFlake.packages.${pkgs.stdenv.hostPlatform.system}.opencode
monitorTuiFlake.packages.${pkgs.stdenv.hostPlatform.system}.default
(config.lib.nixGL.wrap zenBrowserFlake.packages.${pkgs.stdenv.hostPlatform.system}.default)
bitwarden-desktop
wiremix
moonlight-qt
(config.lib.nixGL.wrap moonlight-qt)
nvtopPackages.amd
# jan
# texlivePackages.jetbrainsmono-otf
# nerd-fonts.fira-code
# dejavu_fonts
# vscode-fhs
# aider-chat-full
# codex
elixir
elixir-ls
inotify-tools
watchman
];
fonts.fontconfig.enable = true;
programs.firefox = {
@@ -69,46 +87,18 @@ in {
};
programs.direnv = { enable = true; };
programs.ghostty = { enable = true; };
programs.fish = {
programs.ghostty = {
enable = true;
shellInit = ''
# https://gist.github.com/thomd/7667642
export LS_COLORS=':di=95'
function commit
git add --all
git commit -m "$argv"
git pull
git push
end
# have ctrl+backspace delete previous word
bind \e\[3\;5~ kill-word
# have ctrl+delete delete following word
bind \b backward-kill-word
alias blue="bluetui"
alias jelly="jellyfin-tui"
set -U fish_user_paths ~/.local/bin $fish_user_paths
set -U fish_user_paths ~/bin $fish_user_paths
set -U fish_user_paths ~/.dotnet $fish_user_paths
set -U fish_user_paths ~/.dotnet/tools $fish_user_paths
set fish_pager_color_selected_background --background='00399c'
export VISUAL=vim
export EDITOR="$VISUAL"
export DOTNET_WATCH_RESTART_ON_RUDE_EDIT=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
export DOTNET_ROOT=${pkgs.dotnetCorePackages.sdk_8_0}
set -x LIBVIRT_DEFAULT_URI qemu:///system
set -x TERM xterm-256color # ghostty
export SSH_AUTH_SOCK=/home/alexm/.bitwarden-ssh-agent.sock # ssh agent
'';
enableFishIntegration = true;
settings = {
window-inherit-working-directory = "false";
theme = "Atom";
font-size = "18";
window-height = "30";
window-width = "120";
};
};
home.file = {
".config/lazydocker/config.yml".text = ''
gui:
@@ -193,6 +183,28 @@ in {
Terminal=false
Categories=Network;WebBrowser;
'';
".local/share/applications/zen-browser.desktop".text = ''
[Desktop Entry]
Version=1.0
Type=Application
Name=Zen Browser
Comment=A calmer Firefox-based browser
Exec=nixGLIntel zen
Icon=${zenBrowserFlake.packages.${pkgs.stdenv.hostPlatform.system}.default}/share/icons/hicolor/128x128/apps/zen.png
Terminal=false
Categories=Network;WebBrowser;
MimeType=text/html;text/xml;application/xhtml+xml;x-scheme-handler/http;x-scheme-handler/https;
StartupWMClass=zen
Actions=new-window;new-private-window;
[Desktop Action new-window]
Name=Open a New Window
Exec=nixGLIntel zen --new-window
[Desktop Action new-private-window]
Name=Open a New Private Window
Exec=nixGLIntel zen --private-window
'';
};
home.sessionVariables = { EDITOR = "vim"; };
@@ -222,6 +234,5 @@ in {
package = pkgs.gnome-themes-extra;
};
};
# Let Home Manager install and manage itself.
programs.home-manager.enable = true;
}

View File

@@ -7,6 +7,7 @@
<home-manager/nixos>
./modules/k3s.nix
./modules/pci-passthrough.nix
./modules/gitea-runner.nix
];
security.pam.loginLimits = [
{
@@ -58,6 +59,9 @@
description = "github";
extraGroups = [ "docker" ];
shell = pkgs.fish;
packages = with pkgs; [
kubernetes-helm
];
};
users.users.alex = {
isNormalUser = true;
@@ -92,6 +96,7 @@
nixpkgs.config.allowUnfree = true;
environment.systemPackages = with pkgs; [
bash
vim
wget
curl
@@ -123,6 +128,11 @@
];
services.envfs.enable = true;
security.sudo = {
enable = true;
wheelNeedsPassword = true;
};
# printing
services.printing = {
enable = true;
@@ -167,13 +177,6 @@
package = pkgs.qemu_kvm;
runAsRoot = true;
swtpm.enable = true;
ovmf = {
enable = true;
packages = [ pkgs.OVMFFull.fd ];
# packages = [
# (pkgs.OVMF.override { secureBoot = true; tpmSupport = true; }).fd
# ];
};
};
};
networking.interfaces.enp5s0.useDHCP = true;
@@ -184,18 +187,13 @@
};
};
# not working yet; in theory this simplifies the VM XML
# environment.etc."qemu/edk2-x86_64-secure-code.fd".source = "${pkgs.OVMF.fd}/FV/OVMF_CODE.secboot.fd";
# environment.etc."qemu/edk2-i386-vars.fd".source = "${pkgs.OVMF.fd}/FV/OVMF_VARS.fd";
# environment.etc."qemu/edk2-x86_64-secure-code.fd".source = "${pkgs.OVMF.fd}/FV/OVMF_CODE.secboot.fd";
# environment.etc."qemu/edk2-x86_64-secure-vars.fd".source = "${pkgs.OVMF.fd}/FV/OVMF_VARS.secboot.fd";
environment.etc = {
"qemu/edk2-x86_64-secure-code.fd".source =
lib.mkForce "${pkgs.OVMF.fd}/FV/OVMF_CODE.ms.fd";
lib.mkForce "${pkgs.OVMFFull.fd}/FV/OVMF_CODE.ms.fd";
"qemu/edk2-x86_64-secure-vars.fd".source =
lib.mkForce "${pkgs.OVMF.fd}/FV/OVMF_VARS.ms.fd";
lib.mkForce "${pkgs.OVMFFull.fd}/FV/OVMF_VARS.ms.fd";
"qemu/OVMF_VARS.fd".source =
lib.mkForce "${pkgs.OVMFFull.fd}/FV/OVMF_VARS.fd";
};
systemd.tmpfiles.rules = [
"d /var/lib/libvirt/qemu/nvram 0755 root root -"
@@ -209,7 +207,7 @@
boot.supportedFilesystems = [ "zfs" ];
boot.zfs.forceImportRoot = false;
networking.hostId = "eafe9551";
boot.zfs.extraPools = [ "data-ssd" "backup" "vms" "vms-2" ];
boot.zfs.extraPools = [ "data-ssd" "backup" "vms-2" "vms-3" ];
services.sanoid = {
enable = true;
templates.production = {
@@ -266,7 +264,6 @@
tokenFile = "/data/runner/github-infrastructure-token.txt";
url = "https://github.com/alexmickelson/infrastructure";
extraLabels = [ "home-server" ];
#workDir = "/data/runner/infrastructure/";
replace = true;
serviceOverrides = {
ReadWritePaths = [
@@ -281,12 +278,8 @@
ProtectSystem = false;
PrivateMounts = false;
PrivateUsers = false;
#DynamicUser = true;
#NoNewPrivileges = false;
ProtectHome = false;
#RuntimeDirectoryPreserve = "yes";
Restart = lib.mkForce "always";
#RuntimeMaxSec = "7d";
};
extraPackages = with pkgs; [
docker
@@ -295,18 +288,13 @@
sanoid
mbuffer
lzop
kubectl
kubernetes-helm
];
};
};
# services.cron = {
# enable = true;
# systemCronJobs = [
# "*/5 * * * * root date >> /tmp/cron.log"
# ];
# };
networking.firewall.enable = false;
# networking.firewall.trustedInterfaces = [ "docker0" ];
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions

View File

@@ -0,0 +1,132 @@
{ pkgs, lib, ... }:
{
services.gitea-actions-runner = {
instances.infrastructure = {
enable = true;
name = "infrastructure-runner";
url = "https://git.alexmickelson.guru";
tokenFile = "/data/runner/gitea-infrastructure-token.txt";
labels = [
"self-hosted"
"home-server"
"self-hosted:host"
"home-server:host"
"native:host"
];
hostPackages = with pkgs; [
bashNonInteractive
bash
coreutils
docker
git
git-secret
zfs
sanoid
mbuffer
lzop
kubectl
kubernetes-helm
curl
nodejs_24
openssl
gettext
];
settings = {
container = {
enabled = false;
};
runner = {
capacity = 5;
};
};
};
};
users.users.gitea-runner = {
isNormalUser = true;
description = "Gitea Actions Runner";
home = "/home/gitea-runner";
createHome = true;
group = "gitea-runner";
extraGroups = [ "docker" ];
packages = with pkgs; [
kubernetes-helm
nodejs_24
openssl
gettext
];
shell = pkgs.bash;
};
users.groups.gitea-runner = { };
security.sudo.extraRules = [
{
users = [ "gitea-runner" ];
commands = [
{
command = "/run/current-system/sw/bin/nix-collect-garbage";
options = [ "NOPASSWD" "SETENV" ];
}
];
}
];
system.activationScripts.zfs-delegate-gitea-runner = {
text =
let
poolNames = [ "data-ssd" "backup" ];
permissions = "compression,create,destroy,mount,mountpoint,receive,rollback,send,snapshot,hold";
in
''
${lib.concatMapStringsSep "\n" (pool:
"${pkgs.zfs}/bin/zfs allow -u gitea-runner ${permissions} ${pool} || true"
) poolNames}
'';
deps = [ ];
};
systemd.services.gitea-runner-infrastructure.serviceConfig = {
WorkingDirectory = lib.mkForce "/var/lib/gitea-runner/infrastructure";
User = lib.mkForce "gitea-runner";
Group = lib.mkForce "gitea-runner";
Environment = lib.mkForce [
"PATH=/run/wrappers/bin:/etc/profiles/per-user/gitea-runner/bin:/run/current-system/sw/bin"
"NIX_PATH=nixpkgs=${pkgs.path}"
];
DynamicUser = lib.mkForce false;
PrivateDevices = lib.mkForce false;
PrivateMounts = lib.mkForce false;
PrivateTmp = lib.mkForce false;
PrivateUsers = lib.mkForce false;
ProtectClock = lib.mkForce false;
ProtectControlGroups = lib.mkForce false;
ProtectHome = lib.mkForce false;
ProtectHostname = lib.mkForce false;
ProtectKernelLogs = lib.mkForce false;
ProtectKernelModules = lib.mkForce false;
ProtectKernelTunables = lib.mkForce false;
ProtectProc = lib.mkForce "default";
ProtectSystem = lib.mkForce false;
NoNewPrivileges = lib.mkForce false;
RestrictNamespaces = lib.mkForce false;
RestrictRealtime = lib.mkForce false;
RestrictSUIDSGID = lib.mkForce false;
RemoveIPC = lib.mkForce false;
LockPersonality = lib.mkForce false;
SystemCallFilter = lib.mkForce [ ];
RestrictAddressFamilies = lib.mkForce [ ];
ReadWritePaths = lib.mkForce [ ];
BindReadOnlyPaths = lib.mkForce [ ];
DeviceAllow = lib.mkForce [ "/dev/zfs rw" ];
DevicePolicy = lib.mkForce "auto";
Restart = lib.mkForce "always";
};
systemd.services.gitea-runner-infrastructure.path = [ pkgs.sudo ];
}

View File

@@ -6,17 +6,25 @@
enable = true;
role = "server";
extraFlags = toString [
# "--debug" # Optionally add additional args to k3s
"--disable=traefik"
"--node-ip=100.122.128.107"
"--bind-address 100.122.128.107"
"--node-external-ip 100.122.128.107"
"--tls-san 100.122.128.107"
# Disable disk-based evictions
"--kubelet-arg=eviction-hard="
"--kubelet-arg=eviction-soft="
"--kubelet-arg=eviction-soft-grace-period="
"--kubelet-arg=eviction-pressure-transition-period=0s"
];
serverAddr = "https://100.122.128.107:6443";
};
networking.firewall.allowedTCPPorts = [
443
80
10250
];
networking.firewall.allowedUDPPorts = [
443

View File

@@ -1,4 +1,3 @@
{ config, pkgs, ... }:
{
@@ -13,6 +12,21 @@
# "uinput"
# ];
boot.kernelPackages = pkgs.linuxPackages_6_6;
# boot.kernelPackages = pkgs.linuxPackages_6_1;
services.xserver.enable = true;
services.xserver.displayManager.gdm = {
enable = true;
wayland = false;
};
services.xserver.desktopManager.gnome.enable = true;
#boot.kernelParams = [
# "amdgpu.discovery=1"
#];
hardware.enableRedistributableFirmware = true;
# networking.wireless.enable = true; # Enables wireless support via wpa_supplicant.
networking.networkmanager.enable = true;
@@ -32,9 +46,6 @@
LC_TIME = "en_US.UTF-8";
};
services.xserver.enable = true;
services.displayManager.gdm.enable = true;
services.desktopManager.gnome.enable = true;
services.xserver.xkb = {
layout = "us";
variant = "";
@@ -64,6 +75,7 @@
programs.firefox.enable = true;
nixpkgs.config.allowUnfree = true;
services.fwupd.enable = true;
environment.systemPackages = with pkgs; [
vim
@@ -81,6 +93,9 @@
libcec
flirc
kdePackages.kdeconnect-kde
];
services.openssh.enable = true;
services.tailscale.enable = true;
@@ -101,6 +116,6 @@
systemd.targets.hibernate.enable = false;
systemd.targets.hybrid-sleep.enable = false;
system.stateVersion = "24.05"; # Did you read the comment?
system.stateVersion = "25.11"; # Did you read the comment?
}

View File

@@ -5,7 +5,7 @@ jobs:
runs-on: [home-server]
steps:
- name: checkout repo
working-directory: /home/github/infrastructure
working-directory: /home/gitea-runner/infrastructure
run: |
if [ -d "infrastructure" ]; then
cd infrastructure
@@ -26,7 +26,7 @@ jobs:
GRAFANA_PASSWORD: ${{ secrets.GRAFANA_PASSWORD }}
CLOUDFLARE_CONFIG: ${{ secrets.CLOUDFLARE_CONFIG }}
COPILOT_TOKEN: ${{ secrets.COPILOT_TOKEN }}
working-directory: /home/github/infrastructure/infrastructure
working-directory: /home/gitea-runner/infrastructure/infrastructure
run: |
# echo "$CLOUDFLARE_CONFIG" > /data/cloudflare/cloudflare.ini
cd home-server
@@ -42,7 +42,7 @@ jobs:
# runs-on: [home-server]
# needs: update-repo
# steps:
# - working-directory: /home/github/infrastructure/infrastructure
# - working-directory: /home/gitea-runner/infrastructure/infrastructure
# run: |
# cd dns
# docker compose pull