Compare commits

..

173 Commits

Author SHA1 Message Date
3251639b39 coblemmon
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Cleanup Docker / cleanup-docker (push) Successful in 0s
Cleanup Docker / notify-on-failure (push) Has been skipped
Cleanup NixOS Generations / cleanup-generations (push) Successful in 2s
Cleanup NixOS Generations / notify-on-failure (push) Has been skipped
ZFS Backup / update-infrastructure (push) Successful in 50s
ZFS Backup / notify-on-failure (push) Has been skipped
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 5s
Libation / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 1s
Manage Jellyfin Playlists / run-python (push) Successful in 48s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-03-11 19:08:13 -06:00
1b21f2c962 coblemmon
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-11 19:07:19 -06:00
04509ceade Merge branch 'main' of github.com:alexmickelson/infrastructure
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 1s
Manage Jellyfin Playlists / run-python (push) Successful in 53s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
ZFS Backup / update-infrastructure (push) Successful in 57s
ZFS Backup / notify-on-failure (push) Has been skipped
2026-03-11 18:58:08 -06:00
8c2143c3b2 coblemmon 2026-03-11 18:58:07 -06:00
5a3a8e053d lazydocker fixed now 2026-03-10 10:06:29 -06:00
5fb34c7188 fish updates 2026-03-10 09:29:17 -06:00
75d1bcf15f minecraft
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
ZFS Backup / notify-on-failure (push) Has been skipped
ZFS Backup / update-infrastructure (push) Successful in 50s
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 5s
Libation / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / run-python (push) Successful in 48s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-03-07 20:03:16 -07:00
a62d07ca6c minecraft
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-07 20:01:46 -07:00
3f5c9b24a4 minecraft
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-07 20:01:12 -07:00
4f26431fcb minecraft
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Manage Jellyfin Playlists / run-python (push) Successful in 51s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-03-07 19:59:07 -07:00
d9083651c2 minecraft
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-07 19:55:30 -07:00
5fc9da84d3 minecraft
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-07 19:54:55 -07:00
0ca2ab2401 minecraft
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-07 19:53:46 -07:00
d24a905516 more elixir dev tools 2026-03-05 12:31:49 -07:00
9bf0cabd8d landing updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Libation / update-repo (push) Successful in 0s
ZFS Backup / update-infrastructure (push) Successful in 1m17s
ZFS Backup / notify-on-failure (push) Has been skipped
Libation / sync-audiobooks (push) Successful in 5s
Libation / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 1s
Cleanup NixOS Generations / cleanup-generations (push) Successful in 8s
Cleanup NixOS Generations / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / run-python (push) Successful in 47s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-03-05 11:00:58 -07:00
ceb89d92fe landing updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 1s
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 5s
Libation / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / run-python (push) Successful in 47s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-03-05 10:59:46 -07:00
ee5966306a landing updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:56:38 -07:00
01b92733c3 landing updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:53:05 -07:00
e52ae3f451 landing updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:51:01 -07:00
567a59f9b1 landing updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:47:55 -07:00
4d6357cc74 landing page updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:45:09 -07:00
ab27bb1183 landing page updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:36:32 -07:00
a5e2ce944e landing page updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:33:56 -07:00
9f9a2fdc2c more trying to change comfigmap
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:27:27 -07:00
169dc7e2bf landing page updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:24:54 -07:00
7e1ed7cf54 color vars
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:20:12 -07:00
dad37e8015 create configmap from files 2026-03-05 10:19:56 -07:00
e32f08391b new page
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:17:15 -07:00
60c633a1db new page
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:15:40 -07:00
61fa5e4e33 new page
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 4s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:13:57 -07:00
a4f49c42f7 new page
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:08:51 -07:00
1611df4ec8 trying landing page
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 10:06:45 -07:00
641c6bd5c3 gitea stuff 2026-03-05 09:41:01 -07:00
95beb54b32 ntfy
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 1s
Manage Jellyfin Playlists / run-python (push) Successful in 47s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-03-05 09:39:59 -07:00
b6a8d96585 ntfy 2026-03-05 09:36:24 -07:00
d37726fcc9 cloudflare and ntfy
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 09:35:13 -07:00
cce76cdbc2 no healthcheck
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 09:33:18 -07:00
7242f64b0c cloudflare tunnel ingress
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-03-05 09:28:52 -07:00
b074a02edf Merge branch 'main' of github.com:alexmickelson/infrastructure 2026-03-04 10:02:54 -07:00
d36486c935 more packages 2026-03-04 10:02:53 -07:00
906b6d6c0d back in time 2026-03-03 21:48:53 -07:00
e08252dc17 no wayland again 2026-03-03 21:46:41 -07:00
695a6723ce no wayland again 2026-03-03 21:43:33 -07:00
b2fdc5a3c4 other kernel 2026-03-03 21:41:16 -07:00
7ec08abcb2 other kernel 2026-03-03 21:41:09 -07:00
f0b6b7b08f try without wayland 2026-03-03 21:38:44 -07:00
768a7cf235 try without wayland 2026-03-03 21:37:54 -07:00
b0f36e989c mesa 2026-03-03 21:34:44 -07:00
bfc60bf27c node path 2026-03-03 15:39:02 -07:00
6301d82dff node path 2026-03-03 15:37:33 -07:00
fe10f7615c node path 2026-03-03 15:35:57 -07:00
b6b19a3950 node path 2026-03-03 15:34:24 -07:00
a79f524b6c Merge branch 'main' of github.com:alexmickelson/infrastructure
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
ZFS Backup / update-infrastructure (push) Successful in 2m9s
ZFS Backup / notify-on-failure (push) Has been skipped
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 4s
Libation / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / run-python (push) Successful in 46s
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-03-03 15:32:19 -07:00
2e9b40fba0 node 2026-03-03 15:32:18 -07:00
6eeaed33a4 Update tv-computer.nix 2026-03-02 18:00:50 -07:00
6e6c1dc530 Update tv-computer.nix 2026-03-02 17:56:44 -07:00
c9fc909727 merging 2026-03-02 16:57:15 -07:00
ffc69352fa tv-computer 2026-03-02 16:55:25 -07:00
660be9736b no fprint 2026-03-02 15:33:47 -07:00
5daa737dab Merge branch 'main' of github.com:alexmickelson/infrastructure 2026-02-28 15:02:58 -07:00
80b48ca458 fprintd 2026-02-28 15:02:57 -07:00
096cf1cc2d watchman 2026-02-26 11:16:07 -07:00
5d2f7b5ce0 inotify 2026-02-26 11:14:36 -07:00
4470db7960 updates 2026-02-24 19:16:44 -07:00
f7accecaae elixir at work 2026-02-23 16:04:30 -07:00
758e0fb3ba cleanup docker
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Cleanup NixOS Generations / cleanup-generations (push) Successful in 2s
Cleanup NixOS Generations / notify-on-failure (push) Has been skipped
Cleanup Docker / cleanup-docker (push) Successful in 1s
Cleanup Docker / notify-on-failure (push) Has been skipped
ZFS Backup / notify-on-failure (push) Has been skipped
ZFS Backup / update-infrastructure (push) Successful in 1m43s
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 5s
Libation / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 1s
Manage Jellyfin Playlists / run-python (push) Successful in 52s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-02-18 21:11:55 -07:00
e1fb378cd3 updates 2026-02-18 21:09:09 -07:00
cedd54f901 updates 2026-02-18 21:08:00 -07:00
49156df8b4 updates 2026-02-18 21:06:47 -07:00
f7990beee6 updates 2026-02-18 21:05:55 -07:00
6deeb3d2a7 updates 2026-02-18 21:04:54 -07:00
e1673c5f10 updates 2026-02-18 21:04:19 -07:00
e42a65cc6e updates 2026-02-18 21:01:50 -07:00
f72966f229 automated garbage collection
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-02-18 21:00:32 -07:00
b64dd151ff more notifications
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Manage Jellyfin Playlists / run-python (push) Successful in 48s
Manage Jellyfin Playlists / notify-on-failure (push) Has been skipped
2026-02-18 20:48:17 -07:00
cb1cfa5c78 removed artificial failure
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-02-18 20:46:56 -07:00
870e26f0e7 new notification updates
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Successful in 0s
2026-02-18 20:45:24 -07:00
02bbb0e425 new notification updates
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Successful in 0s
2026-02-18 20:43:20 -07:00
0fe208cce1 secrets
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Successful in 0s
2026-02-18 20:40:54 -07:00
206f2671a6 secrets
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Successful in 0s
2026-02-18 20:38:54 -07:00
36db78a8bd real fix
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Failing after 0s
2026-02-18 20:38:27 -07:00
fafdcae679 real fix
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Failing after 0s
2026-02-18 20:37:14 -07:00
893f20663a real fix
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Failing after 0s
2026-02-18 20:36:20 -07:00
8a6ec2fe5e real fix
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Failing after 0s
2026-02-18 20:35:34 -07:00
6fb8c6c6f6 fix fail
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Failing after 0s
2026-02-18 20:35:05 -07:00
2657217d93 force fail
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
Apply Kuberentes Configs / notify-on-failure (push) Successful in 0s
2026-02-18 20:31:26 -07:00
ffda56c3e9 curl 2026-02-18 20:29:15 -07:00
fab75ba547 try again
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 3s
Apply Kuberentes Configs / notify-on-failure (push) Successful in 0s
2026-02-18 20:26:00 -07:00
3b397600fe force fail
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 3s
Apply Kuberentes Configs / notify-on-failure (push) Failing after 1s
2026-02-18 20:24:44 -07:00
659849a652 notify ntfy on fail
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
Apply Kuberentes Configs / notify-on-failure (push) Has been skipped
2026-02-18 20:24:02 -07:00
a12f1dd9fe updates 2026-02-16 21:17:37 -07:00
2443e4383c trying again
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 3s
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 4s
ZFS Backup / update-infrastructure (push) Successful in 52s
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Manage Jellyfin Playlists / run-python (push) Successful in 47s
2026-02-15 19:09:09 -07:00
72734bd734 homepage migration
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
2026-02-15 19:07:46 -07:00
b768860289 cleaning up
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Manage Jellyfin Playlists / run-python (push) Successful in 49s
2026-02-15 18:23:18 -07:00
b8a80d9290 no proxy
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
2026-02-15 18:22:12 -07:00
183ae6f91f kube
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 0s
2026-02-15 18:21:20 -07:00
46b710252e jellyfin 2026-02-15 18:20:55 -07:00
76708b98da no proxy
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Manage Jellyfin Playlists / run-python (push) Successful in 49s
ZFS Backup / update-infrastructure (push) Successful in 1m12s
2026-02-15 17:55:27 -07:00
ea983af9a4 copilot
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
2026-02-15 17:54:51 -07:00
e6d53e33df more kubernetes apply
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-15 17:49:48 -07:00
7f7309d2a3 kubeconfig
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-15 17:48:37 -07:00
c4273f5e63 kubernetes apply
Some checks failed
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 1s
2026-02-15 17:47:54 -07:00
f8006a4595 audiobook 2026-02-15 17:40:00 -07:00
108cfa79b7 musicassistant 2026-02-15 17:30:12 -07:00
7b0148696c remove zwave 2026-02-15 17:18:55 -07:00
d531f8c44a zwave 2026-02-15 17:18:35 -07:00
6e83dea4a3 home assistant in kube
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
Manage Jellyfin Playlists / update-repo (push) Successful in 0s
Manage Jellyfin Playlists / run-python (push) Successful in 50s
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 5s
2026-02-15 10:42:27 -07:00
90076edfac updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-10 14:34:24 -07:00
721ae13de2 creds
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
ZFS Backup / update-infrastructure (push) Successful in 48s
Libation / update-repo (push) Successful in 0s
Libation / sync-audiobooks (push) Successful in 5s
Manage Jellyfin Playlists / update-repo (push) Successful in 1s
Manage Jellyfin Playlists / run-python (push) Successful in 51s
2026-02-07 15:26:55 -07:00
41876c6347 playlists in gitea now
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:25:57 -07:00
4268297107 playlists in gitea now
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:25:26 -07:00
b6d48e8f3c playlists in gitea now
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:25:04 -07:00
92e3915d94 playlists in gitea now
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:24:32 -07:00
d121a5f179 no servie
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:21:46 -07:00
8dfc29071e libation sync
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:20:47 -07:00
faf0ac890a runs-on
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 2s
2026-02-07 15:14:18 -07:00
b9ec61015b runs-on
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 2s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:13:31 -07:00
040a7e50ce runs-on
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 2s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 2s
2026-02-07 15:12:47 -07:00
8bf14fbdf6 capacity
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 1s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 15:11:40 -07:00
54a0a804a9 capacity 2026-02-07 15:10:41 -07:00
0dc1f3e7aa zfs permissions 2026-02-07 15:00:14 -07:00
2df709af93 zfs permissions 2026-02-07 15:00:00 -07:00
cae3fdf479 zfs backup via gitea
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 2s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 14:54:31 -07:00
3ce79c4d5b gitea updates
All checks were successful
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 14:49:27 -07:00
cbd2f12189 split out repo
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 1m7s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 14:44:52 -07:00
ac3c221711 simplify 2026-02-07 14:42:07 -07:00
7e30a419db kubectl
All checks were successful
Apply Kuberentes Configs / test-environment (push) Successful in 0s
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Successful in 1s
2026-02-07 14:26:02 -07:00
7951a86cb3 environment
Some checks failed
Apply Kuberentes Configs / test-environment (push) Successful in 0s
Apply Kuberentes Configs / update-repo (push) Successful in 0s
Apply Kuberentes Configs / update-infrastructure (push) Failing after 0s
2026-02-07 14:15:00 -07:00
ef3002e328 environment
Some checks failed
Apply Kuberentes Configs / test-environment (push) Failing after 1s
Apply Kuberentes Configs / update-repo (push) Failing after 1s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
2026-02-07 14:12:24 -07:00
6e9d586b9c gitea runner 2026-02-07 14:10:20 -07:00
78bf6e2cce gitea runner 2026-02-07 14:08:10 -07:00
91e94da379 gitea runner 2026-02-07 14:07:16 -07:00
3d9a162b1c gitea runner 2026-02-07 14:05:25 -07:00
2f176f9474 gitea runner 2026-02-07 14:03:42 -07:00
bc9d243c28 workflow
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 0s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
2026-02-07 14:02:23 -07:00
5484553a87 environment 2026-02-07 14:02:00 -07:00
5f55fe11a3 environment 2026-02-07 13:59:39 -07:00
eb111fb5f8 environment 2026-02-07 13:54:49 -07:00
0ce24bad4e environment 2026-02-07 13:46:35 -07:00
d8ab3f161f environment 2026-02-07 13:45:35 -07:00
4117e4d46e environment 2026-02-07 13:44:09 -07:00
576a404aaf environment 2026-02-07 13:43:57 -07:00
cbf2241895 environment 2026-02-07 13:43:15 -07:00
cb8bff7c0a environment 2026-02-07 13:41:21 -07:00
a308b23380 environment 2026-02-07 13:39:59 -07:00
d39ab4b5f2 environment 2026-02-07 13:38:57 -07:00
5b50211103 environment 2026-02-07 13:37:39 -07:00
1f92a821fb environment 2026-02-07 13:37:27 -07:00
0ab0e939f3 environment 2026-02-07 13:36:49 -07:00
1a32a3d826 environment 2026-02-07 13:36:01 -07:00
474b0ac5ad environment 2026-02-07 13:34:47 -07:00
01f0524153 only gitea apply kube
Some checks failed
Apply Kuberentes Configs / update-repo (push) Failing after 0s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
2026-02-07 13:32:27 -07:00
109d31e210 Update tv-computer.nix 2026-01-31 09:33:19 -07:00
36bafd2602 Update tv-computer.nix 2026-01-31 09:32:49 -07:00
c9646e20ae Update tv-computer.nix 2026-01-31 09:32:15 -07:00
438c4dcb2d test 2026-01-31 09:31:02 -07:00
a29d1d15a6 Merge branch 'main' of github.com:alexmickelson/infrastructure 2026-01-31 09:30:29 -07:00
62158a3cdb test 2026-01-31 09:30:08 -07:00
4fe32a1600 Update tv-computer.nix 2026-01-31 09:27:40 -07:00
6d638e6fed trying again 2026-01-27 22:57:38 -07:00
8aab928228 trying again 2026-01-27 22:57:20 -07:00
af23a4089a trying again 2026-01-27 22:55:26 -07:00
1c3e28612f trying again 2026-01-27 22:54:30 -07:00
8948970733 restore esential github workflows
Some checks failed
Update home server containers / update-repo (push) Failing after 0s
Update home server containers / update-infrastructure (push) Has been skipped
Apply Kuberentes Configs / update-repo (push) Failing after 0s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
Libation / sync-audiobooks (push) Failing after 0s
Manage Jellyfin Playlists / run-python (push) Failing after 0s
ZFS Backup / update-infrastructure (push) Successful in 1m10s
2026-01-27 22:51:11 -07:00
2596129600 more stuff 2026-01-27 22:49:25 -07:00
13b2351075 more stuff 2026-01-27 22:46:17 -07:00
565572c869 more stuff
Some checks failed
Update home server containers / update-repo (push) Failing after 0s
Update home server containers / update-infrastructure (push) Has been skipped
Apply Kuberentes Configs / update-repo (push) Failing after 0s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
2026-01-27 22:44:40 -07:00
dcd8a8590d more stuff 2026-01-27 22:42:43 -07:00
7575c9c974 more stuff 2026-01-27 22:40:07 -07:00
90df48ccee more stuff 2026-01-27 22:38:09 -07:00
6b516697e2 more stuff 2026-01-27 22:37:01 -07:00
e7c403e35c labels
Some checks failed
Update home server containers / update-repo (push) Failing after 0s
Update home server containers / update-infrastructure (push) Has been skipped
Apply Kuberentes Configs / update-repo (push) Failing after 0s
Apply Kuberentes Configs / update-infrastructure (push) Has been skipped
2026-01-27 22:35:41 -07:00
b6a6a7ebe1 labels 2026-01-27 22:34:16 -07:00
54 changed files with 2243 additions and 1069 deletions

View File

@@ -2,35 +2,83 @@ name: Apply Kuberentes Configs
on: [push, workflow_dispatch] on: [push, workflow_dispatch]
jobs: jobs:
update-repo: update-repo:
runs-on: [home-server:host] uses: ./.gitea/workflows/update-repo.yml
steps: runs-on: home-server
- name: checkout repo
working-directory: /home/gitea-runner/infrastructure
run: |
if [ -d "infrastructure" ]; then
cd infrastructure
echo "Infrastructure folder exists. Resetting to the most recent commit."
git reset --hard HEAD
git pull https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }} $(git rev-parse --abbrev-ref HEAD)
else
git clone https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git
fi
update-infrastructure: update-infrastructure:
runs-on: [home-server:host] runs-on: home-server
needs: update-repo needs: update-repo-folder
steps:
- name: update home server containers
env: env:
KUBECONFIG: /home/gitea-runner/.kube/config KUBECONFIG: /home/gitea-runner/.kube/config
MY_GITHUB_TOKEN: ${{ secrets.MY_GITHUB_TOKEN }} defaults:
HOMEASSISTANT_TOKEN: ${{ secrets.HOMEASSISTANT_TOKEN }} run:
GRAFANA_PASSWORD: ${{ secrets.GRAFANA_PASSWORD }} working-directory: /home/gitea-runner/infrastructure
CLOUDFLARE_CONFIG: ${{ secrets.CLOUDFLARE_CONFIG }} steps:
COPILOT_TOKEN: ${{ secrets.COPILOT_TOKEN }} - name: update home server containers
working-directory: /home/gitea-runner/infrastructure/infrastructure
run: | run: |
# kubectl apply -f kubernetes/ingress
kubectl apply -f kubernetes/proxy-ingress kubectl apply -f kubernetes/proxy-ingress
kubectl annotate ingressclass nginx \ kubectl annotate ingressclass nginx \
ingressclass.kubernetes.io/is-default-class="true" --overwrite ingressclass.kubernetes.io/is-default-class="true" --overwrite
- name: audiobookshelf
run: |
kubectl apply -f kubernetes/audiobookshelf/
- name: home assistant
run: |
kubectl apply -f kubernetes/homeassistant/
- name: copilot
run: |
kubectl create secret generic copilot-secret \
-n copilot \
--from-literal=token=${{ secrets.COPILOT_SECRET }} \
--dry-run=client -o yaml | kubectl apply -f -
kubectl apply -f kubernetes/copilot/
- name: jellyfin
run: |
kubectl apply -f kubernetes/jellyfin/
- name: minecraft
run: |
kubectl apply -f kubernetes/minecraft/
- name: homepage
run: |
kubectl apply -f kubernetes/homepage/
kubectl rollout restart deployment/homepage -n homepage
- name: gitea
env:
CLOUDFLARED_GITEA_TOKEN: ${{ secrets.CLOUDFLARED_GITEA_TOKEN }}
run: |
kubectl apply -f kubernetes/gitea/namespace.yml
kubectl create configmap gitea-landing-page \
-n gitea \
--from-file=home.tmpl=kubernetes/gitea/landingpage.html \
--from-file=custom-landing.css=kubernetes/gitea/landingpage.css \
--from-file=custom-landing.js=kubernetes/gitea/landingpage.js \
--dry-run=client -o yaml | kubectl apply -f -
for file in kubernetes/gitea/*.yml; do
cat "$file" | envsubst | kubectl apply -f -
done
kubectl rollout restart deployment/gitea-web -n gitea
notify-on-failure:
runs-on: home-server
needs: update-infrastructure
if: failure()
uses: ./.gitea/workflows/notify-ntfy.yml
secrets:
NTFY_CHANNEL: ${{ secrets.NTFY_CHANNEL }}
with:
title: "Kubernetes Apply Failed"
message: |
Failed to apply kubernetes configs
action_url: "https://git.alexmickelson.guru/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}"
priority: "high"
tags: "rotating_light,kubernetes"

View File

@@ -5,7 +5,7 @@ on:
workflow_dispatch: workflow_dispatch:
jobs: jobs:
update-infrastructure: update-infrastructure:
runs-on: [self-hosted, home-server] runs-on: [home-server]
steps: steps:
- name: run syncoid - name: run syncoid
run: | run: |
@@ -24,23 +24,18 @@ jobs:
--no-privilege-elevation \ --no-privilege-elevation \
data-ssd/media \ data-ssd/media \
backup/media backup/media
# steps:
# - name: run syncoid
# run: |
# zpool status
# echo ""
# zfs list
# echo ""
# syncoid \
# --recursive \
# --no-privilege-elevation \
# --no-rollback \
# data-ssd/data \
# backup/data
# syncoid \ notify-on-failure:
# --recursive \ runs-on: home-server
# --no-privilege-elevation \ needs: update-infrastructure
# --no-rollback \ if: failure()
# data-ssd/media \ uses: ./.gitea/workflows/notify-ntfy.yml
# backup/media secrets:
NTFY_CHANNEL: ${{ secrets.NTFY_CHANNEL }}
with:
title: "ZFS Backup Failed"
message: |
Failed to backup ZFS datasets
action_url: "https://git.alexmickelson.guru/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}"
priority: "high"
tags: "rotating_light,backup"

View File

@@ -1,23 +0,0 @@
name: Beets
on:
# schedule:
# # Run 4 times a day: 6am, 12pm, 6pm, 12am UTC
# - cron: '0 6,12,18,0 * * *'
workflow_dispatch: # Allow manual trigger
jobs:
sync-beets:
runs-on: [home-server]
steps:
- name: Run Beets sync
working-directory: /home/gitea-runner/infrastructure/infrastructure/home-server/beets
run: |
git pull
docker compose pull -q
docker compose up -d
docker compose restart
sleep 2
docker exec -u 1000 beets bash -c 'beet -v import -i -q /managed/*' || true
# Clean up empty directories after import (but not /managed itself)
docker exec -u 1000 beets bash -c 'find /managed -mindepth 1 -type d -empty -delete' || true
echo "Beets sync completed"

View File

@@ -0,0 +1,31 @@
name: Cleanup Docker
on:
schedule:
- cron: '0 3 1,15 * *' # 1st and 15th of every month at 3am
workflow_dispatch:
jobs:
cleanup-docker:
runs-on: [home-server]
steps:
- name: Cleanup Docker resources
run: |
echo ""
echo "Removing unused images..."
docker image prune -a -f --filter "until=336h"
notify-on-failure:
runs-on: home-server
needs: cleanup-docker
if: failure()
uses: ./.gitea/workflows/notify-ntfy.yml
secrets:
NTFY_CHANNEL: ${{ secrets.NTFY_CHANNEL }}
with:
title: "Docker Cleanup Failed"
message: |
Failed to cleanup Docker resources
action_url: "https://git.alexmickelson.guru/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}"
priority: "high"
tags: "rotating_light,docker"

View File

@@ -0,0 +1,29 @@
name: Cleanup NixOS Generations
on:
schedule:
- cron: '0 2 * * 0' # Every Sunday at 2am
workflow_dispatch:
jobs:
cleanup-generations:
runs-on: [home-server]
steps:
- name: Cleanup old NixOS generations
run: |
echo "Deleting generations older than 7 days..."
sudo nix-collect-garbage --delete-older-than 7d
notify-on-failure:
runs-on: home-server
needs: cleanup-generations
if: failure()
uses: ./.gitea/workflows/notify-ntfy.yml
secrets:
NTFY_CHANNEL: ${{ secrets.NTFY_CHANNEL }}
with:
title: "NixOS Cleanup Failed"
message: |
Failed to cleanup old NixOS generations
action_url: "https://git.alexmickelson.guru/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}"
priority: "high"
tags: "rotating_light,nixos"

View File

@@ -1,18 +1,35 @@
name: Libation name: Libation
on: on:
schedule: schedule:
# Run 4 times a day: 6am, 12pm, 6pm, 12am UTC
- cron: '0 6,12,18,0 * * *' - cron: '0 6,12,18,0 * * *'
workflow_dispatch: # Allow manual trigger workflow_dispatch:
jobs: jobs:
update-repo:
uses: ./.gitea/workflows/update-repo.yml
runs-on: home-server
sync-audiobooks: sync-audiobooks:
runs-on: [home-server] runs-on: [home-server]
steps: steps:
- name: Run Libation sync - name: Run Libation sync
working-directory: /home/gitea-runner/infrastructure/infrastructure/home-server/libation working-directory: /home/gitea-runner/infrastructure/home-server/libation
run: | run: |
echo "Starting Libation audiobook sync at $(date)" echo "Starting Libation audiobook sync at $(date)"
docker compose pull -q docker compose pull -q
docker compose run --rm libation docker compose run --rm libation
echo "Libation sync completed at $(date)" echo "Libation sync completed at $(date)"
notify-on-failure:
runs-on: home-server
needs: sync-audiobooks
if: failure()
uses: ./.gitea/workflows/notify-ntfy.yml
secrets:
NTFY_CHANNEL: ${{ secrets.NTFY_CHANNEL }}
with:
title: "Libation Sync Failed"
message: |
Failed to sync audiobooks with Libation
action_url: "https://git.alexmickelson.guru/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}"
priority: "high"
tags: "rotating_light,audiobooks"

View File

@@ -0,0 +1,29 @@
name: deploy minecraft
on: [workflow_dispatch]
jobs:
minecraft:
runs-on: home-server
env:
KUBECONFIG: /home/gitea-runner/.kube/config
defaults:
run:
working-directory: /home/gitea-runner/infrastructure
steps:
- name: checkout repo
working-directory: /home/gitea-runner
run: |
if [ -d "infrastructure" ]; then
cd infrastructure
echo "Infrastructure folder exists. Resetting to the most recent commit."
git reset --hard HEAD
git pull https://x-access-token:${{ secrets.GITEA_TOKEN }}@git.alexmickelson.guru/${{ gitea.repository }} $(git rev-parse --abbrev-ref HEAD)
else
git clone https://x-access-token:${{ secrets.GITEA_TOKEN }}@git.alexmickelson.guru/${{ gitea.repository }}.git
fi
- name: deploy minecraft
env:
CF_API_KEY: ${{ secrets.CF_API_KEY }}
run: |
for file in kubernetes/minecraft/*.yml; do
cat "$file" | envsubst | kubectl apply -f -
done

View File

@@ -0,0 +1,51 @@
name: Notify NTFY
on:
workflow_dispatch:
inputs:
title:
required: true
type: string
message:
required: true
type: string
priority:
required: false
type: string
default: "default"
tags:
required: false
type: string
default: "warning"
action_url:
required: false
type: string
default: ""
jobs:
send-notification:
runs-on: [home-server]
env:
NTFY_CHANNEL: ${{ secrets.NTFY_CHANNEL }}
steps:
- name: Send ntfy notification
working-directory: /home/gitea-runner
run: |
set -e
if [ -n "${{ inputs.action_url }}" ]; then
cat <<EOF | curl -f -H "Title: ${{ inputs.title }}" \
-H "Priority: ${{ inputs.priority }}" \
-H "Tags: ${{ inputs.tags }}" \
-H "Actions: view, View Logs, ${{ inputs.action_url }}" \
--data-binary "@-" \
"https://ntfy.sh/$NTFY_CHANNEL"
${{ inputs.message }}
EOF
else
cat <<EOF | curl -f -H "Title: ${{ inputs.title }}" \
-H "Priority: ${{ inputs.priority }}" \
-H "Tags: ${{ inputs.tags }}" \
--data-binary "@-" \
"https://ntfy.sh/$NTFY_CHANNEL"
${{ inputs.message }}
EOF
fi

View File

@@ -2,28 +2,21 @@ name: Manage Jellyfin Playlists
on: on:
workflow_dispatch: workflow_dispatch:
schedule: schedule:
- cron: '0 * * * *' - cron: "0 * * * *"
jobs: jobs:
update-repo:
uses: ./.gitea/workflows/update-repo.yml
runs-on: home-server
run-python: run-python:
runs-on: [self-hosted, home-server] runs-on: home-server
steps: steps:
- name: checkout repo
working-directory: /home/gitea-runner/infrastructure
run: |
if [ -d "infrastructure" ]; then
cd infrastructure
echo "Infrastructure folder exists. Resetting to the most recent commit."
git reset --hard HEAD
git pull https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }} $(git rev-parse --abbrev-ref HEAD)
else
git clone https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git
fi
- name: Run Python script - name: Run Python script
env: env:
JELLYFIN_USER: ${{ secrets.JELLYFIN_USER }} JELLYFIN_USER: ${{ secrets.JELLYFIN_USER }}
JELLYFIN_PASSWORD: ${{ secrets.JELLYFIN_PASSWORD }} JELLYFIN_PASSWORD: ${{ secrets.JELLYFIN_PASSWORD }}
working-directory: /home/gitea-runner/infrastructure/infrastructure working-directory: /home/gitea-runner/infrastructure
run: | run: |
echo "$JELLYFIN_USER $JELLYFIN_PASSWORD" > /home/gitea-runner/jellyfin_credentials.txt
docker build -t jellyfin_management -f jellyfin/Dockerfile . docker build -t jellyfin_management -f jellyfin/Dockerfile .
docker run --rm \ docker run --rm \
-e JELLYFIN_USER=$JELLYFIN_USER \ -e JELLYFIN_USER=$JELLYFIN_USER \
@@ -35,3 +28,18 @@ jobs:
-e JELLYFIN_PASSWORD=$JELLYFIN_PASSWORD \ -e JELLYFIN_PASSWORD=$JELLYFIN_PASSWORD \
jellyfin_management \ jellyfin_management \
-m jellyfin.update_unindexed -m jellyfin.update_unindexed
notify-on-failure:
runs-on: home-server
needs: run-python
if: failure()
uses: ./.gitea/workflows/notify-ntfy.yml
secrets:
NTFY_CHANNEL: ${{ secrets.NTFY_CHANNEL }}
with:
title: "Jellyfin Playlist Update Failed"
message: |
Failed to update Jellyfin playlists
action_url: "https://git.alexmickelson.guru/${{ gitea.repository }}/actions/runs/${{ gitea.run_number }}"
priority: "high"
tags: "rotating_light,jellyfin"

View File

@@ -0,0 +1,18 @@
name: Update Repository
on:
workflow_call:
jobs:
update-repo-folder:
runs-on: [home-server]
steps:
- name: checkout repo
working-directory: /home/gitea-runner
run: |
if [ -d "infrastructure" ]; then
cd infrastructure
echo "Infrastructure folder exists. Resetting to the most recent commit."
git reset --hard HEAD
git pull https://x-access-token:${{ secrets.GITEA_TOKEN }}@git.alexmickelson.guru/${{ gitea.repository }} $(git rev-parse --abbrev-ref HEAD)
else
git clone https://x-access-token:${{ secrets.GITEA_TOKEN }}@git.alexmickelson.guru/${{ gitea.repository }}.git
fi

1
gitea/.gitignore vendored
View File

@@ -1 +0,0 @@
data/

View File

@@ -1,98 +0,0 @@
# Example configuration file, it's safe to copy this as the default config file without any modification.
# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.
log:
# The level of logging, can be trace, debug, info, warn, error, fatal
level: info
runner:
# Where to store the registration result.
file: .runner
# Execute how many tasks concurrently at the same time.
capacity: 1
# Extra environment variables to run jobs.
envs:
A_TEST_ENV_NAME_1: a_test_env_value_1
A_TEST_ENV_NAME_2: a_test_env_value_2
# Extra environment variables to run jobs from a file.
# It will be ignored if it's empty or the file doesn't exist.
env_file: .env
# The timeout for a job to be finished.
# Please note that the Gitea instance also has a timeout (3h by default) for the job.
# So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
timeout: 3h
# Whether skip verifying the TLS certificate of the Gitea instance.
insecure: false
# The timeout for fetching the job from the Gitea instance.
fetch_timeout: 5s
# The interval for fetching the job from the Gitea instance.
fetch_interval: 2s
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: "macos-arm64:host" or "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
# Find more images provided by Gitea at https://gitea.com/gitea/runner-images .
# If it's empty when registering, it will ask for inputting labels.
# If it's empty when execute `daemon`, will use labels in `.runner` file.
labels:
- "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
- "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
- "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"
cache:
# Enable cache server to use actions/cache.
enabled: true
# The directory to store the cache data.
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
dir: ""
# The host of the cache server.
# It's not for the address to listen, but the address to connect from job containers.
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
host: ""
# The port of the cache server.
# 0 means to use a random available port.
port: 0
# The external cache server URL. Valid only when enable is true.
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
# The URL should generally end with "/".
external_server: ""
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, act_runner will create a network automatically.
network: host
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
options:
# The parent directory of a job's working directory.
# NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
# If the path starts with '/', the '/' will be trimmed.
# For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
# If it's empty, /workspace will be used.
workdir_parent:
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
# valid_volumes:
# - data
# - /src/*.json
# If you want to allow any volume, please use the following configuration:
# valid_volumes:
# - '**'
valid_volumes: []
# overrides the docker client host with the specified one.
# If it's empty, act_runner will find an available docker host automatically.
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
docker_host: ""
# Pull docker image(s) even if already present
force_pull: true
# Rebuild docker image(s) even if already present
force_rebuild: false
host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent:

View File

@@ -1,47 +0,0 @@
services:
server:
image: gitea/gitea:1.22.2
container_name: gitea
environment:
- USER_UID=1000
- USER_GID=1000
- GITEA__database__DB_TYPE=postgres
- GITEA__database__HOST=db:5432
- GITEA__database__NAME=gitea
- GITEA__database__USER=gitea
- GITEA__database__PASSWD=gitea
restart: always
volumes:
- ./data/gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- 0.0.0.0:3000:3000
- 0.0.0.0:222:22
depends_on:
- db
db:
image: postgres:14
restart: always
environment:
- POSTGRES_USER=gitea
- POSTGRES_PASSWORD=gitea
- POSTGRES_DB=gitea
volumes:
- ./data/postgres:/var/lib/postgresql/data
runner:
image: gitea/act_runner:nightly
environment:
CONFIG_FILE: /config.yaml
GITEA_INSTANCE_URL: http://0.0.0.0:3000/
GITEA_RUNNER_REGISTRATION_TOKEN: SMANpMfJk5G4fTFmuEZ9zleTBcdrj4M3k3eDCW6e
GITEA_RUNNER_NAME: test-runner
GITEA_RUNNER_LABELS: label1
network_mode: host
volumes:
- ./config.yaml:/config.yaml
- ./data/runner:/data
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- server

View File

@@ -1,23 +1,23 @@
services: services:
jellyfin: # jellyfin:
image: jellyfin/jellyfin # image: jellyfin/jellyfin
container_name: jellyfin # container_name: jellyfin
user: 1000:1000 # user: 1000:1000
network_mode: "host" # network_mode: "host"
volumes: # volumes:
- /data/jellyfin/config:/config # - /data/jellyfin/config:/config
- /data/jellyfin/cache:/cache # - /data/jellyfin/cache:/cache
- /data/media/music/tagged:/music # - /data/media/music/tagged:/music
- /data/media/movies:/movies # - /data/media/movies:/movies
- /data/media/tvshows:/tvshows # - /data/media/tvshows:/tvshows
- /data/nextcloud/html/data/alex/files/Documents/home-video:/home-videos:ro # - /data/nextcloud/html/data/alex/files/Documents/home-video:/home-videos:ro
restart: "unless-stopped" # restart: "unless-stopped"
group_add: # group_add:
- "303" # getent group render | cut -d: -f3 # - "303" # getent group render | cut -d: -f3
devices: # devices:
- /dev/dri/renderD128:/dev/dri/renderD128 # - /dev/dri/renderD128:/dev/dri/renderD128
environment: # environment:
- JELLYFIN_PublishedServerUrl=https://jellyfin.alexmickelson.guru # - JELLYFIN_PublishedServerUrl=https://jellyfin.alexmickelson.guru
nextcloud: nextcloud:
build: build:
@@ -102,43 +102,6 @@ services:
restart: always restart: always
network_mode: host network_mode: host
zwave-js-ui:
container_name: zwave-js-ui
image: zwavejs/zwave-js-ui:latest
restart: always
tty: true
stop_signal: SIGINT
environment:
- SESSION_SECRET=iqpwoeinf9384bw3p48gbwer
- TZ=America/Denver
devices:
# Do not use /dev/ttyUSBX serial devices, as those mappings can change over time.
# Instead, use the /dev/serial/by-id/X serial device for your Z-Wave stick.
# - '/dev/serial/by-id/insert_stick_reference_here:/dev/zwave'
- /dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if00-port0:/dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if00-port0
- /dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if01-port0:/dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if01-port0
volumes:
- /data/zwave:/usr/src/app/store
ports:
- '3050:8091'
- '3051:3051'
music-assistant-server:
image: ghcr.io/music-assistant/server:2
container_name: music-assistant-server
restart: unless-stopped
network_mode: host
volumes:
- /data/music-assistant-server/data:/data/
# cap_add:
# - SYS_ADMIN
# - DAC_READ_SEARCH
# security_opt:
# - apparmor:unconfined
environment:
- LOG_LEVEL=info
prometheus: prometheus:
image: public.ecr.aws/bitnami/prometheus:2 image: public.ecr.aws/bitnami/prometheus:2
container_name: prometheus container_name: prometheus
@@ -173,70 +136,6 @@ services:
ports: ports:
- 3000:3000 - 3000:3000
# acpupsd_exporter:
# image: sfudeus/apcupsd_exporter:master_1.19
# container_name: apcupsd_exporter
# restart: always
# extra_hosts:
# - host.docker.internal:host-gateway
# command: -apcupsd.addr host.docker.internal:3551
# ports:
# - 0.0.0.0:9162:9162
# docker run -it --rm -p 9162:9162 --net=host sfudeus/apcupsd_exporter:master_1.19
# reverse-proxy:
# image: ghcr.io/linuxserver/swag
# container_name: reverse-proxy
# restart: unless-stopped
# cap_add:
# - NET_ADMIN
# environment:
# - PUID=1000
# - PGID=1000
# - TZ=America/Denver
# - URL=alexmickelson.guru
# - SUBDOMAINS=wildcard
# - VALIDATION=dns
# - DNSPLUGIN=cloudflare
# volumes:
# - ./nginx.conf:/config/nginx/site-confs/default.conf
# - /data/swag:/config
# - /data/cloudflare/cloudflare.ini:/config/dns-conf/cloudflare.ini
# ports:
# - 0.0.0.0:80:80
# - 0.0.0.0:443:443
# # - 0.0.0.0:7080:80
# # - 0.0.0.0:7443:443
# extra_hosts:
# - host.docker.internal:host-gateway
# networks:
# - proxy
audiobookshelf:
image: ghcr.io/advplyr/audiobookshelf:latest
restart: unless-stopped
ports:
- 13378:80
volumes:
- /data/media/audiobooks:/audiobooks
- /data/media/audiobooks-libation:/audiobooks-libation
- /data/audiobookshelf/config:/config
- /data/audiobookshelf/metadata:/metadata
networks:
- proxy
copilot-api:
image: node:latest
working_dir: /app
command: sh -c "npm cache clean --force && npx copilot-api@latest start --github-token $COPILOT_TOKEN --port 4444"
environment:
- COPILOT_TOKEN=${COPILOT_TOKEN}
ports:
- "4444:4444"
restart: unless-stopped
networks:
- proxy
esphome: esphome:
container_name: esphome container_name: esphome

View File

@@ -1,5 +1,3 @@
version: "3.8"
services: services:
libation: libation:
image: rmcrackan/libation:latest image: rmcrackan/libation:latest

View File

@@ -6,24 +6,24 @@ server {
return 301 https://$host$request_uri; return 301 https://$host$request_uri;
} }
server { # server {
listen 443 ssl; # listen 443 ssl;
listen [::]:443 ssl; # listen [::]:443 ssl;
server_name ha.alexmickelson.guru; # server_name ha.alexmickelson.guru;
include /config/nginx/ssl.conf; # include /config/nginx/ssl.conf;
include /config/nginx/proxy.conf; # include /config/nginx/proxy.conf;
include /config/nginx/resolver.conf; # include /config/nginx/resolver.conf;
location / { # location / {
proxy_pass http://host.docker.internal:8123; # proxy_pass http://host.docker.internal:8123;
proxy_set_header Host $host; # proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr; # proxy_set_header X-Real-IP $remote_addr;
proxy_http_version 1.1; # proxy_http_version 1.1;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Upgrade $http_upgrade; # proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade; # proxy_set_header Connection $connection_upgrade;
} # }
} # }
server { server {
listen 443 ssl; listen 443 ssl;
@@ -94,23 +94,23 @@ server {
} }
} }
server { # server {
listen 443 ssl; # listen 443 ssl;
listen [::]:443 ssl; # listen [::]:443 ssl;
server_name audiobook.alexmickelson.guru; # server_name audiobook.alexmickelson.guru;
location / { # location / {
proxy_pass http://audiobookshelf:80; # proxy_pass http://audiobookshelf:80;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme; # proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host; # proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade; # proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade"; # proxy_set_header Connection "upgrade";
proxy_http_version 1.1; # proxy_http_version 1.1;
} # }
} # }
# server { # server {
# listen 443 ssl; # listen 443 ssl;
@@ -176,24 +176,24 @@ server {
proxy_pass http://immich_server:2283; proxy_pass http://immich_server:2283;
} }
} }
server { # server {
listen 443 ssl; # listen 443 ssl;
listen [::]:443 ssl; # listen [::]:443 ssl;
server_name sound.alexmickelson.guru; # server_name sound.alexmickelson.guru;
location / { # location / {
proxy_pass http://host.docker.internal:8095; # proxy_pass http://host.docker.internal:8095;
proxy_set_header Host $host; # proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr; # proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme; # proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Protocol $scheme; # proxy_set_header X-Forwarded-Protocol $scheme;
proxy_set_header X-Forwarded-Host $http_host; # proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header Host $host; # proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade; # proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade"; # proxy_set_header Connection "upgrade";
} # }
} # }
server { server {
listen 443 ssl; listen 443 ssl;

View File

@@ -0,0 +1,95 @@
apiVersion: v1
kind: Namespace
metadata:
name: audiobookshelf
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: audiobookshelf
namespace: audiobookshelf
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: audiobookshelf
template:
metadata:
labels:
app: audiobookshelf
spec:
containers:
- name: audiobookshelf
image: ghcr.io/advplyr/audiobookshelf:latest
imagePullPolicy: Always
ports:
- containerPort: 80
hostPort: 13378
volumeMounts:
- name: audiobooks
mountPath: /audiobooks
- name: audiobooks-libation
mountPath: /audiobooks-libation
- name: config
mountPath: /config
- name: metadata
mountPath: /metadata
volumes:
- name: audiobooks
hostPath:
path: /data/media/audiobooks
type: DirectoryOrCreate
- name: audiobooks-libation
hostPath:
path: /data/media/audiobooks-libation
type: DirectoryOrCreate
- name: config
hostPath:
path: /data/audiobookshelf/config
type: DirectoryOrCreate
- name: metadata
hostPath:
path: /data/audiobookshelf/metadata
type: DirectoryOrCreate
---
apiVersion: v1
kind: Service
metadata:
name: audiobookshelf
namespace: audiobookshelf
spec:
selector:
app: audiobookshelf
ports:
- name: http
protocol: TCP
port: 13378
targetPort: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: audiobookshelf-ingress
namespace: audiobookshelf
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- audiobook.alexmickelson.guru
secretName: audiobookshelf-tls-cert
rules:
- host: audiobook.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: audiobookshelf
port:
number: 13378
---

View File

@@ -0,0 +1,77 @@
apiVersion: v1
kind: Namespace
metadata:
name: copilot
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: copilot-api
namespace: copilot
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: copilot-api
template:
metadata:
labels:
app: copilot-api
spec:
containers:
- name: copilot-api
image: node:latest
imagePullPolicy: Always
workingDir: /app
command: ["sh", "-c"]
args: ["npm cache clean --force && npx copilot-api@latest start --github-token $COPILOT_TOKEN --port 4444"]
env:
- name: COPILOT_TOKEN
valueFrom:
secretKeyRef:
name: copilot-secret
key: token
ports:
- containerPort: 4444
---
apiVersion: v1
kind: Service
metadata:
name: copilot-api
namespace: copilot
spec:
selector:
app: copilot-api
ports:
- name: http
protocol: TCP
port: 4444
targetPort: 4444
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: copilot-api-ingress
namespace: copilot
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- copilot.alexmickelson.guru
secretName: copilot-api-tls-cert
rules:
- host: copilot.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: copilot-api
port:
number: 4444
---

View File

@@ -0,0 +1,37 @@
apiVersion: v1
kind: Secret
metadata:
name: cloudflared-gitea-token
namespace: gitea
type: Opaque
stringData:
token: $CLOUDFLARED_GITEA_TOKEN
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cloudflared-gitea
namespace: gitea
spec:
replicas: 1
selector:
matchLabels:
app: cloudflared-gitea
template:
metadata:
labels:
app: cloudflared-gitea
spec:
containers:
- name: cloudflared
image: cloudflare/cloudflared:latest
imagePullPolicy: Always
args:
- tunnel
- run
env:
- name: TUNNEL_TOKEN
valueFrom:
secretKeyRef:
name: cloudflared-gitea-token
key: token

View File

@@ -0,0 +1,358 @@
:root {
--color-bg-page: #0d1117;
--color-bg-card: #161b22;
--color-bg-card-hover: #1c2128;
--color-border: #21262d;
--color-border-muted: #30363d;
--color-text: #e6edf3;
--color-text-muted: #8b949e;
--color-text-subtle: #6e7681;
--color-text-faint: #484f58;
--color-accent: #58a6ff;
--color-accent-subtle: #58a6ff11;
--color-accent-shadow: #58a6ff1a;
--color-success: #238636;
--color-success-hover: #2ea043;
--color-white: #fff;
/* Spacing */
--space-xl: 80px;
--space-lg: 24px;
--space-md: 20px;
--space-sm: 16px;
--space-xs: 12px;
--space-2xs: 10px;
/* Border radius */
--radius-lg: 12px;
--radius-md: 8px;
--radius-sm: 4px;
/* Font sizes */
--text-hero: 3rem;
--text-heading: 1.5rem;
--text-btn: 0.95rem;
--text-base: 0.875rem;
--text-sm: 0.8rem;
--text-xs: 0.75rem;
}
/* override gitea defaults */
.page-content > :first-child:not(.secondary-nav) {
margin-top: 0 !important;
margin: 0 !important;
}
#alex-landing {
min-height: 100vh;
background: var(--color-bg-page);
color: var(--color-text);
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
}
/* Hero */
.hero {
display: flex;
justify-content: center;
align-items: center;
padding-top: var(--space-xl);
padding-right: var(--space-lg);
padding-bottom: 60px;
padding-left: var(--space-lg);
text-align: center;
}
.hero-inner {
max-width: 640px;
}
.hero h1 {
font-size: var(--text-hero);
font-weight: 800;
margin: 0 0 var(--space-xs);
background: linear-gradient(135deg, var(--color-text) 0%, var(--color-accent) 100%);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
background-clip: text;
}
/* Projects section */
.projects-section {
  max-width: 1100px;
  margin: 0 auto;
  padding: 0 var(--space-lg) var(--space-xl);
}
.section-header {
  margin-bottom: var(--space-lg);
  display: flex;
  align-items: baseline;
  gap: var(--space-xs);
  flex-wrap: wrap;
}
.section-header h2 {
  font-size: var(--text-heading);
  font-weight: 700;
  margin: 0;
  color: var(--color-text);
}
/* Grid: as many 320px-minimum columns as fit the container width */
.repo-grid {
  display: grid;
  grid-template-columns: repeat(auto-fill, minmax(320px, 1fr));
  gap: var(--space-sm);
}
/* Skeleton loaders: animated shimmer placeholders shown until JS fills the grid */
.skeleton-card {
  height: 160px;
  border-radius: var(--radius-lg);
  background: linear-gradient(90deg, var(--color-bg-card) 25%, var(--color-border) 50%, var(--color-bg-card) 75%);
  background-size: 200% 100%;
  animation: shimmer 1.4s infinite;
}
/* Slides the oversized gradient across the element to create the shimmer */
@keyframes shimmer {
  0% { background-position: 200% 0; }
  100% { background-position: -200% 0; }
}
/* Repo cards */
.repo-card {
  background: var(--color-bg-card);
  border: 1px solid var(--color-border);
  border-radius: var(--radius-lg);
  padding: var(--space-md);
  text-decoration: none;
  color: inherit;
  display: flex;
  flex-direction: column;
  gap: var(--space-2xs);
  transition: border-color 0.2s, transform 0.2s, box-shadow 0.2s;
  cursor: pointer;
}
/* Lift-and-glow hover effect */
.repo-card:hover {
  border-color: var(--color-accent);
  transform: translateY(-2px);
  box-shadow: 0 8px 24px var(--color-accent-shadow);
}
.repo-card-header {
  display: flex;
  align-items: center;
  gap: var(--space-2xs);
}
.repo-icon {
  font-size: 1.1rem;
  flex-shrink: 0;
}
/* Repo name: single line with ellipsis truncation */
.repo-name {
  font-size: 1rem;
  font-weight: 600;
  color: var(--color-accent);
  overflow: hidden;
  text-overflow: ellipsis;
  white-space: nowrap;
}
.repo-private {
  font-size: 0.7rem;
  background: var(--color-border);
  border: 1px solid var(--color-border-muted);
  border-radius: var(--radius-sm);
  padding: 1px 6px;
  color: var(--color-text-muted);
  flex-shrink: 0;
}
/* Description: clamped to 2 lines via the WebKit line-clamp box model */
.repo-desc {
  font-size: var(--text-base);
  color: var(--color-text-muted);
  line-height: 1.5;
  flex: 1;
  overflow: hidden;
  display: -webkit-box;
  -webkit-line-clamp: 2;
  -webkit-box-orient: vertical;
}
/* Latest-commit footer row of a repo card */
.repo-commit {
  font-size: var(--text-sm);
  color: var(--color-text-subtle);
  border-top: 1px solid var(--color-border);
  padding-top: var(--space-2xs);
  display: flex;
  align-items: center;
  gap: 6px;
  overflow: hidden;
}
.commit-dot {
  width: 6px;
  height: 6px;
  border-radius: 50%;
  background: var(--color-success);
  flex-shrink: 0;
}
.commit-msg {
  overflow: hidden;
  text-overflow: ellipsis;
  white-space: nowrap;
  flex: 1;
}
.commit-time {
  color: var(--color-text-faint);
  flex-shrink: 0;
}
.repo-meta {
  display: flex;
  gap: 14px;
  font-size: var(--text-sm);
  color: var(--color-text-subtle);
}
.repo-meta span { display: flex; align-items: center; gap: 4px; }
/* Error state: spans the full grid width */
.error-msg {
  grid-column: 1 / -1;
  text-align: center;
  padding: 40px;
  color: var(--color-text-muted);
  font-size: var(--text-btn);
}
/* Activity section */
.activity-section {
  max-width: 1100px;
  margin: 0 auto;
  padding: 0 var(--space-lg) var(--space-xl);
}
.view-all-link {
  font-size: 0.85rem;
  color: var(--color-accent);
  text-decoration: none;
  margin-left: auto;
}
.view-all-link:hover { text-decoration: underline; }
/* Feed container: items are separated by internal borders, rounded as a unit */
.activity-feed {
  display: flex;
  flex-direction: column;
  gap: 0;
  border: 1px solid var(--color-border);
  border-radius: var(--radius-lg);
  overflow: hidden;
}
.skeleton-activity {
  height: 52px;
  background: linear-gradient(90deg, var(--color-bg-card) 25%, var(--color-border) 50%, var(--color-bg-card) 75%);
  background-size: 200% 100%;
  animation: shimmer 1.4s infinite;
  border-top: 1px solid var(--color-bg-page);
}
.skeleton-activity:first-child { border-top: none; }
.activity-item {
  display: flex;
  align-items: flex-start;
  gap: var(--space-xs);
  padding: var(--space-sm) var(--space-sm);
  background: var(--color-bg-card);
  border-top: 1px solid var(--color-border);
  font-size: var(--text-base);
  transition: background 0.15s;
}
.activity-item:first-child { border-top: none; }
.activity-item:hover { background: var(--color-bg-card-hover); }
/* Circular emoji badge for the event type */
.activity-op-icon {
  flex-shrink: 0;
  width: 28px;
  height: 28px;
  border-radius: 50%;
  background: var(--color-border);
  display: flex;
  align-items: center;
  justify-content: center;
  font-size: var(--text-sm);
  margin-top: 2px;
}
/* min-width: 0 lets the flex child shrink so nested text can truncate */
.activity-body { flex: 1; min-width: 0; }
.activity-headline-row {
  display: flex;
  align-items: baseline;
  gap: var(--space-xs);
  min-width: 0;
}
.activity-headline {
  color: var(--color-text);
  line-height: 1.5;
  flex: 1;
  min-width: 0;
  overflow: hidden;
  text-overflow: ellipsis;
  white-space: nowrap;
}
.activity-headline a {
  color: var(--color-accent);
  text-decoration: none;
  font-weight: 500;
}
.activity-headline a:hover { text-decoration: underline; }
.activity-commits {
  margin-top: 5px;
  display: flex;
  flex-direction: column;
  gap: 3px;
  padding-left: 2px;
}
.activity-commit-line {
  display: flex;
  align-items: center;
  gap: 8px;
  font-size: var(--text-sm);
  color: var(--color-text-muted);
  overflow: hidden;
}
/* Short SHA rendered as a pill-style link */
.activity-commit-sha {
  font-family: monospace;
  font-size: var(--text-xs);
  color: var(--color-text-subtle);
  background: var(--color-border);
  border-radius: var(--radius-sm);
  padding: 1px 5px;
  flex-shrink: 0;
  text-decoration: none;
}
.activity-commit-sha:hover { color: var(--color-accent); text-decoration: none; }
.activity-commit-text {
  overflow: hidden;
  text-overflow: ellipsis;
  white-space: nowrap;
  color: var(--color-text-muted);
}
.activity-time {
  flex-shrink: 0;
  font-size: 0.75rem;
  color: var(--color-text-faint);
  white-space: nowrap;
  padding-top: 3px;
}
/* ── Heatmap ─────────────────────────────────────────────── */
.heatmap-section {
  max-width: 1100px;
  margin: 0 auto;
  padding: 0 var(--space-lg) var(--space-xl);
}
/* Horizontal scroll keeps the fixed-width SVG usable on narrow screens */
.activity-heatmap {
  background: var(--color-bg-card);
  border: 1px solid var(--color-border);
  border-radius: var(--radius-lg);
  padding: var(--space-md) var(--space-lg);
  overflow-x: auto;
}
.heatmap-svg {
  display: block;
}
/* SVG text labels; the #8b949e fallback applies if the Gitea theme
   variable is not defined on this page */
.heatmap-month {
  font-size: 9px;
  fill: var(--color-text-muted, #8b949e);
  font-family: inherit;
}
.heatmap-day {
  font-size: 9px;
  fill: var(--color-text-muted, #8b949e);
  font-family: inherit;
}

View File

@@ -0,0 +1,51 @@
{{template "base/head" .}}
<!-- Custom Gitea landing page: hero, recent projects grid, activity heatmap,
     and recent-activity feed. All dynamic content is filled in client-side by
     custom-landing.js (reading /alex.rss and the heatmap API); the children of
     #repo-grid and #activity-feed below are skeleton placeholders it replaces. -->
<div class="page-content home" id="alex-landing">
	<section class="hero">
		<div class="hero-inner">
			<h1>Alex Mickelson</h1>
		</div>
	</section>
	<section class="projects-section">
		<div class="section-header">
			<h2>Recent Projects</h2>
		</div>
		<div id="repo-grid" class="repo-grid">
			<div class="skeleton-card"></div>
			<div class="skeleton-card"></div>
			<div class="skeleton-card"></div>
			<div class="skeleton-card"></div>
			<div class="skeleton-card"></div>
			<div class="skeleton-card"></div>
		</div>
	</section>
	<section class="heatmap-section">
		<div class="section-header">
			<h2>Activity</h2>
		</div>
		<!-- populated with an inline SVG by uiRendering.activityMapRender() -->
		<div id="activity-heatmap" class="activity-heatmap"></div>
	</section>
	<section class="activity-section">
		<div class="section-header">
			<h2>Recent Activity</h2>
			<a href="/alex" class="view-all-link">View full profile →</a>
		</div>
		<div id="activity-feed" class="activity-feed">
			<div class="skeleton-activity"></div>
			<div class="skeleton-activity"></div>
			<div class="skeleton-activity"></div>
			<div class="skeleton-activity"></div>
			<div class="skeleton-activity"></div>
		</div>
	</section>
</div>
<!-- expose Gitea's configured URLs to the client script -->
<script>
	window.GITEA_APP_URL = "{{AppUrl}}";
	window.GITEA_SUB_URL = "{{AppSubUrl}}";
</script>
<!-- update version when changed to reset cloudflare cache -->
<script src="{{AppSubUrl}}/assets/js/custom-landing.js?v=9"></script>
<!-- NOTE(review): the stylesheet is loaded at the end of the body, which can
     cause a brief flash of unstyled content — confirm this is intentional -->
<link href="{{AppSubUrl}}/assets/css/custom-landing.css?v=9" rel="stylesheet" />
{{template "base/footer" .}}

View File

@@ -0,0 +1,396 @@
const baseUrl = window.GITEA_SUB_URL || "";
// Thin wrappers around the two Gitea endpoints this landing page reads.
const httpService = {
  // Fetch the public RSS feed for user "alex" and parse it into an XML Document.
  // Throws on any non-2xx HTTP status.
  async fetchRss() {
    const response = await fetch(`${baseUrl}/alex.rss`);
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}`);
    }
    const body = await response.text();
    const parser = new DOMParser();
    return parser.parseFromString(body, "application/xml");
  },
  // Fetch per-day contribution counts from the Gitea heatmap API.
  // Throws on any non-2xx HTTP status.
  async fetchHeatmap(username = "alex") {
    const response = await fetch(`${baseUrl}/api/v1/users/${username}/heatmap`);
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}`);
    }
    return response.json(); // [{timestamp: unix_seconds, contributions: number}]
  },
};
// Pure parsing/formatting helpers: RSS XML in, plain data objects out.
// No DOM mutation happens here; rendering lives in uiRendering.
const dataDomain = {
  // Coarse human-readable age for a date string: "just now", "5m ago", "2d ago", ...
  timeAgo(dateStr) {
    const diff = (Date.now() - new Date(dateStr)) / 1000;
    if (diff < 60) return "just now";
    if (diff < 3600) return Math.floor(diff / 60) + "m ago";
    if (diff < 86400) return Math.floor(diff / 3600) + "h ago";
    if (diff < 2592000) return Math.floor(diff / 86400) + "d ago";
    if (diff < 31536000) return Math.floor(diff / 2592000) + "mo ago";
    return Math.floor(diff / 31536000) + "y ago";
  },
  // Escape text for interpolation into HTML (&, <, >, "); null/undefined -> "".
  esc(str) {
    return (str || "")
      .replace(/&/g, "&amp;")
      .replace(/</g, "&lt;")
      .replace(/>/g, "&gt;")
      .replace(/"/g, "&quot;");
  },
  // Sanitize an RSS title that may contain HTML: keep <a> elements,
  // flatten every other element to its text content.
  safeTitleHtml(rawTitleText) {
    const doc = new DOMParser().parseFromString(rawTitleText, "text/html");
    doc.body
      .querySelectorAll("*:not(a)")
      .forEach((el) => el.replaceWith(el.textContent));
    return doc.body.innerHTML;
  },
  // Strip all markup from an RSS title, returning plain text.
  titlePlainText(rawTitleText) {
    const doc = new DOMParser().parseFromString(rawTitleText, "text/html");
    return doc.body.textContent || rawTitleText;
  },
  // Map an activity headline to an emoji by keyword matching.
  // Order matters: earlier, more specific patterns win.
  activityIcon(titleText) {
    const t = titleText.toLowerCase();
    if (t.includes("push") || t.includes("commit")) return "📤";
    if (t.includes("creat") && t.includes("repo")) return "📁";
    if (t.includes("fork")) return "🍴";
    if (t.includes("open") && t.includes("issue")) return "🔴";
    if (t.includes("clos") && t.includes("issue")) return "🟢";
    if (t.includes("pull request") || t.includes("merge")) return "🔀";
    if (t.includes("tag")) return "🏷️";
    if (t.includes("branch")) return "🌿";
    if (t.includes("comment")) return "💬";
    if (t.includes("release")) return "🚀";
    return "⚡";
  },
  // Extract commits from an RSS item description. Each <a> is assumed to be a
  // commit link whose text is the SHA; the commit message is taken from the
  // first non-empty sibling text node that follows the anchor.
  parseCommits(descriptionText) {
    const doc = new DOMParser().parseFromString(descriptionText, "text/html");
    return Array.from(doc.querySelectorAll("a")).map((anchor) => {
      const sha = anchor.textContent.trim().slice(0, 7);
      const href = anchor.getAttribute("href") || "#";
      let msg = "";
      let node = anchor.nextSibling;
      while (node) {
        const t = (node.textContent || "").trim();
        if (t) {
          msg = t;
          break;
        }
        node = node.nextSibling;
      }
      return { sha, href, msg };
    });
  },
  // Derive a de-duplicated repository list from the feed. The last anchor of
  // each item title is assumed to be the repo link ("owner/name"); the first
  // item seen for a repo wins, so order follows feed recency.
  parseRepos(xmlDoc) {
    const items = Array.from(xmlDoc.querySelectorAll("channel > item"));
    const seen = new Map();
    for (const item of items) {
      const titleHtml = item.querySelector("title")?.textContent || "";
      const titleDoc = new DOMParser().parseFromString(titleHtml, "text/html");
      const anchors = titleDoc.querySelectorAll("a");
      // titles without at least actor + repo anchors are not repo events
      if (anchors.length < 2) continue;
      const repoAnchor = anchors[anchors.length - 1];
      const repoName = repoAnchor.textContent.trim();
      if (!repoName || seen.has(repoName)) continue;
      seen.set(repoName, {
        repoName,
        repoUrl: repoAnchor.getAttribute("href") || "#",
        shortName: repoName.includes("/")
          ? repoName.split("/").pop()
          : repoName,
        pubDate: item.querySelector("pubDate")?.textContent || "",
        firstCommit:
          dataDomain.parseCommits(
            item.querySelector("description")?.textContent || "",
          )[0] || null,
      });
    }
    return Array.from(seen.values());
  },
  // Count feed items per UTC day ("YYYY-MM-DD" -> count). Items with a
  // missing or unparseable pubDate are skipped.
  parseAllActivityDates(xmlDoc) {
    const counts = new Map();
    for (const item of Array.from(xmlDoc.querySelectorAll("channel > item"))) {
      const pubDate = item.querySelector("pubDate")?.textContent || "";
      if (!pubDate) continue;
      const d = new Date(pubDate);
      if (isNaN(d)) continue;
      const key = d.toISOString().slice(0, 10);
      counts.set(key, (counts.get(key) || 0) + 1);
    }
    return counts;
  },
  // Convert the first `limit` feed items into render-ready activity records
  // (sanitized headline HTML, icon, link, and up to 3 parsed commits each).
  parseActivity(xmlDoc, limit = 20) {
    return Array.from(xmlDoc.querySelectorAll("channel > item"))
      .slice(0, limit)
      .map((item) => {
        const rawTitle = item.querySelector("title")?.textContent || "";
        const titleText = dataDomain.titlePlainText(rawTitle);
        return {
          titleHtmlSafe: dataDomain.safeTitleHtml(rawTitle),
          titleText,
          link: item.querySelector("link")?.textContent || "#",
          pubDate: item.querySelector("pubDate")?.textContent || "",
          icon: dataDomain.activityIcon(titleText),
          commits: dataDomain.parseCommits(
            item.querySelector("description")?.textContent || "",
          ).slice(0, 3),
        };
      });
  },
};
// DOM rendering layer: consumes dataDomain's parsed records and writes the
// repo grid, activity feed, and heatmap SVG into their placeholder containers.
const uiRendering = {
  // Replace the #repo-grid skeletons with one card per repository.
  async renderRepos(xmlDoc) {
    const grid = document.getElementById("repo-grid");
    if (!grid) return;
    const repos = dataDomain.parseRepos(xmlDoc);
    if (repos.length === 0) {
      grid.innerHTML = `<div class="error-msg">No repositories found in feed.</div>`;
      return;
    }
    grid.innerHTML = "";
    for (const {
      shortName,
      repoName,
      repoUrl,
      pubDate,
      firstCommit,
    } of repos) {
      const when = dataDomain.timeAgo(pubDate);
      // fall back to the SHA when the commit message could not be parsed
      const commitMsg = firstCommit?.msg || firstCommit?.sha || "";
      const card = document.createElement("a");
      card.className = "repo-card";
      card.href = dataDomain.esc(repoUrl);
      card.innerHTML = `
        <div class="repo-card-header">
          <span class="repo-icon">📦</span>
          <span class="repo-name">${dataDomain.esc(shortName)}</span>
        </div>
        <div class="repo-desc">${dataDomain.esc(repoName)}</div>
        <div class="repo-commit">
          <span class="commit-dot"></span>
          <span class="commit-msg">${dataDomain.esc(commitMsg)}</span>
          <span class="commit-time">${dataDomain.esc(when)}</span>
        </div>
      `.trim();
      grid.appendChild(card);
    }
  },
  // Replace the #activity-feed skeletons with one row per feed item,
  // including up to three commit lines per push event.
  async renderActivity(xmlDoc) {
    const feed = document.getElementById("activity-feed");
    if (!feed) return;
    const items = dataDomain.parseActivity(xmlDoc);
    if (items.length === 0) {
      feed.innerHTML = `<div class="error-msg">No public activity yet.</div>`;
      return;
    }
    feed.innerHTML = "";
    for (const { titleHtmlSafe, icon, pubDate, commits } of items) {
      const when = dataDomain.timeAgo(pubDate);
      const commitsHtml =
        commits.length === 0
          ? ""
          : `<div class="activity-commits">` +
            commits
              .map(
                ({ sha, href, msg }) => `
              <div class="activity-commit-line">
                <a class="activity-commit-sha" href="${dataDomain.esc(href)}">${dataDomain.esc(sha)}</a>
                <span class="activity-commit-text">${dataDomain.esc(msg)}</span>
              </div>`,
              )
              .join("") +
            `</div>`;
      const el = document.createElement("div");
      el.className = "activity-item";
      // titleHtmlSafe is sanitized by dataDomain.safeTitleHtml (anchors only)
      el.innerHTML = `
        <div class="activity-op-icon">${icon}</div>
        <div class="activity-body">
          <div class="activity-headline-row">
            <div class="activity-headline">${titleHtmlSafe}</div>
            <span class="activity-time">${when}</span>
          </div>
          ${commitsHtml}
        </div>
      `;
      feed.appendChild(el);
    }
  },
  // Build a GitHub-style 53x7 contribution heatmap SVG from the heatmap API
  // and insert it into #activity-heatmap.
  async activityMapRender() {
    const container = document.getElementById("activity-heatmap");
    if (!container) return;
    let heatmapData;
    try {
      heatmapData = await httpService.fetchHeatmap();
    } catch (e) {
      container.innerHTML = `<div class="error-msg">Could not load heatmap (${e.message})</div>`;
      return;
    }
    // Build counts map from API data, keyed by UTC day "YYYY-MM-DD"
    const counts = new Map();
    for (const { timestamp, contributions } of heatmapData) {
      const d = new Date(timestamp * 1000);
      const key = d.toISOString().slice(0, 10);
      counts.set(key, (counts.get(key) || 0) + (contributions || 1));
    }
    const today = new Date();
    today.setHours(0, 0, 0, 0);
    // Align start to Sunday 52 weeks ago
    const startDate = new Date(today);
    startDate.setDate(startDate.getDate() - 52 * 7);
    startDate.setDate(startDate.getDate() - startDate.getDay());
    const cellSize = 11;
    const gap = 2;
    const step = cellSize + gap;
    const cols = 53;
    const rows = 7;
    const padLeft = 28; // room for day-of-week labels
    const padTop = 20; // room for month labels
    const svgW = padLeft + cols * step;
    const svgH = padTop + rows * step;
    // color ramp from "no activity" to "most activity"
    const LEVELS = ["#2d333b", "#0e4429", "#006d32", "#26a641", "#39d353"];
    const countToLevel = (n) =>
      n === 0 ? 0 : n === 1 ? 1 : n <= 3 ? 2 : n <= 6 ? 3 : 4;
    // Collect month labels (one per column where the month changes)
    const monthLabels = new Map();
    let lastMonth = -1;
    for (let col = 0; col < cols; col++) {
      const d = new Date(startDate);
      d.setDate(d.getDate() + col * 7);
      if (d.getMonth() !== lastMonth) {
        lastMonth = d.getMonth();
        monthLabels.set(col, d.toLocaleString("default", { month: "short" }));
      }
    }
    const ns = "http://www.w3.org/2000/svg";
    const svg = document.createElementNS(ns, "svg");
    svg.setAttribute("width", svgW);
    svg.setAttribute("height", svgH);
    svg.setAttribute("class", "heatmap-svg");
    svg.setAttribute("aria-label", "Activity heatmap");
    // Month labels
    for (const [col, name] of monthLabels) {
      const t = document.createElementNS(ns, "text");
      t.setAttribute("x", padLeft + col * step);
      t.setAttribute("y", 12);
      t.setAttribute("class", "heatmap-month");
      t.textContent = name;
      svg.appendChild(t);
    }
    // Day-of-week labels (Sun / Tue / Thu / Sat)
    ["Sun", "", "Tue", "", "Thu", "", "Sat"].forEach((label, i) => {
      if (!label) return;
      const t = document.createElementNS(ns, "text");
      t.setAttribute("x", 0);
      t.setAttribute("y", padTop + i * step + cellSize - 2);
      t.setAttribute("class", "heatmap-day");
      t.textContent = label;
      svg.appendChild(t);
    });
    // Day cells: column = week, row = day of week; future days are skipped
    for (let col = 0; col < cols; col++) {
      for (let row = 0; row < rows; row++) {
        const d = new Date(startDate);
        d.setDate(d.getDate() + col * 7 + row);
        if (d > today) continue;
        const key = d.toISOString().slice(0, 10);
        const count = counts.get(key) || 0;
        const rect = document.createElementNS(ns, "rect");
        rect.setAttribute("x", padLeft + col * step);
        rect.setAttribute("y", padTop + row * step);
        rect.setAttribute("width", cellSize);
        rect.setAttribute("height", cellSize);
        rect.setAttribute("rx", 2);
        rect.setAttribute("fill", LEVELS[countToLevel(count)]);
        rect.setAttribute("data-date", key);
        rect.setAttribute("data-count", count);
        // native tooltip via SVG <title>
        const title = document.createElementNS(ns, "title");
        title.textContent = count > 0
          ? `${count} activit${count === 1 ? "y" : "ies"} on ${key}`
          : `No activity on ${key}`;
        rect.appendChild(title);
        svg.appendChild(rect);
      }
    }
    // Legend ("Less [][][][][] More") below the grid
    const legendY = svgH + 6;
    const legendG = document.createElementNS(ns, "g");
    const legendLabel = document.createElementNS(ns, "text");
    legendLabel.setAttribute("x", padLeft);
    legendLabel.setAttribute("y", legendY + cellSize - 2);
    legendLabel.setAttribute("class", "heatmap-day");
    legendLabel.textContent = "Less";
    legendG.appendChild(legendLabel);
    LEVELS.forEach((color, i) => {
      const r = document.createElementNS(ns, "rect");
      r.setAttribute("x", padLeft + 32 + i * step);
      r.setAttribute("y", legendY);
      r.setAttribute("width", cellSize);
      r.setAttribute("height", cellSize);
      r.setAttribute("rx", 2);
      r.setAttribute("fill", color);
      legendG.appendChild(r);
    });
    const moreLabel = document.createElementNS(ns, "text");
    moreLabel.setAttribute("x", padLeft + 32 + LEVELS.length * step + 4);
    moreLabel.setAttribute("y", legendY + cellSize - 2);
    moreLabel.setAttribute("class", "heatmap-day");
    moreLabel.textContent = "More";
    legendG.appendChild(moreLabel);
    // grow the SVG to make room for the legend row
    svg.setAttribute("height", svgH + cellSize + 12);
    svg.appendChild(legendG);
    container.innerHTML = "";
    container.appendChild(svg);
  },
  // Entry point: fetch the RSS feed once, then fan out to the renderers.
  // On feed failure, both placeholder sections get an error message.
  async render() {
    // Fix: this previously read `httpService.baseUrl`, which does not exist,
    // so the fallback link rendered as "undefined/explore/repos".
    const linkBase = window.GITEA_SUB_URL || "";
    try {
      const xmlDoc = await httpService.fetchRss();
      await Promise.all([
        uiRendering.renderRepos(xmlDoc),
        uiRendering.renderActivity(xmlDoc),
        uiRendering.activityMapRender(),
      ]);
    } catch (e) {
      console.error("Gitea landing: RSS fetch failed", e);
      const grid = document.getElementById("repo-grid");
      const feed = document.getElementById("activity-feed");
      if (grid)
        grid.innerHTML = `<div class="error-msg">Could not load feed (${e.message}). <a href="${linkBase}/explore/repos" style="color:#58a6ff">Browse manually →</a></div>`;
      if (feed)
        feed.innerHTML = `<div class="error-msg">Could not load activity (${e.message})</div>`;
      return;
    }
  },
};
document.addEventListener("DOMContentLoaded", uiRendering.render);

View File

@@ -34,18 +34,33 @@ spec:
value: "gitea" value: "gitea"
- name: GITEA__database__PASSWD - name: GITEA__database__PASSWD
value: wauiofnasufnweaiufbsdklfjb23456 value: wauiofnasufnweaiufbsdklfjb23456
- name: GITEA__server__ROOT_URL - name: GITEA__server__PROTOCOL
value: "https://git.alexmickelson.guru/" value: "http"
- name: GITEA__server__DOMAIN
value: "git.alexmickelson.guru"
- name: GITEA__server__PUBLIC_URL_DETECTION
value: "auto"
- name: GITEA__server__LOCAL_ROOT_URL
value: "http://gitea-web-svc.gitea.svc.cluster.local:3000/"
- name: GITEA__server__SSH_DOMAIN - name: GITEA__server__SSH_DOMAIN
value: "gitea-gitea-web-svc.beefalo-newton.ts.net" value: "gitea-gitea-web-svc.beefalo-newton.ts.net"
- name: GITEA__server__SSH_PORT - name: GITEA__server__SSH_PORT
value: "22" value: "22"
# security
- name: GITEA__service__ENABLE_BASIC_AUTHENTICATION
value: "false"
- name: GITEA__service__DISABLE_REGISTRATION - name: GITEA__service__DISABLE_REGISTRATION
value: "true" value: "true"
- name: GITEA__service__ALLOW_ONLY_EXTERNAL_REGISTRATION - name: GITEA__service__ALLOW_ONLY_EXTERNAL_REGISTRATION
value: "false" value: "false"
- name: GITEA__openid__ENABLE_OPENID_SIGNIN
value: "false"
- name: GITEA__openid__ENABLE_OPENID_SIGNUP - name: GITEA__openid__ENABLE_OPENID_SIGNUP
value: "false" value: "false"
- name: GITEA__ui__DEFAULT_THEME
value: "gitea-dark"
- name: GITEA__ui__THEMES
value: "gitea-dark"
volumeMounts: volumeMounts:
- name: gitea-data - name: gitea-data
mountPath: /data mountPath: /data
@@ -55,6 +70,18 @@ spec:
- name: localtime - name: localtime
mountPath: /etc/localtime mountPath: /etc/localtime
readOnly: true readOnly: true
- name: landing-page
mountPath: /data/gitea/templates/home.tmpl
subPath: home.tmpl
readOnly: true
- name: landing-page
mountPath: /data/gitea/public/assets/css/custom-landing.css
subPath: custom-landing.css
readOnly: true
- name: landing-page
mountPath: /data/gitea/public/assets/js/custom-landing.js
subPath: custom-landing.js
readOnly: true
volumes: volumes:
- name: gitea-data - name: gitea-data
hostPath: hostPath:
@@ -66,6 +93,9 @@ spec:
- name: localtime - name: localtime
hostPath: hostPath:
path: /etc/localtime path: /etc/localtime
- name: landing-page
configMap:
name: gitea-landing-page
--- ---
apiVersion: v1 apiVersion: v1

View File

@@ -0,0 +1,97 @@
# Home Assistant: deployment, ClusterIP service, and TLS ingress.
apiVersion: v1
kind: Namespace
metadata:
  name: homeassistant
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: homeassistant
  namespace: homeassistant
spec:
  replicas: 1
  selector:
    matchLabels:
      app: homeassistant
  template:
    metadata:
      labels:
        app: homeassistant
    spec:
      # hostNetwork — presumably for LAN device discovery (mDNS etc.); confirm
      hostNetwork: true
      containers:
        - name: homeassistant
          image: homeassistant/home-assistant:stable
          imagePullPolicy: Always
          env:
            - name: TZ
              value: "America/Denver"
            # points HA's OpenAI integration at a local OpenWebUI endpoint
            - name: OPENAI_BASE_URL
              value: "http://openwebui.beefalo-newton.ts.net/v1"
          volumeMounts:
            - name: config
              mountPath: /config
            - name: localtime
              mountPath: /etc/localtime
              readOnly: true
            # Zigbee USB dongle passed through from the host
            - name: zigbee-dongle
              mountPath: /dev/serial/by-id/usb-Itead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_V2_0cad0783fc73ef11b46be21e313510fd-if00-port0
          securityContext:
            # privileged is required for raw serial-device access here;
            # NOTE(review): this grants broad host access — confirm necessity
            privileged: true
            capabilities:
              add:
                - NET_ADMIN
                - NET_RAW
      volumes:
        - name: config
          hostPath:
            path: /data/homeAssistant/config
            type: Directory
        - name: localtime
          hostPath:
            path: /etc/localtime
            type: File
        - name: zigbee-dongle
          hostPath:
            path: /dev/serial/by-id/usb-Itead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_V2_0cad0783fc73ef11b46be21e313510fd-if00-port0
            type: CharDevice
---
apiVersion: v1
kind: Service
metadata:
  name: home-assistant
  namespace: homeassistant
spec:
  selector:
    app: homeassistant
  ports:
    - name: http
      protocol: TCP
      port: 8123
      targetPort: 8123
  type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: home-assistant-ingress
  namespace: homeassistant
  annotations:
    cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - ha.alexmickelson.guru
      secretName: ha-tls-cert
  rules:
    - host: ha.alexmickelson.guru
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: home-assistant
                port:
                  number: 8123

View File

@@ -0,0 +1,75 @@
# Music Assistant server: deployment, service, and TLS ingress
# (lives in the homeassistant namespace alongside Home Assistant).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: music-assistant-server
  namespace: homeassistant
spec:
  replicas: 1
  selector:
    matchLabels:
      app: music-assistant-server
  template:
    metadata:
      labels:
        app: music-assistant-server
    spec:
      # hostNetwork — presumably for local speaker/cast discovery; confirm
      hostNetwork: true
      containers:
        - name: music-assistant-server
          image: ghcr.io/music-assistant/server:2
          imagePullPolicy: Always
          env:
            - name: LOG_LEVEL
              value: "info"
            - name: TZ
              value: "America/Denver"
          volumeMounts:
            - name: data
              mountPath: /data
      volumes:
        - name: data
          hostPath:
            path: /data/music-assistant-server/data
            type: DirectoryOrCreate
---
apiVersion: v1
kind: Service
metadata:
  name: music-assistant
  namespace: homeassistant
  # annotations:
  #   tailscale.com/expose: "true"
spec:
  selector:
    app: music-assistant-server
  ports:
    - name: http
      protocol: TCP
      port: 8095
      targetPort: 8095
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: music-assistant-ingress
  namespace: homeassistant
  annotations:
    cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - sound.alexmickelson.guru
      secretName: music-assistant-tls-cert
  rules:
    - host: sound.alexmickelson.guru
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: music-assistant
                port:
                  number: 8095
---

View File

@@ -0,0 +1,74 @@
# Z-Wave JS UI: deployment with USB controller passthrough, plus a
# Tailscale-exposed service for the web UI and websocket.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: zwave-js-ui
  namespace: homeassistant
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zwave-js-ui
  template:
    metadata:
      labels:
        app: zwave-js-ui
    spec:
      hostNetwork: true
      containers:
        - name: zwave-js-ui
          image: zwavejs/zwave-js-ui:latest
          imagePullPolicy: Always
          tty: true
          env:
            # NOTE(review): secret is hardcoded in the manifest — consider a
            # Kubernetes Secret instead
            - name: SESSION_SECRET
              value: "iqpwoeinf9384bw3p48gbwer"
            - name: TZ
              value: "America/Denver"
          volumeMounts:
            - name: zwave-data
              mountPath: /usr/src/app/store
            # both serial interfaces of the HubZ combo Z-Wave/Zigbee stick
            - name: zwave-device-if00
              mountPath: /dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if00-port0
            - name: zwave-device-if01
              mountPath: /dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if01-port0
          securityContext:
            # privileged for serial-device access; confirm necessity
            privileged: true
          ports:
            - containerPort: 8091
              name: http
            - containerPort: 3051
              name: websocket
      volumes:
        - name: zwave-data
          hostPath:
            path: /data/zwave
            type: DirectoryOrCreate
        - name: zwave-device-if00
          hostPath:
            path: /dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if00-port0
            type: CharDevice
        - name: zwave-device-if01
          hostPath:
            path: /dev/serial/by-id/usb-Silicon_Labs_HubZ_Smart_Home_Controller_31500417-if01-port0
            type: CharDevice
---
apiVersion: v1
kind: Service
metadata:
  name: zwave-js-ui
  namespace: homeassistant
  annotations:
    # expose the UI on the tailnet rather than a public ingress
    tailscale.com/expose: "true"
spec:
  selector:
    app: zwave-js-ui
  ports:
    - name: http
      protocol: TCP
      port: 8091
      targetPort: 8091
    - name: websocket
      protocol: TCP
      port: 3051
      targetPort: 3051
---

View File

@@ -0,0 +1,79 @@
# Homepage dashboard (gethomepage): namespace, deployment, service, ingress.
apiVersion: v1
kind: Namespace
metadata:
  name: homepage
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: homepage
  namespace: homepage
spec:
  replicas: 1
  selector:
    matchLabels:
      app: homepage
  template:
    metadata:
      labels:
        app: homepage
    spec:
      restartPolicy: Always
      containers:
        - name: homepage
          image: ghcr.io/gethomepage/homepage:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 3000
              name: http
          env:
            # host allow-list enforced by homepage itself
            - name: HOMEPAGE_ALLOWED_HOSTS
              value: "server.alexmickelson.guru:3001,home.alexmickelson.guru"
          volumeMounts:
            - name: host-configs
              mountPath: /app/config/
      volumes:
        - name: host-configs
          hostPath:
            path: /data/homepage
            type: DirectoryOrCreate
---
apiVersion: v1
kind: Service
metadata:
  name: homepage
  namespace: homepage
spec:
  selector:
    app: homepage
  ports:
    - name: http
      protocol: TCP
      port: 3000
      targetPort: 3000
  type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: homepage
  namespace: homepage
  annotations:
    cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - home.alexmickelson.guru
      secretName: homepage-tls
  rules:
    - host: home.alexmickelson.guru
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: homepage
                port:
                  number: 3000

View File

@@ -1,8 +1,13 @@
apiVersion: v1
kind: Namespace
metadata:
name: jellyfin
---
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
name: jellyfin name: jellyfin
namespace: projects namespace: jellyfin
spec: spec:
replicas: 1 replicas: 1
selector: selector:
@@ -14,14 +19,16 @@ spec:
app: jellyfin app: jellyfin
spec: spec:
hostNetwork: true hostNetwork: true
securityContext:
fsGroup: 1000
supplementalGroups:
- 303 # render group for GPU access
containers: containers:
- name: jellyfin - name: jellyfin
image: jellyfin/jellyfin image: jellyfin/jellyfin
securityContext: securityContext:
runAsUser: 1000 runAsUser: 1000
runAsGroup: 1000 runAsGroup: 1000
supplementalGroups:
- 303 # render group for GPU access
volumeMounts: volumeMounts:
- name: dri-device - name: dri-device
mountPath: /dev/dri/renderD128 mountPath: /dev/dri/renderD128
@@ -35,6 +42,9 @@ spec:
mountPath: /movies mountPath: /movies
- name: tvshows-volume - name: tvshows-volume
mountPath: /tvshows mountPath: /tvshows
- name: home-videos-volume
mountPath: /home-videos
readOnly: true
ports: ports:
- containerPort: 8096 - containerPort: 8096
name: jellyfin name: jellyfin
@@ -50,15 +60,58 @@ spec:
path: /data/jellyfin/cache path: /data/jellyfin/cache
- name: music-volume - name: music-volume
hostPath: hostPath:
path: /data/jellyfin/music path: /data/media/music/tagged
- name: movies-volume - name: movies-volume
hostPath: hostPath:
path: /data/jellyfin/movies path: /data/media/movies
- name: tvshows-volume - name: tvshows-volume
hostPath: hostPath:
path: /data/jellyfin/tvshows path: /data/media/tvshows
- name: home-videos-volume
hostPath:
path: /data/nextcloud/html/data/alex/files/Documents/home-video
- name: dri-device - name: dri-device
hostPath: hostPath:
path: /dev/dri/renderD128 path: /dev/dri/renderD128
type: CharDevice type: CharDevice
restartPolicy: Always restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
name: jellyfin
namespace: jellyfin
spec:
selector:
app: jellyfin
ports:
- name: http
protocol: TCP
port: 8096
targetPort: 8096
type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jellyfin
namespace: jellyfin
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- jellyfin.alexmickelson.guru
secretName: jellyfin-tls
rules:
- host: jellyfin.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jellyfin
port:
number: 8096

View File

@@ -1,14 +0,0 @@
# apiVersion: networking.k8s.io/v1
# kind: Ingress
# metadata:
# name: jellyfin-ingress
# namespace: projects
# spec:
# rules:
# - host: jellyfin.alexmickelson.guru
# http:
# paths:
# - path: /
# backend:
# service: jellyfin
# port: 8096

View File

@@ -1,27 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: jellyfin
namespace: projects
spec:
selector:
app: jellyfin
ports:
- protocol: TCP
port: 8096
targetPort: 8096
nodePort: 30096
type: NodePort
# apiVersion: v1
# kind: Service
# metadata:
# name: jellyfin
# namespace: projects
# spec:
# selector:
# app: jellyfin
# ports:
# - protocol: TCP
# port: 8096
# targetPort: 8096
# type: ClusterIP

View File

@@ -0,0 +1,126 @@
# Minecraft servers: one pod running a Tailscale sidecar plus two
# itzg/minecraft-server containers (vanilla/java21 and a Cobblemon modpack).
apiVersion: v1
kind: Namespace
metadata:
  name: minecraft
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: minecraft
  namespace: minecraft
spec:
  replicas: 1
  strategy:
    # Recreate avoids two servers holding the same world data during rollout
    type: Recreate
  selector:
    matchLabels:
      app: minecraft
  template:
    metadata:
      labels:
        app: minecraft
    spec:
      containers:
        # Tailscale sidecar so players connect over the tailnet
        - name: tailscale
          image: tailscale/tailscale:latest
          env:
            - name: TS_STATE_DIR
              value: "/var/lib/tailscale"
            # empty disables kube-secret state storage; state persists via the
            # hostPath volume below — TODO confirm intended
            - name: TS_KUBE_SECRET
              value: ""
            # NOTE(review): placeholder — must be replaced with a real auth key
            # (and ideally sourced from a Secret, not committed)
            - name: TS_AUTHKEY
              value: "tskey-auth-REPLACE_ME"
            - name: TS_HOSTNAME
              value: "minecraft"
          volumeMounts:
            - name: tailscale-data
              mountPath: /var/lib/tailscale
            - name: dev-tun
              mountPath: /dev/net/tun
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
                - SYS_MODULE
        - name: minecraft
          image: itzg/minecraft-server:java21
          stdin: true
          tty: true
          env:
            - name: EULA
              value: "true"
            - name: MEMORY
              value: "6G"
            - name: CF_OVERRIDES_EXCLUSIONS
              value: |
                # Not applicable for server side
                shaderpacks/**
          volumeMounts:
            - name: minecraft-data
              mountPath: /data
            - name: modpacks
              mountPath: /modpacks
              readOnly: true
          resources:
            requests:
              memory: "6Gi"
            limits:
              memory: "8Gi"
        # second server: CurseForge Cobblemon modpack on non-default ports
        - name: minecraft-cobblemon
          image: itzg/minecraft-server:java21
          stdin: true
          tty: true
          env:
            - name: EULA
              value: "true"
            - name: TYPE
              value: "AUTO_CURSEFORGE"
            - name: CF_SLUG
              value: "cobbleverse-cobblemon"
            - name: CF_MODPACK_ZIP
              value: "/modpacks/COBBLEVERSE-1.7.30-CF.zip"
            # NOTE(review): Kubernetes does NOT shell-expand "$CF_API_KEY" in
            # env values (only $(VAR) referencing other declared env vars) —
            # this is passed literally unless a templating step substitutes it
            # before apply; confirm
            - name: CF_API_KEY
              value: "$CF_API_KEY"
            - name: MEMORY
              value: "4G"
            # offset ports so both servers can share the pod network
            - name: SERVER_PORT
              value: "2222"
            - name: RCON_PORT
              value: "25576"
            - name: CF_OVERRIDES_EXCLUSIONS
              value: |
                # Not applicable for server side
                shaderpacks/**
                resourcepacks/**
          volumeMounts:
            - name: cobblemon-data
              mountPath: /data
            - name: modpacks
              mountPath: /modpacks
              readOnly: true
          resources:
            requests:
              memory: "4Gi"
            limits:
              memory: "6Gi"
      volumes:
        - name: minecraft-data
          hostPath:
            path: /data/minecraft/data
            type: DirectoryOrCreate
        - name: modpacks
          hostPath:
            path: /data/minecraft/modpacks
            type: DirectoryOrCreate
        - name: tailscale-data
          hostPath:
            path: /data/minecraft/tailscale
            type: DirectoryOrCreate
        - name: dev-tun
          hostPath:
            path: /dev/net/tun
            type: CharDevice
        - name: cobblemon-data
          hostPath:
            path: /data/minecraft/cobblemon-data
            type: DirectoryOrCreate

View File

@@ -1 +0,0 @@
test/

View File

@@ -1,13 +0,0 @@
# Minimal NFS server image based on Alpine.
FROM alpine:latest
# nfs-utils provides rpcbind/rpc.nfsd/rpc.mountd; bash is required by entrypoint.sh
RUN apk add --no-cache nfs-utils bash
# directory that will be exported (bind-mount data here at run time)
RUN mkdir -p /exports
COPY entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
# 2049 = NFS, 20048 = mountd
EXPOSE 2049 20048
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]

View File

@@ -1,24 +0,0 @@
#!/bin/bash
set -e
ALLOWED_CLIENTS="${ALLOWED_CLIENTS:-*}"
echo "/exports $ALLOWED_CLIENTS(rw,sync,no_subtree_check,no_root_squash)" > /etc/exports
rpcbind || true
rpc.statd || true
echo "Starting NFS server..."
mount -t nfsd nfsd /proc/fs/nfsd
rpc.nfsd -N 3 -V 4 --grace-time 10 $nfsd_debug_opt &
rpc.mountd -N 2 -N 3 -V 4 --foreground $mountd_debug_opt &
wait
# rpc.mountd -N 2 -N 3 -V 4 --foreground
# wait

View File

@@ -1,19 +0,0 @@
<https://wiki.alpinelinux.org/wiki/Setting_up_an_NFS_server>
example docker run
```bash
docker run --rm -it \
--name nfs-server \
--cap-add SYS_ADMIN \
-e ALLOWED_CLIENTS="127.0.0.1.0/24" \
-v (pwd)/test:/exports \
--network host \
nfs-server
```
currently not working, i like the idea of running the nfs server in a docker container, but doing it as a nixos module is probably better

View File

@@ -1,53 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: audiobookshelf-ingress
namespace: projects
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- audiobook.alexmickelson.guru
secretName: audiobookshelf-tls-cert
rules:
- host: audiobook.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: audiobookshelf
port:
number: 13378
---
apiVersion: v1
kind: Service
metadata:
name: audiobookshelf
namespace: projects
spec:
ports:
- port: 13378
targetPort: 13378
protocol: TCP
---
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
name: audiobookshelf
namespace: projects
labels:
kubernetes.io/service-name: audiobookshelf
addressType: IPv4
ports:
- name: http
port: 13378
protocol: TCP
endpoints:
- addresses:
- 100.122.128.107
conditions:
ready: true

View File

@@ -1,53 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: copilot-ingress
namespace: projects
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- copilot.alexmickelson.guru
secretName: copilot-tls-cert
rules:
- host: copilot.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: copilot
port:
number: 4444
---
apiVersion: v1
kind: Service
metadata:
name: copilot
namespace: projects
spec:
ports:
- port: 4444
targetPort: 4444
protocol: TCP
---
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
name: copilot
namespace: projects
labels:
kubernetes.io/service-name: copilot
addressType: IPv4
ports:
- name: http
port: 4444
protocol: TCP
endpoints:
- addresses:
- 100.122.128.107
conditions:
ready: true

View File

@@ -1,53 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: home-assistant-ingress
namespace: projects
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- ha.alexmickelson.guru
secretName: ha-tls-cert
rules:
- host: ha.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: home-assistant
port:
number: 8123
---
apiVersion: v1
kind: Service
metadata:
name: home-assistant
namespace: projects
spec:
ports:
- port: 8123
targetPort: 8123
protocol: TCP
---
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
name: home-assistant
namespace: projects
labels:
kubernetes.io/service-name: home-assistant
addressType: IPv4
ports:
- name: http
port: 8123
protocol: TCP
endpoints:
- addresses:
- 100.122.128.107
conditions:
ready: true

View File

@@ -1,53 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: homepage-ingress
namespace: projects
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- home.alexmickelson.guru
secretName: home-tls-cert
rules:
- host: home.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: homepage
port:
number: 3001
---
apiVersion: v1
kind: Service
metadata:
name: homepage
namespace: projects
spec:
ports:
- port: 3001
targetPort: 3001
protocol: TCP
---
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
name: homepage
namespace: projects
labels:
kubernetes.io/service-name: homepage
addressType: IPv4
ports:
- name: http
port: 3001
protocol: TCP
endpoints:
- addresses:
- 100.122.128.107
conditions:
ready: true

View File

@@ -1,53 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jellyfin-proxy-ingress
namespace: projects
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- jellyfin.alexmickelson.guru
secretName: jellyfin-tls-cert
rules:
- host: jellyfin.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jellyfin
port:
number: 8096
---
apiVersion: v1
kind: Service
metadata:
name: jellyfin
namespace: projects
spec:
ports:
- port: 8096
targetPort: 8096
protocol: TCP
---
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
name: jellyfin
namespace: projects
labels:
kubernetes.io/service-name: jellyfin
addressType: IPv4
ports:
- name: http
port: 8096
protocol: TCP
endpoints:
- addresses:
- 100.122.128.107
conditions:
ready: true

View File

@@ -1,53 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: musicassistant-ingress
namespace: projects
annotations:
cert-manager.io/cluster-issuer: cloudflare-issuer
spec:
ingressClassName: nginx
tls:
- hosts:
- sound.alexmickelson.guru
secretName: sound-tls-cert
rules:
- host: sound.alexmickelson.guru
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: musicassistant
port:
number: 8095
---
apiVersion: v1
kind: Service
metadata:
name: musicassistant
namespace: projects
spec:
ports:
- port: 8095
targetPort: 8095
protocol: TCP
---
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
name: musicassistant
namespace: projects
labels:
kubernetes.io/service-name: musicassistant
addressType: IPv4
ports:
- name: http
port: 8095
protocol: TCP
endpoints:
- addresses:
- 100.122.128.107
conditions:
ready: true

View File

@@ -103,21 +103,20 @@
iperf iperf
mangohud mangohud
mlocate mlocate
kdePackages.kdeconnect-kde
wineWowPackages.stable # wineWowPackages.stable
wine # wine
(wine.override { wineBuild = "wine64"; }) # (wine.override { wineBuild = "wine64"; })
wine64 # wine64
wineWowPackages.staging # wineWowPackages.staging
winetricks # winetricks
wineWowPackages.waylandFull # wineWowPackages.waylandFull
mesa-gl-headers # mesa-gl-headers
mesa # mesa
driversi686Linux.mesa # driversi686Linux.mesa
mesa-demos # mesa-demos
android-tools
]; ];
services.tailscale.enable = true; services.tailscale.enable = true;
services.openssh.enable = true; services.openssh.enable = true;
@@ -160,6 +159,25 @@
}; };
}; };
# fingerprint
# services.fprintd = {
# enable = true;
# package = pkgs.fprintd.override {
# libfprint = pkgs.libfprint;
# };
# };
# services.gnome.gnome-keyring.enable = true;
# security.polkit.enable = true;
# security.pam.services.gdm.fprintAuth = true;
# security.pam.services.gdm.enableGnomeKeyring = true;
# security.pam.services.sudo.fprintAuth = true;
# services.udev.extraRules = ''
# ACTION=="add", SUBSYSTEM=="usb", ATTR{idVendor}=="04f3", ATTR{idProduct}=="0c3d", TEST=="power/control", ATTR{power/control}="on"
# '';
# This value determines the NixOS release from which the default # This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions # settings for stateful data, like file locations and database versions
# on your system were taken. Its perfectly fine and recommended to leave # on your system were taken. Its perfectly fine and recommended to leave

View File

@@ -31,6 +31,15 @@
"gpt-oss-20b" = { }; "gpt-oss-20b" = { };
}; };
}; };
office = {
npm = "@ai-sdk/openai-compatible";
options = {
baseURL = "http://ai-office-server:8081/v1";
};
models = {
"gpt-oss-20b" = { };
};
};
}; };
mcp = { mcp = {
playwright = { playwright = {

View File

@@ -1,5 +1,12 @@
{ pkgs, ... }: { pkgs, ... }:
{ {
imports = [ ./fish.home.nix ];
customFish = {
dotnetPackage = pkgs.dotnetCorePackages.sdk_8_0;
bitwardenSshAgent = true;
};
home.packages = with pkgs; [ home.packages = with pkgs; [
vscode-fhs vscode-fhs
gnome-software gnome-software
@@ -43,37 +50,7 @@
package = pkgs.gnome-themes-extra; package = pkgs.gnome-themes-extra;
}; };
}; };
programs.fish = {
enable = true;
shellInit = ''
function commit
git add --all
git commit -m "$argv"
git push
end
# have ctrl+backspace delete previous word
bind \e\[3\;5~ kill-word
# have ctrl+delete delete following word
bind \b backward-kill-word
set -U fish_user_paths ~/.local/bin $fish_user_paths
#set -U fish_user_paths ~/.dotnet $fish_user_paths
#set -U fish_user_paths ~/.dotnet/tools $fish_user_paths
export VISUAL=vim
export EDITOR="$VISUAL"
export DOTNET_WATCH_RESTART_ON_RUDE_EDIT=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
export DOTNET_ROOT=${pkgs.dotnetCorePackages.sdk_8_0}
set -x LIBVIRT_DEFAULT_URI qemu:///system
set -x TERM xterm-256color # ghostty
export SSH_AUTH_SOCK=/home/alex/.bitwarden-ssh-agent.sock # ssh agent
'';
};
home.file = { home.file = {
".config/lazydocker/config.yml".text = '' ".config/lazydocker/config.yml".text = ''
gui: gui:

View File

@@ -1,5 +1,7 @@
{ pkgs, ... }: { pkgs, ... }:
{ {
imports = [ ./fish.home.nix ];
home.packages = with pkgs; [ home.packages = with pkgs; [
vscode-fhs vscode-fhs
gnome-software gnome-software
@@ -38,31 +40,7 @@
package = pkgs.gnome-themes-extra; package = pkgs.gnome-themes-extra;
}; };
}; };
programs.fish = {
enable = true;
shellInit = ''
function commit
git add --all
git commit -m "$argv"
git push
end
# have ctrl+backspace delete previous word
bind \e\[3\;5~ kill-word
# have ctrl+delete delete following word
bind \b backward-kill-word
set -U fish_user_paths ~/.local/bin $fish_user_paths
#set -U fish_user_paths ~/.dotnet $fish_user_paths
#set -U fish_user_paths ~/.dotnet/tools $fish_user_paths
export VISUAL=vim
export EDITOR="$VISUAL"
export DOTNET_WATCH_RESTART_ON_RUDE_EDIT=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
set -x LIBVIRT_DEFAULT_URI qemu:///system
'';
};
home.file = { home.file = {
".config/lazydocker/config.yml".text = '' ".config/lazydocker/config.yml".text = ''
gui: gui:

View File

@@ -1,5 +1,10 @@
{ pkgs, ... }: { pkgs, ... }:
{ {
imports = [ ./fish.home.nix ];
customFish = {
bluetuiAliases = true;
};
home.packages = with pkgs; [ home.packages = with pkgs; [
k9s k9s
jwt-cli jwt-cli
@@ -36,36 +41,7 @@
home.sessionVariables = { home.sessionVariables = {
EDITOR = "vim"; EDITOR = "vim";
}; };
programs.fish = {
enable = true;
shellInit = ''
alias blue="bluetui"
function commit
git add --all
git commit -m "$argv"
git push
end
# have ctrl+backspace delete previous word
bind \e\[3\;5~ kill-word
# have ctrl+delete delete following word
bind \b backward-kill-word
set -U fish_user_paths ~/.local/bin $fish_user_paths
#set -U fish_user_paths ~/.dotnet $fish_user_paths
#set -U fish_user_paths ~/.dotnet/tools $fish_user_paths
export VISUAL=vim
export EDITOR="$VISUAL"
export DOTNET_WATCH_RESTART_ON_RUDE_EDIT=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
set -x LIBVIRT_DEFAULT_URI qemu:///system
alias blue="bluetui"
alias jelly="jellyfin-tui"
'';
};
home.file = { home.file = {
".config/lazydocker/config.yml".text = '' ".config/lazydocker/config.yml".text = ''
gui: gui:

View File

@@ -4,16 +4,11 @@
vscode-fhs vscode-fhs
gnome-software gnome-software
gnome-tweaks gnome-tweaks
# nvtopPackages.nvidia
nerd-fonts.fira-code nerd-fonts.fira-code
nerd-fonts.droid-sans-mono nerd-fonts.droid-sans-mono
# fira-code
# (nerdfonts.override { fonts = [ "FiraCode" "DroidSansMono" ]; })
kubernetes-helm kubernetes-helm
busybox busybox
ghostty ghostty
elixir_1_18
inotify-tools # needed for elixir hot-reloading
nodejs_24 nodejs_24
pnpm pnpm
legcord legcord
@@ -22,7 +17,14 @@
bitwarden-desktop bitwarden-desktop
jellyfin-tui jellyfin-tui
bluetui bluetui
nexusmods-app-unfree # bitwarden-desktop
lazydocker
elixir
elixir-ls
inotify-tools
watchman
]; ];
programs.ghostty = { programs.ghostty = {

View File

@@ -0,0 +1,75 @@
{ pkgs, lib, config, ... }:
let
cfg = config.customFish;
in {
options.customFish = {
# Opt-in: only enable if the relevant tools are installed on this machine
bluetuiAliases = lib.mkEnableOption "bluetui/jellyfin-tui shell aliases";
dotnetPackage = lib.mkOption {
type = lib.types.nullOr lib.types.package;
default = null;
description = "Enable dotnet env vars and PATH entries. Set to the desired SDK package (e.g. pkgs.dotnetCorePackages.sdk_8_0).";
};
bitwardenSshAgent = lib.mkEnableOption "Bitwarden SSH agent (sets SSH_AUTH_SOCK)";
};
config = {
programs.fish = {
enable = true;
shellInit = lib.concatStringsSep "\n" (lib.filter (s: s != "") [
# https://gist.github.com/thomd/7667642
''
export LS_COLORS=':di=95'
function commit
git add --all
git commit -m "$argv"
for remote in (git remote)
git pull $remote
git push $remote
end
end
# have ctrl+backspace delete previous word
bind \e\[3\;5~ kill-word
# have ctrl+delete delete following word
bind \b backward-kill-word
set -U fish_user_paths ~/.local/bin ~/bin ~/.dotnet ~/.dotnet/tools $fish_user_paths
set fish_pager_color_selected_background --background='00399c'
export VISUAL=vim
export EDITOR="$VISUAL"
set -x LIBVIRT_DEFAULT_URI qemu:///system
set -x TERM xterm-256color
if test -f "$HOME/.cargo/env.fish"
source "$HOME/.cargo/env.fish"
end
''
(lib.optionalString cfg.bluetuiAliases ''
alias blue="bluetui"
alias jelly="jellyfin-tui"
'')
(lib.optionalString (cfg.dotnetPackage != null) ''
export DOTNET_WATCH_RESTART_ON_RUDE_EDIT=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
export DOTNET_ROOT=${cfg.dotnetPackage}
'')
(lib.optionalString cfg.bitwardenSshAgent ''
export SSH_AUTH_SOCK=$HOME/.bitwarden-ssh-agent.sock
'')
]);
};
};
}

View File

@@ -8,6 +8,14 @@ let
(fetchTarball "https://github.com/nix-community/nixGL/archive/main.tar.gz") (fetchTarball "https://github.com/nix-community/nixGL/archive/main.tar.gz")
{ }; { };
in { in {
imports = [ ./fish.home.nix ];
customFish = {
bluetuiAliases = true;
dotnetPackage = pkgs.dotnetCorePackages.sdk_8_0;
bitwardenSshAgent = true;
};
home.username = "alexm"; home.username = "alexm";
home.homeDirectory = "/home/alexm"; home.homeDirectory = "/home/alexm";
nixpkgs.config.allowUnfree = true; nixpkgs.config.allowUnfree = true;
@@ -16,16 +24,16 @@ in {
jwt-cli jwt-cli
fish fish
kubectl kubectl
(lazydocker.overrideAttrs (oldAttrs: rec { # (lazydocker.overrideAttrs (oldAttrs: rec {
version = "0.24.1"; # version = "0.24.4";
src = pkgs.fetchFromGitHub { # src = pkgs.fetchFromGitHub {
owner = "jesseduffield"; # owner = "jesseduffield";
repo = "lazydocker"; # repo = "lazydocker";
rev = "v${version}"; # rev = "v${version}";
hash = "sha256-cVjDdrxmGt+hj/WWP9B3BT739k9SSr4ryye5qWb3XNM="; # hash = "sha256-cW90/yblSLBkcR4ZdtcSI9MXFjOUxyEectjRn9vZwvg=";
}; # };
})) # }))
# lazydocker lazydocker
traceroute traceroute
(with dotnetCorePackages; combinePackages [ sdk_8_0 sdk_9_0 ]) (with dotnetCorePackages; combinePackages [ sdk_8_0 sdk_9_0 ])
nodejs_22 nodejs_22
@@ -41,7 +49,6 @@ in {
iperf iperf
#makemkv #makemkv
#elixir_1_18 #elixir_1_18
#inotify-tools
# gnome-themes-extra # gnome-themes-extra
uv uv
ghostty ghostty
@@ -66,7 +73,11 @@ in {
# vscode-fhs # vscode-fhs
# aider-chat-full # aider-chat-full
codex # codex
elixir
elixir-ls
inotify-tools
watchman
]; ];
fonts.fontconfig.enable = true; fonts.fontconfig.enable = true;
programs.firefox = { programs.firefox = {
@@ -87,47 +98,7 @@ in {
window-width = "120"; window-width = "120";
}; };
}; };
programs.fish = {
enable = true;
shellInit = ''
# https://gist.github.com/thomd/7667642
export LS_COLORS=':di=95'
function commit
git add --all
git commit -m "$argv"
git pull
git push
end
# have ctrl+backspace delete previous word
bind \e\[3\;5~ kill-word
# have ctrl+delete delete following word
bind \b backward-kill-word
alias blue="bluetui"
alias jelly="jellyfin-tui"
set -U fish_user_paths ~/.local/bin $fish_user_paths
set -U fish_user_paths ~/bin $fish_user_paths
set -U fish_user_paths ~/.dotnet $fish_user_paths
set -U fish_user_paths ~/.dotnet/tools $fish_user_paths
set fish_pager_color_selected_background --background='00399c'
export VISUAL=vim
export EDITOR="$VISUAL"
export DOTNET_WATCH_RESTART_ON_RUDE_EDIT=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
export DOTNET_ROOT=${pkgs.dotnetCorePackages.sdk_8_0}
set -x LIBVIRT_DEFAULT_URI qemu:///system
set -x TERM xterm-256color # ghostty
source "$HOME/.cargo/env.fish"
export SSH_AUTH_SOCK=/home/alexm/.bitwarden-ssh-agent.sock # ssh agent
'';
};
home.file = { home.file = {
".config/lazydocker/config.yml".text = '' ".config/lazydocker/config.yml".text = ''
gui: gui:

View File

@@ -7,6 +7,7 @@
<home-manager/nixos> <home-manager/nixos>
./modules/k3s.nix ./modules/k3s.nix
./modules/pci-passthrough.nix ./modules/pci-passthrough.nix
./modules/gitea-runner.nix
]; ];
security.pam.loginLimits = [ security.pam.loginLimits = [
{ {
@@ -95,6 +96,7 @@
nixpkgs.config.allowUnfree = true; nixpkgs.config.allowUnfree = true;
environment.systemPackages = with pkgs; [ environment.systemPackages = with pkgs; [
bash
vim vim
wget wget
curl curl
@@ -126,6 +128,11 @@
]; ];
services.envfs.enable = true; services.envfs.enable = true;
security.sudo = {
enable = true;
wheelNeedsPassword = true;
};
# printing # printing
services.printing = { services.printing = {
enable = true; enable = true;
@@ -287,77 +294,6 @@
}; };
}; };
services.gitea-actions-runner = {
instances.infrastructure = {
enable = true;
name = "infrastructure-runner";
url = "https://git.alexmickelson.guru";
tokenFile = "/data/runner/gitea-infrastructure-token.txt";
labels = [
"home-server:host"
];
hostPackages = with pkgs; [
docker
git
git-secret
zfs
sanoid
mbuffer
lzop
kubectl
kubernetes-helm
];
settings = {
container = {
enabled = false;
};
};
};
};
systemd.services.gitea-runner-infrastructure.serviceConfig = {
ReadWritePaths = [
"/data/cloudflare/"
"/data/runner/infrastructure"
"/data/runner"
"/home/github/infrastructure"
];
PrivateDevices = false;
DeviceAllow = [ "/dev/zfs rw" ];
ProtectProc = "default";
ProtectSystem = false;
PrivateMounts = false;
PrivateUsers = false;
ProtectHome = false;
Restart = lib.mkForce "always";
};
users.users.gitea-runner = {
isNormalUser = true;
description = "Gitea Actions Runner";
home = "/home/gitea-runner";
createHome = true;
extraGroups = [ "docker" ];
packages = with pkgs; [
kubernetes-helm
];
shell = pkgs.bashInteractive;
};
# users.users.github = {
# isNormalUser = true;
# description = "github";
# extraGroups = [ "docker" ];
# shell = pkgs.fish;
# packages = with pkgs; [
# kubernetes-helm
# ];
# };
networking.firewall.enable = false; networking.firewall.enable = false;
# This value determines the NixOS release from which the default # This value determines the NixOS release from which the default

View File

@@ -0,0 +1,132 @@
{ pkgs, lib, ... }:
{
services.gitea-actions-runner = {
instances.infrastructure = {
enable = true;
name = "infrastructure-runner";
url = "https://git.alexmickelson.guru";
tokenFile = "/data/runner/gitea-infrastructure-token.txt";
labels = [
"self-hosted"
"home-server"
"self-hosted:host"
"home-server:host"
"native:host"
];
hostPackages = with pkgs; [
bashNonInteractive
bash
coreutils
docker
git
git-secret
zfs
sanoid
mbuffer
lzop
kubectl
kubernetes-helm
curl
nodejs_24
openssl
gettext
];
settings = {
container = {
enabled = false;
};
runner = {
capacity = 5;
};
};
};
};
users.users.gitea-runner = {
isNormalUser = true;
description = "Gitea Actions Runner";
home = "/home/gitea-runner";
createHome = true;
group = "gitea-runner";
extraGroups = [ "docker" ];
packages = with pkgs; [
kubernetes-helm
nodejs_24
openssl
gettext
];
shell = pkgs.bash;
};
users.groups.gitea-runner = { };
security.sudo.extraRules = [
{
users = [ "gitea-runner" ];
commands = [
{
command = "/run/current-system/sw/bin/nix-collect-garbage";
options = [ "NOPASSWD" "SETENV" ];
}
];
}
];
system.activationScripts.zfs-delegate-gitea-runner = {
text =
let
poolNames = [ "data-ssd" "backup" ];
permissions = "compression,create,destroy,mount,mountpoint,receive,rollback,send,snapshot,hold";
in
''
${lib.concatMapStringsSep "\n" (pool:
"${pkgs.zfs}/bin/zfs allow -u gitea-runner ${permissions} ${pool} || true"
) poolNames}
'';
deps = [ ];
};
systemd.services.gitea-runner-infrastructure.serviceConfig = {
WorkingDirectory = lib.mkForce "/var/lib/gitea-runner/infrastructure";
User = lib.mkForce "gitea-runner";
Group = lib.mkForce "gitea-runner";
Environment = lib.mkForce [
"PATH=/run/wrappers/bin:/etc/profiles/per-user/gitea-runner/bin:/run/current-system/sw/bin"
"NIX_PATH=nixpkgs=${pkgs.path}"
];
DynamicUser = lib.mkForce false;
PrivateDevices = lib.mkForce false;
PrivateMounts = lib.mkForce false;
PrivateTmp = lib.mkForce false;
PrivateUsers = lib.mkForce false;
ProtectClock = lib.mkForce false;
ProtectControlGroups = lib.mkForce false;
ProtectHome = lib.mkForce false;
ProtectHostname = lib.mkForce false;
ProtectKernelLogs = lib.mkForce false;
ProtectKernelModules = lib.mkForce false;
ProtectKernelTunables = lib.mkForce false;
ProtectProc = lib.mkForce "default";
ProtectSystem = lib.mkForce false;
NoNewPrivileges = lib.mkForce false;
RestrictNamespaces = lib.mkForce false;
RestrictRealtime = lib.mkForce false;
RestrictSUIDSGID = lib.mkForce false;
RemoveIPC = lib.mkForce false;
LockPersonality = lib.mkForce false;
SystemCallFilter = lib.mkForce [ ];
RestrictAddressFamilies = lib.mkForce [ ];
ReadWritePaths = lib.mkForce [ ];
BindReadOnlyPaths = lib.mkForce [ ];
DeviceAllow = lib.mkForce [ "/dev/zfs rw" ];
DevicePolicy = lib.mkForce "auto";
Restart = lib.mkForce "always";
};
systemd.services.gitea-runner-infrastructure.path = [ pkgs.sudo ];
}

View File

@@ -1,4 +1,3 @@
{ config, pkgs, ... }: { config, pkgs, ... }:
{ {
@@ -13,6 +12,21 @@
# "uinput" # "uinput"
# ]; # ];
boot.kernelPackages = pkgs.linuxPackages_6_6;
# boot.kernelPackages = pkgs.linuxPackages_6_1;
services.xserver.enable = true;
services.xserver.displayManager.gdm = {
enable = true;
wayland = false;
};
services.xserver.desktopManager.gnome.enable = true;
#boot.kernelParams = [
# "amdgpu.discovery=1"
#];
hardware.enableRedistributableFirmware = true;
# networking.wireless.enable = true; # Enables wireless support via wpa_supplicant. # networking.wireless.enable = true; # Enables wireless support via wpa_supplicant.
networking.networkmanager.enable = true; networking.networkmanager.enable = true;
@@ -32,9 +46,6 @@
LC_TIME = "en_US.UTF-8"; LC_TIME = "en_US.UTF-8";
}; };
services.xserver.enable = true;
services.displayManager.gdm.enable = true;
services.desktopManager.gnome.enable = true;
services.xserver.xkb = { services.xserver.xkb = {
layout = "us"; layout = "us";
variant = ""; variant = "";
@@ -82,6 +93,9 @@
libcec libcec
flirc flirc
kdePackages.kdeconnect-kde
]; ];
services.openssh.enable = true; services.openssh.enable = true;
services.tailscale.enable = true; services.tailscale.enable = true;