Initial commit

This commit is contained in:
2024-12-30 11:42:12 -07:00
commit 09ba4114c1
86 changed files with 7522 additions and 0 deletions

46
.github/workflows/backup-zfs.yml vendored Normal file
View File

@@ -0,0 +1,46 @@
name: ZFS Backup
on:
  schedule:
    # Daily at 01:00 (runner-local time for self-hosted cron evaluation is
    # UTC on GitHub's scheduler). Quoted so the `*` characters stay a string.
    - cron: "0 1 * * *"
  workflow_dispatch:

jobs:
  update-infrastructure:
    runs-on: [self-hosted, home-server]
    steps:
      - name: run syncoid
        # NOTE(review): an earlier variant of this step also passed
        # --no-rollback to each syncoid invocation; restore that flag if the
        # backup datasets must never be rolled back on the destination.
        run: |
          zpool status
          echo ""
          zfs list
          echo ""
          syncoid \
            --recursive \
            --no-privilege-elevation \
            data-ssd/data \
            backup/data
          syncoid \
            --recursive \
            --no-privilege-elevation \
            data-ssd/media \
            backup/media

15
.github/workflows/deploy-bot.yml vendored Normal file
View File

@@ -0,0 +1,15 @@
name: Deploy Discord Bot
on:
  workflow_dispatch:

jobs:
  run-python:
    runs-on: [self-hosted, home-server]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: deploy bot
        env:
          # Bot token consumed by run.sh / the container.
          DISCORD_SECRET: ${{ secrets.DISCORD_SECRET }}
        run: |
          cd discord-bot
          ./run.sh

View File

@@ -0,0 +1,51 @@
name: Update home server containers
on: [push, workflow_dispatch]

jobs:
  # Clone or fast-forward the repo checkout that the other jobs build from.
  update-repo:
    runs-on: [home-server]
    steps:
      - name: checkout repo
        working-directory: /home/github/infrastructure
        run: |
          if [ -d "infrastructure" ]; then
            cd infrastructure
            echo "Infrastructure folder exists. Resetting to the most recent commit."
            git reset --hard HEAD
            git pull https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }} $(git rev-parse --abbrev-ref HEAD)
          else
            git clone https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git
          fi

  update-infrastructure:
    runs-on: [home-server]
    needs: update-repo
    steps:
      - name: update home server containers
        env:
          MY_GITHUB_TOKEN: ${{ secrets.MY_GITHUB_TOKEN }}
          HOMEASSISTANT_TOKEN: ${{ secrets.HOMEASSISTANT_TOKEN }}
          GRAFANA_PASSWORD: ${{ secrets.GRAFANA_PASSWORD }}
          CLOUDFLARE_CONFIG: ${{ secrets.CLOUDFLARE_CONFIG }}
        working-directory: /home/github/infrastructure/infrastructure
        run: |
          # git secret reveal -f
          pwd
          # echo "$CLOUDFLARE_CONFIG" > /data/cloudflare/cloudflare.ini
          cd home-server
          docker pull nextcloud:production
          docker compose pull
          docker compose build
          docker compose up -d
          # docker restart reverse-proxy
          # docker exec reverse-proxy nginx -t
          # docker exec reverse-proxy nginx -s reload

  update-pihole:
    runs-on: [home-server]
    needs: update-repo
    steps:
      - working-directory: /home/github/infrastructure/infrastructure
        run: |
          cd dns
          docker compose pull
          docker compose up -d

37
.github/workflows/update-playlist.yml vendored Normal file
View File

@@ -0,0 +1,37 @@
name: Manage Jellyfin Playlists
on:
  workflow_dispatch:
  schedule:
    # Hourly, on the hour.
    - cron: '0 * * * *'

jobs:
  run-python:
    runs-on: [self-hosted, home-server]
    steps:
      - name: checkout repo
        working-directory: /home/github/infrastructure
        run: |
          if [ -d "infrastructure" ]; then
            cd infrastructure
            echo "Infrastructure folder exists. Resetting to the most recent commit."
            git reset --hard HEAD
            git pull https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }} $(git rev-parse --abbrev-ref HEAD)
          else
            git clone https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git
          fi
      - name: Run Python script
        env:
          JELLYFIN_USER: ${{ secrets.JELLYFIN_USER }}
          JELLYFIN_PASSWORD: ${{ secrets.JELLYFIN_PASSWORD }}
        working-directory: /home/github/infrastructure/infrastructure
        run: |
          docker build -t jellyfin_management -f jellyfin/Dockerfile .
          docker run --rm \
            -e JELLYFIN_USER=$JELLYFIN_USER \
            -e JELLYFIN_PASSWORD=$JELLYFIN_PASSWORD \
            jellyfin_management \
            -m jellyfin.update_all_songs_playlist
          docker run --rm \
            -e JELLYFIN_USER=$JELLYFIN_USER \
            -e JELLYFIN_PASSWORD=$JELLYFIN_PASSWORD \
            jellyfin_management \
            -m jellyfin.update_unindexed

10
.gitignore vendored Normal file
View File

@@ -0,0 +1,10 @@
# git-secret: never commit the random seed; encrypted *.secret files are fine.
.gitsecret/keys/random_seed
!*.secret

# Environment files. A bare `*.env` already matches at every depth, so the
# explicit paths below are kept only as documentation of known env files.
# (The former `**/*.env` line was an exact duplicate of `*.env` and is removed.)
home-pi/dns/cloudflare.env
linode/wireguard/wg-easy.env
home-pi/plex.env
*.env

# Python artifacts
__pycache__/
.mypy_cache/
.venv/

4
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,4 @@
{
"python.linting.mypyEnabled": true,
"python.linting.enabled": true
}

5
README.md Normal file
View File

@@ -0,0 +1,5 @@
[![home server update](https://github.com/alexmickelson/infrastructure/actions/workflows/update-home-server.yml/badge.svg)](https://github.com/alexmickelson/infrastructure/actions/workflows/update-home-server.yml)
[![ZFS Backup](https://github.com/alexmickelson/infrastructure/actions/workflows/backup-zfs.yml/badge.svg)](https://github.com/alexmickelson/infrastructure/actions/workflows/backup-zfs.yml)
[![Manage Jellyfin Playlists](https://github.com/alexmickelson/infrastructure/actions/workflows/update-playlist.yml/badge.svg)](https://github.com/alexmickelson/infrastructure/actions/workflows/update-playlist.yml)

View File

@@ -0,0 +1,7 @@
.vscode/
virtualenv/
songs/
.mypy_cache/
Dockerfile
node_modules/
venv/

2
discord-bot/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
songs/
venv/

7
discord-bot/.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,7 @@
{
"python.linting.mypyEnabled": false,
"python.linting.enabled": true,
"python.linting.flake8Enabled": false,
"python.linting.pylintEnabled": false,
"python.linting.banditEnabled": true
}

24
discord-bot/Dockerfile Normal file
View File

@@ -0,0 +1,24 @@
# Stage 1: build the React client with Vite.
FROM node:20 AS build-stage
WORKDIR /app
COPY client/package.json client/package-lock.json ./
# npm ci installs exactly what the lockfile pins (reproducible builds).
RUN npm ci
COPY client/ ./
RUN npm run build

# Stage 2: Python runtime — discord bot + FastAPI server for the built client.
FROM python:3.10
# The ffmpeg *binary* is required for voice playback; drop apt lists to keep
# the image small.
RUN apt-get update \
    && apt-get install -y --no-install-recommends ffmpeg \
    && rm -rf /var/lib/apt/lists/*
COPY requirements.txt requirements.txt
RUN pip install --no-cache-dir -r requirements.txt
COPY src src
COPY main.py main.py
RUN mkdir songs client
# Built client assets are served by main.py from /client.
COPY --from=build-stage /app/dist /client
ENTRYPOINT [ "fastapi", "run", "main.py", "--port", "5677" ]

View File

@@ -0,0 +1,18 @@
// ESLint configuration for the Vite + React + TypeScript client.
module.exports = {
  root: true,
  env: { browser: true, es2020: true },
  extends: [
    'eslint:recommended',
    'plugin:@typescript-eslint/recommended',
    'plugin:react-hooks/recommended',
  ],
  // Build output and this config file itself are excluded from linting.
  ignorePatterns: ['dist', '.eslintrc.cjs'],
  parser: '@typescript-eslint/parser',
  plugins: ['react-refresh'],
  rules: {
    // Warn when a module exports non-components (breaks Fast Refresh);
    // constant exports are explicitly allowed.
    'react-refresh/only-export-components': [
      'warn',
      { allowConstantExport: true },
    ],
  },
}

24
discord-bot/client/.gitignore vendored Normal file
View File

@@ -0,0 +1,24 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?

View File

@@ -0,0 +1,30 @@
# React + TypeScript + Vite
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
Currently, two official plugins are available:
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/README.md) uses [Babel](https://babeljs.io/) for Fast Refresh
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
## Expanding the ESLint configuration
If you are developing a production application, we recommend updating the configuration to enable type aware lint rules:
- Configure the top-level `parserOptions` property like this:
```js
export default {
// other rules...
parserOptions: {
ecmaVersion: 'latest',
sourceType: 'module',
project: ['./tsconfig.json', './tsconfig.node.json'],
tsconfigRootDir: __dirname,
},
}
```
- Replace `plugin:@typescript-eslint/recommended` to `plugin:@typescript-eslint/recommended-type-checked` or `plugin:@typescript-eslint/strict-type-checked`
- Optionally add `plugin:@typescript-eslint/stylistic-type-checked`
- Install [eslint-plugin-react](https://github.com/jsx-eslint/eslint-plugin-react) and add `plugin:react/recommended` & `plugin:react/jsx-runtime` to the `extends` list

View File

@@ -0,0 +1,13 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/vite.svg" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Vite + React + TS</title>
</head>
<body data-bs-theme="dark">
<div id="root"></div>
<script type="module" src="/src/main.tsx"></script>
</body>
</html>

3673
discord-bot/client/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,32 @@
{
"name": "client",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "tsc -b && vite build",
"lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0",
"preview": "vite preview"
},
"dependencies": {
"bootstrap": "^5.3.3",
"bootstrap-icons": "^1.11.3",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"sass": "^1.77.6"
},
"devDependencies": {
"@types/bootstrap": "^5.2.10",
"@types/react": "^18.3.3",
"@types/react-dom": "^18.3.0",
"@typescript-eslint/eslint-plugin": "^7.13.1",
"@typescript-eslint/parser": "^7.13.1",
"@vitejs/plugin-react": "^4.3.1",
"eslint": "^8.57.0",
"eslint-plugin-react-hooks": "^4.6.2",
"eslint-plugin-react-refresh": "^0.4.7",
"typescript": "^5.2.2",
"vite": "^5.3.1"
}
}

View File

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="31.88" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 257"><defs><linearGradient id="IconifyId1813088fe1fbc01fb466" x1="-.828%" x2="57.636%" y1="7.652%" y2="78.411%"><stop offset="0%" stop-color="#41D1FF"></stop><stop offset="100%" stop-color="#BD34FE"></stop></linearGradient><linearGradient id="IconifyId1813088fe1fbc01fb467" x1="43.376%" x2="50.316%" y1="2.242%" y2="89.03%"><stop offset="0%" stop-color="#FFEA83"></stop><stop offset="8.333%" stop-color="#FFDD35"></stop><stop offset="100%" stop-color="#FFA800"></stop></linearGradient></defs><path fill="url(#IconifyId1813088fe1fbc01fb466)" d="M255.153 37.938L134.897 252.976c-2.483 4.44-8.862 4.466-11.382.048L.875 37.958c-2.746-4.814 1.371-10.646 6.827-9.67l120.385 21.517a6.537 6.537 0 0 0 2.322-.004l117.867-21.483c5.438-.991 9.574 4.796 6.877 9.62Z"></path><path fill="url(#IconifyId1813088fe1fbc01fb467)" d="M185.432.063L96.44 17.501a3.268 3.268 0 0 0-2.634 3.014l-5.474 92.456a3.268 3.268 0 0 0 3.997 3.378l24.777-5.718c2.318-.535 4.413 1.507 3.936 3.838l-7.361 36.047c-.495 2.426 1.782 4.5 4.151 3.78l15.304-4.649c2.372-.72 4.652 1.36 4.15 3.788l-11.698 56.621c-.732 3.542 3.979 5.473 5.943 2.437l1.313-2.028l72.516-144.72c1.215-2.423-.88-5.186-3.54-4.672l-25.505 4.922c-2.396.462-4.435-1.77-3.759-4.114l16.646-57.705c.677-2.35-1.37-4.583-3.769-4.113Z"></path></svg>

After

Width:  |  Height:  |  Size: 1.5 KiB

View File

@@ -0,0 +1,14 @@
import { CurrentSong } from "./components/CurrentSong";
import { PlaybackInfo } from "./components/PlaybackInfo";
import { SongQueue } from "./components/SongQueue";
export const App = () => {
return (
<div className="container mt-5">
<h1 className="text-center">Discord Music</h1>
<CurrentSong />
<SongQueue />
<PlaybackInfo />
</div>
);
};

View File

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="35.93" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 228"><path fill="#00D8FF" d="M210.483 73.824a171.49 171.49 0 0 0-8.24-2.597c.465-1.9.893-3.777 1.273-5.621c6.238-30.281 2.16-54.676-11.769-62.708c-13.355-7.7-35.196.329-57.254 19.526a171.23 171.23 0 0 0-6.375 5.848a155.866 155.866 0 0 0-4.241-3.917C100.759 3.829 77.587-4.822 63.673 3.233C50.33 10.957 46.379 33.89 51.995 62.588a170.974 170.974 0 0 0 1.892 8.48c-3.28.932-6.445 1.924-9.474 2.98C17.309 83.498 0 98.307 0 113.668c0 15.865 18.582 31.778 46.812 41.427a145.52 145.52 0 0 0 6.921 2.165a167.467 167.467 0 0 0-2.01 9.138c-5.354 28.2-1.173 50.591 12.134 58.266c13.744 7.926 36.812-.22 59.273-19.855a145.567 145.567 0 0 0 5.342-4.923a168.064 168.064 0 0 0 6.92 6.314c21.758 18.722 43.246 26.282 56.54 18.586c13.731-7.949 18.194-32.003 12.4-61.268a145.016 145.016 0 0 0-1.535-6.842c1.62-.48 3.21-.974 4.76-1.488c29.348-9.723 48.443-25.443 48.443-41.52c0-15.417-17.868-30.326-45.517-39.844Zm-6.365 70.984c-1.4.463-2.836.91-4.3 1.345c-3.24-10.257-7.612-21.163-12.963-32.432c5.106-11 9.31-21.767 12.459-31.957c2.619.758 5.16 1.557 7.61 2.4c23.69 8.156 38.14 20.213 38.14 29.504c0 9.896-15.606 22.743-40.946 31.14Zm-10.514 20.834c2.562 12.94 2.927 24.64 1.23 33.787c-1.524 8.219-4.59 13.698-8.382 15.893c-8.067 4.67-25.32-1.4-43.927-17.412a156.726 156.726 0 0 1-6.437-5.87c7.214-7.889 14.423-17.06 21.459-27.246c12.376-1.098 24.068-2.894 34.671-5.345a134.17 134.17 0 0 1 1.386 6.193ZM87.276 214.515c-7.882 2.783-14.16 2.863-17.955.675c-8.075-4.657-11.432-22.636-6.853-46.752a156.923 156.923 0 0 1 1.869-8.499c10.486 2.32 22.093 3.988 34.498 4.994c7.084 9.967 14.501 19.128 21.976 27.15a134.668 134.668 0 0 1-4.877 4.492c-9.933 8.682-19.886 14.842-28.658 17.94ZM50.35 144.747c-12.483-4.267-22.792-9.812-29.858-15.863c-6.35-5.437-9.555-10.836-9.555-15.216c0-9.322 
13.897-21.212 37.076-29.293c2.813-.98 5.757-1.905 8.812-2.773c3.204 10.42 7.406 21.315 12.477 32.332c-5.137 11.18-9.399 22.249-12.634 32.792a134.718 134.718 0 0 1-6.318-1.979Zm12.378-84.26c-4.811-24.587-1.616-43.134 6.425-47.789c8.564-4.958 27.502 2.111 47.463 19.835a144.318 144.318 0 0 1 3.841 3.545c-7.438 7.987-14.787 17.08-21.808 26.988c-12.04 1.116-23.565 2.908-34.161 5.309a160.342 160.342 0 0 1-1.76-7.887Zm110.427 27.268a347.8 347.8 0 0 0-7.785-12.803c8.168 1.033 15.994 2.404 23.343 4.08c-2.206 7.072-4.956 14.465-8.193 22.045a381.151 381.151 0 0 0-7.365-13.322Zm-45.032-43.861c5.044 5.465 10.096 11.566 15.065 18.186a322.04 322.04 0 0 0-30.257-.006c4.974-6.559 10.069-12.652 15.192-18.18ZM82.802 87.83a323.167 323.167 0 0 0-7.227 13.238c-3.184-7.553-5.909-14.98-8.134-22.152c7.304-1.634 15.093-2.97 23.209-3.984a321.524 321.524 0 0 0-7.848 12.897Zm8.081 65.352c-8.385-.936-16.291-2.203-23.593-3.793c2.26-7.3 5.045-14.885 8.298-22.6a321.187 321.187 0 0 0 7.257 13.246c2.594 4.48 5.28 8.868 8.038 13.147Zm37.542 31.03c-5.184-5.592-10.354-11.779-15.403-18.433c4.902.192 9.899.29 14.978.29c5.218 0 10.376-.117 15.453-.343c-4.985 6.774-10.018 12.97-15.028 18.486Zm52.198-57.817c3.422 7.8 6.306 15.345 8.596 22.52c-7.422 1.694-15.436 3.058-23.88 4.071a382.417 382.417 0 0 0 7.859-13.026a347.403 347.403 0 0 0 7.425-13.565Zm-16.898 8.101a358.557 358.557 0 0 1-12.281 19.815a329.4 329.4 0 0 1-23.444.823c-7.967 0-15.716-.248-23.178-.732a310.202 310.202 0 0 1-12.513-19.846h.001a307.41 307.41 0 0 1-10.923-20.627a310.278 310.278 0 0 1 10.89-20.637l-.001.001a307.318 307.318 0 0 1 12.413-19.761c7.613-.576 15.42-.876 23.31-.876H128c7.926 0 15.743.303 23.354.883a329.357 329.357 0 0 1 12.335 19.695a358.489 358.489 0 0 1 11.036 20.54a329.472 329.472 0 0 1-11 20.722Zm22.56-122.124c8.572 4.944 11.906 24.881 6.52 51.026c-.344 1.668-.73 3.367-1.15 5.09c-10.622-2.452-22.155-4.275-34.23-5.408c-7.034-10.017-14.323-19.124-21.64-27.008a160.789 160.789 0 0 1 5.888-5.4c18.9-16.447 36.564-22.941 
44.612-18.3ZM128 90.808c12.625 0 22.86 10.235 22.86 22.86s-10.235 22.86-22.86 22.86s-22.86-10.235-22.86-22.86s10.235-22.86 22.86-22.86Z"></path></svg>

After

Width:  |  Height:  |  Size: 4.0 KiB

View File

@@ -0,0 +1,26 @@
import { useWebSocket } from "../contexts/useWebSocket";
import { Slider } from "./Slider";
export const CurrentSong = () => {
const { ws, playbackInfo, sendMessage } = useWebSocket();
return (
<>
{playbackInfo && (
<div className="rounded border p-3 my-5 bg-body-tertiary bg-opacity-50">
<h2>Playing Song</h2>
<h5>{playbackInfo.file_name}</h5>
{ws && (
<Slider
min={0}
max={playbackInfo.duration}
current={playbackInfo.current_position}
onChange={(v) => {
sendMessage({ action: "set_playback", position: v });
}}
/>
)}
</div>
)}
</>
);
};

View File

@@ -0,0 +1,22 @@
import React from "react";
import { useInfoTask } from "./useInfoTask";
import { useWebSocket } from "../contexts/useWebSocket";
export const PlaybackInfo: React.FC = () => {
const { ws, error, message, botStatus } = useWebSocket();
useInfoTask(ws);
return (
<div className="row justify-content-end my-3">
<div className="col-auto">
<div className="border rounded-3 p-3 bg-secondary-subtle">
<h5 className="text-center">Status Messages</h5>
{botStatus && <div>status: {botStatus}</div>}
{error && <div>error: {error}</div>}
{message && <div>message: {message}</div>}
</div>
</div>
</div>
);
};

View File

@@ -0,0 +1,38 @@
@import "../../node_modules/bootstrap/scss/bootstrap.scss";
:root {
--slider-color: var(--bs-primary);
--slider-background-color: var(--bs-primary-bg-subtle);
}
.slider {
height: 15px;
border-radius: 5px;
background: var(--slider-background-color);
outline: none;
transition: opacity 0.2s;
opacity: .5;
&:hover {
opacity: 1;
}
&::-webkit-slider-thumb {
-webkit-appearance: none;
appearance: none;
width: 25px;
height: 25px;
border-radius: 50%;
background: var(--slider-color);
cursor: pointer;
}
&::-moz-range-thumb {
width: 25px;
height: 25px;
border-radius: 50%;
background: var(--slider-color);
cursor: pointer;
}
}

View File

@@ -0,0 +1,44 @@
import { ChangeEvent, FC, useEffect, useState } from "react";
import "./Slider.scss";
interface SliderProps {
min: number;
max: number;
current: number;
onChange: (value: number) => void;
}
export const Slider: FC<SliderProps> = ({ min, max, current, onChange }) => {
const [localValue, setLocalValue] = useState<number>(current);
const [isDragging, setIsDragging] = useState<boolean>(false);
const handleChange = (e: ChangeEvent<HTMLInputElement>) => {
setLocalValue(Number(e.target.value));
};
const handleMouseDown = () => {
setIsDragging(true);
};
const handleMouseUp = () => {
setIsDragging(false);
onChange(localValue);
};
useEffect(() => {
if (!isDragging) setLocalValue(current);
}, [current, isDragging]);
return (
<div className="w-100">
<input
type="range"
min={min}
max={max}
value={localValue}
onChange={handleChange}
onMouseDown={handleMouseDown}
onMouseUp={handleMouseUp}
className="slider w-100"
/>
</div>
);
};

View File

@@ -0,0 +1,8 @@
@import "../../node_modules/bootstrap/scss/bootstrap.scss";
.songListItem {
height: 3em;
&:hover {
@extend .bg-dark-subtle;
}
}

View File

@@ -0,0 +1,63 @@
import { useWebSocket } from "../contexts/useWebSocket";
import classes from "./SongQueue.module.scss";
export const SongQueue = () => {
const { songQueue, sendMessage } = useWebSocket();
return (
<div>
{songQueue && (
<div>
<ul className="list-group">
{songQueue.song_file_list.map((s, i) => {
const isCurrent = i === songQueue.position;
return (
<li
key={i}
className={` list-group-item m-0 p-0 ${
isCurrent && "bg-primary-subtle"
} ${classes.songListItem}`}
>
<div className="row h-100">
<div className="col-1 text-end my-auto">
{!isCurrent && (
<i
className="bi bi-play-circle text-primary fs-3 "
role="button"
onClick={() => {
sendMessage({
action: "set_position",
position: i,
});
}}
></i>
)}
{isCurrent && (
<i
className="bi bi-pause-circle text-primary fs-3 "
role="button"
onClick={() => {
// send pause message
// sendMessage({
// action: "set_position",
// position: i,
// });
}}
></i>
)}
</div>
<div className="col my-auto">
{s.filename
.substring(s.filename.lastIndexOf("/") + 1)
.replace(".mp3", "")}
</div>
</div>
</li>
);
})}
</ul>
</div>
)}
</div>
);
};

View File

@@ -0,0 +1,17 @@
import { useEffect } from "react";
// Poll interval in milliseconds for requesting playback info.
// NOTE(review): 100 ms is an aggressive polling rate — confirm this is
// intentional and not meant to be 1000 ms.
const updateInterval = 100;

// Request the current playback state over the websocket.
const getPlaybackInfo = (ws: WebSocket) => {
  ws.send(JSON.stringify({ action: "get_playback_info" }));
};

// Hook: while `websocket` is defined, poll playback info on a fixed interval;
// the interval is cleared when the socket changes or the component unmounts.
export const useInfoTask = (websocket?: WebSocket) => {
  useEffect(() => {
    const interval = setInterval(() => {
      if (websocket) getPlaybackInfo(websocket);
    }, updateInterval);
    return () => clearInterval(interval);
  }, [websocket]);
};

View File

@@ -0,0 +1,82 @@
import {
FC,
ReactNode,
useEffect,
useState,
} from "react";
import { BotResponse, PlaybackInfoData, SongQueue } from "../models";
import { WebSocketContext } from "./useWebSocket";
export const WebSocketProvider: FC<{ children: ReactNode }> = ({
children,
}) => {
const [ws, setWs] = useState<WebSocket | undefined>();
const [playbackInfo, setPlaybackInfo] = useState<
PlaybackInfoData | undefined
>();
const [songQueue, setSongQueue] = useState<SongQueue | undefined>();
const [error, setError] = useState<string>("");
const [message, setMessage] = useState("");
const [botStatus, setBotStatus] = useState<string | undefined>();
useEffect(() => {
const websocket = new WebSocket(`ws://server.alexmickelson.guru:5678/`);
// const websocket = new WebSocket(`ws://${window.location.hostname}:5678/`);
setWs(websocket);
websocket.onopen = () => {
console.log("websocket connected");
websocket.send(JSON.stringify({ action: "get_playback_info" }));
};
websocket.onmessage = (event) => {
const response: BotResponse = JSON.parse(event.data);
setBotStatus(response.status);
if (response.message_type === "ERROR") {
setError(response.error ?? "");
} else if (response.message_type === "MESSAGE") {
setMessage(response.message ?? "");
} else if (response.message_type === "PLAYBACK_INFORMATION") {
setPlaybackInfo(response.playback_information);
setSongQueue(response.song_queue);
}
};
websocket.onerror = (event: Event) => {
console.log(event);
setError("WebSocket error occurred.");
};
websocket.onclose = () => {
console.log("WebSocket connection closed");
};
return () => {
setWs(undefined);
websocket.close();
};
}, []);
const sendMessage = (message: unknown) => {
if (ws) {
ws.send(JSON.stringify(message));
}
};
return (
<WebSocketContext.Provider
value={{
ws,
error,
message,
botStatus,
playbackInfo,
songQueue,
sendMessage,
}}
>
{children}
</WebSocketContext.Provider>
);
};

View File

@@ -0,0 +1,24 @@
import { createContext, useContext } from "react";
import { PlaybackInfoData, SongQueue } from "../models";
interface WebSocketContextType {
ws: WebSocket | undefined;
error: string;
message: string;
botStatus: string | undefined;
playbackInfo: PlaybackInfoData | undefined;
songQueue: SongQueue | undefined;
sendMessage: (message: unknown) => void;
}
export const WebSocketContext = createContext<WebSocketContextType | undefined>(
undefined
);
export const useWebSocket = () => {
const context = useContext(WebSocketContext);
if (!context) {
throw new Error("useWebSocket must be used within a WebSocketProvider");
}
return context;
};

View File

@@ -0,0 +1,15 @@
import React from "react";
import ReactDOM from "react-dom/client";
import "bootstrap";
import "bootstrap/scss/bootstrap.scss";
import "bootstrap-icons/font/bootstrap-icons.css";
import { App } from "./App";
import { WebSocketProvider } from "./contexts/WebSocketContext";
ReactDOM.createRoot(document.getElementById("root")!).render(
<React.StrictMode>
<WebSocketProvider>
<App />
</WebSocketProvider>
</React.StrictMode>
);

View File

@@ -0,0 +1,27 @@
export enum BotStatus {
PLAYING = "Playing",
Idle = "Idle",
}
export interface PlaybackInfoData {
file_name: string;
current_position: number;
duration: number;
}
export interface SongQueue {
song_file_list: {
filename: string;
duration: number;
}[];
position: number;
}
export interface BotResponse {
message_type: "PLAYBACK_INFORMATION" | "ERROR" | "MESSAGE";
status: BotStatus;
error?: string;
message?: string;
playback_information?: PlaybackInfoData;
song_queue?: SongQueue;
}

1
discord-bot/client/src/vite-env.d.ts vendored Normal file
View File

@@ -0,0 +1 @@
/// <reference types="vite/client" />

View File

@@ -0,0 +1,27 @@
{
"compilerOptions": {
"composite": true,
"tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
"target": "ES2020",
"useDefineForClassFields": true,
"lib": ["ES2020", "DOM", "DOM.Iterable"],
"module": "ESNext",
"skipLibCheck": true,
/* Bundler mode */
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"resolveJsonModule": true,
"isolatedModules": true,
"moduleDetection": "force",
"noEmit": true,
"jsx": "react-jsx",
/* Linting */
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"noFallthroughCasesInSwitch": true
},
"include": ["src"]
}

View File

@@ -0,0 +1,11 @@
{
"files": [],
"references": [
{
"path": "./tsconfig.app.json"
},
{
"path": "./tsconfig.node.json"
}
]
}

View File

@@ -0,0 +1,13 @@
{
"compilerOptions": {
"composite": true,
"tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
"skipLibCheck": true,
"module": "ESNext",
"moduleResolution": "bundler",
"allowSyntheticDefaultImports": true,
"strict": true,
"noEmit": true
},
"include": ["vite.config.ts"]
}

View File

@@ -0,0 +1,7 @@
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'
// https://vitejs.dev/config/
export default defineConfig({
plugins: [react()],
})

114
discord-bot/main.py Normal file
View File

@@ -0,0 +1,114 @@
from enum import Enum
import os
from pprint import pprint
from typing import Optional, Set
import discord
from discord.ext import commands
from threading import Thread
from dotenv import load_dotenv
from fastapi.concurrency import asynccontextmanager
from pydantic import BaseModel
import asyncio
import websockets
import json
import time
from src.models import BotResponse, BotStatus, MessageType, PlaybackInformation
from src.my_voice_client import get_voice_client, set_voice_client
from src.playback_service import (
get_filename_and_starttime,
get_status,
handle_message,
handle_new_song_on_queue,
pause_song,
play_current_song,
start_time_now,
)
from src.song_queue import add_to_queue, get_current_metadata, handle_song_end, has_current_song, move_to_last_song_in_queue
load_dotenv()
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
bot = commands.Bot(command_prefix="!", intents=discord.Intents.all())
connected_clients: Set[websockets.WebSocketServerProtocol] = set()
async def broadcast_bot_response(response: BotResponse):
    """Send `response` (serialized as JSON) to every connected websocket client.

    A no-op when no clients are connected: `asyncio.wait` rejects an empty
    task set, and the original `raise TypeError(...)` here was a copy-pasted
    asyncio error message — broadcasting to nobody is not an error.
    """
    if not connected_clients:
        return
    await asyncio.wait(
        [
            asyncio.create_task(client.send(response.model_dump_json()))
            for client in connected_clients
        ]
    )
async def send_response_message(
websocket: websockets.WebSocketServerProtocol, response: BotResponse
):
await websocket.send(response.model_dump_json())
async def websocket_handler(websocket: websockets.WebSocketServerProtocol, path: str):
    """Per-connection handler: register the client, answer each JSON message.

    Each incoming message is decoded, dispatched through handle_message(),
    and the resulting BotResponse is sent back on the same socket. The client
    is always removed from `connected_clients` on exit.
    """
    connected_clients.add(websocket)
    try:
        async for message in websocket:
            data = json.loads(message)
            response = handle_message(data)
            await send_response_message(websocket, response)
    except websockets.ConnectionClosedError as e:
        print(f"Connection closed with error: {e}")
    except Exception as e:
        print(f"Unexpected error: {e}")
        # Re-raise so unexpected failures are not silently swallowed.
        raise e
    finally:
        connected_clients.remove(websocket)
        print("WebSocket connection closed")
@bot.event
async def on_ready():
print("Bot is ready")
@bot.command(name="play", pass_context=True)
async def play(ctx: commands.Context, url: str):
print("playing", url)
channel = ctx.message.author.voice.channel
if ctx.voice_client is None:
set_voice_client(await channel.connect())
add_to_queue(url)
handle_new_song_on_queue()
@bot.command(pass_context=True)
async def stop(ctx: commands.Context):
voice_client = get_voice_client()
if voice_client and voice_client.is_playing():
voice_client.stop()
await voice_client.disconnect()
await ctx.send("Stopped playing")
@bot.command(pass_context=True)
async def pause(ctx: commands.Context):
pause_song()
def run_websocket():
    """Run the websocket server on a dedicated event loop (called from a Thread)."""
    print("started websocket")
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    start_server = websockets.serve(websocket_handler, "0.0.0.0", 5678)
    loop.run_until_complete(start_server)
    loop.run_forever()


@asynccontextmanager
async def lifespan(app: FastAPI):
    # On FastAPI startup, launch the websocket server and the Discord bot on
    # background threads.
    # NOTE(review): bot.run() manages its own event loop; confirm starting it
    # from a plain Thread is safe for this discord library version.
    Thread(target=run_websocket).start()
    Thread(target=lambda: bot.run(os.getenv("DISCORD_SECRET"))).start()
    yield


app = FastAPI(lifespan=lifespan)
# Serve the built React client at the root; html=True serves index.html.
app.mount("/", StaticFiles(directory="./client", html=True), name="static")

View File

@@ -0,0 +1,9 @@
discord
yt_dlp
PyNaCl
python-dotenv
websockets
# NOTE(review): the PyPI "ffmpeg" package is a Python wrapper, NOT the ffmpeg
# binary (the Dockerfile installs the binary via apt). Confirm this dependency
# is actually imported anywhere; it may be removable.
ffmpeg
pydantic
mutagen
fastapi

17
discord-bot/run.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Build and (re)start the discord-bot container.
# Expects DISCORD_SECRET in the environment (provided by the CI workflow).
set -euo pipefail

docker pull node:20
docker pull python:3.10
docker build -t discord-bot .
# docker run -it --rm discord-bot

# Never echo DISCORD_SECRET — this script runs in CI and printing it would
# leak the token into the workflow logs. Only verify that it is set.
if [ -z "${DISCORD_SECRET:-}" ]; then
  echo "DISCORD_SECRET is not set" >&2
  exit 1
fi

docker rm -f discord-bot || true
docker run -d \
  --name discord-bot \
  -e DISCORD_SECRET="$DISCORD_SECRET" \
  --restart always \
  -p 0.0.0.0:5677:5677 \
  -p 0.0.0.0:5678:5678 \
  discord-bot

View File

39
discord-bot/src/models.py Normal file
View File

@@ -0,0 +1,39 @@
from enum import Enum
from typing import List, Optional
from pydantic import BaseModel
class BotStatus(str, Enum):
    """High-level bot state reported to websocket clients."""

    PLAYING = "Playing"
    IDLE = "Idle"


class MessageType(str, Enum):
    """Discriminator for the kind of payload a BotResponse carries."""

    PLAYBACK_INFORMATION = "PLAYBACK_INFORMATION"
    ERROR = "ERROR"
    MESSAGE = "MESSAGE"


class SongItem(BaseModel):
    """One entry in the song queue."""

    # Path to the downloaded audio file.
    filename: str
    # Length in whole seconds.
    duration: int


class SongQueueStatus(BaseModel):
    """Snapshot of the queue plus the index of the current song."""

    song_file_list: list[SongItem]
    position: int


class PlaybackInformation(BaseModel):
    """Progress of the currently playing song."""

    file_name: str
    # Seconds elapsed since the song started.
    current_position: float
    duration: float


class BotResponse(BaseModel):
    """Envelope sent to websocket clients; which optional fields are set
    depends on message_type."""

    message_type: MessageType
    status: BotStatus
    error: Optional[str] = None
    message: Optional[str] = None
    playback_information: Optional[PlaybackInformation] = None
    song_queue: Optional[SongQueueStatus] = None

View File

@@ -0,0 +1,14 @@
from discord import VoiceClient
# Module-level singleton holding the bot's active Discord voice connection.
__voice_client: VoiceClient | None = None


def get_voice_client():
    """Return the current VoiceClient, or None if the bot is not connected."""
    global __voice_client
    return __voice_client


def set_voice_client(client: VoiceClient | None):
    """Replace the stored VoiceClient (pass None to clear it)."""
    global __voice_client
    __voice_client = client

View File

@@ -0,0 +1,192 @@
import time
import discord
from src.models import BotResponse, BotStatus, MessageType, PlaybackInformation
from src.my_voice_client import get_voice_client
from src.song_queue import (
get_current_metadata,
get_queue_status,
handle_song_end,
has_current_song,
move_to_last_song_in_queue,
set_current_song_start_time,
set_queue_position,
)
# Sentinel for pause position; -1 means "not paused".
# NOTE(review): nothing in this block reads pause_offset — confirm whether it
# is dead state or consumed elsewhere in the project.
pause_offset = -1


def after_playing(error):
    """Callback passed to VoiceClient.play(); fires whenever playback stops.

    A seek (change_playback_position) also stops playback and re-triggers
    this callback, so only advance the queue when elapsed time says the song
    actually reached its end (within 1 second of its duration).
    """
    if error:
        print(f"Error during playback: {error}")
    fileName, duration, start_time = get_current_metadata()
    print(f"Finished playing {fileName}")
    # NOTE(review): duplicate of the metadata call above — likely redundant.
    fileName, duration, start_time = get_current_metadata()
    current_playing_time = time.time() - start_time
    if current_playing_time > (duration - 1):
        # song ended
        handle_song_end()
        if has_current_song():
            print("start next song")
            play_current_song()
        else:
            print("end of queue")
    else:
        # Playback was interrupted mid-song (e.g. a seek); keep queue position.
        print("not changing song because it is still playing")
def change_playback_position(position: int):
    """Seek within the current song to `position` seconds.

    Restarts FFmpeg with `-ss <position>` and rewinds the recorded start
    time so elapsed-time calculations stay consistent.  Returns a status
    dict on success, or None when nothing is playing.
    """
    fileName, duration, start_time = get_current_metadata()
    voice_client = get_voice_client()
    if voice_client and voice_client.is_playing():
        voice_client.pause()
        audio = discord.FFmpegPCMAudio(
            source=fileName, before_options=f"-ss {position}"
        )
        voice_client.play(audio, after=after_playing)
        # Pretend the song started `position` seconds ago.
        set_current_song_start_time(time.time() - position)
        return {"status": "Playback position changed"}
    else:
        print("cannot change position, no song playing")
        return None
def play_current_song():
    """Start FFmpeg playback of the queue's current song, if there is one."""
    if has_current_song():
        fileName, duration, start_time = get_current_metadata()
        # Record the wall-clock start so elapsed time can be computed later.
        start_time_now()
        get_voice_client().play(
            discord.FFmpegPCMAudio(source=fileName), after=after_playing
        )
def get_status():
    """Return BotStatus.PLAYING when audio is actively playing, else IDLE."""
    voice_client = get_voice_client()
    if voice_client and voice_client.is_playing():
        return BotStatus.PLAYING
    return BotStatus.IDLE
def get_playback_info():
    """Build a PlaybackInformation snapshot, or None when nothing is playing.

    Elapsed position is derived from wall-clock time minus the recorded
    song start time.
    """
    fileName, duration, start_time = get_current_metadata()
    voice_client = get_voice_client()
    if voice_client and voice_client.is_playing():
        elapsed_time = time.time() - start_time
        return PlaybackInformation(
            file_name=fileName,
            current_position=elapsed_time,
            duration=duration,
        )
    else:
        return None
def _error_response(message: str) -> BotResponse:
    """Build an ERROR BotResponse carrying the bot's current status."""
    return BotResponse(
        message_type=MessageType.ERROR,
        status=get_status(),
        error=message,
    )


def _playback_response() -> BotResponse:
    """Build a PLAYBACK_INFORMATION BotResponse for the current state."""
    info = get_playback_info()
    return BotResponse(
        message_type=MessageType.PLAYBACK_INFORMATION,
        status=BotStatus.PLAYING if info else BotStatus.IDLE,
        playback_information=info,
        song_queue=get_queue_status(),
    )


def handle_message(data) -> BotResponse:
    """Dispatch a client request dict to the matching playback action.

    Supported actions: "set_playback" (seek within the current song),
    "set_position" (jump the queue to an index and play), and
    "get_playback_info" (report current playback state).

    Args:
        data: request mapping; must contain "action", and "position"
            for the two position-based actions.

    Returns:
        A BotResponse describing the outcome (always non-None).
    """
    if "action" not in data:
        return _error_response("Invalid request, action is required")
    action = data["action"]
    if action == "set_playback":
        if "position" not in data:
            return _error_response("Invalid request, position is required")
        result = change_playback_position(data["position"])
        if result:
            return BotResponse(
                message_type=MessageType.MESSAGE,
                status=get_status(),
                message="position changed",
            )
        return _error_response("unable to change position")
    if action == "set_position":
        if "position" not in data:
            return _error_response("Invalid request, position is required")
        set_queue_position(data["position"])
        # Stopping triggers after_playing, then we explicitly start the
        # newly selected song.
        get_voice_client().stop()
        play_current_song()
        return _playback_response()
    if action == "get_playback_info":
        if not has_current_song():
            return BotResponse(
                message_type=MessageType.PLAYBACK_INFORMATION,
                status=BotStatus.IDLE,
                playback_information=None,
                song_queue=get_queue_status(),
            )
        return _playback_response()
    # BUG FIX: the original fell off the end for unrecognized actions,
    # implicitly returning None despite the BotResponse annotation.
    return _error_response(f"Unknown action: {action}")
def get_filename_and_starttime():
    """Return (file name, start time) of the current song, dropping duration."""
    file_name, _duration, start_time = get_current_metadata()
    return file_name, start_time
def start_time_now():
    """Mark the current song as having started at this instant."""
    now = time.time()
    set_current_song_start_time(now)
def handle_new_song_on_queue():
    """If idle, jump to the newest queue entry and start playing it."""
    if has_current_song():
        print("not moving to new song because there is current song")
        return
    move_to_last_song_in_queue()
    if has_current_song():
        play_current_song()
    else:
        print("moving to the last song did not put us on a current song")
def pause_song():
    """Pause playback, remembering how far into the song we are."""
    global pause_offset
    client = get_voice_client()
    if not (client and client.is_playing()):
        return
    _file, _duration, start_time = get_current_metadata()
    # Seconds already played; unpause_song uses this to restore an
    # accurate start time.
    pause_offset = time.time() - start_time
    client.pause()
def unpause_song():
    """Resume a paused song and restore accurate start-time bookkeeping."""
    global pause_offset
    voice_client = get_voice_client()
    # BUG FIX: the original gated on is_playing(), which is False while
    # the client is paused, so the resume branch could never run (and,
    # if somehow playing, would corrupt start_time with pause_offset=-1).
    # A paused client reports is_paused() == True.
    if voice_client and voice_client.is_paused():
        voice_client.resume()
        if pause_offset >= 0:
            # Shift start time so elapsed == pause_offset right now.
            set_current_song_start_time(time.time() - pause_offset)
        pause_offset = -1

View File

@@ -0,0 +1,95 @@
from typing import List
from pydantic import BaseModel
import yt_dlp
from src.models import SongItem, SongQueueStatus
# In-memory queue of downloaded songs, in insertion order.
song_file_list: List[SongItem] = []
# Index into song_file_list of the song currently selected for playback;
# -1 means "no current song".
current_position = -1
# Wall-clock time (time.time()) at which the current song (re)started.
current_song_start_time = 0
def __download_url(url: str):
    """Download the audio for ``url`` with yt-dlp.

    Returns:
        (fileName, song_duration): the path yt-dlp last reported for the
        download, and the duration in seconds from the extracted info.
    """
    fileName = ""

    def yt_dlp_monitor(d):
        # Progress hook: yt-dlp invokes this repeatedly during download;
        # each call overwrites fileName, so the last reported name wins.
        # NOTE(review): _filename may reflect the pre-postprocessing
        # name; confirm against yt-dlp progress-hook docs.
        nonlocal fileName
        final_filename = d.get("info_dict").get("_filename")
        fileName = final_filename

    ydl_opts = {
        "extract_audio": True,
        "format": "bestaudio/best",
        # NOTE(review): forces an .mp3 suffix regardless of the actual
        # downloaded codec — verify consumers tolerate this.
        "outtmpl": "./songs/%(title)s.mp3",
        "progress_hooks": [yt_dlp_monitor],
    }
    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        res = ydl.extract_info(url)
        song_duration = res["duration"]
    return fileName, song_duration
def add_to_queue(url: str):
    """Download the song at ``url`` and append it to the queue."""
    global song_file_list
    file_name, seconds = __download_url(url)
    song_file_list.append(SongItem(filename=file_name, duration=seconds))
def has_current_song():
    """Whether current_position points at a real entry in the queue.

    Returns:
        True iff the queue is non-empty and current_position is a valid
        index into it.
    """
    # BUG FIX: set_queue_position() accepts arbitrary ints and the
    # original only rejected -1 and exactly len(song_file_list), so a
    # position like -5 (silent negative indexing) or len+3 (IndexError
    # on access) was reported as a valid current song. A full range
    # check covers every case the original rejected, plus those.
    if not song_file_list:
        return False
    return 0 <= current_position < len(song_file_list)
def get_current_metadata():
    """Return (filename, duration, start_time) for the current song.

    Returns None (after logging) when no song is currently selected.
    """
    if not has_current_song():
        print("cannot request metadata when no current song")
        return None
    song = song_file_list[current_position]
    return (song.filename, song.duration, current_song_start_time)
def set_current_song_start_time(start_time: float):
    """Record the wall-clock time at which the current song (re)started."""
    global current_song_start_time
    current_song_start_time = start_time
def handle_song_end():
    """Advance the queue after a song finishes.

    Moves to the next song, or resets to the "no current song" sentinel
    (-1) when the song that just ended was the last in the queue.
    """
    global current_position
    print("handling song end ", current_position, len(song_file_list))
    if current_position == -1:
        # Nothing was current; ignore spurious end events.
        return
    last_index = len(song_file_list) - 1
    if current_position == last_index:
        print("last song ended, reseting position")
        current_position = -1
    else:
        print("song ended, moving to next song")
        current_position += 1
def move_to_last_song_in_queue():
    """Point current_position at the most recently queued song (-1 if empty)."""
    global current_position
    current_position = len(song_file_list) - 1
def get_queue_status():
    """Snapshot the full queue together with the current position."""
    return SongQueueStatus(
        song_file_list=song_file_list,
        position=current_position,
    )
def set_queue_position(position: int):
    """Jump the queue to ``position`` (no bounds checking is performed)."""
    global current_position
    current_position = position

57
dns/docker-compose.yml Normal file
View File

@@ -0,0 +1,57 @@
services:
ts-ingress:
image: tailscale/tailscale:latest
container_name: dns-tailscale
hostname: home-dns
restart: unless-stopped
environment:
- TS_STATE_DIR=/var/lib/tailscale
- TS_SERVE_CONFIG=/config/config.json
# - TS_AUTHKEY=
volumes:
- tailscale-data:/var/lib/tailscale
- ./ts-serve-config.json:/config/config.json
- /dev/net/tun:/dev/net/tun
cap_add:
- net_admin
- sys_module
# pihole:
# container_name: pihole
# image: pihole/pihole:latest
# # For DHCP it is recommended to remove these ports and instead add: network_mode: "host"
# # ports:
# # - "0.0.0.0:53:53/tcp"
# # - "0.0.0.0:53:53/udp"
# # - "127.0.0.1:53:53/tcp"
# # - "127.0.0.1:53:53/udp"
# # - "100.122.128.107:53:53/tcp"
# # - "100.122.128.107:53:53/udp"
# # # - "67:67/udp" # Only required if you are using Pi-hole as your DHCP server
# # - "8580:80"
# environment:
# TZ: 'America/Denver'
# # WEBPASSWORD: 'set a secure password here or it will be random'
# volumes:
# - '/data/pihole/etc-pihole:/etc/pihole'
# - '/data/pihole/etc-dnsmasq.d:/etc/dnsmasq.d'
# # https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
# # cap_add:
# # - NET_ADMIN # Required if you are using Pi-hole as your DHCP server, else not needed
# restart: unless-stopped
# network_mode: service:ts-ingress
adguardhome:
image: adguard/adguardhome
container_name: dns-adguardhome
network_mode: service:ts-ingress
restart: unless-stopped
volumes:
- /data/adguard/conf:/opt/adguardhome/conf
- /data/adguard/work:/opt/adguardhome/work
depends_on:
- ts-ingress
volumes:
tailscale-data:

16
dns/ts-serve-config.json Normal file
View File

@@ -0,0 +1,16 @@
{
"TCP": {
"443": {
"HTTPS": true
}
},
"Web": {
"${TS_CERT_DOMAIN}:443": {
"Handlers": {
"/": {
"Proxy": "http://127.0.0.1:80"
}
}
}
}
}

View File

@@ -0,0 +1,35 @@
version: '3'
services:
esphome:
container_name: esphome
image: ghcr.io/esphome/esphome
volumes:
- esphome-data:/config
- /etc/localtime:/etc/localtime:ro
restart: always
privileged: true
network_mode: host
# network_mode: service:ts-ingress
environment:
- USERNAME=alex
- PASSWORD=alex
# ts-ingress:
# image: tailscale/tailscale:latest
# container_name: ts-ingress
# hostname: esphome
# https://tailscale.com/blog/docker-tailscale-guide
# env_file:
# - .env
# environment:
# - TS_STATE_DIR=/var/lib/tailscale
# - TS_SERVE_CONFIG=/config/esphome.json
# volumes:
# - tailscale-data:/var/lib/tailscale
# - ./ts-serve-config.json:/config/esphome.json
# - /dev/net/tun:/dev/net/tun
# cap_add:
# - net_admin
# - sys_module
volumes:
tailscale-data:
esphome-data:

View File

@@ -0,0 +1,19 @@
{
"TCP": {
"443": {
"HTTPS": true
}
},
"Web": {
"${TS_CERT_DOMAIN}:443": {
"Handlers": {
"/": {
"Proxy": "http://127.0.0.1:6052"
}
}
}
},
"AllowFunnel": {
"${TS_CERT_DOMAIN}:443": false
}
}

1
gitea/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
data/

98
gitea/config.yaml Normal file
View File

@@ -0,0 +1,98 @@
# Example configuration file, it's safe to copy this as the default config file without any modification.
# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.
log:
# The level of logging, can be trace, debug, info, warn, error, fatal
level: info
runner:
# Where to store the registration result.
file: .runner
# Execute how many tasks concurrently at the same time.
capacity: 1
# Extra environment variables to run jobs.
envs:
A_TEST_ENV_NAME_1: a_test_env_value_1
A_TEST_ENV_NAME_2: a_test_env_value_2
# Extra environment variables to run jobs from a file.
# It will be ignored if it's empty or the file doesn't exist.
env_file: .env
# The timeout for a job to be finished.
# Please note that the Gitea instance also has a timeout (3h by default) for the job.
# So the job could be stopped by the Gitea instance if it's timeout is shorter than this.
timeout: 3h
# Whether skip verifying the TLS certificate of the Gitea instance.
insecure: false
# The timeout for fetching the job from the Gitea instance.
fetch_timeout: 5s
# The interval for fetching the job from the Gitea instance.
fetch_interval: 2s
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: "macos-arm64:host" or "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
# Find more images provided by Gitea at https://gitea.com/gitea/runner-images .
# If it's empty when registering, it will ask for inputting labels.
# If it's empty when execute `daemon`, will use labels in `.runner` file.
labels:
- "ubuntu-latest:docker://gitea/runner-images:ubuntu-latest"
- "ubuntu-22.04:docker://gitea/runner-images:ubuntu-22.04"
- "ubuntu-20.04:docker://gitea/runner-images:ubuntu-20.04"
cache:
# Enable cache server to use actions/cache.
enabled: true
# The directory to store the cache data.
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
dir: ""
# The host of the cache server.
# It's not for the address to listen, but the address to connect from job containers.
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
host: ""
# The port of the cache server.
# 0 means to use a random available port.
port: 0
# The external cache server URL. Valid only when enable is true.
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
# The URL should generally end with "/".
external_server: ""
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, act_runner will create a network automatically.
network: host
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
options:
# The parent directory of a job's working directory.
# NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
# If the path starts with '/', the '/' will be trimmed.
# For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
# If it's empty, /workspace will be used.
workdir_parent:
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
# valid_volumes:
# - data
# - /src/*.json
# If you want to allow any volume, please use the following configuration:
# valid_volumes:
# - '**'
valid_volumes: []
# overrides the docker client host with the specified one.
# If it's empty, act_runner will find an available docker host automatically.
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
docker_host: ""
# Pull docker image(s) even if already present
force_pull: true
# Rebuild docker image(s) even if already present
force_rebuild: false
host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent:

47
gitea/docker-compose.yml Normal file
View File

@@ -0,0 +1,47 @@
services:
server:
image: gitea/gitea:1.22.2
container_name: gitea
environment:
- USER_UID=1000
- USER_GID=1000
- GITEA__database__DB_TYPE=postgres
- GITEA__database__HOST=db:5432
- GITEA__database__NAME=gitea
- GITEA__database__USER=gitea
- GITEA__database__PASSWD=gitea
restart: always
volumes:
- ./data/gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- 0.0.0.0:3000:3000
- 0.0.0.0:222:22
depends_on:
- db
db:
image: postgres:14
restart: always
environment:
- POSTGRES_USER=gitea
- POSTGRES_PASSWORD=gitea
- POSTGRES_DB=gitea
volumes:
- ./data/postgres:/var/lib/postgresql/data
runner:
image: gitea/act_runner:nightly
environment:
CONFIG_FILE: /config.yaml
GITEA_INSTANCE_URL: http://0.0.0.0:3000/
GITEA_RUNNER_REGISTRATION_TOKEN: SMANpMfJk5G4fTFmuEZ9zleTBcdrj4M3k3eDCW6e # NOTE(review): registration token committed to VCS — rotate it and inject via env/secret instead
GITEA_RUNNER_NAME: test-runner
GITEA_RUNNER_LABELS: label1
network_mode: host
volumes:
- ./config.yaml:/config.yaml
- ./data/runner:/data
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- server

18
home-kubernetes/README.md Normal file
View File

@@ -0,0 +1,18 @@
## Home Kubernetes With K3S
<https://k3s.io/>
## Other Kubernetes Distros
I have tried k0s a few times and consistently got an "agent not available on node" error that stopped me from reading logs from pods.
I have used kubeadm to deploy clusters; while it works, it is pretty manual.

146
home-manager/home.nix Normal file
View File

@@ -0,0 +1,146 @@
{ config, pkgs, ... }:
{
# Home Manager needs a bit of information about you and the paths it should
# manage.
home.username = "alexm";
home.homeDirectory = "/home/alexm";
# You should not change this value, even if you update Home Manager. If you do
# want to update the value, then make sure to first check the Home Manager
# release notes.
home.stateVersion = "24.05"; # Please read the comment before changing.
home.packages = [
pkgs.openldap
pkgs.k9s
pkgs.jwt-cli
pkgs.thefuck
pkgs.fish
pkgs.kubectl
pkgs.lazydocker
];
programs.fish = {
enable = true;
shellAliases = {
dang="fuck";
};
shellInit = ''
function commit
git add --all
git commit -m "$argv"
git push
end
# have ctrl+backspace delete previous word
bind \e\[3\;5~ kill-word
# have ctrl+delete delete following word
bind \b backward-kill-word
set -U fish_user_paths ~/.local/bin $fish_user_paths
#set -U fish_user_paths ~/.dotnet $fish_user_paths
#set -U fish_user_paths ~/.dotnet/tools $fish_user_paths
export VISUAL=vim
export EDITOR="$VISUAL"
export DOTNET_WATCH_RESTART_ON_RUDE_EDIT=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
set -x LIBVIRT_DEFAULT_URI qemu:///system
thefuck --alias | source
'';
};
# Home Manager is pretty good at managing dotfiles. The primary way to manage
# plain files is through 'home.file'.
home.file = {
# # Building this configuration will create a copy of 'dotfiles/screenrc' in
# # the Nix store. Activating the configuration will then make '~/.screenrc' a
# # symlink to the Nix store copy.
# ".screenrc".source = dotfiles/screenrc;
# # You can also set the file content immediately.
# ".gradle/gradle.properties".text = ''
# org.gradle.console=verbose
# org.gradle.daemon.idletimeout=3600000
# '';
".config/lazydocker/config.yml".text = ''
gui:
returnImmediately: true
'';
".config/k9s/config.yaml".text = ''
k9s:
liveViewAutoRefresh: true
screenDumpDir: /home/alexm/.local/state/k9s/screen-dumps
refreshRate: 2
maxConnRetry: 5
readOnly: false
noExitOnCtrlC: false
ui:
enableMouse: false
headless: false
logoless: false
crumbsless: false
reactive: false
noIcons: false
defaultsToFullScreen: false
skipLatestRevCheck: false
disablePodCounting: false
shellPod:
image: busybox:1.35.0
namespace: default
limits:
cpu: 100m
memory: 100Mi
imageScans:
enable: false
exclusions:
namespaces: []
labels: {}
logger:
tail: 1000
buffer: 5000
sinceSeconds: -1
textWrap: false
showTime: false
thresholds:
cpu:
critical: 90
warn: 70
memory:
critical: 90
warn: 70
namespace:
lockFavorites: false
'';
};
# Home Manager can also manage your environment variables through
# 'home.sessionVariables'. These will be explicitly sourced when using a
# shell provided by Home Manager. If you don't want to manage your shell
# through Home Manager then you have to manually source 'hm-session-vars.sh'
# located at either
#
# ~/.nix-profile/etc/profile.d/hm-session-vars.sh
#
# or
#
# ~/.local/state/nix/profiles/profile/etc/profile.d/hm-session-vars.sh
#
# or
#
# /etc/profiles/per-user/alexm/etc/profile.d/hm-session-vars.sh
#
home.sessionVariables = {
EDITOR = "vim";
};
dconf.enable = true;
dconf.settings = {
"org/gnome/desktop/wm/keybindings" = {
toggle-maximized=["<Super>m"];
};
};
# Let Home Manager install and manage itself.
programs.home-manager.enable = true;
}

43
home-server/dns/update-dns.sh Executable file
View File

@@ -0,0 +1,43 @@
#!/bin/bash
# curl -X GET https://api.cloudflare.com/client/v4/zones/bf7a05315be9bf7a39d50dd4001e7a97/dns_records -H "X-Auth-Email: alexmickelson96@gmail.com" -H "X-Auth-Key: $CLOUDFLARE_TOKEN" | python -m json.tool
# SECURITY(review): a literal Cloudflare API key was previously committed on this line — rotate that key; use $CLOUDFLARE_TOKEN from cloudflare.env as below.
source /home/alex/actions-runner/_work/infrastructure/infrastructure/home-pi/dns/cloudflare.env
NETWORK_INTERFACE=wlan0
IP=$(ip a s $NETWORK_INTERFACE | awk '/inet / {print$2}' | cut -d/ -f1)
EMAIL="alexmickelson96@gmail.com";
ZONE_ID="bf7a05315be9bf7a39d50dd4001e7a97";
update_record() {
LOCAL_NAME=$1
LOCAL_RECORD_ID=$2
echo "UPDATING RECORD FOR $LOCAL_NAME TO $IP"
curl -X PUT "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$LOCAL_RECORD_ID" \
-H "X-Auth-Email: alexmickelson96@gmail.com" \
-H "X-Auth-Key: $CLOUDFLARE_TOKEN" \
-H "Content-Type: application/json" \
--data '{"type":"A","name":"'"$LOCAL_NAME"'","content":"'"$IP"'","ttl":1}' \
| python3 -m json.tool;
echo
echo "------------------------------------"
echo
}
NAME="ha.alexmickelson.guru";
RECORD_ID="09eac5a17fa4302091532dabdbe73a68"
update_record $NAME $RECORD_ID
NAME="jellyfin.alexmickelson.guru";
RECORD_ID="577293ab0488913308fda78010a7483b"
update_record $NAME $RECORD_ID
NAME="next.alexmickelson.guru";
RECORD_ID="cc686333d2421a4e558a04589b375ded"
update_record $NAME $RECORD_ID

View File

@@ -0,0 +1,256 @@
services:
jellyfin:
image: jellyfin/jellyfin
container_name: jellyfin
user: 1000:1000
network_mode: "host"
volumes:
- /data/jellyfin/config:/config
- /data/jellyfin/cache:/cache
- /data/media/music/tagged:/music
- /data/media/movies:/movies
- /data/media/tvshows:/tvshows
restart: "unless-stopped"
environment:
- JELLYFIN_PublishedServerUrl=https://jellyfin.alexmickelson.guru
nextcloud:
build:
context: nextcloud
container_name: nextcloud
environment:
- TZ=America/Denver
- OVERWRITEPROTOCOL=https
- MYSQL_PASSWORD=slkdnflksnelkfnsdweoinv
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
- MYSQL_HOST=nextcloud-db
volumes:
- /data/nextcloud/html:/var/www/html
- /data/media/music:/music
- /data/media/movies:/movies
- /data/media/tvshows:/tvshows
- /data/media/shared:/shared
- /data/media/audiobooks:/audiobooks
restart: unless-stopped
networks:
- proxy
nextcloud-cron:
build:
context: nextcloud
container_name: nextcloud-cron
environment:
- TZ=America/Denver
- OVERWRITEPROTOCOL=https
- MYSQL_PASSWORD=slkdnflksnelkfnsdweoinv
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
- MYSQL_HOST=nextcloud-db
volumes:
- /data/nextcloud/html:/var/www/html
- /data/media/music:/music
- /data/media/movies:/movies
- /data/media/tvshows:/tvshows
- /data/media/shared:/shared
- /data/media/audiobooks:/audiobooks
restart: unless-stopped
entrypoint: /cron.sh
depends_on:
- nextcloud
networks:
- proxy
nextcloud-db:
image: mariadb:10.6
container_name: nextcloud_db
# mysql -u$MYSQL_USER -p$MYSQL_PASSWORD $MYSQL_DATABASE
restart: always
command: --transaction-isolation=READ-COMMITTED --log-bin=binlog --binlog-format=ROW
volumes:
- /data/nextcloud-db/:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=klsdnofinsodkflksen34tesrg
- MYSQL_PASSWORD=slkdnflksnelkfnsdweoinv
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
networks:
- proxy
homeassistant:
container_name: homeassistant
image: homeassistant/home-assistant:stable
volumes:
- /data/homeAssistant/config:/config
- /etc/localtime:/etc/localtime:ro
- /dev/serial/by-id:/dev/serial/by-id
devices:
- /dev/ttyUSB0:/dev/ttyUSB0
- /dev/ttyUSB1:/dev/ttyUSB1
environment:
- TZ=America/Denver
restart: always
network_mode: host
# octoprint:
# image: octoprint/octoprint
# container_name: octoprint
# restart: unless-stopped
# # ports:
# # - 80:80
# # devices:
# # # use `python -m serial.tools.miniterm` to see what the name is of the printer, this requires pyserial
# # - /dev/ttyACM0:/dev/ttyACM0
# # - /dev/video0:/dev/video0
# volumes:
# - /data/octoprint:/octoprint
# # uncomment the lines below to ensure camera streaming is enabled when
# # you add a video device
# environment:
# - ENABLE_MJPG_STREAMER=true
# - MJPG_SREAMER_INPUT=-n -r 1280x720 -f 30
prometheus:
image: bitnami/prometheus:2
container_name: prometheus
restart: unless-stopped
environment:
- HOMEASSISTANT_TOKEN=${HOMEASSISTANT_TOKEN}
volumes:
- ./prometheus.yml:/opt/bitnami/prometheus/conf/prometheus.yml
- /data/prometheus:/opt/bitnami/prometheus/data
# command:
# - '--config.file=/etc/prometheus/prometheus.yml'
# - '--storage.tsdb.path=/prometheus'
# - '--web.console.libraries=/etc/prometheus/console_libraries'
# - '--web.console.templates=/etc/prometheus/consoles'
# - '--web.enable-lifecycle'
# expose:
# - 9090
networks:
- proxy
grafana:
image: grafana/grafana:main
container_name: grafana
restart: always
environment:
- GF_SECURITY_ADMIN_USER=admin
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD}
volumes:
- /data/grafana:/var/lib/grafana
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/robots.txt"]
interval: 10s
timeout: 5s
retries: 3
start_period: 3s
networks:
- proxy
# acpupsd_exporter:
# image: sfudeus/apcupsd_exporter:master_1.19
# container_name: apcupsd_exporter
# restart: always
# extra_hosts:
# - host.docker.internal:host-gateway
# command: -apcupsd.addr host.docker.internal:3551
# ports:
# - 0.0.0.0:9162:9162
# docker run -it --rm -p 9162:9162 --net=host sfudeus/apcupsd_exporter:master_1.19
reverse-proxy:
image: ghcr.io/linuxserver/swag
container_name: reverse-proxy
restart: unless-stopped
cap_add:
- NET_ADMIN
environment:
- PUID=1000
- PGID=1000
- TZ=America/Denver
- URL=alexmickelson.guru
- SUBDOMAINS=wildcard
- VALIDATION=dns
- DNSPLUGIN=cloudflare
volumes:
- ./nginx.conf:/config/nginx/site-confs/default.conf
- /data/swag:/config
- /data/cloudflare/cloudflare.ini:/config/dns-conf/cloudflare.ini
ports:
- 0.0.0.0:80:80
- 0.0.0.0:443:443
extra_hosts:
- host.docker.internal:host-gateway
networks:
- proxy
audiobookshelf:
image: ghcr.io/advplyr/audiobookshelf:latest
restart: unless-stopped
ports:
- 13378:80
volumes:
- /data/media/audiobooks:/audiobooks
# - </path/to/podcasts>:/podcasts
- /data/audiobookshelf/config:/config
- /data/audiobookshelf/metadata:/metadata
networks:
- proxy
docker-registry:
image: registry:2
container_name: docker-registry
restart: unless-stopped
ports:
- "5000:5000"
environment:
REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY: /data
REGISTRY_HTTP_TLS_CERTIFICATE: /etc/docker/certs.d/server.alexmickelson.guru/cert.pem
REGISTRY_HTTP_TLS_KEY: /etc/docker/certs.d/server.alexmickelson.guru/key.pem
volumes:
- /data/docker-registry:/data
- /data/swag/keys/letsencrypt/fullchain.pem:/etc/docker/certs.d/server.alexmickelson.guru/cert.pem
- /data/swag/keys/letsencrypt/privkey.pem:/etc/docker/certs.d/server.alexmickelson.guru/key.pem
depends_on:
- reverse-proxy
networks:
- proxy
# github-actions-exporter:
# # ports:
# # - 9999:9999
# image: ghcr.io/labbs/github-actions-exporter
# environment:
# - GITHUB_REPOS=alexmickelson/infrastructure
# - GITHUB_TOKEN=${MY_GITHUB_TOKEN}
# pihole:
# container_name: pihole
# image: pihole/pihole:latest
# # For DHCP it is recommended to remove these ports and instead add: network_mode: "host"
# ports:
# # - "0.0.0.0:53:53/tcp"
# # - "0.0.0.0:53:53/udp"
# # - "127.0.0.1:53:53/tcp"
# # - "127.0.0.1:53:53/udp"
# - "100.122.128.107:53:53/tcp"
# - "100.122.128.107:53:53/udp"
# # # - "67:67/udp" # Only required if you are using Pi-hole as your DHCP server
# - "8580:80"
# environment:
# TZ: 'America/Denver'
# # WEBPASSWORD: 'set a secure password here or it will be random'
# volumes:
# - '/data/pihole/etc-pihole:/etc/pihole'
# - '/data/pihole/etc-dnsmasq.d:/etc/dnsmasq.d'
# # https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
# cap_add:
# - NET_ADMIN # Required if you are using Pi-hole as your DHCP server, else not needed
# restart: unless-stopped
networks:
proxy:
external:
name: proxy

View File

@@ -0,0 +1,4 @@
FROM nextcloud:production
RUN usermod -u 1000 www-data
RUN groupmod -g 1000 www-data

202
home-server/nginx.conf Normal file
View File

@@ -0,0 +1,202 @@
# include mime.types;
# default_type application/octet-stream;
# log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
# access_log /var/log/nginx/access.log main;
# sendfile on;
# keepalive_timeout 65;
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name ha.alexmickelson.guru;
include /config/nginx/ssl.conf;
include /config/nginx/proxy.conf;
include /config/nginx/resolver.conf;
location / {
proxy_pass http://host.docker.internal:8123;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_http_version 1.1;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
}
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name next.alexmickelson.guru;
include /config/nginx/ssl.conf;
include /config/nginx/proxy.conf;
include /config/nginx/resolver.conf;
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
location /.well-known/carddav {
return 301 $scheme://$host/remote.php/dav;
}
location /.well-known/caldav {
return 301 $scheme://$host/remote.php/dav;
}
location / {
proxy_pass http://nextcloud:80;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $server_name;
proxy_set_header X-Forwarded-Port $server_port;
client_max_body_size 1G;
}
}
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name plex.alexmickelson.guru;
location / {
proxy_pass http://host.docker.internal:32400;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name jellyfin.alexmickelson.guru;
add_header X-Frame-Options "SAMEORIGIN";
add_header X-XSS-Protection "0"; # Do NOT enable. This is obsolete/dangerous
add_header X-Content-Type-Options "nosniff";
client_max_body_size 20M;
location / {
# Proxy main Jellyfin traffic
proxy_pass http://host.docker.internal:8096;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Protocol $scheme;
proxy_set_header X-Forwarded-Host $http_host;
proxy_buffering off;
}
location /socket {
# Proxy Jellyfin Websockets traffic
proxy_pass http://host.docker.internal:8096;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Protocol $scheme;
proxy_set_header X-Forwarded-Host $http_host;
}
}
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name audiobook.alexmickelson.guru;
location / {
proxy_pass http://audiobookshelf:80;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_http_version 1.1;
}
}
# server {
# listen 443 ssl;
# listen [::]:443 ssl;
# server_name octoprint.alexmickelson.guru;
# location / {
# proxy_pass http://octoprint:80;
# proxy_set_header Host $host;
# proxy_set_header X-Real-IP $remote_addr;
# }
# }
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name prometheus.alexmickelson.guru;
location / {
proxy_pass http://prometheus:9090;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name grafana.alexmickelson.guru;
location / {
proxy_pass http://grafana:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name photos.alexmickelson.guru;
# allow large file uploads
client_max_body_size 50000M;
# Set headers
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# enable websockets: http://nginx.org/en/docs/http/websocket.html
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_redirect off;
# set timeout
proxy_read_timeout 600s;
proxy_send_timeout 600s;
send_timeout 600s;
location / {
proxy_pass http://immich_server:2283;
}
}

View File

@@ -0,0 +1,340 @@
# Edit this configuration file to define what should be installed on
# your system. Help is available in the configuration.nix(5) man page
# and in the NixOS manual (accessible by running nixos-help).
{ config, pkgs, ... }:
{
imports =
[ # Include the results of the hardware scan.
./hardware-configuration.nix
<home-manager/nixos>
];
# Bootloader.
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "home-server"; # Define your hostname.
# networking.wireless.enable = true; # Enables wireless support via wpa_supplicant.
nix.settings.experimental-features = [ "nix-command" "flakes" ];
# Configure network proxy if necessary
# networking.proxy.default = "http://user:password@proxy:port/";
# networking.proxy.noProxy = "127.0.0.1,localhost,internal.domain";
# Enable networking
networking.networkmanager.enable = true;
networking.nat.enable = true;
boot.kernel.sysctl."net.ipv4.ip_forward" = 1;
boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1;
# Set your time zone.
time.timeZone = "America/Denver";
# Select internationalisation properties.
i18n.defaultLocale = "en_US.UTF-8";
i18n.extraLocaleSettings = {
LC_ADDRESS = "en_US.UTF-8";
LC_IDENTIFICATION = "en_US.UTF-8";
LC_MEASUREMENT = "en_US.UTF-8";
LC_MONETARY = "en_US.UTF-8";
LC_NAME = "en_US.UTF-8";
LC_NUMERIC = "en_US.UTF-8";
LC_PAPER = "en_US.UTF-8";
LC_TELEPHONE = "en_US.UTF-8";
LC_TIME = "en_US.UTF-8";
};
# Configure keymap in X11
services.xserver.xkb = {
layout = "us";
variant = "";
};
users.users.github = {
isNormalUser = true;
description = "github";
extraGroups = [ "docker" ];
shell = pkgs.fish;
};
users.users.alex = {
isNormalUser = true;
description = "alex";
extraGroups = [ "networkmanager" "wheel" "docker" "users" "libvirtd" "cdrom" ];
shell = pkgs.fish;
};
home-manager.users.alex = { pgks, ...}: {
home.stateVersion = "24.05";
home.packages = with pkgs; [
openldap
k9s
jwt-cli
thefuck
fish
kubectl
lazydocker
btop
nix-index
usbutils
makemkv
mbuffer
lzop
lsof
code-server
];
programs.fish = {
enable = true;
shellAliases = {
dang="fuck";
};
shellInit = ''
function commit
git add --all
git commit -m "$argv"
git push
end
# have ctrl+backspace delete previous word
bind \e\[3\;5~ kill-word
# have ctrl+delete delete following word
bind \b backward-kill-word
set -U fish_user_paths ~/.local/bin $fish_user_paths
#set -U fish_user_paths ~/.dotnet $fish_user_paths
#set -U fish_user_paths ~/.dotnet/tools $fish_user_paths
export VISUAL=vim
export EDITOR="$VISUAL"
export DOTNET_WATCH_RESTART_ON_RUDE_EDIT=1
export DOTNET_CLI_TELEMETRY_OPTOUT=1
set -x LIBVIRT_DEFAULT_URI qemu:///system
thefuck --alias | source
'';
};
home.file = {
".config/lazydocker/config.yml".text = ''
gui:
returnImmediately: true
'';
".config/k9s/config.yaml".text = ''
k9s:
liveViewAutoRefresh: true
screenDumpDir: /home/alexm/.local/state/k9s/screen-dumps
refreshRate: 2
maxConnRetry: 5
readOnly: false
noExitOnCtrlC: false
ui:
enableMouse: false
headless: false
logoless: false
crumbsless: false
reactive: false
noIcons: false
defaultsToFullScreen: false
skipLatestRevCheck: false
disablePodCounting: false
shellPod:
image: busybox:1.35.0
namespace: default
limits:
cpu: 100m
memory: 100Mi
imageScans:
enable: false
exclusions:
namespaces: []
labels: {}
logger:
tail: 1000
buffer: 5000
sinceSeconds: -1
textWrap: false
showTime: false
thresholds:
cpu:
critical: 90
warn: 70
memory:
critical: 90
warn: 70
namespace:
lockFavorites: false
'';
};
home.sessionVariables = {
EDITOR = "vim";
};
};
  # Let home-manager reuse the system's nixpkgs instance (honors allowUnfree below).
  home-manager.useGlobalPkgs = true;
  # Allow unfree packages
  nixpkgs.config.allowUnfree = true;
  # List packages installed in system profile. To search, run:
  # $ nix search wget
  environment.systemPackages = with pkgs; [
    vim
    wget
    curl
    docker
    fish
    git
    zfs
    gcc-unwrapped
    github-runner
    sanoid
    virtiofsd
    tmux
  ];
  services.openssh.enable = true;
  programs.fish.enable = true;
  virtualisation.docker.enable = true;
  #virtualisation.docker.extraOptions = "--dns 1.1.1.1 --dns 8.8.8.8 --dns 100.100.100.100";
  services.tailscale.enable = true;
  services.tailscale.extraSetFlags = [
    "--stateful-filtering=false"
  ];
  # envfs: populates /bin and /usr/bin on demand so scripts with hard-coded
  # shebangs work on NixOS.
  services.envfs.enable = true;
  # printing
  services.printing = {
    enable = true;
    # Brother printer drivers (a Brother HL-L2300D is configured elsewhere).
    drivers = [ pkgs.brgenml1lpr pkgs.brgenml1cupswrapper pkgs.brlaser];
    listenAddresses = [ "*:631" ];
    extraConf = ''
      ServerAlias server.alexmickelson.guru
    '';
    allowFrom = [ "all" ];
    browsing = true;
    defaultShared = true;
    openFirewall = true;
  };
  # Advertise shared printers/services on the LAN via mDNS.
  services.avahi = {
    enable = true;
    nssmdns4 = true;
    openFirewall = true;
    publish = {
      enable = true;
      userServices = true;
    };
  };
  # Runs the flake-defined FastAPI upload-to-print app (printing/server flake).
  systemd.services.printing-server = {
    description = "Web Printing Server Service";
    after = [ "network.target" ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      ExecStart = "${pkgs.nix}/bin/nix run .#fastapi-server";
      Restart = "always";
      WorkingDirectory = "/home/alex/infrastructure/home-server/printing/server";
      User = "alex";
    };
  };
  # virtualization stuff
  virtualisation.libvirtd.enable = true;
  # zfs stuff
  boot.supportedFilesystems = [ "zfs" ];
  boot.zfs.forceImportRoot = false;
  # hostId is required by the NixOS ZFS module.
  networking.hostId = "eafe9551";
  boot.zfs.extraPools = [ "data-ssd" "backup" ];
  # Snapshot management: source datasets are snapshotted automatically; the
  # backup datasets only prune (autosnap = false) — snapshots presumably
  # arrive there via syncoid replication (see the backup workflow).
  services.sanoid = {
    enable = true;
    templates.production = {
      hourly = 24;
      daily = 14;
      monthly = 5;
      autoprune = true;
      autosnap = true;
    };
    datasets."data-ssd/data" = {
      useTemplate = [ "production" ];
    };
    datasets."data-ssd/media" = {
      useTemplate = [ "production" ];
    };
    templates.backup = {
      hourly = 24;
      daily = 14;
      monthly = 5;
      autoprune = true;
      autosnap = false;
    };
    datasets."backup/data" = {
      useTemplate = [ "backup" ];
    };
    datasets."backup/media" = {
      useTemplate = [ "backup" ];
    };
  };
  # Self-hosted GitHub Actions runner for the infrastructure repo.
  services.github-runners = {
    infrastructure = {
      enable = true;
      name = "infrastructure-runner";
      user = "github";
      tokenFile = "/data/runner/github-infrastructure-token.txt";
      url = "https://github.com/alexmickelson/infrastructure";
      extraLabels = [ "home-server" ];
      #workDir = "/data/runner/infrastructure/";
      replace = true;
      # Relaxed systemd sandboxing so runner jobs can use /dev/zfs and write
      # the listed paths.
      serviceOverrides = {
        ReadWritePaths = [
          "/data/cloudflare/"
          "/data/runner/infrastructure"
          "/data/runner"
          "/home/github/infrastructure"
        ];
        PrivateDevices = false;
        DeviceAllow = "/dev/zfs rw";
        ProtectProc = false;
        ProtectSystem = false;
        PrivateMounts = false;
        PrivateUsers = false;
        #DynamicUser = true;
        #NoNewPrivileges = false;
        ProtectHome = false;
        #RuntimeDirectoryPreserve = "yes";
      };
      # Tools available to runner jobs (backup-zfs workflow uses syncoid deps).
      extraPackages = with pkgs; [
        docker
        git-secret
        zfs
        sanoid
        mbuffer
        lzop
      ];
    };
  };
  # Open ports in the firewall.
  # networking.firewall.allowedTCPPorts = [ ... ];
  # networking.firewall.allowedUDPPorts = [ ... ];
  # Or disable the firewall altogether.
  networking.firewall.enable = false;
  # networking.firewall.trustedInterfaces = [ "docker0" ];
  # This value determines the NixOS release from which the default
  # settings for stateful data, like file locations and database versions
  # on your system were taken. Its perfectly fine and recommended to leave
  # this value at the release version of the first install of this system.
  # Before changing this value read the documentation for this option
  # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
  system.stateVersion = "24.05"; # Did you read the comment?
}

View File

@@ -0,0 +1,198 @@
#
# Configuration file for the CUPS scheduler. See "man cupsd.conf" for a
# complete description of this file.
#
# Log general information in error_log - change "warn" to "debug"
# for troubleshooting...
LogLevel warn
PageLogFormat
# Specifies the maximum size of the log files before they are rotated. The value "0" disables log rotation.
MaxLogSize 0
# Default error policy for printers
ErrorPolicy retry-job
# Allow remote access
# important: listen on all interfaces for remote access
Listen *:631
# important: accept any hostname in requests (needed when proxied/aliased)
ServerAlias *
# Show shared printers on the local network.
Browsing Yes
BrowseLocalProtocols dnssd
# Default authentication type, when authentication is required...
DefaultAuthType Basic
DefaultEncryption IfRequested
# Web interface setting...
WebInterface Yes
# Timeout after cupsd exits if idle (applied only if cupsd runs on-demand - with -l)
IdleExitTimeout 60
# Restrict access to the server...
<Location />
Order allow,deny
Allow all
</Location>
# Restrict access to the admin pages...
<Location /admin>
Order allow,deny
Allow all
</Location>
# Restrict access to configuration files...
<Location /admin/conf>
AuthType Default
Require user @SYSTEM
Order allow,deny
Allow all
</Location>
# Restrict access to log files...
<Location /admin/log>
AuthType Default
Require user @SYSTEM
Order allow,deny
Allow all
</Location>
# Set the default printer/job policies...
<Policy default>
# Job/subscription privacy...
JobPrivateAccess default
JobPrivateValues default
SubscriptionPrivateAccess default
SubscriptionPrivateValues default
# Job-related operations must be done by the owner or an administrator...
<Limit Create-Job Print-Job Print-URI Validate-Job>
Order deny,allow
# Allow all # mine...
</Limit>
<Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
Require user @OWNER @SYSTEM
Order deny,allow
</Limit>
# All administration operations require an administrator to authenticate...
<Limit CUPS-Add-Modify-Printer CUPS-Delete-Printer CUPS-Add-Modify-Class CUPS-Delete-Class CUPS-Set-Default CUPS-Get-Devices>
AuthType Default
Require user @SYSTEM
Order deny,allow
</Limit>
# All printer operations require a printer operator to authenticate...
<Limit Pause-Printer Resume-Printer Enable-Printer Disable-Printer Pause-Printer-After-Current-Job Hold-New-Jobs Release-Held-New-Jobs Deactivate-Printer Activate-Printer Restart-Printer Shutdown-Printer Startup-Printer Promote-Job Schedule-Job-After Cancel-Jobs CUPS-Accept-Jobs CUPS-Reject-Jobs>
AuthType Default
Require user @SYSTEM
Order deny,allow
</Limit>
# Only the owner or an administrator can cancel or authenticate a job...
<Limit Cancel-Job CUPS-Authenticate-Job>
Require user @OWNER @SYSTEM
Order deny,allow
</Limit>
<Limit All>
Order deny,allow
# Allow all # mine
</Limit>
</Policy>
# Set the authenticated printer/job policies...
<Policy authenticated>
# Job/subscription privacy...
JobPrivateAccess default
JobPrivateValues default
SubscriptionPrivateAccess default
SubscriptionPrivateValues default
# Job-related operations must be done by the owner or an administrator...
<Limit Create-Job Print-Job Print-URI Validate-Job>
AuthType Default
Order deny,allow
</Limit>
<Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
AuthType Default
Require user @OWNER @SYSTEM
Order deny,allow
</Limit>
# All administration operations require an administrator to authenticate...
<Limit CUPS-Add-Modify-Printer CUPS-Delete-Printer CUPS-Add-Modify-Class CUPS-Delete-Class CUPS-Set-Default>
AuthType Default
Require user @SYSTEM
Order deny,allow
</Limit>
# All printer operations require a printer operator to authenticate...
<Limit Pause-Printer Resume-Printer Enable-Printer Disable-Printer Pause-Printer-After-Current-Job Hold-New-Jobs Release-Held-New-Jobs Deactivate-Printer Activate-Printer Restart-Printer Shutdown-Printer Startup-Printer Promote-Job Schedule-Job-After Cancel-Jobs CUPS-Accept-Jobs CUPS-Reject-Jobs>
AuthType Default
Require user @SYSTEM
Order deny,allow
</Limit>
# Only the owner or an administrator can cancel or authenticate a job...
<Limit Cancel-Job CUPS-Authenticate-Job>
AuthType Default
Require user @OWNER @SYSTEM
Order deny,allow
</Limit>
<Limit All>
Order deny,allow
</Limit>
</Policy>
# Set the kerberized printer/job policies...
<Policy kerberos>
# Job/subscription privacy...
JobPrivateAccess default
JobPrivateValues default
SubscriptionPrivateAccess default
SubscriptionPrivateValues default
# Job-related operations must be done by the owner or an administrator...
<Limit Create-Job Print-Job Print-URI Validate-Job>
AuthType Negotiate
Order deny,allow
</Limit>
<Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job Cancel-My-Jobs Close-Job CUPS-Move-Job CUPS-Get-Document>
AuthType Negotiate
Require user @OWNER @SYSTEM
Order deny,allow
</Limit>
# All administration operations require an administrator to authenticate...
<Limit CUPS-Add-Modify-Printer CUPS-Delete-Printer CUPS-Add-Modify-Class CUPS-Delete-Class CUPS-Set-Default>
AuthType Default
Require user @SYSTEM
Order deny,allow
</Limit>
# All printer operations require a printer operator to authenticate...
<Limit Pause-Printer Resume-Printer Enable-Printer Disable-Printer Pause-Printer-After-Current-Job Hold-New-Jobs Release-Held-New-Jobs Deactivate-Printer Activate-Printer Restart-Printer Shutdown-Printer Startup-Printer Promote-Job Schedule-Job-After Cancel-Jobs CUPS-Accept-Jobs CUPS-Reject-Jobs>
AuthType Default
Require user @SYSTEM
Order deny,allow
</Limit>
# Only the owner or an administrator can cancel or authenticate a job...
<Limit Cancel-Job CUPS-Authenticate-Job>
AuthType Negotiate
Require user @OWNER @SYSTEM
Order deny,allow
</Limit>
<Limit All>
Order deny,allow
</Limit>
</Policy>

View File

@@ -0,0 +1,19 @@
version: "3.8"
services:
cups:
image: olbat/cupsd:stable-2024-01-19 # admin user/password: print/print
container_name: cups
privileged: true
volumes:
- "/dev/bus/usb:/dev/bus/usb" # keep this under volumes, not devices
- "/run/dbus:/run/dbus"
- "./cupsd.conf:/etc/cups/cupsd.conf:ro"
#- "./data/printers.conf:/etc/cups/printers.conf:ro"
ports:
- "631:631/tcp" # CUPS
restart: "always"
cups-webpage:
buid: server
ports:
- 6311:6311

View File

@@ -0,0 +1,24 @@
# Printer configuration file for CUPS v2.4.2
# Written by cupsd
# DO NOT EDIT THIS FILE WHEN CUPSD IS RUNNING
NextPrinterId 2
<Printer Brother_HL-L2300D_series>
PrinterId 1
UUID urn:uuid:8ac038d7-8659-3de9-57d0-0a7f97b956cc
Info Brother HL-L2300D series
Location
MakeModel Brother HL-L2300D series, using brlaser v6
DeviceURI usb://Brother/HL-L2300D%20series?serial=U63878J0N375067
State Idle
StateTime 1714021523
ConfigTime 1714021523
Type 4180
Accepting Yes
Shared Yes
JobSheets none none
QuotaPeriod 0
PageLimit 0
KLimit 0
OpPolicy default
ErrorPolicy retry-job
</Printer>

View File

@@ -0,0 +1,13 @@
## what I am running on office server
```bash
sudo apt install python3-pip cups python3-cups hplip
pip install pycups fastapi "uvicorn[standard]" python-multipart
sudo hp-setup -i # manually configure printer...
python -m uvicorn print_api:app --reload --host 0.0.0.0
```
url: http://100.103.75.97:8000/

View File

@@ -0,0 +1,11 @@
FROM python:3

# CUPS headers/libs are needed to build and run pycups; clean apt lists and
# skip pip's cache to keep the image smaller.
RUN apt-get update \
    && apt-get install -y libcups2-dev python3-pip cups python3-cups gcc \
    && rm -rf /var/lib/apt/lists/* \
    && pip install --no-cache-dir pycups fastapi "uvicorn[standard]" python-multipart

WORKDIR /app
COPY ./src .

# --reload is a development convenience; the app serves on 6311 (see compose).
CMD python -m uvicorn print_api:app --reload --host 0.0.0.0 --port 6311

27
home-server/printing/server/flake.lock generated Normal file
View File

@@ -0,0 +1,27 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1725634671,
"narHash": "sha256-v3rIhsJBOMLR8e/RNWxr828tB+WywYIoajrZKFM+0Gg=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "574d1eac1c200690e27b8eb4e24887f8df7ac27c",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

View File

@@ -0,0 +1,38 @@
{
  description = "Printer Server Flake";

  inputs = {
    nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-unstable";
  };

  outputs = { self, nixpkgs, ... }:
    let
      system = "x86_64-linux";
      pkgs = import nixpkgs { inherit system; };
      # One coherent Python 3.12 environment. Fix: the previous code called
      # python3.withPackages but ignored its `python-pkgs` argument and pulled
      # from top-level pkgs.python312Packages.*, mixing packages built for a
      # potentially different interpreter. Select from the interpreter's own
      # package set instead.
      myPython = pkgs.python312.withPackages (python-pkgs: with python-pkgs; [
        fastapi
        fastapi-cli
        pycups
        python-multipart
        uvicorn
      ]);
    in
    {
      # Dev shell exposes the same interpreter + packages the server runs with.
      devShells.${system}.default = pkgs.mkShell {
        packages = [ myPython ];
      };
      packages.${system} = rec {
        # Wrapper script launching the FastAPI app from the flake source tree.
        fastapi-server = pkgs.writeShellScriptBin "start-server" ''
          ${myPython}/bin/fastapi run ${self}/src/print_api.py
        '';
        default = fastapi-server;
      };
    };
}

View File

@@ -0,0 +1,149 @@
<!DOCTYPE html>
<html>
<head>
<title>Document Upload</title>
<style>
body {
font-family: Arial, sans-serif;
margin: 0;
padding: 0;
height: 100vh;
background: #09373e;
color: #85bfc8;
}
#form-container {
display: flex;
justify-content: center;
align-items: center;
}
form {
border: 1px solid #ccc;
padding: 20px;
background: #05252a;
box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
}
h2 {
margin-bottom: 20px;
text-align: center;
}
input[type="file"] {
display: none; /* Hide the file input */
}
input[type="submit"] {
padding: 10px 20px;
border: none;
background: #007bff;
color: #fff;
font-size: 16px;
cursor: pointer;
}
input[type="submit"]:hover {
background: #0056b3;
}
#dropzone {
border: 2px dashed #ccc;
padding: 20px;
width: 300px;
text-align: center;
color: #ccc;
cursor: pointer;
}
#dropzone.dragover {
border-color: #000;
color: #000;
}
</style>
</head>
<body>
<h2>Upload Document</h2>
<br />
<section id="form-container">
<form
id="printForm"
action="/print/"
method="post"
enctype="multipart/form-data"
>
<div id="dropzone">Drop file to upload or click to select</div>
<input type="file" id="fileInput" />
<br />
<input type="submit" value="Upload Document" name="submit" />
</form>
</section>
<script>
  // File currently staged for upload (set by drag-drop or the file picker).
  var stagedFile = undefined;
  const formElement = document.getElementById("printForm");
  const fileInputElement = document.getElementById("fileInput");

  // Submit the staged file to /print/ via fetch (no page reload).
  formElement.addEventListener("submit", async (e) => {
    e.preventDefault();
    // Fix: without this guard, submitting with no staged file posted the
    // literal string "undefined" as the form field.
    if (!stagedFile) {
      alert("Choose a file to upload first");
      return;
    }
    const formData = new FormData();
    formData.append("file", stagedFile);
    const response = await fetch("/print/", {
      method: "POST",
      body: formData,
    });
    const data = await response.json();
    console.log(data);
  });

  // Drag-and-drop wiring for the dropzone.
  document
    .getElementById("dropzone")
    .addEventListener("dragover", function (event) {
      event.preventDefault(); // Prevent default behavior (Prevent file from being opened)
      event.stopPropagation();
      event.target.classList.add("dragover"); // Optional: add a style change
    });
  document
    .getElementById("dropzone")
    .addEventListener("dragleave", function (event) {
      event.preventDefault();
      event.stopPropagation();
      event.target.classList.remove("dragover"); // Optional: revert style change
    });
  document
    .getElementById("dropzone")
    .addEventListener("drop", function (event) {
      event.preventDefault();
      event.stopPropagation();
      event.target.classList.remove("dragover"); // Optional: revert style change
      // Process dropped files
      var files = event.dataTransfer.files;
      handleFiles(files);
    });
  // Clicking the dropzone opens the hidden native file picker.
  document
    .getElementById("dropzone")
    .addEventListener("click", function () {
      fileInputElement.click();
    });
  fileInputElement.addEventListener("change", function (event) {
    const files = event.target.files;
    handleFiles(files);
  });

  // Stage the first selected/dropped file and refresh the dropzone label.
  const handleFiles = (files) => {
    stagedFile = files[0];
    renderStagedFile();
  };
  const renderStagedFile = () => {
    const element = document.getElementById("dropzone");
    if (!stagedFile) {
      element.textContent = "Drop file to upload or click to select";
    } else {
      element.textContent = `FILE: ${stagedFile.name}`;
    }
  };
</script>
</body>
</html>

View File

@@ -0,0 +1,69 @@
"""Minimal FastAPI app: accepts document uploads and prints them via CUPS."""
import os
from pprint import pprint
import tempfile
from fastapi import FastAPI, File, UploadFile, Request
import cups
from fastapi.responses import HTMLResponse

# Single application instance, served by `fastapi run` / uvicorn.
app = FastAPI()
# @app.post("/print/")
# async def print_document(file: UploadFile = File(...)):
# temp_file = tempfile.NamedTemporaryFile(delete=False)
# temp_file.write(await file.read())
# temp_file.close()
# conn = cups.Connection(host="server.alexmickelson.guru")
# printers = conn.getPrinters()
# print(file.filename)
# print(temp_file.name)
# pprint(printers)
# for printer in printers:
# print(printer, printers[printer]["device-uri"])
# default_printer = list(printers.keys())[0]
# job_id = conn.printFile(default_printer, temp_file.name, f"FastAPI Print Job for {temp_file.name}", {})
# os.unlink(temp_file.name)
# return {"job_id": job_id, "file_name": file.filename}
@app.post("/print/")
async def print_document(file: UploadFile = File(...)):
    """Accept an uploaded document and print it on the first CUPS printer.

    Returns a dict with the CUPS ``job_id`` and the original ``file_name``.
    Raises RuntimeError when no printers are configured.
    """
    # pycups prints from a filesystem path, so persist the upload first.
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        temp_file.write(await file.read())
        temp_file.close()

        # Connect to the CUPS server on the host (default/localhost connection).
        conn = cups.Connection()
        printers = conn.getPrinters()
        print(file.filename)
        print(temp_file.name)
        pprint(printers)
        for printer in printers:
            print(printer, printers[printer]["device-uri"])

        if not printers:
            # Fail with a clear message instead of an opaque IndexError.
            raise RuntimeError("No printers are configured on the CUPS server")

        # Use the default printer (first one in the list).
        default_printer = list(printers.keys())[0]
        job_id = conn.printFile(
            default_printer,
            temp_file.name,
            f"FastAPI Print Job for {temp_file.name}",
            {},
        )
        return {"job_id": job_id, "file_name": file.filename}
    finally:
        # Fix: previously the temp file leaked if printing raised; always
        # remove it.
        os.unlink(temp_file.name)
@app.get("/", response_class=HTMLResponse)
async def read_root(request: Request):
    """Serve the upload page.

    Fix: resolve index.html relative to this module (it lives alongside
    print_api.py in src/) instead of the process's current working directory,
    so the app works regardless of where it is launched from.
    """
    html_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "index.html")
    with open(html_path, "r") as f:
        html_content = f.read()
    return HTMLResponse(content=html_content, status_code=200)

View File

@@ -0,0 +1,52 @@
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
- job_name: "prometheus"
static_configs:
- targets: ["localhost:9090"]
- job_name: "node"
static_configs:
- targets:
- 100.119.183.105:9100 # desktop
- 100.122.128.107:9100 # home server
- 100.64.229.40:9100 # linode
- job_name: "docker"
static_configs:
- targets:
# - 100.119.183.105:9323 # desktop
- 100.122.128.107:9323 # home server
- 100.64.229.40:9323 # linode
- job_name: ups
static_configs:
- targets:
- 100.122.128.107:9162 # home server
- job_name: homeassistant
scrape_interval: 60s
metrics_path: /api/prometheus
      authorization:
        # NOTE(review): '%{...}' is not native Prometheus syntax — presumably
        # substituted by an external templating step; the spelling
        # "HOMEASSITANT" (sic, missing an S) must match that step. Confirm.
        credentials: '%{HOMEASSITANT_TOKEN}'
scheme: https
static_configs:
- targets: ['ha.alexmickelson.guru']

80
immich/docker-compose.yml Normal file
View File

@@ -0,0 +1,80 @@
name: immich
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/usr/src/app/upload
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- 0.0.0.0:2283:2283
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
networks:
- proxy
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, openvino] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
- .env
restart: always
healthcheck:
disable: false
networks:
- proxy
redis:
container_name: immich_redis
image: docker.io/redis:6.2-alpine@sha256:2d1463258f2764328496376f5d965f20c6a67f66ea2b06dc42af351f75248792
healthcheck:
test: redis-cli ping || exit 1
restart: always
networks:
- proxy
database:
container_name: immich_postgres
image: docker.io/tensorchord/pgvecto-rs:pg14-v0.2.0@sha256:90724186f0a3517cf6914295b5ab410db9ce23190a2d9d0b9dd6463e3fa298f0
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
healthcheck:
test: pg_isready --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' || exit 1; Chksum="$$(psql --dbname='${DB_DATABASE_NAME}' --username='${DB_USERNAME}' --tuples-only --no-align --command='SELECT COALESCE(SUM(checksum_failures), 0) FROM pg_stat_database')"; echo "checksum failure count is $$Chksum"; [ "$$Chksum" = '0' ] || exit 1
interval: 5m
start_interval: 30s
start_period: 5m
command: ["postgres", "-c", "shared_preload_libraries=vectors.so", "-c", 'search_path="$$user", public, vectors', "-c", "logging_collector=on", "-c", "max_wal_size=2GB", "-c", "shared_buffers=512MB", "-c", "wal_compression=on"]
restart: always
networks:
- proxy
volumes:
  model-cache:

networks:
  # Pre-existing reverse-proxy network created outside this compose file.
  # Fix: `external: {name: ...}` is the deprecated legacy form; the compose
  # spec uses a top-level `name` with `external: true`.
  proxy:
    name: proxy
    external: true

21
immich/immich-env Normal file
View File

@@ -0,0 +1,21 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# The location where your uploaded files are stored
UPLOAD_LOCATION=/data/immich/library
# The location where your database files are stored
DB_DATA_LOCATION=/data/immich/postgres
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
# TZ=Etc/UTC
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=release
# Connection secret for postgres. You should change it to a random password
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
DB_PASSWORD=postgres
# The values below this line do not need to be changed
###################################################################################
DB_USERNAME=postgres
DB_DATABASE_NAME=immich

3
jellyfin/.dockerignore Normal file
View File

@@ -0,0 +1,3 @@
__pycache__/
Dockerfile
*.http

9
jellyfin/Dockerfile Normal file
View File

@@ -0,0 +1,9 @@
FROM python:3.10

# --no-cache-dir avoids baking pip's download cache into the image.
RUN pip install --no-cache-dir pydantic requests python-dotenv

COPY jellyfin /app/jellyfin
WORKDIR /app

# The script to run is supplied as the container command, e.g. `jellyfin/main.py`.
ENTRYPOINT [ "python" ]

View File

@@ -0,0 +1,85 @@
"""One-off script: fetch every song from a Jellyfin music library and create
an "All Songs" playlist containing them.

Uses hard-coded server URL, user id, and library ParentId; the API key comes
from the JELLYFIN_TOKEN environment variable (loaded from .env).
Runs its network calls at import time.
"""
import os
import requests
import json
from dotenv import load_dotenv

load_dotenv()

# Set your Jellyfin server address and API key here
server_address = "https://jellyfin.alexmickelson.guru"
api_key = os.environ["JELLYFIN_TOKEN"]

# Set the API endpoints to get all songs and create a playlist
songs_endpoint = (
    "/Users/b30951b36b37400498dbfd182d49a42e/Items"
    + "?SortBy=DateCreated,SortName"
    + "&SortOrder=Descending"
    + "&IncludeItemTypes=Audio"
    + "&Recursive=true"
    + "&Fields=AudioInfo,ParentId"
    + "&StartIndex=0"
    + "&ImageTypeLimit=1"
    + "&EnableImageTypes=Primary"
    + "&Limit=100"
    + "&ParentId=7e64e319657a9516ec78490da03edccb"
)
# NOTE: the detailed endpoint built above is immediately discarded here; the
# query options are passed via `params` instead.
songs_endpoint = "/Users/b30951b36b37400498dbfd182d49a42e/Items"

# Set the parameters for the API request to get all songs
params = {
    "api_key": api_key,
    "SortBy": "SortName",
    "ParentId": "7e64e319657a9516ec78490da03edccb",
}

# Make the API request to get all songs
response = requests.get(server_address + songs_endpoint, params=params)

# Parse the JSON response
data = json.loads(response.text)

# # Loop through the songs and print their names
for song in data["Items"]:
    print(song["Name"], song["Id"])

# Create a list of all song IDs
song_ids = [song["Id"] for song in data["Items"]]
ids = ",".join(song_ids)
# print(ids)

# Request body for playlist creation.
playlist_data = {
    "Name": "All Songs",
    "UserId": "b30951b36b37400498dbfd182d49a42e",
    "Ids": ids,
    "MediaType": "Audio",
}
headers = {"Content-type": "application/json"}
params = {"api_key": api_key}
playlist_endpoint = "/Playlists"
# https://jellyfin.alexmickelson.guru/Playlists?Name=test playlist&Ids=f78ddd409c5ebb2405f5477d15e8e23c&userId=b30951b36b37400498dbfd182d49a42e
response = requests.post(
    server_address + playlist_endpoint,
    headers=headers,
    params=params,
    data=json.dumps(playlist_data),
)
# print(response.text)

# Will raise KeyError if the creation request failed.
playlist_id = response.json()["Id"]

# add_song_url = f"/Playlists/{playlist_id}/Items"
# params = {"api_key": api_key}
# body = {
#     "Ids": ids,
#     "UserId": "b30951b36b37400498dbfd182d49a42e",
#     "MediaType": "Audio",
# }
# response = requests.post(
#     server_address + add_song_url, headers=headers, params=params, json=body
# )
# print(response.text)
# print(response.status_code)
# print(response.headers)
# jellyfin_service.logout()

50
jellyfin/jellyfin.http Normal file
View File

@@ -0,0 +1,50 @@
# https://jellyfin.alexmickelson.guru/api-docs/swagger/index.html
# https://gist.github.com/nielsvanvelzen/ea047d9028f676185832e51ffaf12a6f
GET https://jellyfin.alexmickelson.guru/Users/b30951b36b37400498dbfd182d49a42e/Items
?SortBy=SortName&SortOrder=Ascending
&IncludeItemTypes=Playlist
&Recursive=true
&Fields=PrimaryImageAspectRatio,SortName,CanDelete
&StartIndex=0
&api_key={{$dotenv JELLYFIN_TOKEN}}
###
GET https://jellyfin.alexmickelson.guru/Users/b30951b36b37400498dbfd182d49a42e/Items
?IncludeItemTypes=Playlist
&Recursive=true
&ParentId=7e64e319657a9516ec78490da03edccb
&api_key={{$dotenv JELLYFIN_TOKEN}}
###
# get items from unindexed playlist
GET https://jellyfin.alexmickelson.guru/Playlists/2f191b23f0a49e70d6f90e9d82e408c6/Items
?Fields=PrimaryImageAspectRatio
&EnableImageTypes=Primary,Backdrop,Banner,Thumb
&UserId=b30951b36b37400498dbfd182d49a42e
&api_key={{$dotenv JELLYFIN_TOKEN}}
### remove item from unindexed
DELETE https://jellyfin.alexmickelson.guru/Playlists/2f191b23f0a49e70d6f90e9d82e408c6/Items
?EntryIds=186f4d63492b405b97865ff9a99ef3ab
&userId=b30951b36b37400498dbfd182d49a42e
Authorization: MediaBrowser Client="scriptclient", Device="script", DeviceId="asdfasdfasdfasdfasdf", Version="1.0.0", Token="f313e2045fc34ce3ac510ce9ba2be1fc"
### get all playlists
GET https://jellyfin.alexmickelson.guru/Users/b30951b36b37400498dbfd182d49a42e/Items
?api_key={{$dotenv JELLYFIN_TOKEN}}
&ParentId=29772619d609592f4cdb3fc34a6ec97d
### get token by user/pass
POST https://jellyfin.alexmickelson.guru/Users/AuthenticateByName
Content-Type: application/json
Authorization: MediaBrowser Client="scriptclient", Device="script", DeviceId="asdfasdfasdfasdfasdf", Version="1.0.0", Token=""
{
"Username": "alex",
"Pw": "{{$dotenv JELLYFIN_PASSWORD}}"
}
###
POST https://jellyfin.alexmickelson.guru/Sessions/Logout
Authorization: MediaBrowser Client="scriptclient", Device="script", DeviceId="asdfasdfasdfasdfasdf", Version="1.0.0", Token="c704c71900cc41d2a454a4f3b5132778"

View File

@@ -0,0 +1,163 @@
"""Helpers for managing songs and playlists on a Jellyfin server."""
from functools import lru_cache
import os
from pprint import pprint
from typing import List, Optional
from pydantic import BaseModel, Field
import requests
from dotenv import load_dotenv

load_dotenv()

# Server and credentials; JELLYFIN_USER / JELLYFIN_PASSWORD come from .env.
server_address = "https://jellyfin.alexmickelson.guru"
# api_key = os.environ["JELLYFIN_TOKEN"]
username = os.environ["JELLYFIN_USER"]
password = os.environ["JELLYFIN_PASSWORD"]

# Hard-coded ids for this specific server instance — NOTE(review): verify
# these still match the live server before relying on them.
alex_user_id = "b30951b36b37400498dbfd182d49a42e"
all_songs_playlist_id = "2e176c02e7cc7f460c40bb1510723510"
unindexed_playlist_id = "2f191b23f0a49e70d6f90e9d82e408c6"
class Song(BaseModel):
    # A library audio item as returned by /Users/{id}/Items.
    Id: str
    Name: str
    Album: Optional[str] = Field(default=None)
    Artists: Optional[List[str]] = Field(default=None)


class PlaylistSong(BaseModel):
    # A song entry inside a playlist. PlaylistItemId identifies the playlist
    # entry itself (passed as EntryIds when removing), distinct from the
    # song's library Id.
    Id: str
    PlaylistItemId: str
    Name: str
    Album: Optional[str] = Field(default=None)
    Artists: Optional[List[str]] = Field(default=None)


class Playlist(BaseModel):
    # A playlist together with its resolved song entries.
    Id: str
    Name: str
    Songs: List[PlaylistSong]
@lru_cache(maxsize=10)
def get_token():
    """Authenticate with username/password and return a Jellyfin access token.

    The result is cached so repeated calls reuse one session.
    Raises RuntimeError with the server's reply when authentication fails
    (previously a failed login surfaced as an opaque KeyError).
    """
    auth_endpoint = f"{server_address}/Users/AuthenticateByName"
    body = {"Username": username, "Pw": password}
    response = requests.post(
        auth_endpoint,
        json=body,
        headers={
            "Content-Type": "application/json",
            "Authorization": 'MediaBrowser Client="scriptclient", Device="script", DeviceId="testscriptasdfasdfasdf", Version="1.0.0", Token=""',
        },
    )
    if not response.ok:
        raise RuntimeError(
            f"Jellyfin authentication failed ({response.status_code}): {response.text}"
        )
    return response.json()["AccessToken"]
def get_auth_headers():
    """Build the MediaBrowser Authorization header for authenticated requests."""
    auth_value = (
        'MediaBrowser Client="scriptclient", Device="script", '
        f'DeviceId="asdfasdfasdfasdfasdf", Version="1.0.0", Token="{get_token()}"'
    )
    return {"Authorization": auth_value}
def get_all_songs():
    """Fetch every audio item in the music library, sorted by name.

    Returns a list of Song models. Raises on a failed request — matching the
    error handling in get_songs_in_playlist/get_all_playlists — instead of
    printing and then crashing while parsing the error body.
    """
    songs_endpoint = (
        f"{server_address}/Users/{alex_user_id}/Items"
        + "?IncludeItemTypes=Audio"
        + "&Recursive=true"
        # ParentId of the music library folder — confirm against the server.
        + "&ParentId=7e64e319657a9516ec78490da03edccb"
    )
    params = {
        "SortBy": "SortName",
    }
    response = requests.get(songs_endpoint, params=params, headers=get_auth_headers())
    if not response.ok:
        print(response.status_code)
        print(response.text)
        raise Exception("Error getting all songs")
    data = response.json()
    return [Song(**song) for song in data["Items"]]
def add_song_to_playlist(song_id: str, playlist_id: str):
    """Append one song to a playlist; logs and continues on failure (best effort)."""
    endpoint = f"{server_address}/Playlists/{playlist_id}/Items"
    query = {"ids": song_id, "userId": alex_user_id}
    result = requests.post(endpoint, params=query, headers=get_auth_headers())
    if result.ok:
        return
    print(result.status_code)
    print(result.text)
def remove_song_from_playlist(song_playlist_id: str, playlist_id: str):
    """Remove a single entry from a playlist.

    Args:
        song_playlist_id: the entry's PlaylistItemId (NOT the song's Id).
        playlist_id: the playlist to remove it from.

    Best-effort: failures are logged rather than raised, matching
    add_song_to_playlist.
    """
    url = f"{server_address}/Playlists/{playlist_id}/Items"
    params = {
        "EntryIds": song_playlist_id,
        "userId": alex_user_id,
    }
    response = requests.delete(url, params=params, headers=get_auth_headers())
    if not response.ok:
        print(response.status_code)
        print(response.text)
        print(response.url)
        print(song_playlist_id)
        print(playlist_id)
        print(response.content)
        # Deliberately NOT dumping response.request.headers here: the
        # Authorization header carries the MediaBrowser access token and
        # must not be written to logs.
def get_songs_in_playlist(playlist_id: str):
    """Return a playlist's entries as PlaylistSong objects.

    Raises an Exception (after printing the server response) when the
    request fails.
    """
    endpoint = f"{server_address}/Playlists/{playlist_id}/Items"
    response = requests.get(
        endpoint,
        params={"userId": alex_user_id},
        headers=get_auth_headers(),
    )
    if response.ok:
        items = response.json()["Items"]
        return [PlaylistSong.parse_obj(item) for item in items]
    print(response.status_code)
    print(response.text)
    raise Exception(f"Error getting songs in playlist: {playlist_id}")
def get_all_playlists():
    """Fetch every playlist (with its resolved songs) from the playlists folder.

    Note: issues one extra request per playlist via get_songs_in_playlist.

    Raises:
        Exception: if the playlist listing request fails.
    """
    endpoint = f"{server_address}/Users/{alex_user_id}/Items"
    query = {
        "IncludeItemTypes": "Playlist",
        "Recursive": True,
        # Id of the playlists folder on this server.
        "ParentId": "29772619d609592f4cdb3fc34a6ec97d",
    }
    response = requests.get(endpoint, params=query, headers=get_auth_headers())
    if not response.ok:
        print(response.status_code)
        print(response.text)
        raise Exception("Error getting all playlists")
    items = response.json()["Items"]
    print("got all playlists", len(items))
    return [
        Playlist(
            Id=item["Id"],
            Name=item["Name"],
            Songs=get_songs_in_playlist(item["Id"]),
        )
        for item in items
    ]
def logout():
    """End the current Jellyfin session and invalidate the cached token."""
    url = f"{server_address}/Sessions/Logout"
    response = requests.post(url, headers=get_auth_headers())
    print("ending session: " + str(response.status_code))
    # The token memoized by get_token()'s lru_cache is now invalid on the
    # server; clear the cache so any later call re-authenticates instead of
    # reusing the dead token.
    get_token.cache_clear()

View File

@@ -0,0 +1,20 @@
from jellyfin import jellyfin_service

if __name__ == "__main__":
    # Mirror the whole library into the "all songs" playlist, skipping songs
    # that are already present.
    all_songs = jellyfin_service.get_all_songs()
    print("total songs", len(all_songs))
    playlist_songs = jellyfin_service.get_songs_in_playlist(
        jellyfin_service.all_songs_playlist_id
    )
    print("songs already in playlist", len(playlist_songs))
    # Set instead of list: O(1) membership checks instead of scanning the
    # playlist for every song in the library.
    playlist_ids = {s.Id for s in playlist_songs}
    for song in all_songs:
        if song.Id not in playlist_ids:
            print(f"adding song {song.Name} to playlist")
            jellyfin_service.add_song_to_playlist(
                song.Id, jellyfin_service.all_songs_playlist_id
            )
    jellyfin_service.logout()

View File

@@ -0,0 +1,34 @@
from pprint import pprint

from jellyfin import jellyfin_service

if __name__ == "__main__":
    # Keep the "unindexed" playlist in sync: it should contain exactly the
    # songs that appear in no curated playlist.
    all_songs = jellyfin_service.get_all_songs()
    playlists = jellyfin_service.get_all_playlists()

    # Song ids present in at least one curated playlist. The aggregate
    # "all songs" playlist and the unindexed playlist itself don't count.
    # Sets give O(1) membership checks in the loops below.
    song_ids_in_playlist = {
        song.Id
        for playlist in playlists
        for song in playlist.Songs
        if playlist.Id != jellyfin_service.unindexed_playlist_id
        and playlist.Id != jellyfin_service.all_songs_playlist_id
    }

    unindexed_playlist = next(
        p for p in playlists if p.Id == jellyfin_service.unindexed_playlist_id
    )
    unindexed_song_ids = {song.Id for song in unindexed_playlist.Songs}

    # Add songs that are in no curated playlist and not yet tracked.
    for song in all_songs:
        if song.Id not in song_ids_in_playlist and song.Id not in unindexed_song_ids:
            print(f"adding {song.Name} to unindexed playlist")
            jellyfin_service.add_song_to_playlist(
                song.Id, jellyfin_service.unindexed_playlist_id
            )

    # Drop songs that have since been added to a curated playlist.
    for song in unindexed_playlist.Songs:
        if song.Id in song_ids_in_playlist:
            print(f"removing {song.Name} from unindexed playlist")
            jellyfin_service.remove_song_from_playlist(
                song.PlaylistItemId, jellyfin_service.unindexed_playlist_id
            )
    jellyfin_service.logout()

View File

@@ -0,0 +1,6 @@
# sources
https://ranchermanager.docs.rancher.com/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster
install k3s for manager cluster: https://docs.k3s.io/quick-start

View File

@@ -0,0 +1,52 @@
version: "3.8"
services:
  swag:
    image: ghcr.io/linuxserver/swag
    container_name: swag-proxy
    cap_add:
      - NET_ADMIN
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/Denver
      - URL=alexmickelson.guru
      - SUBDOMAINS=wildcard
      - VALIDATION=dns
      - DNSPLUGIN=cloudflare
    volumes:
      - ./nginx/default.conf:/config/nginx/site-confs/default
      - /data/swag:/config
      - /var/www/html:/var/www/html:ro
    ports:
      # Port mappings quoted per Compose best practice so YAML never
      # misreads host:container pairs (1.1 sexagesimal integers).
      - "443:443"
      - "80:80" # optional
    restart: unless-stopped
    networks:
      linode-web:
      proxy:
  pihole:
    container_name: pihole
    image: pihole/pihole
    ports:
      - "0.0.0.0:53:53/tcp"
      - "0.0.0.0:53:53/udp"
      # - "67:67/udp" # dhcp
      # - "80:80/tcp"
    environment:
      TZ: 'America/Denver'
      VIRTUAL_HOST: alexmickelson.guru
      # NOTE(review): plaintext admin password committed to the repo --
      # consider moving it to an env_file kept out of version control.
      WEBPASSWORD: chaos-concise-nickname
    volumes:
      - /data/pihole/etc-pihole/:/etc/pihole/
      - /data/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/
    cap_add:
      - NET_ADMIN
    restart: unless-stopped
    networks:
      linode-web:
networks:
  linode-web:
  proxy:
    external: true

View File

@@ -0,0 +1,58 @@
# Custom page shown when an upstream container is unreachable.
error_page 502 /502.html;
# Redirect all plain-HTTP traffic to HTTPS.
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
# Main HTTPS vhost: static site from /var/www/html, plus the Pi-hole admin UI
# proxied under /admin/.
server {
listen 443 ssl http2 default_server;
listen [::]:443 ssl http2 default_server;
include /config/nginx/ssl.conf;
include /config/nginx/proxy.conf;
include /config/nginx/resolver.conf;
root /var/www/html;
index index.html index.htm index.php;
server_name alexmickelson.guru;
# Proxy to the pihole container (resolved by name on the shared docker
# network -- see the compose file that mounts this config).
location /admin/ {
rewrite /(.*) /$1 break;
proxy_pass http://pihole;
proxy_set_header Host $http_host;
# allow 172.18.0.0/24;
# deny all;
}
# Static site with SPA-style fallback to index.html.
location / {
try_files $uri $uri/ /index.html;
allow all;
}
# allow 172.18.0.0/24;
# deny all;
}
# Disabled wireguard web-UI vhost, kept for reference.
# server {
# listen 443 ssl http2;
# listen [::]:443 ssl http2;
# include /config/nginx/ssl.conf;
# include /config/nginx/proxy.conf;
# include /config/nginx/resolver.conf;
# root /config/www;
# index index.html index.htm index.php;
# server_name wg.alexmickelson.guru;
# location / {
# proxy_pass http://wireguard-web:51821/;
# }
# allow 172.18.0.0/24;
# deny all;
# }
# Cache zone definition; presumably referenced by an included conf as
# "auth_cache" -- verify before removing.
proxy_cache_path cache/ keys_zone=auth_cache:10m;

View File

@@ -0,0 +1,35 @@
version: "3.8"
services:
  # wg-easy: WireGuard server with a web UI (port 51821) for managing peers.
  wg-easy:
    environment:
      - WG_HOST=45.79.102.212
      - WG_DEFAULT_ADDRESS=10.11.0.x
      # Route all client traffic through the VPN.
      - WG_ALLOWED_IPS=0.0.0.0/0, ::/0
      - WG_PERSISTENT_KEEPALIVE=25
      # Clients use this host for DNS as well (Pi-hole runs on the same box).
      - WG_DEFAULT_DNS=45.79.102.212
      # - WG_PORT=51820
    env_file:
      # Holds secrets (e.g. the web-UI password hash) kept out of this file.
      - ./wg-easy.env
    image: weejewel/wg-easy
    container_name: wireguard-web
    volumes:
      - /data/wireguard:/etc/wireguard
    ports:
      - 51820:51820/udp
      - 51821:51821/tcp
    restart: unless-stopped
    # WireGuard needs kernel module + network admin capabilities.
    cap_add:
      - NET_ADMIN
      - SYS_MODULE
    sysctls:
      - net.ipv4.ip_forward=1
      - net.ipv4.conf.all.src_valid_mark=1
    networks:
      default:
      proxy:
networks:
  default:
  proxy:
    external: true

0
notes/gpu-passthrough.md Normal file
View File

41
notes/ufw.md Normal file
View File

@@ -0,0 +1,41 @@
# ufw
### read logs
```
sudo dmesg | grep '\[UFW'
```
### interactions
```bash
ufw allow from 172.19.0.2/32 to any port 443
```
### docker config in /etc/ufw/after.rules
https://stackoverflow.com/questions/30383845/what-is-the-best-practice-of-docker-ufw-under-ubuntu
```bash
# BEGIN UFW AND DOCKER
*filter
:ufw-user-forward - [0:0]
:DOCKER-USER - [0:0]
-A DOCKER-USER -j RETURN -s 10.0.0.0/8
-A DOCKER-USER -j RETURN -s 172.16.0.0/12
-A DOCKER-USER -j RETURN -s 192.168.0.0/16
-A DOCKER-USER -j ufw-user-forward
-A DOCKER-USER -j DROP -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -d 192.168.0.0/16
-A DOCKER-USER -j DROP -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -d 10.0.0.0/8
-A DOCKER-USER -j DROP -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -d 172.16.0.0/12
-A DOCKER-USER -j DROP -p udp -m udp --dport 0:32767 -d 192.168.0.0/16
-A DOCKER-USER -j DROP -p udp -m udp --dport 0:32767 -d 10.0.0.0/8
-A DOCKER-USER -j DROP -p udp -m udp --dport 0:32767 -d 172.16.0.0/12
-A DOCKER-USER -j RETURN
COMMIT
# END UFW AND DOCKER
```

11
outbound-proxy/Dockerfile Normal file
View File

@@ -0,0 +1,11 @@
# Tailscale base image extended with an SSH client and the fish shell, for
# interactive use as an outbound jump box.
FROM tailscale/tailscale:latest
RUN apk add --no-cache \
  openssh-client \
  bash \
  fish \
  shadow
# Register fish as a valid login shell and make it root's default shell.
RUN echo "/usr/bin/fish" >> /etc/shells && sed -i 's|/root:/bin/ash|/root:/usr/bin/fish|' /etc/passwd
# Helper that generates ~/.ssh/config from `tailscale status` output.
COPY ./ssh-config.sh /ssh-config.sh
RUN chmod +x /ssh-config.sh

View File

@@ -0,0 +1,31 @@
services:
  # Tailscale node exposing an HTTP proxy and SOCKS5 server on :1055 so
  # other hosts can reach the tailnet through this container.
  tailscale-outbound:
    build: .
    hostname: tailscale-outbound
    env_file:
      - .env # TS_AUTHKEY
    environment:
      # - TS_EXTRA_ARGS=--advertise-tags=tag:container
      - TS_STATE_DIR=/var/lib/tailscale
      # Kernel-mode networking (requires /dev/net/tun and privileged below).
      - TS_USERSPACE=false
      - TS_OUTBOUND_HTTP_PROXY_LISTEN=:1055
      - TS_SOCKS5_SERVER=:1055
    volumes:
      # Persist tailscale state so the node keeps its identity across restarts.
      - tailscale-data:/var/lib/tailscale
      # - ./ts-serve-config.json:/config/config.json
      - /dev/net/tun:/dev/net/tun
      # - $HOME/.ssh:/root/.ssh:ro
    restart: unless-stopped
    ports:
      - 1055:1055
    privileged: true
    # cap_add:
    #   - NET_ADMIN
    #   - sys_module
  # nginx:
  #   image: nginx
  #   depends_on:
  #     - tailscale-outbound
  #   network_mode: service:tailscale-outbound
volumes:
  tailscale-data:

22
outbound-proxy/ssh-config.sh Executable file
View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Generate $HOME/.ssh/config with one Host entry per machine visible in
# `tailscale status`, so `ssh <tailscale-hostname>` just works.
tailscale_status=$(tailscale status)
# Extract the lines containing IP addresses and hostnames
# Example lines we are interested in:
# 100.101.102.103 server1 linux active; direct 192.168.1.101:41641, tx 3867808 rx 7391200
# 100.101.102.104 server2 windows active; direct 192.168.1.102:41641, tx 3867808 rx 7391200
ssh_entries=$(echo "$tailscale_status" | awk '/^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+/ {print "Host " $2 "\n  HostName " $1 "\n  User alex\n"}')
ssh_config_content="# SSH config - generated by tailscale script\n\n"
ssh_config_content+="$ssh_entries"
output_file="$HOME/.ssh/config"
# Create the directory the config is written to. Previously this was
# hard-coded to /root/.ssh, which broke when $HOME was not /root.
mkdir -p "$(dirname "$output_file")"
echo -e "$ssh_config_content" > "$output_file"
chmod 600 "$output_file"
echo "SSH config file has been updated with Tailscale hosts at $output_file."