Add K3s dev deployment setup for single-node VPS

Mirrors the prod deploy-k3s/ setup but runs all services in-cluster
on a single node: PostgreSQL (replaces Neon), MinIO S3-compatible
storage (replaces B2), Redis, API, worker, and admin.

Includes fully automated setup scripts (00-init through 04-verify),
server hardening (SSH, fail2ban, ufw), Let's Encrypt TLS via Traefik,
network policies, RBAC, and security contexts matching prod.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Trey t
2026-03-30 21:30:39 -05:00
parent 00fd674b56
commit 34553f3bec
52 changed files with 5319 additions and 0 deletions

View File

@@ -0,0 +1,124 @@
#!/usr/bin/env bash
# 01-provision.sh — provision a K3s cluster on Hetzner Cloud via hetzner-k3s.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck source=_config.sh
source "${SCRIPT_DIR}/_config.sh"

log() { printf '[provision] %s\n' "$*"; }
die() { printf '[provision][error] %s\n' "$*" >&2; exit 1; }

# --- Prerequisites ---
command -v hetzner-k3s >/dev/null 2>&1 || die "Missing: hetzner-k3s CLI. Install: https://github.com/vitobotta/hetzner-k3s"
command -v kubectl >/dev/null 2>&1 || die "Missing: kubectl"

HCLOUD_TOKEN="$(cfg_require cluster.hcloud_token "Hetzner API token")"
export HCLOUD_TOKEN

# Validate SSH keys. Expand only a *leading* "~" to $HOME — the previous
# sed-based approach replaced every "~" in the path and would also break
# if $HOME happened to contain the sed delimiter.
SSH_PUB="$(cfg cluster.ssh_public_key)"
SSH_PUB="${SSH_PUB/#\~/${HOME}}"
SSH_PRIV="$(cfg cluster.ssh_private_key)"
SSH_PRIV="${SSH_PRIV/#\~/${HOME}}"
[[ -f "${SSH_PUB}" ]] || die "SSH public key not found: ${SSH_PUB}"
[[ -f "${SSH_PRIV}" ]] || die "SSH private key not found: ${SSH_PRIV}"
# --- Generate hetzner-k3s cluster config from config.yaml ---
CLUSTER_CONFIG="${DEPLOY_DIR}/cluster-config.yaml"
log "Generating cluster-config.yaml from config.yaml..."
# generate_cluster_config (from _config.sh) renders the hetzner-k3s YAML to stdout.
generate_cluster_config > "${CLUSTER_CONFIG}"

# --- Provision ---
INSTANCE_TYPE="$(cfg cluster.instance_type)"
LOCATION="$(cfg cluster.location)"
NODE_COUNT="$(node_count)"
log "Provisioning K3s cluster on Hetzner Cloud..."
log " Nodes: ${NODE_COUNT}x ${INSTANCE_TYPE} in ${LOCATION}"
log " This takes about 5-10 minutes."
echo ""
# Long-running; a failure here aborts the script via set -e.
hetzner-k3s create --config "${CLUSTER_CONFIG}"

# hetzner-k3s writes the kubeconfig to the path configured in
# generate_cluster_config ('./kubeconfig', relative to DEPLOY_DIR).
KUBECONFIG_PATH="${DEPLOY_DIR}/kubeconfig"
if [[ ! -f "${KUBECONFIG_PATH}" ]]; then
  die "Provisioning completed but kubeconfig not found. Check hetzner-k3s output."
fi
# --- Write node IPs back to config.yaml ---
log "Querying node IPs..."
export KUBECONFIG="${KUBECONFIG_PATH}"
# The config path is passed to Python as argv (not interpolated into the
# source), so a path containing quotes cannot break or inject code.
python3 - "${CONFIG_FILE}" <<'PYEOF'
"""Record the provisioned nodes' external (or internal) IPs in config.yaml."""
import json
import subprocess
import sys

import yaml

config_path = sys.argv[1]

# Fail loudly if kubectl cannot reach the cluster; previously stdout was
# parsed unconditionally, which died with an opaque JSONDecodeError.
result = subprocess.run(
    ['kubectl', 'get', 'nodes', '-o', 'json'],
    capture_output=True, text=True,
)
if result.returncode != 0:
    sys.exit('kubectl get nodes failed: ' + result.stderr.strip())
nodes_json = json.loads(result.stdout)

# Build name -> IP map, preferring ExternalIP and falling back to InternalIP.
ip_map = {}
for node in nodes_json.get('items', []):
    name = node['metadata']['name']
    addresses = node.get('status', {}).get('addresses', [])
    for addr in addresses:
        if addr['type'] == 'ExternalIP':
            ip_map[name] = addr['address']
            break
    else:
        for addr in addresses:
            if addr['type'] == 'InternalIP':
                ip_map[name] = addr['address']
                break

# Update config.yaml in place, matching configured node names against the
# real K8s node names by substring in either direction.
with open(config_path) as f:
    config = yaml.safe_load(f)

updated = 0
for i, node in enumerate(config.get('nodes', [])):
    for real_name, ip in ip_map.items():
        if node['name'] in real_name or real_name in node['name']:
            config['nodes'][i]['ip'] = ip
            config['nodes'][i]['name'] = real_name
            updated += 1
            break

if updated == 0 and ip_map:
    # Names didn't match at all — assign deterministically by sorted order.
    for i, (name, ip) in enumerate(sorted(ip_map.items())):
        if i < len(config['nodes']):
            config['nodes'][i]['name'] = name
            config['nodes'][i]['ip'] = ip
            updated += 1

with open(config_path, 'w') as f:
    yaml.dump(config, f, default_flow_style=False, sort_keys=False)

print(f'Updated {updated} node IPs in config.yaml')
for name, ip in sorted(ip_map.items()):
    print(f'  {name}: {ip}')
PYEOF
# --- Label Redis node ---
REDIS_NODE="$(nodes_with_role redis | head -1)"
if [[ -n "${REDIS_NODE}" ]]; then
  # Find the K8s node whose name matches the configured redis node.
  # (Previously this always labeled the *first* node, ignoring REDIS_NODE.)
  ACTUAL_NODE="$(kubectl get nodes -o jsonpath='{.items[*].metadata.name}' | tr ' ' '\n' | grep -F -- "${REDIS_NODE}" | head -1 || true)"
  if [[ -z "${ACTUAL_NODE}" ]]; then
    # No name match — fall back to the first node (single-node dev clusters).
    ACTUAL_NODE="$(kubectl get nodes -o jsonpath='{.items[0].metadata.name}')"
  fi
  log "Labeling node ${ACTUAL_NODE} for Redis..."
  kubectl label node "${ACTUAL_NODE}" honeydue/redis=true --overwrite
fi

log ""
log "Cluster provisioned successfully."
log ""
log "Next steps:"
log " export KUBECONFIG=${KUBECONFIG_PATH}"
log " kubectl get nodes"
log " ./scripts/02-setup-secrets.sh"

View File

@@ -0,0 +1,131 @@
#!/usr/bin/env bash
# 02-setup-secrets.sh — create/refresh all Kubernetes secrets for the app.
# Secret material is read from files in deploy-k3s/secrets/ (outside git).
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck source=_config.sh
source "${SCRIPT_DIR}/_config.sh"

SECRETS_DIR="${DEPLOY_DIR}/secrets"
NAMESPACE="honeydue"

log() { printf '[secrets] %s\n' "$*"; }
warn() { printf '[secrets][warn] %s\n' "$*" >&2; }
die() { printf '[secrets][error] %s\n' "$*" >&2; exit 1; }

# --- Prerequisites ---
command -v kubectl >/dev/null 2>&1 || die "Missing: kubectl"
# Create the namespace on first run so the `kubectl create secret` calls
# below have a target.
kubectl get namespace "${NAMESPACE}" >/dev/null 2>&1 || {
  log "Creating namespace ${NAMESPACE}..."
  kubectl apply -f "${DEPLOY_DIR}/manifests/namespace.yaml"
}

# --- Validate secret files ---
# require_file <path> <label> — die unless <path> exists and is non-empty.
require_file() {
  local path="$1" label="$2"
  [[ -f "${path}" ]] || die "Missing: ${path} (${label})"
  [[ -s "${path}" ]] || die "Empty: ${path} (${label})"
}
require_file "${SECRETS_DIR}/postgres_password.txt" "Postgres password"
require_file "${SECRETS_DIR}/secret_key.txt" "SECRET_KEY"
require_file "${SECRETS_DIR}/email_host_password.txt" "SMTP password"
require_file "${SECRETS_DIR}/fcm_server_key.txt" "FCM server key"
require_file "${SECRETS_DIR}/apns_auth_key.p8" "APNS private key"
require_file "${SECRETS_DIR}/cloudflare-origin.crt" "Cloudflare origin cert"
require_file "${SECRETS_DIR}/cloudflare-origin.key" "Cloudflare origin key"

# Validate APNS key format
# Sanity check: Apple .p8 keys are unencrypted PKCS#8 PEM files.
if ! grep -q "BEGIN PRIVATE KEY" "${SECRETS_DIR}/apns_auth_key.p8"; then
  die "APNS key file does not look like a private key: ${SECRETS_DIR}/apns_auth_key.p8"
fi

# Validate secret_key length (minimum 32 chars)
# wc -c counts bytes after stripping CR/LF — fine for an ASCII key.
SECRET_KEY_LEN="$(tr -d '\r\n' < "${SECRETS_DIR}/secret_key.txt" | wc -c | tr -d ' ')"
if (( SECRET_KEY_LEN < 32 )); then
  die "secret_key.txt must be at least 32 characters (got ${SECRET_KEY_LEN})."
fi
# --- Read optional config values ---
# These keys are optional: cfg prints nothing on a missing key, and the
# `|| true` guards against a non-zero cfg exit under `set -e`.
REDIS_PASSWORD="$(cfg redis.password 2>/dev/null || true)"
ADMIN_AUTH_USER="$(cfg admin.basic_auth_user 2>/dev/null || true)"
ADMIN_AUTH_PASSWORD="$(cfg admin.basic_auth_password 2>/dev/null || true)"

# --- Create app secrets ---
log "Creating honeydue-secrets..."
# tr -d '\r\n' strips trailing newlines (and Windows CRs) so they don't
# end up embedded in the secret values.
SECRET_ARGS=(
  --namespace="${NAMESPACE}"
  --from-literal="POSTGRES_PASSWORD=$(tr -d '\r\n' < "${SECRETS_DIR}/postgres_password.txt")"
  --from-literal="SECRET_KEY=$(tr -d '\r\n' < "${SECRETS_DIR}/secret_key.txt")"
  --from-literal="EMAIL_HOST_PASSWORD=$(tr -d '\r\n' < "${SECRETS_DIR}/email_host_password.txt")"
  --from-literal="FCM_SERVER_KEY=$(tr -d '\r\n' < "${SECRETS_DIR}/fcm_server_key.txt")"
)
if [[ -n "${REDIS_PASSWORD}" ]]; then
  log " Including REDIS_PASSWORD in secrets"
  SECRET_ARGS+=(--from-literal="REDIS_PASSWORD=${REDIS_PASSWORD}")
fi
# create --dry-run + apply makes this idempotent (updates the secret in place).
kubectl create secret generic honeydue-secrets \
  "${SECRET_ARGS[@]}" \
  --dry-run=client -o yaml | kubectl apply -f -
# --- Create APNS key secret ---
# Stored whole-file (--from-file) so it can be mounted as apns_auth_key.p8.
log "Creating honeydue-apns-key..."
kubectl create secret generic honeydue-apns-key \
  --namespace="${NAMESPACE}" \
  --from-file="apns_auth_key.p8=${SECRETS_DIR}/apns_auth_key.p8" \
  --dry-run=client -o yaml | kubectl apply -f -

# --- Create GHCR registry credentials ---
REGISTRY_SERVER="$(cfg registry.server)"
REGISTRY_USER="$(cfg registry.username)"
REGISTRY_TOKEN="$(cfg registry.token)"
if [[ -n "${REGISTRY_SERVER}" && -n "${REGISTRY_USER}" && -n "${REGISTRY_TOKEN}" ]]; then
  log "Creating ghcr-credentials..."
  kubectl create secret docker-registry ghcr-credentials \
    --namespace="${NAMESPACE}" \
    --docker-server="${REGISTRY_SERVER}" \
    --docker-username="${REGISTRY_USER}" \
    --docker-password="${REGISTRY_TOKEN}" \
    --dry-run=client -o yaml | kubectl apply -f -
else
  # Non-fatal: publicly pullable images don't need pull credentials.
  warn "Registry credentials incomplete in config.yaml — skipping ghcr-credentials."
fi

# --- Create Cloudflare origin cert ---
log "Creating cloudflare-origin-cert..."
kubectl create secret tls cloudflare-origin-cert \
  --namespace="${NAMESPACE}" \
  --cert="${SECRETS_DIR}/cloudflare-origin.crt" \
  --key="${SECRETS_DIR}/cloudflare-origin.key" \
  --dry-run=client -o yaml | kubectl apply -f -
# --- Create admin basic auth secret ---
if [[ -n "${ADMIN_AUTH_USER}" && -n "${ADMIN_AUTH_PASSWORD}" ]]; then
  command -v htpasswd >/dev/null 2>&1 || die "Missing: htpasswd (install apache2-utils)"
  log "Creating admin-basic-auth secret..."
  # Feed the password via stdin (-i) instead of -b so it never appears in
  # the process table (`ps`) while htpasswd runs. Output format is identical.
  HTPASSWD="$(printf '%s' "${ADMIN_AUTH_PASSWORD}" | htpasswd -ni "${ADMIN_AUTH_USER}")"
  kubectl create secret generic admin-basic-auth \
    --namespace="${NAMESPACE}" \
    --from-literal=users="${HTPASSWD}" \
    --dry-run=client -o yaml | kubectl apply -f -
else
  warn "admin.basic_auth_user/password not set in config.yaml — skipping admin-basic-auth."
  warn "Admin panel will NOT have basic auth protection."
fi

# --- Done ---
log ""
log "All secrets created in namespace '${NAMESPACE}'."
log "Verify: kubectl get secrets -n ${NAMESPACE}"

143
deploy-k3s/scripts/03-deploy.sh Executable file
View File

@@ -0,0 +1,143 @@
#!/usr/bin/env bash
# 03-deploy.sh — build & push container images, then apply K8s manifests.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck source=_config.sh
source "${SCRIPT_DIR}/_config.sh"

# Repo root is one level above deploy-k3s/ (Docker build context).
REPO_DIR="$(cd "${DEPLOY_DIR}/.." && pwd)"
NAMESPACE="honeydue"
MANIFESTS="${DEPLOY_DIR}/manifests"

log() { printf '[deploy] %s\n' "$*"; }
warn() { printf '[deploy][warn] %s\n' "$*" >&2; }
die() { printf '[deploy][error] %s\n' "$*" >&2; exit 1; }

# --- Parse arguments ---
SKIP_BUILD=false
DEPLOY_TAG=""
while (( $# > 0 )); do
  case "$1" in
    --skip-build) SKIP_BUILD=true; shift ;;
    --tag)
      # --tag consumes the following argument as its value.
      [[ -n "${2:-}" ]] || die "--tag requires a value"
      DEPLOY_TAG="$2"; shift 2 ;;
    -h|--help)
      cat <<'EOF'
Usage: ./scripts/03-deploy.sh [OPTIONS]
Options:
  --skip-build   Skip Docker build/push, use existing images
  --tag <tag>    Image tag (default: git short SHA)
  -h, --help     Show this help
EOF
      exit 0 ;;
    *) die "Unknown argument: $1" ;;
  esac
done
# --- Prerequisites ---
command -v kubectl >/dev/null 2>&1 || die "Missing: kubectl"
command -v docker >/dev/null 2>&1 || die "Missing: docker"

# Default the tag to the current git short SHA; fall back to "latest"
# when not running inside a git checkout.
[[ -n "${DEPLOY_TAG}" ]] || DEPLOY_TAG="$(git -C "${REPO_DIR}" rev-parse --short HEAD 2>/dev/null || echo "latest")"

# --- Read registry config ---
REGISTRY_SERVER="$(cfg_require registry.server "Container registry server")"
REGISTRY_NS="$(cfg_require registry.namespace "Registry namespace")"
REGISTRY_USER="$(cfg_require registry.username "Registry username")"
REGISTRY_TOKEN="$(cfg_require registry.token "Registry token")"

# Join server and namespace, stripping a trailing slash from the former
# and a leading slash from the latter so the prefix has exactly one "/".
REGISTRY_PREFIX="${REGISTRY_SERVER%/}/${REGISTRY_NS#/}"
API_IMAGE="${REGISTRY_PREFIX}/honeydue-api:${DEPLOY_TAG}"
WORKER_IMAGE="${REGISTRY_PREFIX}/honeydue-worker:${DEPLOY_TAG}"
ADMIN_IMAGE="${REGISTRY_PREFIX}/honeydue-admin:${DEPLOY_TAG}"
# --- Build and push ---
if [[ "${SKIP_BUILD}" == "false" ]]; then
  log "Logging in to ${REGISTRY_SERVER}..."
  # Token goes over stdin so it never shows up in `ps` output.
  printf '%s' "${REGISTRY_TOKEN}" | docker login "${REGISTRY_SERVER}" -u "${REGISTRY_USER}" --password-stdin >/dev/null

  # All three images build from the same multi-stage Dockerfile, selected
  # by --target.
  log "Building API image: ${API_IMAGE}"
  docker build --target api -t "${API_IMAGE}" "${REPO_DIR}"
  log "Building Worker image: ${WORKER_IMAGE}"
  docker build --target worker -t "${WORKER_IMAGE}" "${REPO_DIR}"
  log "Building Admin image: ${ADMIN_IMAGE}"
  docker build --target admin -t "${ADMIN_IMAGE}" "${REPO_DIR}"

  log "Pushing images..."
  for versioned_image in "${API_IMAGE}" "${WORKER_IMAGE}" "${ADMIN_IMAGE}"; do
    docker push "${versioned_image}"
  done

  # Also tag and push :latest
  for component in api worker admin; do
    docker tag "${REGISTRY_PREFIX}/honeydue-${component}:${DEPLOY_TAG}" "${REGISTRY_PREFIX}/honeydue-${component}:latest"
  done
  for component in api worker admin; do
    docker push "${REGISTRY_PREFIX}/honeydue-${component}:latest"
  done
else
  warn "Skipping build. Using images for tag: ${DEPLOY_TAG}"
fi
# --- Generate and apply ConfigMap from config.yaml ---
log "Generating env from config.yaml..."
ENV_FILE="$(mktemp)"
# Remove the temp env file on any exit path (it may hold sensitive values).
trap 'rm -f "${ENV_FILE}"' EXIT
generate_env > "${ENV_FILE}"

log "Creating ConfigMap..."
# NOTE(review): if only the ConfigMap changes (same image tag), running pods
# keep their old env until restarted — consider `kubectl rollout restart`.
kubectl create configmap honeydue-config \
  --namespace="${NAMESPACE}" \
  --from-env-file="${ENV_FILE}" \
  --dry-run=client -o yaml | kubectl apply -f -

# --- Apply manifests ---
log "Applying manifests..."
kubectl apply -f "${MANIFESTS}/namespace.yaml"
kubectl apply -f "${MANIFESTS}/redis/"
kubectl apply -f "${MANIFESTS}/ingress/"

# Apply deployments with image substitution
# Deployment manifests carry a literal "IMAGE_PLACEHOLDER" that is swapped
# for the freshly built image reference at apply time.
sed "s|image: IMAGE_PLACEHOLDER|image: ${API_IMAGE}|" "${MANIFESTS}/api/deployment.yaml" | kubectl apply -f -
kubectl apply -f "${MANIFESTS}/api/service.yaml"
kubectl apply -f "${MANIFESTS}/api/hpa.yaml"
sed "s|image: IMAGE_PLACEHOLDER|image: ${WORKER_IMAGE}|" "${MANIFESTS}/worker/deployment.yaml" | kubectl apply -f -
sed "s|image: IMAGE_PLACEHOLDER|image: ${ADMIN_IMAGE}|" "${MANIFESTS}/admin/deployment.yaml" | kubectl apply -f -
kubectl apply -f "${MANIFESTS}/admin/service.yaml"

# --- Wait for rollouts ---
# Any rollout failure or timeout aborts the script via set -e.
log "Waiting for rollouts..."
kubectl rollout status deployment/redis -n "${NAMESPACE}" --timeout=120s
kubectl rollout status deployment/api -n "${NAMESPACE}" --timeout=300s
kubectl rollout status deployment/worker -n "${NAMESPACE}" --timeout=300s
kubectl rollout status deployment/admin -n "${NAMESPACE}" --timeout=300s

# --- Done ---
log ""
log "Deploy completed successfully."
log "Tag: ${DEPLOY_TAG}"
log "Images:"
log " API: ${API_IMAGE}"
log " Worker: ${WORKER_IMAGE}"
log " Admin: ${ADMIN_IMAGE}"
log ""
log "Run ./scripts/04-verify.sh to check cluster health."

180
deploy-k3s/scripts/04-verify.sh Executable file
View File

@@ -0,0 +1,180 @@
#!/usr/bin/env bash
# 04-verify.sh — read-only health and security report for the cluster.
set -euo pipefail

NAMESPACE="honeydue"

log() { printf '[verify] %s\n' "$*"; }
sep() { printf '\n%s\n' "--- $1 ---"; }
ok() { printf '[verify] ✓ %s\n' "$*"; }
fail() { printf '[verify] ✗ %s\n' "$*"; }

command -v kubectl >/dev/null 2>&1 || { echo "Missing: kubectl" >&2; exit 1; }

# Basic inventory. These run unguarded: under `set -e` the script aborts
# immediately if the cluster is unreachable.
sep "Nodes"
kubectl get nodes -o wide
sep "Pods"
kubectl get pods -n "${NAMESPACE}" -o wide
sep "Services"
kubectl get svc -n "${NAMESPACE}"
sep "Ingress"
kubectl get ingress -n "${NAMESPACE}"
sep "HPA"
kubectl get hpa -n "${NAMESPACE}"
sep "PVCs"
kubectl get pvc -n "${NAMESPACE}"
sep "Secrets (names only)"
kubectl get secrets -n "${NAMESPACE}"

sep "ConfigMap keys"
# Print ConfigMap keys, redacting values whose key looks secret-ish.
kubectl get configmap honeydue-config -n "${NAMESPACE}" -o jsonpath='{.data}' 2>/dev/null | python3 -c "
import json, sys
try:
    d = json.load(sys.stdin)
    for k in sorted(d.keys()):
        v = d[k]
        if any(s in k.upper() for s in ['PASSWORD', 'SECRET', 'TOKEN', 'KEY']):
            v = '***REDACTED***'
        print(f' {k}={v}')
except Exception:
    # Fix: a bare 'except:' also swallowed SystemExit/KeyboardInterrupt.
    print(' (could not parse)')
" 2>/dev/null || log "ConfigMap not found or not parseable"

sep "Warning Events (last 15 min)"
# NOTE(review): the header says "last 15 min" but events are only limited
# by `tail -20`, not filtered by time — confirm intended behavior.
kubectl get events -n "${NAMESPACE}" --field-selector type=Warning --sort-by='.lastTimestamp' 2>/dev/null | tail -20 || log "No warning events"

sep "Pod Restart Counts"
kubectl get pods -n "${NAMESPACE}" -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{range .status.containerStatuses[*]}{.restartCount}{end}{"\n"}{end}' 2>/dev/null || true

sep "In-Cluster Health Check"
API_POD="$(kubectl get pods -n "${NAMESPACE}" -l app.kubernetes.io/name=api -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || true)"
if [[ -n "${API_POD}" ]]; then
  log "Running health check from pod ${API_POD}..."
  # Explicit if/else instead of `A && B || C`, which mis-fires if B fails.
  if kubectl exec -n "${NAMESPACE}" "${API_POD}" -- curl -sf http://localhost:8000/api/health/ 2>/dev/null; then
    log "Health check: OK"
  else
    log "Health check: FAILED"
  fi
else
  log "No API pod found — skipping in-cluster health check"
fi

sep "Resource Usage"
kubectl top pods -n "${NAMESPACE}" 2>/dev/null || log "Metrics server not available (install with: kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml)"
# =============================================================================
# Security Verification
# =============================================================================
sep "Security: Secret Encryption"
# Check that secrets-encryption is configured on the K3s server
if kubectl get nodes -o jsonpath='{.items[0].metadata.name}' >/dev/null 2>&1; then
  # grep WITHOUT -q reads all input: with `set -o pipefail`, `grep -q` can
  # exit early and kill kubectl with SIGPIPE, turning a match into a failure.
  if kubectl -n kube-system get cm k3s-config -o yaml 2>/dev/null | grep "secrets-encryption" >/dev/null; then
    ok "secrets-encryption found in K3s config"
  else
    # Fallback: we can only confirm the secret exists from here; definitive
    # status requires `k3s secrets-encrypt status` on the server itself.
    ENCRYPTED_CHECK="$(kubectl get secret honeydue-secrets -n "${NAMESPACE}" -o jsonpath='{.metadata.name}' 2>/dev/null || true)"
    if [[ -n "${ENCRYPTED_CHECK}" ]]; then
      ok "honeydue-secrets exists (verify encryption with: k3s secrets-encrypt status)"
    else
      fail "Cannot verify secret encryption — run 'k3s secrets-encrypt status' on the server"
    fi
  fi
else
  fail "Cannot reach cluster to verify secret encryption"
fi

sep "Security: Network Policies"
# `|| true` inside the substitution stops a kubectl failure (under pipefail)
# from aborting the whole report.
NP_COUNT="$(kubectl get networkpolicy -n "${NAMESPACE}" --no-headers 2>/dev/null | wc -l | tr -d ' ' || true)"
NP_COUNT="${NP_COUNT:-0}"
if (( NP_COUNT >= 5 )); then
  ok "Found ${NP_COUNT} network policies"
  kubectl get networkpolicy -n "${NAMESPACE}" --no-headers 2>/dev/null | while read -r line; do
    echo " ${line}"
  done
else
  fail "Expected 5+ network policies, found ${NP_COUNT}"
fi

sep "Security: Service Accounts"
# Bug fix: `grep -c` exits 1 when the count is zero, so under
# `set -euo pipefail` this assignment previously aborted the script whenever
# no custom service accounts existed. `|| true` keeps the report running.
SA_COUNT="$(kubectl get sa -n "${NAMESPACE}" --no-headers 2>/dev/null | grep -cv default | tr -d ' ' || true)"
SA_COUNT="${SA_COUNT:-0}"
if (( SA_COUNT >= 4 )); then
  ok "Found ${SA_COUNT} custom service accounts (api, worker, admin, redis)"
else
  fail "Expected 4 custom service accounts, found ${SA_COUNT}"
fi
kubectl get sa -n "${NAMESPACE}" --no-headers 2>/dev/null | while read -r line; do
  echo " ${line}"
done
sep "Security: Pod Security Contexts"
PODS_WITHOUT_SECURITY="$(kubectl get pods -n "${NAMESPACE}" -o json 2>/dev/null | python3 -c "
import json, sys
try:
    data = json.load(sys.stdin)
    issues = []
    for pod in data.get('items', []):
        name = pod['metadata']['name']
        spec = pod['spec']
        # NOTE(review): only the pod-level securityContext is checked for
        # runAsNonRoot; a container-level override would be missed — confirm.
        sc = spec.get('securityContext', {})
        if not sc.get('runAsNonRoot'):
            issues.append(f'{name}: missing runAsNonRoot')
        for c in spec.get('containers', []):
            cname = c['name']
            csc = c.get('securityContext', {})
            # Absent allowPrivilegeEscalation defaults to permitted.
            if csc.get('allowPrivilegeEscalation', True):
                issues.append(f'{cname and name}/{cname}: allowPrivilegeEscalation not false'.replace(f'{cname and name}', name))
            if not csc.get('readOnlyRootFilesystem'):
                issues.append(f'{name}/{cname}: readOnlyRootFilesystem not true')
    if issues:
        for i in issues:
            print(i)
    else:
        print('OK')
except Exception as e:
    print(f'Error: {e}')
" 2>/dev/null || echo "Error parsing pod specs")"
if [[ "${PODS_WITHOUT_SECURITY}" == "OK" ]]; then
  ok "All pods have proper security contexts"
else
  fail "Pod security context issues:"
  echo "${PODS_WITHOUT_SECURITY}" | while read -r line; do
    echo " ${line}"
  done
fi

sep "Security: Pod Disruption Budgets"
# `|| true` keeps pipefail from aborting the report if kubectl itself fails.
PDB_COUNT="$(kubectl get pdb -n "${NAMESPACE}" --no-headers 2>/dev/null | wc -l | tr -d ' ' || true)"
PDB_COUNT="${PDB_COUNT:-0}"
if (( PDB_COUNT >= 2 )); then
  ok "Found ${PDB_COUNT} pod disruption budgets"
else
  fail "Expected 2+ PDBs, found ${PDB_COUNT}"
fi
kubectl get pdb -n "${NAMESPACE}" 2>/dev/null || true
sep "Security: Cloudflare-Only Middleware"
CF_MIDDLEWARE="$(kubectl get middleware cloudflare-only -n "${NAMESPACE}" -o name 2>/dev/null || true)"
if [[ -z "${CF_MIDDLEWARE}" ]]; then
  fail "cloudflare-only middleware not found"
else
  ok "cloudflare-only middleware exists"
  # Confirm at least one ingress actually routes through the middleware.
  INGRESS_ANNOTATIONS="$(kubectl get ingress -n "${NAMESPACE}" -o jsonpath='{.items[*].metadata.annotations.traefik\.ingress\.kubernetes\.io/router\.middlewares}' 2>/dev/null || true)"
  if grep -q "cloudflare-only" <<<"${INGRESS_ANNOTATIONS}"; then
    ok "Ingress references cloudflare-only middleware"
  else
    fail "Ingress does NOT reference cloudflare-only middleware"
  fi
fi

sep "Security: Admin Basic Auth"
# Test the secret's existence directly instead of round-tripping its name.
if kubectl get secret admin-basic-auth -n "${NAMESPACE}" -o name >/dev/null 2>&1; then
  ok "admin-basic-auth secret exists"
else
  fail "admin-basic-auth secret not found — admin panel has no additional auth layer"
fi

echo ""
log "Verification complete."

214
deploy-k3s/scripts/_config.sh Executable file
View File

@@ -0,0 +1,214 @@
#!/usr/bin/env bash
# Shared config helper — sourced by all deploy scripts.
# Provides cfg() to read values from config.yaml.
#
# Note: no `set -euo pipefail` here on purpose — shell options are
# inherited from the sourcing script.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DEPLOY_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)"
CONFIG_FILE="${DEPLOY_DIR}/config.yaml"

# Fail fast if the operator hasn't created config.yaml yet. Since this file
# is sourced, `exit 1` terminates the calling script — intended behavior.
if [[ ! -f "${CONFIG_FILE}" ]]; then
  if [[ -f "${CONFIG_FILE}.example" ]]; then
    echo "[error] config.yaml not found. Run: cp config.yaml.example config.yaml" >&2
  else
    echo "[error] config.yaml not found." >&2
  fi
  exit 1
fi
# cfg "dotted.key.path" — reads a value from config.yaml.
# Numeric path segments index into lists (e.g. nodes.0.ip). Booleans print
# as true/false, dicts/lists as JSON, null as the empty string. All errors
# (missing key, unreadable file) are silenced — use cfg_require for
# mandatory values.
cfg() {
  python3 - "${CONFIG_FILE}" "$1" 2>/dev/null <<'PY'
import json
import sys

import yaml

with open(sys.argv[1]) as fh:
    node = yaml.safe_load(fh)

for part in sys.argv[2].split('.'):
    node = node[int(part)] if isinstance(node, list) else node[part]

if isinstance(node, bool):
    print(str(node).lower())
elif isinstance(node, (dict, list)):
    print(json.dumps(node))
else:
    print('' if node is None else node)
PY
}
# cfg_require "key" "label" — reads a value via cfg and dies if it is empty.
# The assignment is guarded with `|| true`: callers run under `set -e`, and
# without the guard a failing cfg (missing key) aborted the script at the
# assignment — before the intended "[error] Missing required config"
# message could ever be printed.
cfg_require() {
  local key="$1" label="$2" value
  value="$(cfg "${key}")" || true
  if [[ -z "${value}" ]]; then
    echo "[error] Missing required config: ${key} (${label})" >&2
    exit 1
  fi
  printf '%s' "${value}"
}
# node_count — prints the number of entries under `nodes:` in config.yaml.
node_count() {
  # The config path is passed as argv instead of being interpolated into
  # the Python source, so paths containing quotes cannot break the script.
  python3 - "${CONFIG_FILE}" <<'PY'
import sys

import yaml

with open(sys.argv[1]) as fh:
    cfg = yaml.safe_load(fh)
print(len(cfg.get('nodes', [])))
PY
}
# nodes_with_role "role" — prints the names of config.yaml nodes that list
# the given role in their `roles:` array.
nodes_with_role() {
  # Both the config path and the role are passed as argv rather than being
  # interpolated into the Python source: a role containing a quote
  # previously caused a syntax error (and was effectively code injection).
  python3 - "${CONFIG_FILE}" "$1" <<'PY'
import sys

import yaml

with open(sys.argv[1]) as fh:
    cfg = yaml.safe_load(fh)
role = sys.argv[2]
for node in cfg.get('nodes', []):
    if role in node.get('roles', []):
        print(node['name'])
PY
}
# generate_env — writes the flat env file the app expects to stdout.
# Consumed by 03-deploy.sh to build the honeydue-config ConfigMap.
# NOTE(review): ${CONFIG_FILE} is interpolated directly into the Python
# source, so a config path containing quotes would break this; the path is
# derived from DEPLOY_DIR, so unlikely in practice — confirm.
generate_env() {
  python3 -c "
import yaml

# Required sections raise KeyError if absent, surfacing as a non-zero
# python3 exit in the caller.
with open('${CONFIG_FILE}') as f:
    c = yaml.safe_load(f)
d = c['domains']
db = c['database']
em = c['email']
ps = c['push']
st = c['storage']
wk = c['worker']
ft = c['features']
# Optional sections default to empty dicts.
aa = c.get('apple_auth', {})
ga = c.get('google_auth', {})
rd = c.get('redis', {})

# b(): booleans rendered lowercase ('true'/'false') for env consumption.
def b(v):
    return str(v).lower() if isinstance(v, bool) else str(v)

# val(): None becomes an empty string instead of the literal 'None'.
def val(v):
    return '' if v is None else str(v)

lines = [
    # API
    'DEBUG=false',
    f\"ALLOWED_HOSTS={d['api']},{d['base']}\",
    f\"CORS_ALLOWED_ORIGINS=https://{d['base']},https://{d['admin']}\",
    'TIMEZONE=UTC',
    f\"BASE_URL=https://{d['base']}\",
    'PORT=8000',
    # Admin
    f\"NEXT_PUBLIC_API_URL=https://{d['api']}\",
    f\"ADMIN_PANEL_URL=https://{d['admin']}\",
    # Database
    f\"DB_HOST={val(db['host'])}\",
    f\"DB_PORT={db['port']}\",
    f\"POSTGRES_USER={val(db['user'])}\",
    f\"POSTGRES_DB={db['name']}\",
    f\"DB_SSLMODE={db['sslmode']}\",
    f\"DB_MAX_OPEN_CONNS={db['max_open_conns']}\",
    f\"DB_MAX_IDLE_CONNS={db['max_idle_conns']}\",
    f\"DB_MAX_LIFETIME={db['max_lifetime']}\",
    # Redis (K8s internal DNS — password injected if configured)
    f\"REDIS_URL=redis://{':%s@' % val(rd.get('password')) if rd.get('password') else ''}redis.honeydue.svc.cluster.local:6379/0\",
    'REDIS_DB=0',
    # Email
    f\"EMAIL_HOST={em['host']}\",
    f\"EMAIL_PORT={em['port']}\",
    f\"EMAIL_USE_TLS={b(em['use_tls'])}\",
    f\"EMAIL_HOST_USER={val(em['user'])}\",
    f\"DEFAULT_FROM_EMAIL={val(em['from'])}\",
    # Push
    'APNS_AUTH_KEY_PATH=/secrets/apns/apns_auth_key.p8',
    f\"APNS_AUTH_KEY_ID={val(ps['apns_key_id'])}\",
    f\"APNS_TEAM_ID={val(ps['apns_team_id'])}\",
    f\"APNS_TOPIC={ps['apns_topic']}\",
    f\"APNS_USE_SANDBOX={b(ps['apns_use_sandbox'])}\",
    f\"APNS_PRODUCTION={b(ps['apns_production'])}\",
    # Worker
    f\"TASK_REMINDER_HOUR={wk['task_reminder_hour']}\",
    f\"OVERDUE_REMINDER_HOUR={wk['overdue_reminder_hour']}\",
    f\"DAILY_DIGEST_HOUR={wk['daily_digest_hour']}\",
    # B2 Storage
    f\"B2_KEY_ID={val(st['b2_key_id'])}\",
    f\"B2_APP_KEY={val(st['b2_app_key'])}\",
    f\"B2_BUCKET_NAME={val(st['b2_bucket'])}\",
    f\"B2_ENDPOINT={val(st['b2_endpoint'])}\",
    f\"STORAGE_MAX_FILE_SIZE={st['max_file_size']}\",
    f\"STORAGE_ALLOWED_TYPES={st['allowed_types']}\",
    # Features
    f\"FEATURE_PUSH_ENABLED={b(ft['push_enabled'])}\",
    f\"FEATURE_EMAIL_ENABLED={b(ft['email_enabled'])}\",
    f\"FEATURE_WEBHOOKS_ENABLED={b(ft['webhooks_enabled'])}\",
    f\"FEATURE_ONBOARDING_EMAILS_ENABLED={b(ft['onboarding_emails_enabled'])}\",
    f\"FEATURE_PDF_REPORTS_ENABLED={b(ft['pdf_reports_enabled'])}\",
    f\"FEATURE_WORKER_ENABLED={b(ft['worker_enabled'])}\",
    # Apple auth/IAP
    f\"APPLE_CLIENT_ID={val(aa.get('client_id'))}\",
    f\"APPLE_TEAM_ID={val(aa.get('team_id'))}\",
    f\"APPLE_IAP_KEY_ID={val(aa.get('iap_key_id'))}\",
    f\"APPLE_IAP_ISSUER_ID={val(aa.get('iap_issuer_id'))}\",
    f\"APPLE_IAP_BUNDLE_ID={val(aa.get('iap_bundle_id'))}\",
    f\"APPLE_IAP_KEY_PATH={val(aa.get('iap_key_path'))}\",
    f\"APPLE_IAP_SANDBOX={b(aa.get('iap_sandbox', False))}\",
    # Google auth/IAP
    f\"GOOGLE_CLIENT_ID={val(ga.get('client_id'))}\",
    f\"GOOGLE_ANDROID_CLIENT_ID={val(ga.get('android_client_id'))}\",
    f\"GOOGLE_IOS_CLIENT_ID={val(ga.get('ios_client_id'))}\",
    f\"GOOGLE_IAP_PACKAGE_NAME={val(ga.get('iap_package_name'))}\",
    f\"GOOGLE_IAP_SERVICE_ACCOUNT_PATH={val(ga.get('iap_service_account_path'))}\",
]
print('\n'.join(lines))
"
}
# generate_cluster_config — writes the hetzner-k3s cluster YAML to stdout,
# derived from config.yaml.
generate_cluster_config() {
  # The config path is passed as argv instead of being interpolated into
  # the Python source, so paths containing quotes cannot break the script.
  python3 - "${CONFIG_FILE}" <<'PY'
import sys

import yaml

with open(sys.argv[1]) as fh:
    c = yaml.safe_load(fh)
cl = c['cluster']
config = {
    'cluster_name': 'honeydue',
    # hetzner-k3s writes the kubeconfig here, relative to its CWD.
    'kubeconfig_path': './kubeconfig',
    'k3s_version': cl['k3s_version'],
    'networking': {
        'ssh': {
            'port': 22,
            'use_agent': False,
            'public_key_path': cl['ssh_public_key'],
            'private_key_path': cl['ssh_private_key'],
        },
        # NOTE(review): SSH and the K8s API are open to the world here;
        # consider tightening allowed_networks for operators with static IPs.
        'allowed_networks': {
            'ssh': ['0.0.0.0/0'],
            'api': ['0.0.0.0/0'],
        },
    },
    'api_server_hostname': '',
    # Single-pool dev layout: masters also run workloads.
    'schedule_workloads_on_masters': True,
    'masters_pool': {
        'instance_type': cl['instance_type'],
        'instance_count': len(c.get('nodes', [])),
        'location': cl['location'],
        'image': 'ubuntu-24.04',
    },
    # NOTE(review): open-iscsi/iscsid presumably support an iSCSI-based
    # CSI storage driver — confirm against the manifests.
    'additional_packages': ['open-iscsi'],
    'post_create_commands': ['sudo systemctl enable --now iscsid'],
    # Extra K3s server config: enable encryption-at-rest for secrets.
    'k3s_config_file': 'secrets-encryption: true\n',
}
print(yaml.dump(config, default_flow_style=False, sort_keys=False))
PY
}

61
deploy-k3s/scripts/rollback.sh Executable file
View File

@@ -0,0 +1,61 @@
#!/usr/bin/env bash
# rollback.sh — interactively roll back api/worker/admin deployments to
# their previous revision.
set -euo pipefail

NAMESPACE="honeydue"

log() { printf '[rollback] %s\n' "$*"; }
die() { printf '[rollback][error] %s\n' "$*" >&2; exit 1; }

command -v kubectl >/dev/null 2>&1 || die "Missing: kubectl"

DEPLOYMENTS=("api" "worker" "admin")

# --- Show current state ---
echo "=== Current Rollout History ==="
for d in "${DEPLOYMENTS[@]}"; do
  printf '\n--- %s ---\n' "${d}"
  kubectl rollout history deployment/"${d}" -n "${NAMESPACE}" 2>/dev/null || echo " (not found)"
done

echo ""
echo "=== Current Images ==="
for d in "${DEPLOYMENTS[@]}"; do
  img="$(kubectl get deployment "${d}" -n "${NAMESPACE}" -o jsonpath='{.spec.template.spec.containers[0].image}' 2>/dev/null || echo "n/a")"
  echo " ${d}: ${img}"
done
# --- Confirm ---
echo ""
read -rp "Roll back all deployments to previous revision? [y/N] " confirm
case "${confirm}" in
  y|Y) ;;
  *) log "Aborted."; exit 0 ;;
esac

# --- Rollback ---
for d in "${DEPLOYMENTS[@]}"; do
  log "Rolling back ${d}..."
  kubectl rollout undo deployment/"${d}" -n "${NAMESPACE}" 2>/dev/null || log "Skipping ${d} (not found or no previous revision)"
done

# --- Wait ---
log "Waiting for rollouts..."
for d in "${DEPLOYMENTS[@]}"; do
  kubectl rollout status deployment/"${d}" -n "${NAMESPACE}" --timeout=300s 2>/dev/null || true
done

# --- Verify ---
echo ""
echo "=== Post-Rollback Images ==="
for d in "${DEPLOYMENTS[@]}"; do
  img="$(kubectl get deployment "${d}" -n "${NAMESPACE}" -o jsonpath='{.spec.template.spec.containers[0].image}' 2>/dev/null || echo "n/a")"
  echo " ${d}: ${img}"
done

log "Rollback complete. Run ./scripts/04-verify.sh to check health."