Add K3s dev deployment setup for single-node VPS
Mirrors the prod deploy-k3s/ setup but runs all services in-cluster on a single node: PostgreSQL (replaces Neon), MinIO S3-compatible storage (replaces B2), Redis, API, worker, and admin. Includes fully automated setup scripts (00-init through 04-verify), server hardening (SSH, fail2ban, ufw), Let's Encrypt TLS via Traefik, network policies, RBAC, and security contexts matching prod. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
180
deploy-k3s/scripts/04-verify.sh
Executable file
180
deploy-k3s/scripts/04-verify.sh
Executable file
@@ -0,0 +1,180 @@
|
||||
#!/usr/bin/env bash
# 04-verify.sh — post-deploy verification for the single-node K3s dev cluster.
# Prints node/resource status for the app namespace and runs a set of
# security checks (encryption, network policies, RBAC, security contexts).
# Read-only apart from an in-cluster curl health probe.
set -euo pipefail

# Target namespace; overridable via the environment for non-default installs.
NAMESPACE="${NAMESPACE:-honeydue}"
||||
# Output helpers. `fail` writes to stderr so that reported problems stay
# visible when stdout is redirected or piped through a filter.
log() { printf '[verify] %s\n' "$*"; }
sep() { printf '\n%s\n' "--- $1 ---"; }
ok() { printf '[verify] ✓ %s\n' "$*"; }
fail() { printf '[verify] ✗ %s\n' "$*" >&2; }
||||
# Bail out early when kubectl is not on PATH — nothing below can work.
if ! command -v kubectl >/dev/null 2>&1; then
  printf '%s\n' "Missing: kubectl" >&2
  exit 1
fi
||||
# Cluster-wide node overview first, then the namespaced resources. Nodes and
# pods use the wide columns; the remaining listings share one loop.
sep "Nodes"
kubectl get nodes -o wide

sep "Pods"
kubectl get pods -n "${NAMESPACE}" -o wide

# "<section title>:<resource kind>" pairs, printed in the original order.
resource_sections=(
  "Services:svc"
  "Ingress:ingress"
  "HPA:hpa"
  "PVCs:pvc"
  "Secrets (names only):secrets"
)
for section in "${resource_sections[@]}"; do
  sep "${section%:*}"
  kubectl get "${section##*:}" -n "${NAMESPACE}"
done
sep "ConfigMap keys"
# Dump honeydue-config keys with credential-looking values redacted.  The
# Python step reads the ConfigMap's .data object as JSON on stdin; if kubectl
# fails, pipefail makes the whole pipeline non-zero and the fallback log line
# runs.  `except Exception` (not a bare `except:`) so SystemExit and
# KeyboardInterrupt are not swallowed.
kubectl get configmap honeydue-config -n "${NAMESPACE}" -o jsonpath='{.data}' 2>/dev/null | python3 -c "
import json, sys
try:
    d = json.load(sys.stdin)
    for k in sorted(d.keys()):
        v = d[k]
        if any(s in k.upper() for s in ['PASSWORD', 'SECRET', 'TOKEN', 'KEY']):
            v = '***REDACTED***'
        print(f' {k}={v}')
except Exception:
    print(' (could not parse)')
" 2>/dev/null || log "ConfigMap not found or not parseable"
sep "Warning Events (last 15 min)"
# Most recent warnings last; tail keeps the output bounded.
if ! kubectl get events -n "${NAMESPACE}" --field-selector type=Warning --sort-by='.lastTimestamp' 2>/dev/null | tail -20; then
  log "No warning events"
fi

sep "Pod Restart Counts"
# One line per pod: name, then each container's restart count, tab-separated.
kubectl get pods -n "${NAMESPACE}" -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{range .status.containerStatuses[*]}{.restartCount}{end}{"\n"}{end}' 2>/dev/null || true
||||
sep "In-Cluster Health Check"
# Probe the API from inside the cluster by exec'ing curl in the first API pod.
API_POD="$(kubectl get pods -n "${NAMESPACE}" -l app.kubernetes.io/name=api -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || true)"
if [[ -n "${API_POD}" ]]; then
  log "Running health check from pod ${API_POD}..."
  # Explicit if/else rather than `cmd && ok || fail`: the short-circuit form
  # also runs the failure branch if the success logger itself returns non-zero.
  if kubectl exec -n "${NAMESPACE}" "${API_POD}" -- curl -sf http://localhost:8000/api/health/ 2>/dev/null; then
    log "Health check: OK"
  else
    log "Health check: FAILED"
  fi
else
  log "No API pod found — skipping in-cluster health check"
fi
||||
sep "Resource Usage"
# `kubectl top` requires metrics-server, which a bare K3s install may lack.
if ! kubectl top pods -n "${NAMESPACE}" 2>/dev/null; then
  log "Metrics server not available (install with: kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml)"
fi
||||
# =============================================================================
# Security Verification
# =============================================================================

sep "Security: Secret Encryption"
# Best-effort check that K3s secrets-encryption is enabled: first look for the
# flag in the k3s-config ConfigMap; otherwise fall back to confirming the app
# secret exists and point the operator at `k3s secrets-encrypt status`.
if ! kubectl get nodes -o jsonpath='{.items[0].metadata.name}' >/dev/null 2>&1; then
  fail "Cannot reach cluster to verify secret encryption"
elif kubectl -n kube-system get cm k3s-config -o yaml 2>/dev/null | grep -q "secrets-encryption"; then
  ok "secrets-encryption found in K3s config"
elif [[ -n "$(kubectl get secret honeydue-secrets -n "${NAMESPACE}" -o jsonpath='{.metadata.name}' 2>/dev/null || true)" ]]; then
  ok "honeydue-secrets exists (verify encryption with: k3s secrets-encrypt status)"
else
  fail "Cannot verify secret encryption — run 'k3s secrets-encrypt status' on the server"
fi
||||
sep "Security: Network Policies"
# The trailing `|| true` matters: with `set -euo pipefail`, a kubectl failure
# inside the command substitution would abort the whole verify script instead
# of letting this check report.  wc still emits 0, so the check fails loudly.
NP_COUNT="$(kubectl get networkpolicy -n "${NAMESPACE}" --no-headers 2>/dev/null | wc -l | tr -d ' ' || true)"
if (( NP_COUNT >= 5 )); then
  ok "Found ${NP_COUNT} network policies"
  kubectl get networkpolicy -n "${NAMESPACE}" --no-headers 2>/dev/null | while read -r line; do
    echo "  ${line}"
  done
else
  fail "Expected 5+ network policies, found ${NP_COUNT}"
fi
||||
sep "Security: Service Accounts"
# Count non-default service accounts.  The pattern is anchored so an SA whose
# name merely contains "default" is still counted.  `|| true` is required:
# `grep -c` exits 1 when it selects zero lines, which under
# `set -euo pipefail` would kill the script in exactly the case this check is
# meant to report.
SA_COUNT="$(kubectl get sa -n "${NAMESPACE}" --no-headers 2>/dev/null | grep -cv '^default ' || true)"
SA_COUNT="${SA_COUNT:-0}"
if (( SA_COUNT >= 4 )); then
  ok "Found ${SA_COUNT} custom service accounts (api, worker, admin, redis)"
else
  fail "Expected 4 custom service accounts, found ${SA_COUNT}"
fi
kubectl get sa -n "${NAMESPACE}" --no-headers 2>/dev/null | while read -r line; do
  echo "  ${line}"
done
||||
sep "Security: Pod Security Contexts"
# Inspect every pod spec in the namespace and collect security-context gaps:
# pod-level runAsNonRoot unset/false, container allowPrivilegeEscalation not
# explicitly false, or readOnlyRootFilesystem not true.  The Python step
# prints one issue per line, or the single token 'OK' when clean; any parse
# or kubectl failure falls through to the "Error parsing pod specs" string.
# NOTE(review): runAsNonRoot is only checked at the pod level — a pod that
# sets it per-container would still be flagged here; confirm the manifests
# set it on spec.securityContext.
PODS_WITHOUT_SECURITY="$(kubectl get pods -n "${NAMESPACE}" -o json 2>/dev/null | python3 -c "
import json, sys
try:
    data = json.load(sys.stdin)
    issues = []
    for pod in data.get('items', []):
        name = pod['metadata']['name']
        spec = pod['spec']
        sc = spec.get('securityContext', {})
        if not sc.get('runAsNonRoot'):
            issues.append(f'{name}: missing runAsNonRoot')
        for c in spec.get('containers', []):
            csc = c.get('securityContext', {})
            if csc.get('allowPrivilegeEscalation', True):
                issues.append(f'{name}/{c[\"name\"]}: allowPrivilegeEscalation not false')
            if not csc.get('readOnlyRootFilesystem'):
                issues.append(f'{name}/{c[\"name\"]}: readOnlyRootFilesystem not true')
    if issues:
        for i in issues:
            print(i)
    else:
        print('OK')
except Exception as e:
    print(f'Error: {e}')
" 2>/dev/null || echo "Error parsing pod specs")"

# 'OK' means no issues; anything else is the issue list (or an error string),
# echoed indented under the failure header.
if [[ "${PODS_WITHOUT_SECURITY}" == "OK" ]]; then
  ok "All pods have proper security contexts"
else
  fail "Pod security context issues:"
  echo "${PODS_WITHOUT_SECURITY}" | while read -r line; do
    echo "  ${line}"
  done
fi
||||
sep "Security: Pod Disruption Budgets"
# `|| true`: without it, `set -euo pipefail` turns an unreachable cluster into
# a hard script abort inside the command substitution instead of a reported
# check failure; wc still emits 0.
PDB_COUNT="$(kubectl get pdb -n "${NAMESPACE}" --no-headers 2>/dev/null | wc -l | tr -d ' ' || true)"
if (( PDB_COUNT >= 2 )); then
  ok "Found ${PDB_COUNT} pod disruption budgets"
else
  fail "Expected 2+ PDBs, found ${PDB_COUNT}"
fi
kubectl get pdb -n "${NAMESPACE}" 2>/dev/null || true
||||
sep "Security: Cloudflare-Only Middleware"
# Traefik middleware that restricts ingress to Cloudflare IPs, plus a check
# that the ingress actually wires it in via the router.middlewares annotation.
if kubectl get middleware cloudflare-only -n "${NAMESPACE}" -o name >/dev/null 2>&1; then
  ok "cloudflare-only middleware exists"
  ingress_middlewares="$(kubectl get ingress -n "${NAMESPACE}" -o jsonpath='{.items[*].metadata.annotations.traefik\.ingress\.kubernetes\.io/router\.middlewares}' 2>/dev/null || true)"
  if grep -q "cloudflare-only" <<<"${ingress_middlewares}"; then
    ok "Ingress references cloudflare-only middleware"
  else
    fail "Ingress does NOT reference cloudflare-only middleware"
  fi
else
  fail "cloudflare-only middleware not found"
fi
||||
sep "Security: Admin Basic Auth"
# The admin panel should sit behind an extra basic-auth layer; its htpasswd
# lives in this secret.
if kubectl get secret admin-basic-auth -n "${NAMESPACE}" -o name >/dev/null 2>&1; then
  ok "admin-basic-auth secret exists"
else
  fail "admin-basic-auth secret not found — admin panel has no additional auth layer"
fi
||||
# Blank separator line, then the completion marker.
printf '\n'
log "Verification complete."
|
||||
Reference in New Issue
Block a user