Compare commits

...

2 Commits

Author SHA1 Message Date
Trey T
4ec4bbbfe8 Auto-seed lookups + admin + templates on first API boot
Some checks failed
Backend CI / Test (push) Has been cancelled
Backend CI / Contract Tests (push) Has been cancelled
Backend CI / Lint (push) Has been cancelled
Backend CI / Secret Scanning (push) Has been cancelled
Backend CI / Build (push) Has been cancelled
Add a data_migration that runs seeds/001_lookups.sql,
seeds/003_admin_user.sql, and seeds/003_task_templates.sql exactly
once on startup and invalidates the Redis seeded_data cache afterwards
so /api/static_data/ returns fresh results. Removes the need to
remember `./dev.sh seed-all`; the data_migrations tracking row prevents
re-runs, and each INSERT uses ON CONFLICT DO UPDATE so re-execution is
safe.
2026-04-15 08:37:55 -05:00
Trey T
58e6997eee Fix migration numbering collision and bump Dockerfile to Go 1.25
Some checks failed
Backend CI / Test (push) Has been cancelled
Backend CI / Contract Tests (push) Has been cancelled
Backend CI / Build (push) Has been cancelled
Backend CI / Lint (push) Has been cancelled
Backend CI / Secret Scanning (push) Has been cancelled
The `000016_task_template_id` and `000017_drop_task_template_regions_join`
migrations introduced on gitea collided with the existing unpadded 016/017
migrations (authtoken_created_at, fk_indexes). Renamed them to 021/022 so
they extend the shipped sequence instead of replacing real migrations.
Also removed the padded 000012-000015 files which were duplicate content
of the shipped 012-015 unpadded migrations.

Dockerfile builder image bumped from golang:1.24-alpine to 1.25-alpine to
match go.mod's `go 1.25` directive.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-14 16:17:23 -05:00
15 changed files with 137 additions and 54 deletions

View File

@@ -16,7 +16,7 @@ COPY admin/ .
RUN npm run build
# Go build stage
FROM --platform=$BUILDPLATFORM golang:1.24-alpine AS builder
FROM --platform=$BUILDPLATFORM golang:1.25-alpine AS builder
ARG TARGETARCH
# Install build dependencies

View File

@@ -81,6 +81,13 @@ func main() {
cache = nil
} else {
defer cache.Close()
if database.SeedInitialDataApplied {
if err := cache.InvalidateSeededData(context.Background()); err != nil {
log.Warn().Err(err).Msg("Failed to invalidate seeded data cache after initial seed")
} else {
log.Info().Msg("Invalidated seeded_data cache after initial seed migration")
}
}
}
// Initialize monitoring service (if Redis is available)

View File

@@ -0,0 +1,129 @@
package database
import (
"fmt"
"os"
"path/filepath"
"strings"
"gorm.io/gorm"
)
// Seed files run on first boot. Order matters: lookups first, then rows
// that depend on them (admin user is independent; task templates reference
// lookup categories).
//
// NOTE(review): two files share the "003" prefix; slice order, not the
// filename number, is what controls execution order here.
var initialSeedFiles = []string{
	"001_lookups.sql",
	"003_admin_user.sql",
	"003_task_templates.sql",
}

// SeedInitialDataApplied is set true during startup if the seed migration
// just ran. main.go reads it post-cache-init to invalidate stale Redis
// entries for /api/static_data (24h TTL) so clients see the new lookups.
var SeedInitialDataApplied bool

// init registers the one-shot seed migration with the data-migration
// runner; the "20260414_seed_initial_data" key is the tracking-row ID
// that prevents re-runs on later boots.
func init() {
	RegisterDataMigration("20260414_seed_initial_data", seedInitialData)
}
// seedInitialData executes the baseline SQL seed files exactly once. Because
// each INSERT uses ON CONFLICT DO UPDATE, rerunning the files is safe if the
// tracking row is ever lost.
//
// Statements are executed through tx itself rather than tx.DB(): the
// previous code grabbed the underlying *sql.DB connection pool, which
// escapes any transaction the data-migration runner opened around this
// callback (and in GORM v2, DB() fails when the ConnPool is a *sql.Tx).
// Going through tx keeps the seed statements atomic with the
// data_migrations tracking row.
func seedInitialData(tx *gorm.DB) error {
	for _, filename := range initialSeedFiles {
		content, err := readSeedFile(filename)
		if err != nil {
			return fmt.Errorf("read seed %s: %w", filename, err)
		}
		for i, stmt := range splitSQL(content) {
			if err := tx.Exec(stmt).Error; err != nil {
				// Truncate the statement in the error so a huge INSERT
				// doesn't flood the logs while still identifying it.
				preview := stmt
				if len(preview) > 120 {
					preview = preview[:120] + "..."
				}
				return fmt.Errorf("seed %s statement %d failed: %w\nstatement: %s", filename, i+1, err, preview)
			}
		}
	}
	// Signal main.go to invalidate the Redis seeded_data cache after
	// cache initialization (see the startup hook that reads this flag).
	SeedInitialDataApplied = true
	return nil
}
func readSeedFile(filename string) (string, error) {
paths := []string{
filepath.Join("seeds", filename),
filepath.Join("./seeds", filename),
filepath.Join("/app/seeds", filename),
}
var lastErr error
for _, p := range paths {
content, err := os.ReadFile(p)
if err == nil {
return string(content), nil
}
lastErr = err
}
return "", lastErr
}
// splitSQL splits raw SQL into individual statements so they can be executed
// one at a time. It does just enough lexing to avoid splitting in the wrong
// place:
//
//   - single- and double-quoted literals, including the SQL '' escape for a
//     literal quote, so semicolons inside strings are preserved;
//   - "--" line comments, so a semicolon inside a comment no longer ends the
//     statement (previously it did, which corrupted the statement that
//     followed the comment).
//
// Fragments consisting only of comments/whitespace are dropped.
func splitSQL(sqlContent string) []string {
	var out []string
	var current strings.Builder
	inString := false  // inside a quoted literal
	stringChar := byte(0) // quote char that opened the literal
	inComment := false // inside a "--" comment; runs to end of line

	// flush appends the accumulated text as a statement unless it is
	// empty or comment-only, then resets the accumulator.
	flush := func() {
		if stmt := strings.TrimSpace(current.String()); stmt != "" && !isSQLCommentOnly(stmt) {
			out = append(out, stmt)
		}
		current.Reset()
	}

	for i := 0; i < len(sqlContent); i++ {
		c := sqlContent[i]
		switch {
		case inComment:
			if c == '\n' {
				inComment = false
			}
		case inString:
			if c == stringChar && sqlContent[i-1] != '\\' {
				if c == '\'' && i+1 < len(sqlContent) && sqlContent[i+1] == '\'' {
					// '' escape: emit both quotes, stay in the literal.
					current.WriteByte(c)
					i++
					c = sqlContent[i]
				} else {
					inString = false
				}
			}
		case (c == '\'' || c == '"') && (i == 0 || sqlContent[i-1] != '\\'):
			inString = true
			stringChar = c
		case c == '-' && i+1 < len(sqlContent) && sqlContent[i+1] == '-':
			inComment = true
		case c == ';':
			current.WriteByte(c)
			flush()
			continue
		}
		current.WriteByte(c)
	}
	flush()
	return out
}

// isSQLCommentOnly reports whether every non-blank line of stmt is a "--"
// line comment, i.e. the fragment contains no executable SQL.
func isSQLCommentOnly(stmt string) bool {
	for _, line := range strings.Split(stmt, "\n") {
		line = strings.TrimSpace(line)
		if line != "" && !strings.HasPrefix(line, "--") {
			return false
		}
	}
	return true
}

View File

@@ -1 +0,0 @@
DROP TABLE IF EXISTS webhook_event_log;

View File

@@ -1,9 +0,0 @@
CREATE TABLE IF NOT EXISTS webhook_event_log (
id SERIAL PRIMARY KEY,
event_id VARCHAR(255) NOT NULL,
provider VARCHAR(20) NOT NULL,
event_type VARCHAR(100) NOT NULL,
processed_at TIMESTAMPTZ DEFAULT NOW(),
payload_hash VARCHAR(64),
UNIQUE(provider, event_id)
);

View File

@@ -1,5 +0,0 @@
ALTER TABLE notifications_notificationpreference DROP CONSTRAINT IF EXISTS uq_notif_pref_user;
ALTER TABLE subscriptions_usersubscription DROP CONSTRAINT IF EXISTS uq_subscription_user;
ALTER TABLE notifications_notification DROP CONSTRAINT IF EXISTS chk_notification_sent_consistency;
ALTER TABLE subscriptions_usersubscription DROP CONSTRAINT IF EXISTS chk_subscription_tier;
ALTER TABLE task_task DROP CONSTRAINT IF EXISTS chk_task_not_cancelled_and_archived;

View File

@@ -1,19 +0,0 @@
-- Prevent task from being both cancelled and archived simultaneously
ALTER TABLE task_task ADD CONSTRAINT chk_task_not_cancelled_and_archived
CHECK (NOT (is_cancelled = true AND is_archived = true));
-- Subscription tier must be a valid value
ALTER TABLE subscriptions_usersubscription ADD CONSTRAINT chk_subscription_tier
CHECK (tier IN ('free', 'pro'));
-- Notification: sent_at must be set when sent is true
ALTER TABLE notifications_notification ADD CONSTRAINT chk_notification_sent_consistency
CHECK ((sent = false) OR (sent = true AND sent_at IS NOT NULL));
-- One subscription per user
ALTER TABLE subscriptions_usersubscription ADD CONSTRAINT uq_subscription_user
UNIQUE (user_id);
-- One notification preference per user
ALTER TABLE notifications_notificationpreference ADD CONSTRAINT uq_notif_pref_user
UNIQUE (user_id);

View File

@@ -1 +0,0 @@
ALTER TABLE task_task DROP COLUMN IF EXISTS version;

View File

@@ -1 +0,0 @@
ALTER TABLE task_task ADD COLUMN IF NOT EXISTS version INTEGER NOT NULL DEFAULT 1;

View File

@@ -1,3 +0,0 @@
DROP INDEX IF EXISTS idx_document_residence_active;
DROP INDEX IF EXISTS idx_notification_user_unread;
DROP INDEX IF EXISTS idx_task_kanban_query;

View File

@@ -1,14 +0,0 @@
-- Kanban: composite partial index for active task queries by residence with due date ordering
CREATE INDEX IF NOT EXISTS idx_task_kanban_query
ON task_task (residence_id, next_due_date, due_date)
WHERE is_cancelled = false AND is_archived = false;
-- Notifications: partial index for unread count (hot query)
CREATE INDEX IF NOT EXISTS idx_notification_user_unread
ON notifications_notification (user_id, read)
WHERE read = false;
-- Documents: partial index for active documents by residence
CREATE INDEX IF NOT EXISTS idx_document_residence_active
ON documents_document (residence_id, is_active)
WHERE is_active = true;