Compare commits
2 Commits
237c6b84ee
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4ec4bbbfe8 | ||
|
|
58e6997eee |
@@ -16,7 +16,7 @@ COPY admin/ .
|
||||
RUN npm run build
|
||||
|
||||
# Go build stage
|
||||
FROM --platform=$BUILDPLATFORM golang:1.24-alpine AS builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.25-alpine AS builder
|
||||
ARG TARGETARCH
|
||||
|
||||
# Install build dependencies
|
||||
|
||||
@@ -81,6 +81,13 @@ func main() {
|
||||
cache = nil
|
||||
} else {
|
||||
defer cache.Close()
|
||||
if database.SeedInitialDataApplied {
|
||||
if err := cache.InvalidateSeededData(context.Background()); err != nil {
|
||||
log.Warn().Err(err).Msg("Failed to invalidate seeded data cache after initial seed")
|
||||
} else {
|
||||
log.Info().Msg("Invalidated seeded_data cache after initial seed migration")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize monitoring service (if Redis is available)
|
||||
|
||||
129
internal/database/migration_seed_initial_data.go
Normal file
129
internal/database/migration_seed_initial_data.go
Normal file
@@ -0,0 +1,129 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// Seed files run on first boot. Order matters: lookups first, then rows
// that depend on them (admin user is independent; task templates reference
// lookup categories).
//
// NOTE(review): two files share the "003_" prefix and there is no "002_";
// ordering here is positional (slice order), not derived from the numeric
// prefix, so this works — but confirm the prefixes are intentional.
var initialSeedFiles = []string{
	"001_lookups.sql",
	"003_admin_user.sql",
	"003_task_templates.sql",
}
|
||||
|
||||
// SeedInitialDataApplied is set true during startup if the seed migration
// just ran. main.go reads it post-cache-init to invalidate stale Redis
// entries for /api/static_data (24h TTL) so clients see the new lookups.
//
// NOTE(review): package-level mutable flag written by the migration and read
// by main.go; presumably safe because startup is single-threaded — confirm
// no concurrent access before relying on it elsewhere.
var SeedInitialDataApplied bool

// init registers the seed migration under a dated key so the project's
// data-migration runner executes it at most once per database.
func init() {
	RegisterDataMigration("20260414_seed_initial_data", seedInitialData)
}
|
||||
|
||||
// seedInitialData executes the baseline SQL seed files exactly once. Because
|
||||
// each INSERT uses ON CONFLICT DO UPDATE, rerunning the files is safe if the
|
||||
// tracking row is ever lost.
|
||||
func seedInitialData(tx *gorm.DB) error {
|
||||
sqlDB, err := tx.DB()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get underlying sql.DB: %w", err)
|
||||
}
|
||||
|
||||
for _, filename := range initialSeedFiles {
|
||||
content, err := readSeedFile(filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("read seed %s: %w", filename, err)
|
||||
}
|
||||
|
||||
for i, stmt := range splitSQL(content) {
|
||||
if _, err := sqlDB.Exec(stmt); err != nil {
|
||||
preview := stmt
|
||||
if len(preview) > 120 {
|
||||
preview = preview[:120] + "..."
|
||||
}
|
||||
return fmt.Errorf("seed %s statement %d failed: %w\nstatement: %s", filename, i+1, err, preview)
|
||||
}
|
||||
}
|
||||
}
|
||||
SeedInitialDataApplied = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func readSeedFile(filename string) (string, error) {
|
||||
paths := []string{
|
||||
filepath.Join("seeds", filename),
|
||||
filepath.Join("./seeds", filename),
|
||||
filepath.Join("/app/seeds", filename),
|
||||
}
|
||||
var lastErr error
|
||||
for _, p := range paths {
|
||||
content, err := os.ReadFile(p)
|
||||
if err == nil {
|
||||
return string(content), nil
|
||||
}
|
||||
lastErr = err
|
||||
}
|
||||
return "", lastErr
|
||||
}
|
||||
|
||||
// splitSQL splits raw SQL into individual statements, respecting single- and
// double-quoted string literals (including '' escapes and backslash-escaped
// quotes) and "--" line comments, and skipping comment-only fragments.
//
// Fix: quote characters inside a "--" line comment previously toggled the
// in-string state (e.g. "-- don't split"), causing every following statement
// to be merged into one. Comment content is now consumed verbatim until the
// end of the line. Block comments (/* ... */) are still not recognized;
// avoid them in seed files.
func splitSQL(sqlContent string) []string {
	var out []string
	var current strings.Builder
	inString := false
	inLineComment := false
	stringChar := byte(0)

	// flush appends the accumulated text as a statement unless it is empty
	// or consists solely of comment lines.
	flush := func() {
		if stmt := strings.TrimSpace(current.String()); stmt != "" && !isSQLCommentOnly(stmt) {
			out = append(out, stmt)
		}
		current.Reset()
	}

	for i := 0; i < len(sqlContent); i++ {
		c := sqlContent[i]

		if inLineComment {
			current.WriteByte(c)
			if c == '\n' {
				inLineComment = false
			}
			continue
		}

		// "--" starts a line comment only outside a string literal.
		if !inString && c == '-' && i+1 < len(sqlContent) && sqlContent[i+1] == '-' {
			inLineComment = true
			current.WriteByte(c)
			continue
		}

		if (c == '\'' || c == '"') && (i == 0 || sqlContent[i-1] != '\\') {
			if !inString {
				inString = true
				stringChar = c
			} else if c == stringChar {
				// A doubled single quote ('') is an escaped quote, not a
				// terminator: copy both characters and stay in the string.
				if c == '\'' && i+1 < len(sqlContent) && sqlContent[i+1] == '\'' {
					current.WriteByte(c)
					i++
					current.WriteByte(sqlContent[i])
					continue
				}
				inString = false
			}
		}

		// A semicolon outside any string terminates the current statement.
		if c == ';' && !inString {
			current.WriteByte(c)
			flush()
			continue
		}

		current.WriteByte(c)
	}

	// Emit any trailing statement that lacks a final semicolon.
	flush()
	return out
}

// isSQLCommentOnly reports whether every non-blank line of stmt is a "--"
// line comment, i.e. the fragment contains no executable SQL.
func isSQLCommentOnly(stmt string) bool {
	for _, line := range strings.Split(stmt, "\n") {
		line = strings.TrimSpace(line)
		if line != "" && !strings.HasPrefix(line, "--") {
			return false
		}
	}
	return true
}
|
||||
@@ -1 +0,0 @@
|
||||
DROP TABLE IF EXISTS webhook_event_log;
|
||||
@@ -1,9 +0,0 @@
|
||||
CREATE TABLE IF NOT EXISTS webhook_event_log (
|
||||
id SERIAL PRIMARY KEY,
|
||||
event_id VARCHAR(255) NOT NULL,
|
||||
provider VARCHAR(20) NOT NULL,
|
||||
event_type VARCHAR(100) NOT NULL,
|
||||
processed_at TIMESTAMPTZ DEFAULT NOW(),
|
||||
payload_hash VARCHAR(64),
|
||||
UNIQUE(provider, event_id)
|
||||
);
|
||||
@@ -1,5 +0,0 @@
|
||||
ALTER TABLE notifications_notificationpreference DROP CONSTRAINT IF EXISTS uq_notif_pref_user;
|
||||
ALTER TABLE subscriptions_usersubscription DROP CONSTRAINT IF EXISTS uq_subscription_user;
|
||||
ALTER TABLE notifications_notification DROP CONSTRAINT IF EXISTS chk_notification_sent_consistency;
|
||||
ALTER TABLE subscriptions_usersubscription DROP CONSTRAINT IF EXISTS chk_subscription_tier;
|
||||
ALTER TABLE task_task DROP CONSTRAINT IF EXISTS chk_task_not_cancelled_and_archived;
|
||||
@@ -1,19 +0,0 @@
|
||||
-- Prevent task from being both cancelled and archived simultaneously
|
||||
ALTER TABLE task_task ADD CONSTRAINT chk_task_not_cancelled_and_archived
|
||||
CHECK (NOT (is_cancelled = true AND is_archived = true));
|
||||
|
||||
-- Subscription tier must be a valid value
|
||||
ALTER TABLE subscriptions_usersubscription ADD CONSTRAINT chk_subscription_tier
|
||||
CHECK (tier IN ('free', 'pro'));
|
||||
|
||||
-- Notification: sent_at must be set when sent is true
|
||||
ALTER TABLE notifications_notification ADD CONSTRAINT chk_notification_sent_consistency
|
||||
CHECK ((sent = false) OR (sent = true AND sent_at IS NOT NULL));
|
||||
|
||||
-- One subscription per user
|
||||
ALTER TABLE subscriptions_usersubscription ADD CONSTRAINT uq_subscription_user
|
||||
UNIQUE (user_id);
|
||||
|
||||
-- One notification preference per user
|
||||
ALTER TABLE notifications_notificationpreference ADD CONSTRAINT uq_notif_pref_user
|
||||
UNIQUE (user_id);
|
||||
@@ -1 +0,0 @@
|
||||
ALTER TABLE task_task DROP COLUMN IF EXISTS version;
|
||||
@@ -1 +0,0 @@
|
||||
ALTER TABLE task_task ADD COLUMN IF NOT EXISTS version INTEGER NOT NULL DEFAULT 1;
|
||||
@@ -1,3 +0,0 @@
|
||||
DROP INDEX IF EXISTS idx_document_residence_active;
|
||||
DROP INDEX IF EXISTS idx_notification_user_unread;
|
||||
DROP INDEX IF EXISTS idx_task_kanban_query;
|
||||
@@ -1,14 +0,0 @@
|
||||
-- Kanban: composite partial index for active task queries by residence with due date ordering
|
||||
CREATE INDEX IF NOT EXISTS idx_task_kanban_query
|
||||
ON task_task (residence_id, next_due_date, due_date)
|
||||
WHERE is_cancelled = false AND is_archived = false;
|
||||
|
||||
-- Notifications: partial index for unread count (hot query)
|
||||
CREATE INDEX IF NOT EXISTS idx_notification_user_unread
|
||||
ON notifications_notification (user_id, read)
|
||||
WHERE read = false;
|
||||
|
||||
-- Documents: partial index for active documents by residence
|
||||
CREATE INDEX IF NOT EXISTS idx_document_residence_active
|
||||
ON documents_document (residence_id, is_active)
|
||||
WHERE is_active = true;
|
||||
Reference in New Issue
Block a user