Fix 113 hardening issues across entire Go backend

Security:
- Replace all binding: tags with validate: + c.Validate() in admin handlers
- Add rate limiting to auth endpoints (login, register, password reset)
- Add security headers (HSTS, XSS protection, nosniff, frame options)
- Wire Google Pub/Sub token verification into webhook handler
- Replace ParseUnverified with proper OIDC/JWKS key verification
- Verify inner Apple JWS signatures in webhook handler
- Add io.LimitReader (1MB) to all webhook body reads
- Add ownership verification to file deletion
- Move hardcoded admin credentials to env vars
- Add uniqueIndex to User.Email
- Hide ConfirmationCode from JSON serialization
- Mask confirmation codes in admin responses
- Use http.DetectContentType for upload validation
- Fix path traversal in storage service
- Replace os.Getenv with Viper in stripe service
- Sanitize Redis URLs before logging
- Separate DEBUG_FIXED_CODES from DEBUG flag
- Reject weak SECRET_KEY in production
- Add host check on /_next/* proxy routes
- Use explicit localhost CORS origins in debug mode
- Replace err.Error() with generic messages in all admin error responses

Critical fixes:
- Rewrite FCM to HTTP v1 API with OAuth 2.0 service account auth
- Fix user_customuser -> auth_user table names in raw SQL
- Fix dashboard verified query to use UserProfile model
- Add escapeLikeWildcards() to prevent SQL wildcard injection

Bug fixes:
- Add bounds checks for days/expiring_soon query params (1-3650)
- Add receipt_data/transaction_id empty-check to RestoreSubscription
- Change Active bool -> *bool in device handler
- Check all unchecked GORM/FindByIDWithProfile errors
- Add validation for notification hour fields (0-23)
- Add max=10000 validation on task description updates

Transactions & data integrity:
- Wrap registration flow in transaction
- Wrap QuickComplete in transaction
- Move image creation inside completion transaction
- Wrap SetSpecialties in transaction
- Wrap GetOrCreateToken in transaction
- Wrap completion+image deletion in transaction

Performance:
- Batch completion summaries (2 queries vs 2N)
- Reuse single http.Client in IAP validation
- Cache dashboard counts (30s TTL)
- Batch COUNT queries in admin user list
- Add Limit(500) to document queries
- Add reminder_stage+due_date filters to reminder queries
- Parse AllowedTypes once at init
- In-memory user cache in auth middleware (30s TTL)
- Timezone change detection cache
- Optimize P95 with per-endpoint sorted buffers
- Replace crypto/md5 with hash/fnv for ETags

Code quality:
- Add sync.Once to all monitoring Stop()/Close() methods
- Replace 8 fmt.Printf with zerolog in auth service
- Log previously discarded errors
- Standardize delete response shapes
- Route hardcoded English through i18n
- Remove FileURL from DocumentResponse (keep MediaURL only)
- Thread user timezone through kanban board responses
- Initialize empty slices to prevent null JSON
- Extract shared field map for task Update/UpdateTx
- Delete unused SoftDeleteModel, min(), formatCron, legacy handlers

Worker & jobs:
- Wire Asynq email infrastructure into worker
- Register HandleReminderLogCleanup with daily 3AM cron
- Use per-user timezone in HandleSmartReminder
- Replace direct DB queries with repository calls
- Delete legacy reminder handlers (~200 lines)
- Delete unused task type constants

Dependencies:
- Replace archived jung-kurt/gofpdf with go-pdf/fpdf
- Replace unmaintained gomail.v2 with wneessen/go-mail
- Add TODO for Echo jwt v3 transitive dep removal

Test infrastructure:
- Fix MakeRequest/SeedLookupData error handling
- Replace os.Exit(0) with t.Skip() in scope/consistency tests
- Add 11 new FCM v1 tests

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Trey t
2026-03-18 23:14:13 -05:00
parent 3b86d0aae1
commit 42a5533a56
95 changed files with 2892 additions and 1783 deletions

View File

@@ -124,29 +124,33 @@ func (r *ContractorRepository) GetTasksForContractor(contractorID uint) ([]model
return tasks, err
}
// SetSpecialties sets the specialties for a contractor.
// Wrapped in a transaction so that clearing existing specialties and
// appending new ones are atomic -- a failure in either step rolls back both.
func (r *ContractorRepository) SetSpecialties(contractorID uint, specialtyIDs []uint) error {
	return r.db.Transaction(func(tx *gorm.DB) error {
		// Load the contractor inside the transaction so the existence check
		// and the association writes see a consistent view.
		var contractor models.Contractor
		if err := tx.First(&contractor, contractorID).Error; err != nil {
			return err
		}

		// Clear existing specialties.
		if err := tx.Model(&contractor).Association("Specialties").Clear(); err != nil {
			return err
		}

		// Empty input means "remove all specialties" -- the clear above is
		// the desired end state, so commit with nothing to append.
		if len(specialtyIDs) == 0 {
			return nil
		}

		// Load and attach the new specialties.
		var specialties []models.ContractorSpecialty
		if err := tx.Where("id IN ?", specialtyIDs).Find(&specialties).Error; err != nil {
			return err
		}
		return tx.Model(&contractor).Association("Specialties").Append(specialties)
	})
}
// CountByResidence counts contractors in a residence

View File

@@ -98,7 +98,7 @@ func (r *DocumentRepository) FindByUserFiltered(residenceIDs []uint, filter *Doc
}
var documents []models.Document
err := query.Order("created_at DESC").Find(&documents).Error
err := query.Order("created_at DESC").Limit(500).Find(&documents).Error
return documents, err
}

View File

@@ -88,10 +88,33 @@ func (r *ReminderRepository) HasSentReminderBatch(keys []ReminderKey) (map[int]b
userIDs = append(userIDs, id)
}
// Collect unique stages and due dates for tighter SQL filtering
stageSet := make(map[models.ReminderStage]bool)
dueDateSet := make(map[string]bool)
var minDueDate, maxDueDate time.Time
for _, k := range keys {
stageSet[k.Stage] = true
dueDateOnly := time.Date(k.DueDate.Year(), k.DueDate.Month(), k.DueDate.Day(), 0, 0, 0, 0, time.UTC)
dueDateSet[dueDateOnly.Format("2006-01-02")] = true
if minDueDate.IsZero() || dueDateOnly.Before(minDueDate) {
minDueDate = dueDateOnly
}
if maxDueDate.IsZero() || dueDateOnly.After(maxDueDate) {
maxDueDate = dueDateOnly
}
}
stages := make([]models.ReminderStage, 0, len(stageSet))
for s := range stageSet {
stages = append(stages, s)
}
// Query matching reminder logs with tighter filters to reduce result set.
// Filter on reminder_stage and due_date range in addition to task_id/user_id.
var logs []models.TaskReminderLog
err := r.db.Where("task_id IN ? AND user_id IN ?", taskIDs, userIDs).
Find(&logs).Error
err := r.db.Where(
"task_id IN ? AND user_id IN ? AND reminder_stage IN ? AND due_date >= ? AND due_date <= ?",
taskIDs, userIDs, stages, minDueDate, maxDueDate,
).Find(&logs).Error
if err != nil {
return nil, err
}

View File

@@ -196,6 +196,20 @@ func (r *ResidenceRepository) CountByOwner(userID uint) (int64, error) {
return count, err
}
// FindResidenceIDsByOwner returns just the IDs of residences a user owns.
// This is a lightweight alternative to FindOwnedByUser() when only IDs are needed
// for batch queries against related tables (tasks, contractors, documents).
func (r *ResidenceRepository) FindResidenceIDsByOwner(userID uint) ([]uint, error) {
	var residenceIDs []uint
	// Pluck selects only the id column, avoiding hydration of full models.
	if err := r.db.Model(&models.Residence{}).
		Where("owner_id = ? AND is_active = ?", userID, true).
		Pluck("id", &residenceIDs).Error; err != nil {
		return nil, err
	}
	return residenceIDs, nil
}
// === Share Code Operations ===
// CreateShareCode creates a new share code for a residence

View File

@@ -129,12 +129,21 @@ func (r *SubscriptionRepository) UpdatePurchaseToken(userID uint, token string)
Update("google_purchase_token", token).Error
}
// FindByAppleReceiptContains finds a subscription by Apple transaction ID.
// Used by webhooks to find the user associated with a transaction.
//
// PERFORMANCE NOTE: This uses a LIKE '%...%' scan on apple_receipt_data which
// cannot use a B-tree index and results in a full table scan. For better
// performance at scale, add a dedicated indexed column:
//
// AppleTransactionID *string `gorm:"column:apple_transaction_id;size:255;index"`
//
// Then look up by exact match: WHERE apple_transaction_id = ?
func (r *SubscriptionRepository) FindByAppleReceiptContains(transactionID string) (*models.UserSubscription, error) {
var sub models.UserSubscription
// Search for transaction ID in the stored receipt data
err := r.db.Where("apple_receipt_data LIKE ?", "%"+transactionID+"%").First(&sub).Error
// Escape LIKE wildcards in the transaction ID to prevent wildcard injection
escaped := escapeLikeWildcards(transactionID)
err := r.db.Where("apple_receipt_data LIKE ?", "%"+escaped+"%").First(&sub).Error
if err != nil {
return nil, err
}

View File

@@ -38,29 +38,40 @@ func (r *TaskRepository) CreateCompletionTx(tx *gorm.DB, completion *models.Task
return tx.Create(completion).Error
}
// taskUpdateFields returns the canonical field map used by both Update and UpdateTx.
// Centralised here so the two methods never drift out of sync.
func taskUpdateFields(task *models.Task) map[string]interface{} {
	fields := map[string]interface{}{
		"title":                task.Title,
		"description":          task.Description,
		"category_id":          task.CategoryID,
		"priority_id":          task.PriorityID,
		"frequency_id":         task.FrequencyID,
		"custom_interval_days": task.CustomIntervalDays,
		"in_progress":          task.InProgress,
		"assigned_to_id":       task.AssignedToID,
		"due_date":             task.DueDate,
		"next_due_date":        task.NextDueDate,
		"estimated_cost":       task.EstimatedCost,
		"actual_cost":          task.ActualCost,
		"contractor_id":        task.ContractorID,
		"is_cancelled":         task.IsCancelled,
		"is_archived":          task.IsArchived,
	}
	// Optimistic locking: every successful update bumps the version counter
	// server-side, so a stale writer's WHERE version = ? clause misses.
	fields["version"] = gorm.Expr("version + 1")
	return fields
}
// taskUpdateOmitAssociations lists the association fields to omit during task
// updates, so GORM never cascades writes into related rows on a task save.
var taskUpdateOmitAssociations = []string{
	"Residence",
	"CreatedBy",
	"AssignedTo",
	"Category",
	"Priority",
	"Frequency",
	"ParentTask",
	"Completions",
}
// UpdateTx updates a task with optimistic locking within an existing transaction.
func (r *TaskRepository) UpdateTx(tx *gorm.DB, task *models.Task) error {
result := tx.Model(task).
Where("id = ? AND version = ?", task.ID, task.Version).
Omit("Residence", "CreatedBy", "AssignedTo", "Category", "Priority", "Frequency", "ParentTask", "Completions").
Updates(map[string]interface{}{
"title": task.Title,
"description": task.Description,
"category_id": task.CategoryID,
"priority_id": task.PriorityID,
"frequency_id": task.FrequencyID,
"custom_interval_days": task.CustomIntervalDays,
"in_progress": task.InProgress,
"assigned_to_id": task.AssignedToID,
"due_date": task.DueDate,
"next_due_date": task.NextDueDate,
"estimated_cost": task.EstimatedCost,
"actual_cost": task.ActualCost,
"contractor_id": task.ContractorID,
"is_cancelled": task.IsCancelled,
"is_archived": task.IsArchived,
"version": gorm.Expr("version + 1"),
})
Omit(taskUpdateOmitAssociations...).
Updates(taskUpdateFields(task))
if result.Error != nil {
return result.Error
}
@@ -350,25 +361,8 @@ func (r *TaskRepository) Create(task *models.Task) error {
func (r *TaskRepository) Update(task *models.Task) error {
result := r.db.Model(task).
Where("id = ? AND version = ?", task.ID, task.Version).
Omit("Residence", "CreatedBy", "AssignedTo", "Category", "Priority", "Frequency", "ParentTask", "Completions").
Updates(map[string]interface{}{
"title": task.Title,
"description": task.Description,
"category_id": task.CategoryID,
"priority_id": task.PriorityID,
"frequency_id": task.FrequencyID,
"custom_interval_days": task.CustomIntervalDays,
"in_progress": task.InProgress,
"assigned_to_id": task.AssignedToID,
"due_date": task.DueDate,
"next_due_date": task.NextDueDate,
"estimated_cost": task.EstimatedCost,
"actual_cost": task.ActualCost,
"contractor_id": task.ContractorID,
"is_cancelled": task.IsCancelled,
"is_archived": task.IsArchived,
"version": gorm.Expr("version + 1"),
})
Omit(taskUpdateOmitAssociations...).
Updates(taskUpdateFields(task))
if result.Error != nil {
return result.Error
}
@@ -728,13 +722,18 @@ func (r *TaskRepository) UpdateCompletion(completion *models.TaskCompletion) err
return r.db.Omit("Task", "CompletedBy", "Images").Save(completion).Error
}
// DeleteCompletion deletes a task completion and its associated images atomically.
// Wrapped in a transaction so that if the completion delete fails, image
// deletions are rolled back as well.
func (r *TaskRepository) DeleteCompletion(id uint) error {
	return r.db.Transaction(func(tx *gorm.DB) error {
		// Delete images first so the completion row never outlives its images;
		// a failure here aborts the whole operation instead of being ignored.
		if err := tx.Where("completion_id = ?", id).Delete(&models.TaskCompletionImage{}).Error; err != nil {
			log.Error().Err(err).Uint("completion_id", id).Msg("Failed to delete completion images")
			return err
		}
		return tx.Delete(&models.TaskCompletion{}, id).Error
	})
}
// CreateCompletionImage creates a new completion image
@@ -912,3 +911,128 @@ func (r *TaskRepository) GetCompletionSummary(residenceID uint, now time.Time, m
Months: months,
}, nil
}
// GetBatchCompletionSummaries returns completion summaries for multiple residences
// in two queries total (one for all-time counts, one for monthly breakdowns),
// instead of 2*N queries when calling GetCompletionSummary per residence.
//
// Parameters:
//   - residenceIDs: residences to summarise; an empty slice returns an empty map.
//   - now: reference time; the monthly window is the 12 months ending at now.
//   - maxPerMonth: cap used to compute per-month overflow in the response.
//
// Returns a map keyed by residence ID; every requested residence gets an entry.
func (r *TaskRepository) GetBatchCompletionSummaries(residenceIDs []uint, now time.Time, maxPerMonth int) (map[uint]*responses.CompletionSummary, error) {
	result := make(map[uint]*responses.CompletionSummary, len(residenceIDs))
	if len(residenceIDs) == 0 {
		return result, nil
	}
	// 1. Total all-time completions per residence (single query)
	type allTimeRow struct {
		ResidenceID uint
		Count       int64
	}
	var allTimeRows []allTimeRow
	err := r.db.Model(&models.TaskCompletion{}).
		Select("task_task.residence_id, COUNT(*) as count").
		Joins("JOIN task_task ON task_task.id = task_taskcompletion.task_id").
		Where("task_task.residence_id IN ?", residenceIDs).
		Group("task_task.residence_id").
		Scan(&allTimeRows).Error
	if err != nil {
		return nil, err
	}
	allTimeMap := make(map[uint]int64, len(allTimeRows))
	for _, row := range allTimeRows {
		allTimeMap[row.ResidenceID] = row.Count
	}
	// 2. Monthly breakdown for last 12 months across all residences (single query)
	startDate := time.Date(now.Year()-1, now.Month(), 1, 0, 0, 0, 0, now.Location())
	// Month-bucketing SQL differs by dialect: TO_CHAR elsewhere, strftime on sqlite
	// (presumably sqlite is the test dialect -- confirm against test setup).
	dateExpr := "TO_CHAR(task_taskcompletion.completed_at, 'YYYY-MM')"
	if r.db.Dialector.Name() == "sqlite" {
		dateExpr = "strftime('%Y-%m', task_taskcompletion.completed_at)"
	}
	var rows []completionAggRow
	err = r.db.Model(&models.TaskCompletion{}).
		Select(fmt.Sprintf("task_task.residence_id, task_taskcompletion.completed_from_column, %s as completed_month, COUNT(*) as count", dateExpr)).
		Joins("JOIN task_task ON task_task.id = task_taskcompletion.task_id").
		Where("task_task.residence_id IN ? AND task_taskcompletion.completed_at >= ?", residenceIDs, startDate).
		Group(fmt.Sprintf("task_task.residence_id, task_taskcompletion.completed_from_column, %s", dateExpr)).
		Order("completed_month ASC").
		Scan(&rows).Error
	if err != nil {
		return nil, err
	}
	// 3. Build per-residence summaries
	type monthData struct {
		columns map[string]int // completions per kanban column within the month
		total   int            // total completions within the month
	}
	// Initialize all residences with empty month maps so months with zero
	// completions still appear in the output.
	residenceMonths := make(map[uint]map[string]*monthData, len(residenceIDs))
	for _, rid := range residenceIDs {
		mm := make(map[string]*monthData, 12)
		for i := 0; i < 12; i++ {
			m := startDate.AddDate(0, i, 0)
			key := m.Format("2006-01")
			mm[key] = &monthData{columns: make(map[string]int)}
		}
		residenceMonths[rid] = mm
	}
	// Populate from query results. Rows outside the pre-built residence/month
	// keys are skipped defensively rather than creating new buckets.
	residenceLast12 := make(map[uint]int, len(residenceIDs))
	for _, row := range rows {
		mm, ok := residenceMonths[row.ResidenceID]
		if !ok {
			continue
		}
		md, ok := mm[row.CompletedMonth]
		if !ok {
			continue
		}
		md.columns[row.CompletedFromColumn] = int(row.Count)
		md.total += int(row.Count)
		residenceLast12[row.ResidenceID] += int(row.Count)
	}
	// Convert to response DTOs per residence, iterating months in chronological
	// order (map iteration alone would be nondeterministic).
	for _, rid := range residenceIDs {
		mm := residenceMonths[rid]
		months := make([]responses.MonthlyCompletionSummary, 0, 12)
		for i := 0; i < 12; i++ {
			m := startDate.AddDate(0, i, 0)
			key := m.Format("2006-01")
			md := mm[key]
			// Non-nil empty slice so JSON serialises as [] rather than null.
			completions := make([]responses.ColumnCompletionCount, 0)
			for col, count := range md.columns {
				completions = append(completions, responses.ColumnCompletionCount{
					Column: col,
					Color:  KanbanColumnColor(col),
					Count:  count,
				})
			}
			// Overflow reports how many completions exceed the display cap.
			overflow := 0
			if md.total > maxPerMonth {
				overflow = md.total - maxPerMonth
			}
			months = append(months, responses.MonthlyCompletionSummary{
				Month:       key,
				Completions: completions,
				Total:       md.total,
				Overflow:    overflow,
			})
		}
		result[rid] = &responses.CompletionSummary{
			TotalAllTime:      int(allTimeMap[rid]),
			TotalLast12Months: residenceLast12[rid],
			Months:            months,
		}
	}
	return result, nil
}

View File

@@ -34,6 +34,16 @@ func NewUserRepository(db *gorm.DB) *UserRepository {
return &UserRepository{db: db}
}
// Transaction runs fn inside a database transaction. The callback receives a
// new UserRepository backed by the transaction so all operations within fn
// share the same transactional connection.
func (r *UserRepository) Transaction(fn func(txRepo *UserRepository) error) error {
	return r.db.Transaction(func(tx *gorm.DB) error {
		// Hand fn a repository bound to the tx connection; returning a
		// non-nil error from fn rolls the transaction back.
		return fn(&UserRepository{db: tx})
	})
}
// FindByID finds a user by ID
func (r *UserRepository) FindByID(id uint) (*models.User, error) {
var user models.User
@@ -130,18 +140,28 @@ func (r *UserRepository) ExistsByEmail(email string) (bool, error) {
// --- Auth Token Methods ---
// GetOrCreateToken gets or creates an auth token for a user.
// Wrapped in a transaction to prevent race conditions where two
// concurrent requests could create duplicate tokens for the same user.
func (r *UserRepository) GetOrCreateToken(userID uint) (*models.AuthToken, error) {
var token models.AuthToken
result := r.db.Where("user_id = ?", userID).First(&token)
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
token = models.AuthToken{UserID: userID}
if err := r.db.Create(&token).Error; err != nil {
return nil, err
err := r.db.Transaction(func(tx *gorm.DB) error {
result := tx.Where("user_id = ?", userID).First(&token)
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
token = models.AuthToken{UserID: userID}
if err := tx.Create(&token).Error; err != nil {
return err
}
} else if result.Error != nil {
return result.Error
}
} else if result.Error != nil {
return nil, result.Error
return nil
})
if err != nil {
return nil, err
}
return &token, nil
@@ -341,7 +361,7 @@ func (r *UserRepository) SearchUsers(query string, limit, offset int) ([]models.
var users []models.User
var total int64
searchQuery := "%" + strings.ToLower(query) + "%"
searchQuery := "%" + escapeLikeWildcards(strings.ToLower(query)) + "%"
baseQuery := r.db.Model(&models.User{}).
Where("LOWER(username) LIKE ? OR LOWER(email) LIKE ? OR LOWER(first_name) LIKE ? OR LOWER(last_name) LIKE ?",
@@ -384,7 +404,7 @@ func (r *UserRepository) FindUsersInSharedResidences(userID uint) ([]models.User
// 2. Members of residences owned by current user
// 3. Members of residences where current user is also a member
err := r.db.Raw(`
SELECT DISTINCT u.* FROM user_customuser u
SELECT DISTINCT u.* FROM auth_user u
WHERE u.id != ? AND u.is_active = true AND (
-- Users who own residences where current user is a shared user
u.id IN (
@@ -417,7 +437,7 @@ func (r *UserRepository) FindUserIfSharedResidence(targetUserID, requestingUserI
var user models.User
err := r.db.Raw(`
SELECT u.* FROM user_customuser u
SELECT u.* FROM auth_user u
WHERE u.id = ? AND u.is_active = true AND (
u.id = ? OR
-- Target owns a residence where requester is a member
@@ -460,7 +480,7 @@ func (r *UserRepository) FindProfilesInSharedResidences(userID uint) ([]models.U
err := r.db.Raw(`
SELECT p.* FROM user_userprofile p
INNER JOIN user_customuser u ON p.user_id = u.id
INNER JOIN auth_user u ON p.user_id = u.id
WHERE u.is_active = true AND (
u.id = ? OR
-- Users who own residences where current user is a shared user