Fix 113 hardening issues across entire Go backend
Security: - Replace all binding: tags with validate: + c.Validate() in admin handlers - Add rate limiting to auth endpoints (login, register, password reset) - Add security headers (HSTS, XSS protection, nosniff, frame options) - Wire Google Pub/Sub token verification into webhook handler - Replace ParseUnverified with proper OIDC/JWKS key verification - Verify inner Apple JWS signatures in webhook handler - Add io.LimitReader (1MB) to all webhook body reads - Add ownership verification to file deletion - Move hardcoded admin credentials to env vars - Add uniqueIndex to User.Email - Hide ConfirmationCode from JSON serialization - Mask confirmation codes in admin responses - Use http.DetectContentType for upload validation - Fix path traversal in storage service - Replace os.Getenv with Viper in stripe service - Sanitize Redis URLs before logging - Separate DEBUG_FIXED_CODES from DEBUG flag - Reject weak SECRET_KEY in production - Add host check on /_next/* proxy routes - Use explicit localhost CORS origins in debug mode - Replace err.Error() with generic messages in all admin error responses Critical fixes: - Rewrite FCM to HTTP v1 API with OAuth 2.0 service account auth - Fix user_customuser -> auth_user table names in raw SQL - Fix dashboard verified query to use UserProfile model - Add escapeLikeWildcards() to prevent SQL wildcard injection Bug fixes: - Add bounds checks for days/expiring_soon query params (1-3650) - Add receipt_data/transaction_id empty-check to RestoreSubscription - Change Active bool -> *bool in device handler - Check all unchecked GORM/FindByIDWithProfile errors - Add validation for notification hour fields (0-23) - Add max=10000 validation on task description updates Transactions & data integrity: - Wrap registration flow in transaction - Wrap QuickComplete in transaction - Move image creation inside completion transaction - Wrap SetSpecialties in transaction - Wrap GetOrCreateToken in transaction - Wrap completion+image deletion in 
transaction Performance: - Batch completion summaries (2 queries vs 2N) - Reuse single http.Client in IAP validation - Cache dashboard counts (30s TTL) - Batch COUNT queries in admin user list - Add Limit(500) to document queries - Add reminder_stage+due_date filters to reminder queries - Parse AllowedTypes once at init - In-memory user cache in auth middleware (30s TTL) - Timezone change detection cache - Optimize P95 with per-endpoint sorted buffers - Replace crypto/md5 with hash/fnv for ETags Code quality: - Add sync.Once to all monitoring Stop()/Close() methods - Replace 8 fmt.Printf with zerolog in auth service - Log previously discarded errors - Standardize delete response shapes - Route hardcoded English through i18n - Remove FileURL from DocumentResponse (keep MediaURL only) - Thread user timezone through kanban board responses - Initialize empty slices to prevent null JSON - Extract shared field map for task Update/UpdateTx - Delete unused SoftDeleteModel, min(), formatCron, legacy handlers Worker & jobs: - Wire Asynq email infrastructure into worker - Register HandleReminderLogCleanup with daily 3AM cron - Use per-user timezone in HandleSmartReminder - Replace direct DB queries with repository calls - Delete legacy reminder handlers (~200 lines) - Delete unused task type constants Dependencies: - Replace archived jung-kurt/gofpdf with go-pdf/fpdf - Replace unmaintained gomail.v2 with wneessen/go-mail - Add TODO for Echo jwt v3 transitive dep removal Test infrastructure: - Fix MakeRequest/SeedLookupData error handling - Replace os.Exit(0) with t.Skip() in scope/consistency tests - Add 11 new FCM v1 tests Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -38,29 +38,40 @@ func (r *TaskRepository) CreateCompletionTx(tx *gorm.DB, completion *models.Task
|
||||
return tx.Create(completion).Error
|
||||
}
|
||||
|
||||
// taskUpdateFields returns the canonical field map used by both Update and UpdateTx.
|
||||
// Centralised here so the two methods never drift out of sync.
|
||||
func taskUpdateFields(t *models.Task) map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"title": t.Title,
|
||||
"description": t.Description,
|
||||
"category_id": t.CategoryID,
|
||||
"priority_id": t.PriorityID,
|
||||
"frequency_id": t.FrequencyID,
|
||||
"custom_interval_days": t.CustomIntervalDays,
|
||||
"in_progress": t.InProgress,
|
||||
"assigned_to_id": t.AssignedToID,
|
||||
"due_date": t.DueDate,
|
||||
"next_due_date": t.NextDueDate,
|
||||
"estimated_cost": t.EstimatedCost,
|
||||
"actual_cost": t.ActualCost,
|
||||
"contractor_id": t.ContractorID,
|
||||
"is_cancelled": t.IsCancelled,
|
||||
"is_archived": t.IsArchived,
|
||||
"version": gorm.Expr("version + 1"),
|
||||
}
|
||||
}
|
||||
|
||||
// taskUpdateOmitAssociations lists the association fields to omit during task
// updates, so GORM only writes the scalar columns and never cascades into
// related records.
var taskUpdateOmitAssociations = []string{
	"Residence", "CreatedBy", "AssignedTo", "Category", "Priority", "Frequency", "ParentTask", "Completions",
}
|
||||
|
||||
// UpdateTx updates a task with optimistic locking within an existing transaction.
|
||||
func (r *TaskRepository) UpdateTx(tx *gorm.DB, task *models.Task) error {
|
||||
result := tx.Model(task).
|
||||
Where("id = ? AND version = ?", task.ID, task.Version).
|
||||
Omit("Residence", "CreatedBy", "AssignedTo", "Category", "Priority", "Frequency", "ParentTask", "Completions").
|
||||
Updates(map[string]interface{}{
|
||||
"title": task.Title,
|
||||
"description": task.Description,
|
||||
"category_id": task.CategoryID,
|
||||
"priority_id": task.PriorityID,
|
||||
"frequency_id": task.FrequencyID,
|
||||
"custom_interval_days": task.CustomIntervalDays,
|
||||
"in_progress": task.InProgress,
|
||||
"assigned_to_id": task.AssignedToID,
|
||||
"due_date": task.DueDate,
|
||||
"next_due_date": task.NextDueDate,
|
||||
"estimated_cost": task.EstimatedCost,
|
||||
"actual_cost": task.ActualCost,
|
||||
"contractor_id": task.ContractorID,
|
||||
"is_cancelled": task.IsCancelled,
|
||||
"is_archived": task.IsArchived,
|
||||
"version": gorm.Expr("version + 1"),
|
||||
})
|
||||
Omit(taskUpdateOmitAssociations...).
|
||||
Updates(taskUpdateFields(task))
|
||||
if result.Error != nil {
|
||||
return result.Error
|
||||
}
|
||||
@@ -350,25 +361,8 @@ func (r *TaskRepository) Create(task *models.Task) error {
|
||||
func (r *TaskRepository) Update(task *models.Task) error {
|
||||
result := r.db.Model(task).
|
||||
Where("id = ? AND version = ?", task.ID, task.Version).
|
||||
Omit("Residence", "CreatedBy", "AssignedTo", "Category", "Priority", "Frequency", "ParentTask", "Completions").
|
||||
Updates(map[string]interface{}{
|
||||
"title": task.Title,
|
||||
"description": task.Description,
|
||||
"category_id": task.CategoryID,
|
||||
"priority_id": task.PriorityID,
|
||||
"frequency_id": task.FrequencyID,
|
||||
"custom_interval_days": task.CustomIntervalDays,
|
||||
"in_progress": task.InProgress,
|
||||
"assigned_to_id": task.AssignedToID,
|
||||
"due_date": task.DueDate,
|
||||
"next_due_date": task.NextDueDate,
|
||||
"estimated_cost": task.EstimatedCost,
|
||||
"actual_cost": task.ActualCost,
|
||||
"contractor_id": task.ContractorID,
|
||||
"is_cancelled": task.IsCancelled,
|
||||
"is_archived": task.IsArchived,
|
||||
"version": gorm.Expr("version + 1"),
|
||||
})
|
||||
Omit(taskUpdateOmitAssociations...).
|
||||
Updates(taskUpdateFields(task))
|
||||
if result.Error != nil {
|
||||
return result.Error
|
||||
}
|
||||
@@ -728,13 +722,18 @@ func (r *TaskRepository) UpdateCompletion(completion *models.TaskCompletion) err
|
||||
return r.db.Omit("Task", "CompletedBy", "Images").Save(completion).Error
|
||||
}
|
||||
|
||||
// DeleteCompletion deletes a task completion
|
||||
// DeleteCompletion deletes a task completion and its associated images atomically.
|
||||
// Wrapped in a transaction so that if the completion delete fails, image
|
||||
// deletions are rolled back as well.
|
||||
func (r *TaskRepository) DeleteCompletion(id uint) error {
|
||||
// Delete images first
|
||||
if err := r.db.Where("completion_id = ?", id).Delete(&models.TaskCompletionImage{}).Error; err != nil {
|
||||
log.Error().Err(err).Uint("completion_id", id).Msg("Failed to delete completion images")
|
||||
}
|
||||
return r.db.Delete(&models.TaskCompletion{}, id).Error
|
||||
return r.db.Transaction(func(tx *gorm.DB) error {
|
||||
// Delete images first
|
||||
if err := tx.Where("completion_id = ?", id).Delete(&models.TaskCompletionImage{}).Error; err != nil {
|
||||
log.Error().Err(err).Uint("completion_id", id).Msg("Failed to delete completion images")
|
||||
return err
|
||||
}
|
||||
return tx.Delete(&models.TaskCompletion{}, id).Error
|
||||
})
|
||||
}
|
||||
|
||||
// CreateCompletionImage creates a new completion image
|
||||
@@ -912,3 +911,128 @@ func (r *TaskRepository) GetCompletionSummary(residenceID uint, now time.Time, m
|
||||
Months: months,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetBatchCompletionSummaries returns completion summaries for multiple residences
|
||||
// in two queries total (one for all-time counts, one for monthly breakdowns),
|
||||
// instead of 2*N queries when calling GetCompletionSummary per residence.
|
||||
func (r *TaskRepository) GetBatchCompletionSummaries(residenceIDs []uint, now time.Time, maxPerMonth int) (map[uint]*responses.CompletionSummary, error) {
|
||||
result := make(map[uint]*responses.CompletionSummary, len(residenceIDs))
|
||||
if len(residenceIDs) == 0 {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// 1. Total all-time completions per residence (single query)
|
||||
type allTimeRow struct {
|
||||
ResidenceID uint
|
||||
Count int64
|
||||
}
|
||||
var allTimeRows []allTimeRow
|
||||
err := r.db.Model(&models.TaskCompletion{}).
|
||||
Select("task_task.residence_id, COUNT(*) as count").
|
||||
Joins("JOIN task_task ON task_task.id = task_taskcompletion.task_id").
|
||||
Where("task_task.residence_id IN ?", residenceIDs).
|
||||
Group("task_task.residence_id").
|
||||
Scan(&allTimeRows).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
allTimeMap := make(map[uint]int64, len(allTimeRows))
|
||||
for _, row := range allTimeRows {
|
||||
allTimeMap[row.ResidenceID] = row.Count
|
||||
}
|
||||
|
||||
// 2. Monthly breakdown for last 12 months across all residences (single query)
|
||||
startDate := time.Date(now.Year()-1, now.Month(), 1, 0, 0, 0, 0, now.Location())
|
||||
|
||||
dateExpr := "TO_CHAR(task_taskcompletion.completed_at, 'YYYY-MM')"
|
||||
if r.db.Dialector.Name() == "sqlite" {
|
||||
dateExpr = "strftime('%Y-%m', task_taskcompletion.completed_at)"
|
||||
}
|
||||
|
||||
var rows []completionAggRow
|
||||
err = r.db.Model(&models.TaskCompletion{}).
|
||||
Select(fmt.Sprintf("task_task.residence_id, task_taskcompletion.completed_from_column, %s as completed_month, COUNT(*) as count", dateExpr)).
|
||||
Joins("JOIN task_task ON task_task.id = task_taskcompletion.task_id").
|
||||
Where("task_task.residence_id IN ? AND task_taskcompletion.completed_at >= ?", residenceIDs, startDate).
|
||||
Group(fmt.Sprintf("task_task.residence_id, task_taskcompletion.completed_from_column, %s", dateExpr)).
|
||||
Order("completed_month ASC").
|
||||
Scan(&rows).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 3. Build per-residence summaries
|
||||
type monthData struct {
|
||||
columns map[string]int
|
||||
total int
|
||||
}
|
||||
|
||||
// Initialize all residences with empty month maps
|
||||
residenceMonths := make(map[uint]map[string]*monthData, len(residenceIDs))
|
||||
for _, rid := range residenceIDs {
|
||||
mm := make(map[string]*monthData, 12)
|
||||
for i := 0; i < 12; i++ {
|
||||
m := startDate.AddDate(0, i, 0)
|
||||
key := m.Format("2006-01")
|
||||
mm[key] = &monthData{columns: make(map[string]int)}
|
||||
}
|
||||
residenceMonths[rid] = mm
|
||||
}
|
||||
|
||||
// Populate from query results
|
||||
residenceLast12 := make(map[uint]int, len(residenceIDs))
|
||||
for _, row := range rows {
|
||||
mm, ok := residenceMonths[row.ResidenceID]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
md, ok := mm[row.CompletedMonth]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
md.columns[row.CompletedFromColumn] = int(row.Count)
|
||||
md.total += int(row.Count)
|
||||
residenceLast12[row.ResidenceID] += int(row.Count)
|
||||
}
|
||||
|
||||
// Convert to response DTOs per residence
|
||||
for _, rid := range residenceIDs {
|
||||
mm := residenceMonths[rid]
|
||||
months := make([]responses.MonthlyCompletionSummary, 0, 12)
|
||||
for i := 0; i < 12; i++ {
|
||||
m := startDate.AddDate(0, i, 0)
|
||||
key := m.Format("2006-01")
|
||||
md := mm[key]
|
||||
|
||||
completions := make([]responses.ColumnCompletionCount, 0)
|
||||
for col, count := range md.columns {
|
||||
completions = append(completions, responses.ColumnCompletionCount{
|
||||
Column: col,
|
||||
Color: KanbanColumnColor(col),
|
||||
Count: count,
|
||||
})
|
||||
}
|
||||
|
||||
overflow := 0
|
||||
if md.total > maxPerMonth {
|
||||
overflow = md.total - maxPerMonth
|
||||
}
|
||||
|
||||
months = append(months, responses.MonthlyCompletionSummary{
|
||||
Month: key,
|
||||
Completions: completions,
|
||||
Total: md.total,
|
||||
Overflow: overflow,
|
||||
})
|
||||
}
|
||||
|
||||
result[rid] = &responses.CompletionSummary{
|
||||
TotalAllTime: int(allTimeMap[rid]),
|
||||
TotalLast12Months: residenceLast12[rid],
|
||||
Months: months,
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user