Security: - Replace all binding: tags with validate: + c.Validate() in admin handlers - Add rate limiting to auth endpoints (login, register, password reset) - Add security headers (HSTS, XSS protection, nosniff, frame options) - Wire Google Pub/Sub token verification into webhook handler - Replace ParseUnverified with proper OIDC/JWKS key verification - Verify inner Apple JWS signatures in webhook handler - Add io.LimitReader (1MB) to all webhook body reads - Add ownership verification to file deletion - Move hardcoded admin credentials to env vars - Add uniqueIndex to User.Email - Hide ConfirmationCode from JSON serialization - Mask confirmation codes in admin responses - Use http.DetectContentType for upload validation - Fix path traversal in storage service - Replace os.Getenv with Viper in stripe service - Sanitize Redis URLs before logging - Separate DEBUG_FIXED_CODES from DEBUG flag - Reject weak SECRET_KEY in production - Add host check on /_next/* proxy routes - Use explicit localhost CORS origins in debug mode - Replace err.Error() with generic messages in all admin error responses Critical fixes: - Rewrite FCM to HTTP v1 API with OAuth 2.0 service account auth - Fix user_customuser -> auth_user table names in raw SQL - Fix dashboard verified query to use UserProfile model - Add escapeLikeWildcards() to prevent SQL wildcard injection Bug fixes: - Add bounds checks for days/expiring_soon query params (1-3650) - Add receipt_data/transaction_id empty-check to RestoreSubscription - Change Active bool -> *bool in device handler - Check all unchecked GORM/FindByIDWithProfile errors - Add validation for notification hour fields (0-23) - Add max=10000 validation on task description updates Transactions & data integrity: - Wrap registration flow in transaction - Wrap QuickComplete in transaction - Move image creation inside completion transaction - Wrap SetSpecialties in transaction - Wrap GetOrCreateToken in transaction - Wrap completion+image deletion in 
transaction Performance: - Batch completion summaries (2 queries vs 2N) - Reuse single http.Client in IAP validation - Cache dashboard counts (30s TTL) - Batch COUNT queries in admin user list - Add Limit(500) to document queries - Add reminder_stage+due_date filters to reminder queries - Parse AllowedTypes once at init - In-memory user cache in auth middleware (30s TTL) - Timezone change detection cache - Optimize P95 with per-endpoint sorted buffers - Replace crypto/md5 with hash/fnv for ETags Code quality: - Add sync.Once to all monitoring Stop()/Close() methods - Replace 8 fmt.Printf with zerolog in auth service - Log previously discarded errors - Standardize delete response shapes - Route hardcoded English through i18n - Remove FileURL from DocumentResponse (keep MediaURL only) - Thread user timezone through kanban board responses - Initialize empty slices to prevent null JSON - Extract shared field map for task Update/UpdateTx - Delete unused SoftDeleteModel, min(), formatCron, legacy handlers Worker & jobs: - Wire Asynq email infrastructure into worker - Register HandleReminderLogCleanup with daily 3AM cron - Use per-user timezone in HandleSmartReminder - Replace direct DB queries with repository calls - Delete legacy reminder handlers (~200 lines) - Delete unused task type constants Dependencies: - Replace archived jung-kurt/gofpdf with go-pdf/fpdf - Replace unmaintained gomail.v2 with wneessen/go-mail - Add TODO for Echo jwt v3 transitive dep removal Test infrastructure: - Fix MakeRequest/SeedLookupData error handling - Replace os.Exit(0) with t.Skip() in scope/consistency tests - Add 11 new FCM v1 tests Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
218 lines
5.0 KiB
Go
218 lines
5.0 KiB
Go
package monitoring
|
|
|
|
import (
|
|
"context"
|
|
"encoding/json"
|
|
"net/http"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
|
|
"github.com/labstack/echo/v4"
|
|
"github.com/gorilla/websocket"
|
|
"github.com/rs/zerolog/log"
|
|
)
|
|
|
|
var upgrader = websocket.Upgrader{
|
|
ReadBufferSize: 1024,
|
|
WriteBufferSize: 1024,
|
|
CheckOrigin: func(r *http.Request) bool {
|
|
origin := r.Header.Get("Origin")
|
|
if origin == "" {
|
|
// Same-origin requests may omit the Origin header
|
|
return true
|
|
}
|
|
// Allow if origin matches the request host
|
|
return strings.HasPrefix(origin, "https://"+r.Host) || strings.HasPrefix(origin, "http://"+r.Host)
|
|
},
|
|
}
|
|
|
|
// Handler provides HTTP handlers for monitoring endpoints.
//
// It serves recent log entries (with level/process/search filtering),
// per-process system statistics, log clearing, and a WebSocket stream
// that pushes live logs and periodic stats snapshots.
type Handler struct {
	logBuffer  *LogBuffer  // recent log entries; used via GetRecent/Subscribe/Clear
	statsStore *StatsStore // per-process stats; used via GetAllStats
}
|
|
|
|
// NewHandler creates a new monitoring handler
|
|
func NewHandler(logBuffer *LogBuffer, statsStore *StatsStore) *Handler {
|
|
return &Handler{
|
|
logBuffer: logBuffer,
|
|
statsStore: statsStore,
|
|
}
|
|
}
|
|
|
|
// GetLogs returns filtered log entries
|
|
// GET /api/admin/monitoring/logs
|
|
func (h *Handler) GetLogs(c echo.Context) error {
|
|
var filters LogFilters
|
|
if err := c.Bind(&filters); err != nil {
|
|
return c.JSON(http.StatusBadRequest, map[string]interface{}{"error": "Invalid filters"})
|
|
}
|
|
|
|
limit := filters.GetLimit()
|
|
|
|
// Get more entries than requested for filtering
|
|
entries, err := h.logBuffer.GetRecent(limit * 2)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to get logs from buffer")
|
|
return c.JSON(http.StatusInternalServerError, map[string]interface{}{"error": "Failed to retrieve logs"})
|
|
}
|
|
|
|
// Apply filters
|
|
filtered := make([]LogEntry, 0, len(entries))
|
|
for _, e := range entries {
|
|
// Level filter
|
|
if filters.Level != "" && e.Level != filters.Level {
|
|
continue
|
|
}
|
|
|
|
// Process filter
|
|
if filters.Process != "" && e.Process != filters.Process {
|
|
continue
|
|
}
|
|
|
|
// Search filter (case-insensitive)
|
|
if filters.Search != "" {
|
|
searchLower := strings.ToLower(filters.Search)
|
|
messageLower := strings.ToLower(e.Message)
|
|
if !strings.Contains(messageLower, searchLower) {
|
|
continue
|
|
}
|
|
}
|
|
|
|
filtered = append(filtered, e)
|
|
if len(filtered) >= limit {
|
|
break
|
|
}
|
|
}
|
|
|
|
return c.JSON(http.StatusOK, map[string]interface{}{
|
|
"logs": filtered,
|
|
"total": len(filtered),
|
|
})
|
|
}
|
|
|
|
// GetStats returns system statistics for all processes
|
|
// GET /api/admin/monitoring/stats
|
|
func (h *Handler) GetStats(c echo.Context) error {
|
|
allStats, err := h.statsStore.GetAllStats()
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to get stats from store")
|
|
return c.JSON(http.StatusInternalServerError, map[string]interface{}{"error": "Failed to retrieve stats"})
|
|
}
|
|
|
|
return c.JSON(http.StatusOK, allStats)
|
|
}
|
|
|
|
// ClearLogs clears all logs from the buffer
|
|
// DELETE /api/admin/monitoring/logs
|
|
func (h *Handler) ClearLogs(c echo.Context) error {
|
|
if err := h.logBuffer.Clear(); err != nil {
|
|
log.Error().Err(err).Msg("Failed to clear logs")
|
|
return c.JSON(http.StatusInternalServerError, map[string]interface{}{"error": "Failed to clear logs"})
|
|
}
|
|
|
|
return c.JSON(http.StatusOK, map[string]interface{}{"message": "Logs cleared"})
|
|
}
|
|
|
|
// WebSocket handles real-time log streaming
|
|
// GET /api/admin/monitoring/ws
|
|
func (h *Handler) WebSocket(c echo.Context) error {
|
|
conn, err := upgrader.Upgrade(c.Response().Writer, c.Request(), nil)
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("Failed to upgrade WebSocket connection")
|
|
return err
|
|
}
|
|
defer conn.Close()
|
|
|
|
// Create context that cancels when connection closes
|
|
ctx, cancel := context.WithCancel(c.Request().Context())
|
|
defer cancel()
|
|
|
|
// Subscribe to Redis pubsub for logs
|
|
pubsub := h.logBuffer.Subscribe(ctx)
|
|
defer pubsub.Close()
|
|
|
|
// Handle incoming messages (for filter changes, ping, etc.)
|
|
var wsMu sync.Mutex
|
|
go func() {
|
|
for {
|
|
_, _, err := conn.ReadMessage()
|
|
if err != nil {
|
|
cancel()
|
|
}
|
|
}
|
|
}()
|
|
|
|
// Stream logs from pubsub
|
|
ch := pubsub.Channel()
|
|
statsTicker := time.NewTicker(5 * time.Second)
|
|
defer statsTicker.Stop()
|
|
|
|
// Send initial stats
|
|
if err := h.sendStats(conn, &wsMu); err != nil {
|
|
cancel()
|
|
return nil
|
|
}
|
|
|
|
for {
|
|
select {
|
|
case msg := <-ch:
|
|
// Parse log entry
|
|
var entry LogEntry
|
|
if err := json.Unmarshal([]byte(msg.Payload), &entry); err != nil {
|
|
continue
|
|
}
|
|
|
|
// Send log message
|
|
wsMsg := WSMessage{
|
|
Type: WSMessageTypeLog,
|
|
Data: entry,
|
|
}
|
|
|
|
wsMu.Lock()
|
|
err := conn.WriteJSON(wsMsg)
|
|
wsMu.Unlock()
|
|
|
|
if err != nil {
|
|
log.Debug().Err(err).Msg("WebSocket write error")
|
|
cancel()
|
|
return nil
|
|
}
|
|
|
|
case <-statsTicker.C:
|
|
// Send periodic stats update
|
|
if err := h.sendStats(conn, &wsMu); err != nil {
|
|
cancel()
|
|
return nil
|
|
}
|
|
|
|
case <-ctx.Done():
|
|
return nil
|
|
}
|
|
}
|
|
}
|
|
|
|
func (h *Handler) sendStats(conn *websocket.Conn, mu *sync.Mutex) error {
|
|
allStats, err := h.statsStore.GetAllStats()
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("failed to send stats")
|
|
return err
|
|
}
|
|
|
|
wsMsg := WSMessage{
|
|
Type: WSMessageTypeStats,
|
|
Data: allStats,
|
|
}
|
|
|
|
mu.Lock()
|
|
err = conn.WriteJSON(wsMsg)
|
|
mu.Unlock()
|
|
|
|
if err != nil {
|
|
log.Debug().Err(err).Msg("WebSocket write error sending stats")
|
|
}
|
|
|
|
return err
|
|
}
|