Add S3-compatible storage backend (B2, MinIO, AWS S3)

Introduces a StorageBackend interface with local filesystem and S3
implementations. The StorageService delegates raw I/O to the backend
while keeping validation, encryption, and URL generation unchanged.

Backend selection is config-driven: set B2_ENDPOINT + B2_KEY_ID +
B2_APP_KEY + B2_BUCKET_NAME for S3 mode, or STORAGE_UPLOAD_DIR for
local mode. STORAGE_USE_SSL=false for in-cluster MinIO (HTTP).

All existing tests pass unchanged — the local backend preserves
identical behavior to the previous direct-filesystem implementation.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Trey t
2026-03-30 21:31:24 -05:00
parent 34553f3bec
commit 2e10822e5a
8 changed files with 359 additions and 132 deletions

View File

@@ -0,0 +1,21 @@
package services
import "io"
// StorageBackend abstracts where files are physically stored.
// The StorageService handles validation, encryption, and URL generation,
// then delegates raw I/O to the backend.
//
// Known implementations: LocalBackend (filesystem) and S3Backend
// (S3-compatible object storage such as B2 or MinIO).
type StorageBackend interface {
	// Write stores data at the given key (e.g., "images/20240101_uuid.jpg").
	Write(key string, data []byte) error
	// Read returns the raw bytes stored at the given key.
	Read(key string) ([]byte, error)
	// Delete removes the object at the given key. Returns nil if not found.
	Delete(key string) error
	// ReadStream returns a reader for the object (used for large files).
	// Callers must close the returned ReadCloser.
	ReadStream(key string) (io.ReadCloser, error)
}

View File

@@ -0,0 +1,70 @@
package services
import (
"fmt"
"io"
"os"
"path/filepath"
)
// LocalBackend stores files on the local filesystem beneath a single
// base directory. Keys are resolved via SafeResolvePath in the I/O
// methods, which guards against path traversal.
type LocalBackend struct {
	baseDir string // root upload directory; all keys resolve beneath it
}

// NewLocalBackend creates a local filesystem storage backend.
// It ensures the base directory and standard subdirectories exist.
func NewLocalBackend(baseDir string) (*LocalBackend, error) {
	if err := os.MkdirAll(baseDir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create upload directory: %w", err)
	}
	// Pre-create the category subdirectories used by StorageService keys.
	for _, name := range []string{"images", "documents", "completions"} {
		if err := os.MkdirAll(filepath.Join(baseDir, name), 0755); err != nil {
			return nil, fmt.Errorf("failed to create subdirectory %s: %w", name, err)
		}
	}
	return &LocalBackend{baseDir: baseDir}, nil
}
// Write stores data at key, overwriting any existing file.
// Keys that resolve outside baseDir are rejected by SafeResolvePath.
func (b *LocalBackend) Write(key string, data []byte) error {
	dest, err := SafeResolvePath(b.baseDir, key)
	if err != nil {
		return fmt.Errorf("invalid path: %w", err)
	}
	return os.WriteFile(dest, data, 0644)
}
// Read returns the full contents of the file stored at key.
func (b *LocalBackend) Read(key string) ([]byte, error) {
	path, err := SafeResolvePath(b.baseDir, key)
	if err != nil {
		return nil, fmt.Errorf("invalid path: %w", err)
	}
	return os.ReadFile(path)
}
// Delete removes the file at key. Missing files and unresolvable keys are
// treated as already deleted and return nil, per the StorageBackend contract.
func (b *LocalBackend) Delete(key string) error {
	path, resolveErr := SafeResolvePath(b.baseDir, key)
	if resolveErr != nil {
		// An invalid key cannot name a stored file — nothing to delete.
		return nil
	}
	err := os.Remove(path)
	if err == nil || os.IsNotExist(err) {
		return nil
	}
	return fmt.Errorf("failed to delete file: %w", err)
}
// ReadStream opens the file at key for streaming reads.
// The caller is responsible for closing the returned ReadCloser.
func (b *LocalBackend) ReadStream(key string) (io.ReadCloser, error) {
	path, err := SafeResolvePath(b.baseDir, key)
	if err != nil {
		return nil, fmt.Errorf("invalid path: %w", err)
	}
	return os.Open(path)
}
// BaseDir returns the local storage base directory.
// Used by StorageService.GetUploadDir when the backend is local.
func (b *LocalBackend) BaseDir() string {
	return b.baseDir
}

View File

@@ -0,0 +1,103 @@
package services
import (
"bytes"
"context"
"fmt"
"io"
"time"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/rs/zerolog/log"
)
// S3Backend stores files in S3-compatible storage (Backblaze B2, MinIO, AWS S3).
// All operations go through the minio client against a single bucket.
type S3Backend struct {
	client *minio.Client // configured S3-compatible client
	bucket string        // target bucket name; verified to exist at construction
}
// NewS3Backend creates an S3-compatible storage backend.
// It fails fast by verifying the bucket exists, so bad credentials or a
// misconfigured bucket surface at startup rather than on first upload.
func NewS3Backend(endpoint, keyID, appKey, bucket string, useSSL bool, region string) (*S3Backend, error) {
	// B2 and MinIO accept any region string; default to the AWS convention.
	if region == "" {
		region = "us-east-1"
	}
	opts := &minio.Options{
		Creds:  credentials.NewStaticV4(keyID, appKey, ""),
		Secure: useSSL,
		Region: region,
	}
	client, err := minio.New(endpoint, opts)
	if err != nil {
		return nil, fmt.Errorf("failed to create S3 client: %w", err)
	}

	// Bounded startup check so a misconfigured endpoint can't hang boot.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	switch exists, checkErr := client.BucketExists(ctx, bucket); {
	case checkErr != nil:
		return nil, fmt.Errorf("failed to check bucket %q: %w", bucket, checkErr)
	case !exists:
		return nil, fmt.Errorf("bucket %q does not exist", bucket)
	}

	log.Info().
		Str("endpoint", endpoint).
		Str("bucket", bucket).
		Bool("ssl", useSSL).
		Msg("S3 storage backend initialized")
	return &S3Backend{client: client, bucket: bucket}, nil
}
// Write uploads data to the bucket at key, replacing any existing object.
func (b *S3Backend) Write(key string, data []byte) error {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	reader := bytes.NewReader(data)
	size := int64(len(data))
	if _, err := b.client.PutObject(ctx, b.bucket, key, reader, size, minio.PutObjectOptions{}); err != nil {
		return fmt.Errorf("failed to upload to S3: %w", err)
	}
	return nil
}
// Read fetches the full object at key into memory.
func (b *S3Backend) Read(key string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	obj, err := b.client.GetObject(ctx, b.bucket, key, minio.GetObjectOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to get S3 object: %w", err)
	}
	defer obj.Close()
	// minio defers most request errors (including not-found) until the
	// object is actually read, so this is where they surface.
	data, readErr := io.ReadAll(obj)
	if readErr != nil {
		return nil, fmt.Errorf("failed to read S3 object: %w", readErr)
	}
	return data, nil
}
// Delete removes the object at key. Per the StorageBackend contract,
// removing a nonexistent object is not treated as an error.
func (b *S3Backend) Delete(key string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if err := b.client.RemoveObject(ctx, b.bucket, key, minio.RemoveObjectOptions{}); err != nil {
		return fmt.Errorf("failed to delete S3 object: %w", err)
	}
	return nil
}
// ReadStream returns a streaming reader for the object at key.
// No timeout is attached: the caller controls the lifetime of the download
// by closing the returned ReadCloser.
func (b *S3Backend) ReadStream(key string) (io.ReadCloser, error) {
	obj, err := b.client.GetObject(context.Background(), b.bucket, key, minio.GetObjectOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to get S3 object stream: %w", err)
	}
	return obj, nil
}

View File

@@ -5,7 +5,6 @@ import (
"io"
"mime/multipart"
"net/http"
"os"
"path/filepath"
"strings"
"time"
@@ -16,9 +15,11 @@ import (
"github.com/treytartt/honeydue-api/internal/config"
)
// StorageService handles file uploads to local filesystem
// StorageService handles file uploads, validation, encryption, and URL generation.
// It delegates raw I/O to a StorageBackend (local filesystem or S3-compatible).
type StorageService struct {
cfg *config.StorageConfig
backend StorageBackend
allowedTypes map[string]struct{} // P-12: Parsed once at init for O(1) lookups
encryptionSvc *EncryptionService
}
@@ -31,37 +32,40 @@ type UploadResult struct {
MimeType string `json:"mime_type"`
}
// NewStorageService creates a new storage service
// NewStorageService creates a new storage service with the appropriate backend.
// If S3 config is set, uses S3-compatible storage (B2, MinIO).
// Otherwise, uses local filesystem.
func NewStorageService(cfg *config.StorageConfig) (*StorageService, error) {
// Ensure upload directory exists
if err := os.MkdirAll(cfg.UploadDir, 0755); err != nil {
return nil, fmt.Errorf("failed to create upload directory: %w", err)
}
var backend StorageBackend
var err error
// Create subdirectories for organization
subdirs := []string{"images", "documents", "completions"}
for _, subdir := range subdirs {
path := filepath.Join(cfg.UploadDir, subdir)
if err := os.MkdirAll(path, 0755); err != nil {
return nil, fmt.Errorf("failed to create subdirectory %s: %w", subdir, err)
if cfg.IsS3() {
backend, err = NewS3Backend(cfg.S3Endpoint, cfg.S3KeyID, cfg.S3AppKey, cfg.S3Bucket, cfg.S3UseSSL, cfg.S3Region)
if err != nil {
return nil, fmt.Errorf("failed to initialize S3 storage: %w", err)
}
log.Info().
Str("endpoint", cfg.S3Endpoint).
Str("bucket", cfg.S3Bucket).
Bool("ssl", cfg.S3UseSSL).
Msg("Storage service initialized (S3)")
} else {
backend, err = NewLocalBackend(cfg.UploadDir)
if err != nil {
return nil, fmt.Errorf("failed to initialize local storage: %w", err)
}
log.Info().
Str("upload_dir", cfg.UploadDir).
Msg("Storage service initialized (local)")
}
// P-12: Parse AllowedTypes once at initialization for O(1) lookups
allowedTypes := make(map[string]struct{})
for _, t := range strings.Split(cfg.AllowedTypes, ",") {
trimmed := strings.TrimSpace(t)
if trimmed != "" {
allowedTypes[trimmed] = struct{}{}
}
}
allowedTypes := parseAllowedTypes(cfg.AllowedTypes)
log.Info().Str("upload_dir", cfg.UploadDir).Int("allowed_types", len(allowedTypes)).Msg("Storage service initialized")
return &StorageService{cfg: cfg, allowedTypes: allowedTypes}, nil
return &StorageService{cfg: cfg, backend: backend, allowedTypes: allowedTypes}, nil
}
// Upload saves a file to the local filesystem
// Upload saves a file to storage (local or S3)
func (s *StorageService) Upload(file *multipart.FileHeader, category string) (*UploadResult, error) {
// Validate file size
if file.Size > s.cfg.MaxFileSize {
@@ -90,13 +94,10 @@ func (s *StorageService) Upload(file *multipart.FileHeader, category string) (*U
detectedMimeType := http.DetectContentType(sniffBuf[:n])
// Validate that the detected type matches the claimed type (at the category level)
// Allow application/octet-stream from detection since DetectContentType may not
// recognize all valid types, but the claimed type must still be in our allowed list
if detectedMimeType != "application/octet-stream" && !s.mimeTypesCompatible(claimedMimeType, detectedMimeType) {
return nil, fmt.Errorf("file content type mismatch: claimed %s but detected %s", claimedMimeType, detectedMimeType)
}
// Use the claimed MIME type (which is more specific) if it's allowed
mimeType := claimedMimeType
// Validate MIME type against allowed list
@@ -131,11 +132,8 @@ func (s *StorageService) Upload(file *multipart.FileHeader, category string) (*U
storedFilename = newFilename + ".enc"
}
// S-18: Sanitize path to prevent traversal attacks
destPath, err := SafeResolvePath(s.cfg.UploadDir, filepath.Join(subdir, storedFilename))
if err != nil {
return nil, fmt.Errorf("invalid upload path: %w", err)
}
// Build the storage key (e.g., "images/20240101_uuid.jpg")
key := subdir + "/" + storedFilename
// Read all file content into memory for potential encryption
fileData, err := io.ReadAll(src)
@@ -151,13 +149,13 @@ func (s *StorageService) Upload(file *multipart.FileHeader, category string) (*U
}
}
// Write file content to disk
if err := os.WriteFile(destPath, fileData, 0644); err != nil {
// Write to backend
if err := s.backend.Write(key, fileData); err != nil {
return nil, fmt.Errorf("failed to save file: %w", err)
}
written := int64(len(fileData))
// Generate URL (always uses the original filename without .enc suffix for the public URL)
// Generate URL (always uses the original filename without .enc suffix)
url := fmt.Sprintf("%s/%s/%s", s.cfg.BaseURL, subdir, newFilename)
log.Info().
@@ -165,6 +163,7 @@ func (s *StorageService) Upload(file *multipart.FileHeader, category string) (*U
Str("category", category).
Int64("size", written).
Str("mime_type", mimeType).
Bool("s3", s.cfg.IsS3()).
Msg("File uploaded successfully")
return &UploadResult{
@@ -183,33 +182,24 @@ func (s *StorageService) ReadFile(storedURL string) ([]byte, string, error) {
return nil, "", fmt.Errorf("empty file URL")
}
// Strip base URL prefix to get relative path
relativePath := strings.TrimPrefix(storedURL, s.cfg.BaseURL)
relativePath = strings.TrimPrefix(relativePath, "/")
// Strip base URL prefix to get relative key
relativeKey := strings.TrimPrefix(storedURL, s.cfg.BaseURL)
relativeKey = strings.TrimPrefix(relativeKey, "/")
// Try .enc variant first, then plain file
var fullPath string
var data []byte
var encrypted bool
var err error
encPath, err := SafeResolvePath(s.cfg.UploadDir, relativePath+".enc")
data, err = s.backend.Read(relativeKey + ".enc")
if err == nil {
if _, statErr := os.Stat(encPath); statErr == nil {
fullPath = encPath
encrypted = true
}
}
if fullPath == "" {
plainPath, err := SafeResolvePath(s.cfg.UploadDir, relativePath)
encrypted = true
} else {
// Fall back to plain file
data, err = s.backend.Read(relativeKey)
if err != nil {
return nil, "", fmt.Errorf("invalid file path: %w", err)
return nil, "", fmt.Errorf("failed to read file: %w", err)
}
fullPath = plainPath
}
data, err := os.ReadFile(fullPath)
if err != nil {
return nil, "", fmt.Errorf("failed to read file: %w", err)
}
// Decrypt if this is an encrypted file
@@ -231,58 +221,45 @@ func (s *StorageService) ReadFile(storedURL string) ([]byte, string, error) {
// Delete removes a file from storage, handling both plain and .enc variants
func (s *StorageService) Delete(fileURL string) error {
// Convert URL to file path
relativePath := strings.TrimPrefix(fileURL, s.cfg.BaseURL)
relativePath = strings.TrimPrefix(relativePath, "/")
// S-18: Use SafeResolvePath to prevent path traversal
fullPath, err := SafeResolvePath(s.cfg.UploadDir, relativePath)
if err != nil {
return fmt.Errorf("invalid file path: %w", err)
}
// Delete both plain and .enc variants (ignore not-found errors)
plainErr := s.backend.Delete(relativePath)
encErr := s.backend.Delete(relativePath + ".enc")
// Try to delete the plain file
plainDeleted := false
if err := os.Remove(fullPath); err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("failed to delete file: %w", err)
}
} else {
plainDeleted = true
log.Info().Str("path", fullPath).Msg("File deleted")
// Only return an error if both failed for reasons other than not-found
if plainErr != nil {
log.Debug().Err(plainErr).Str("key", relativePath).Msg("Delete plain file")
}
// Also try to delete the .enc variant
encPath, err := SafeResolvePath(s.cfg.UploadDir, relativePath+".enc")
if err == nil {
if err := os.Remove(encPath); err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("failed to delete encrypted file: %w", err)
}
} else {
log.Info().Str("path", encPath).Msg("Encrypted file deleted")
return nil
}
}
if !plainDeleted {
// Neither file existed — that's OK
return nil
if encErr != nil {
log.Debug().Err(encErr).Str("key", relativePath+".enc").Msg("Delete enc file")
}
return nil
}
// GetUploadDir returns the local upload directory when the backend is a
// LocalBackend. For any other backend it falls back to the configured
// UploadDir value.
// NOTE(review): the original comment claimed "For S3 backends, returns
// empty string", which does not match the code — callers in S3 mode
// receive cfg.UploadDir, which may not be a real directory; confirm no
// caller treats it as one.
func (s *StorageService) GetUploadDir() string {
	if lb, ok := s.backend.(*LocalBackend); ok {
		return lb.BaseDir()
	}
	return s.cfg.UploadDir
}
// SetEncryptionService sets the encryption service for encrypting files at rest.
// Must be called before Upload/ReadFile if encryption is desired; a nil
// encryptionSvc leaves files stored in plaintext.
func (s *StorageService) SetEncryptionService(svc *EncryptionService) {
	s.encryptionSvc = svc
}
// isAllowedType reports whether mimeType is in the configured allow-list.
// P-12: the list is pre-parsed into a set at init, so this is a single
// O(1) map lookup rather than a string split per call.
func (s *StorageService) isAllowedType(mimeType string) bool {
	_, allowed := s.allowedTypes[mimeType]
	return allowed
}
// mimeTypesCompatible checks if the claimed and detected MIME types are compatible.
// Two MIME types are compatible if they share the same primary type (e.g., both "image/*").
func (s *StorageService) mimeTypesCompatible(claimed, detected string) bool {
claimedParts := strings.SplitN(claimed, "/", 2)
detectedParts := strings.SplitN(detected, "/", 2)
@@ -307,25 +284,24 @@ func (s *StorageService) getExtensionFromMimeType(mimeType string) string {
return ""
}
// GetUploadDir returns the upload directory path
func (s *StorageService) GetUploadDir() string {
return s.cfg.UploadDir
}
// SetEncryptionService sets the encryption service for encrypting files at rest
func (s *StorageService) SetEncryptionService(svc *EncryptionService) {
s.encryptionSvc = svc
// parseAllowedTypes splits a comma-separated MIME type string into a set.
// Entries are whitespace-trimmed; empty entries (e.g. from trailing commas)
// are dropped.
func parseAllowedTypes(types string) map[string]struct{} {
	set := make(map[string]struct{})
	for _, entry := range strings.Split(types, ",") {
		if name := strings.TrimSpace(entry); name != "" {
			set[name] = struct{}{}
		}
	}
	return set
}
// NewStorageServiceForTest creates a StorageService without creating directories.
// This is intended only for unit tests that need a StorageService with a known config.
func NewStorageServiceForTest(cfg *config.StorageConfig) *StorageService {
allowedTypes := make(map[string]struct{})
for _, t := range strings.Split(cfg.AllowedTypes, ",") {
trimmed := strings.TrimSpace(t)
if trimmed != "" {
allowedTypes[trimmed] = struct{}{}
}
return &StorageService{
cfg: cfg,
backend: nil, // tests that need a backend must set it up
allowedTypes: parseAllowedTypes(cfg.AllowedTypes),
}
return &StorageService{cfg: cfg, allowedTypes: allowedTypes}
}