diff --git a/README.md b/README.md index 73623e3..a2bcf6c 100644 --- a/README.md +++ b/README.md @@ -446,6 +446,8 @@ CGO_ENABLED=0 go build -ldflags="-s -w" -o bin/qfs ./cmd/qfs | `QFS_DB_PATH` | Полный путь к локальной SQLite БД | OS-specific user state dir | | `QFS_STATE_DIR` | Каталог state (если `QFS_DB_PATH` не задан) | OS-specific user state dir | | `QFS_CONFIG_PATH` | Полный путь к `config.yaml` | OS-specific user state dir | +| `QFS_BACKUP_DIR` | Каталог для ротационных бэкапов локальных данных | `/backups` | +| `QFS_BACKUP_DISABLE` | Отключить автоматические бэкапы (`1/true/yes`) | — | ## Интеграция с существующей БД diff --git a/cmd/qfs/main.go b/cmd/qfs/main.go index 4620b33..f7156e2 100644 --- a/cmd/qfs/main.go +++ b/cmd/qfs/main.go @@ -232,6 +232,10 @@ func main() { syncWorker := sync.NewWorker(syncService, connMgr, backgroundSyncInterval) go syncWorker.Start(workerCtx) + backupCtx, backupCancel := context.WithCancel(context.Background()) + defer backupCancel() + go startBackupScheduler(backupCtx, cfg, resolvedLocalDBPath, resolvedConfigPath) + srv := &http.Server{ Addr: cfg.Address(), Handler: router, @@ -274,6 +278,7 @@ func main() { // Stop background sync worker first syncWorker.Stop() workerCancel() + backupCancel() // Then shutdown HTTP server ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -324,6 +329,9 @@ func setConfigDefaults(cfg *config.Config) { if cfg.Pricing.MinQuotesForMedian == 0 { cfg.Pricing.MinQuotesForMedian = 3 } + if cfg.Backup.Time == "" { + cfg.Backup.Time = "00:00" + } } func ensureDefaultConfigFile(configPath string) error { @@ -347,6 +355,9 @@ func ensureDefaultConfigFile(configPath string) error { read_timeout: 30s write_timeout: 30s +backup: + time: "00:00" + logging: level: "info" format: "json" @@ -373,9 +384,14 @@ type runtimeLoggingConfig struct { Output string `yaml:"output"` } +type runtimeBackupConfig struct { + Time string `yaml:"time"` +} + type runtimeConfigFile struct { Server 
runtimeServerConfig `yaml:"server"` Logging runtimeLoggingConfig `yaml:"logging"` + Backup runtimeBackupConfig `yaml:"backup"` } // migrateConfigFileToRuntimeShape rewrites config.yaml in a minimal runtime format. @@ -398,6 +414,9 @@ func migrateConfigFileToRuntimeShape(configPath string, cfg *config.Config) erro Format: cfg.Logging.Format, Output: cfg.Logging.Output, }, + Backup: runtimeBackupConfig{ + Time: cfg.Backup.Time, + }, } rendered, err := yaml.Marshal(&runtimeCfg) @@ -416,6 +435,69 @@ func migrateConfigFileToRuntimeShape(configPath string, cfg *config.Config) erro return nil } +func startBackupScheduler(ctx context.Context, cfg *config.Config, dbPath, configPath string) { + if cfg == nil { + return + } + + hour, minute, err := parseBackupTime(cfg.Backup.Time) + if err != nil { + slog.Warn("invalid backup time; using 00:00", "value", cfg.Backup.Time, "error", err) + hour = 0 + minute = 0 + } + + if created, backupErr := appstate.EnsureRotatingLocalBackup(dbPath, configPath); backupErr != nil { + slog.Error("local backup failed", "error", backupErr) + } else if len(created) > 0 { + for _, path := range created { + slog.Info("local backup completed", "archive", path) + } + } + + for { + next := nextBackupTime(time.Now(), hour, minute) + timer := time.NewTimer(time.Until(next)) + + select { + case <-ctx.Done(): + timer.Stop() + return + case <-timer.C: + start := time.Now() + created, backupErr := appstate.EnsureRotatingLocalBackup(dbPath, configPath) + duration := time.Since(start) + if backupErr != nil { + slog.Error("local backup failed", "error", backupErr, "duration", duration) + } else { + for _, path := range created { + slog.Info("local backup completed", "archive", path, "duration", duration) + } + } + } + } +} + +func parseBackupTime(value string) (int, int, error) { + if strings.TrimSpace(value) == "" { + return 0, 0, fmt.Errorf("empty backup time") + } + parsed, err := time.Parse("15:04", value) + if err != nil { + return 0, 0, err + } + return 
parsed.Hour(), parsed.Minute(), nil +} + +func nextBackupTime(now time.Time, hour, minute int) time.Time { + location := now.Location() + target := time.Date(now.Year(), now.Month(), now.Day(), hour, minute, 0, 0, location) + if !now.Before(target) { + target = target.Add(24 * time.Hour) + } + return target +} + // runSetupMode starts a minimal server that only serves the setup page func runSetupMode(local *localdb.LocalDB) { restartSig := make(chan struct{}, 1) @@ -1336,31 +1418,30 @@ func setupRouter(cfg *config.Config, local *localdb.LocalDB, connMgr *db.Connect }) }) + // GET /api/projects/all - Returns all projects without pagination for UI dropdowns + projects.GET("/all", func(c *gin.Context) { + allProjects, err := projectService.ListByUser(dbUsername, true) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } - // GET /api/projects/all - Returns all projects without pagination for UI dropdowns - projects.GET("/all", func(c *gin.Context) { - allProjects, err := projectService.ListByUser(dbUsername, true) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } + // Return simplified list of all projects (UUID + Name only) + type ProjectSimple struct { + UUID string `json:"uuid"` + Name string `json:"name"` + } - // Return simplified list of all projects (UUID + Name only) - type ProjectSimple struct { - UUID string `json:"uuid"` - Name string `json:"name"` - } + simplified := make([]ProjectSimple, 0, len(allProjects)) + for _, p := range allProjects { + simplified = append(simplified, ProjectSimple{ + UUID: p.UUID, + Name: p.Name, + }) + } - simplified := make([]ProjectSimple, 0, len(allProjects)) - for _, p := range allProjects { - simplified = append(simplified, ProjectSimple{ - UUID: p.UUID, - Name: p.Name, - }) - } - - c.JSON(http.StatusOK, simplified) - }) + c.JSON(http.StatusOK, simplified) + }) projects.POST("", func(c *gin.Context) { var req 
services.CreateProjectRequest diff --git a/config.example.yaml b/config.example.yaml index 30c5b66..09c18d0 100644 --- a/config.example.yaml +++ b/config.example.yaml @@ -37,6 +37,9 @@ export: max_file_age: "1h" company_name: "Your Company Name" +backup: + time: "00:00" + alerts: enabled: true check_interval: "1h" diff --git a/internal/appstate/backup.go b/internal/appstate/backup.go new file mode 100644 index 0000000..9f51fd6 --- /dev/null +++ b/internal/appstate/backup.go @@ -0,0 +1,273 @@ +package appstate + +import ( + "archive/zip" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "time" +) + +type backupPeriod struct { + name string + retention int + key func(time.Time) string + date func(time.Time) string +} + +var backupPeriods = []backupPeriod{ + { + name: "daily", + retention: 7, + key: func(t time.Time) string { + return t.Format("2006-01-02") + }, + date: func(t time.Time) string { + return t.Format("2006-01-02") + }, + }, + { + name: "weekly", + retention: 4, + key: func(t time.Time) string { + y, w := t.ISOWeek() + return fmt.Sprintf("%04d-W%02d", y, w) + }, + date: func(t time.Time) string { + return t.Format("2006-01-02") + }, + }, + { + name: "monthly", + retention: 12, + key: func(t time.Time) string { + return t.Format("2006-01") + }, + date: func(t time.Time) string { + return t.Format("2006-01-02") + }, + }, + { + name: "yearly", + retention: 10, + key: func(t time.Time) string { + return t.Format("2006") + }, + date: func(t time.Time) string { + return t.Format("2006-01-02") + }, + }, +} + +const ( + envBackupDisable = "QFS_BACKUP_DISABLE" + envBackupDir = "QFS_BACKUP_DIR" +) + +var backupNow = time.Now + +// EnsureRotatingLocalBackup creates or refreshes daily/weekly/monthly/yearly backups +// for the local database and config. It keeps a limited number per period. 
+func EnsureRotatingLocalBackup(dbPath, configPath string) ([]string, error) { + if isBackupDisabled() { + return nil, nil + } + if dbPath == "" { + return nil, nil + } + + if _, err := os.Stat(dbPath); err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, fmt.Errorf("stat db: %w", err) + } + + root := resolveBackupRoot(dbPath) + now := backupNow() + + created := make([]string, 0) + for _, period := range backupPeriods { + newFiles, err := ensurePeriodBackup(root, period, now, dbPath, configPath) + if err != nil { + return created, err + } + if len(newFiles) > 0 { + created = append(created, newFiles...) + } + } + + return created, nil +} + +func resolveBackupRoot(dbPath string) string { + if fromEnv := strings.TrimSpace(os.Getenv(envBackupDir)); fromEnv != "" { + return filepath.Clean(fromEnv) + } + return filepath.Join(filepath.Dir(dbPath), "backups") +} + +func isBackupDisabled() bool { + val := strings.ToLower(strings.TrimSpace(os.Getenv(envBackupDisable))) + return val == "1" || val == "true" || val == "yes" +} + +func ensurePeriodBackup(root string, period backupPeriod, now time.Time, dbPath, configPath string) ([]string, error) { + key := period.key(now) + periodDir := filepath.Join(root, period.name) + if err := os.MkdirAll(periodDir, 0755); err != nil { + return nil, fmt.Errorf("create %s backup dir: %w", period.name, err) + } + + if hasBackupForKey(periodDir, key) { + return nil, nil + } + + archiveName := fmt.Sprintf("qfs-backp-%s.zip", period.date(now)) + archivePath := filepath.Join(periodDir, archiveName) + + if err := createBackupArchive(archivePath, dbPath, configPath); err != nil { + return nil, fmt.Errorf("create %s backup archive: %w", period.name, err) + } + + if err := writePeriodMarker(periodDir, key); err != nil { + return []string{archivePath}, err + } + + if err := pruneOldBackups(periodDir, period.retention); err != nil { + return []string{archivePath}, err + } + + return []string{archivePath}, nil +} + +func 
hasBackupForKey(periodDir, key string) bool { + marker := periodMarker{Key: ""} + data, err := os.ReadFile(periodMarkerPath(periodDir)) + if err != nil { + return false + } + if err := json.Unmarshal(data, &marker); err != nil { + return false + } + return marker.Key == key +} + +type periodMarker struct { + Key string `json:"key"` +} + +func periodMarkerPath(periodDir string) string { + return filepath.Join(periodDir, ".period.json") +} + +func writePeriodMarker(periodDir, key string) error { + data, err := json.MarshalIndent(periodMarker{Key: key}, "", " ") + if err != nil { + return err + } + return os.WriteFile(periodMarkerPath(periodDir), data, 0644) +} + +func pruneOldBackups(periodDir string, keep int) error { + entries, err := os.ReadDir(periodDir) + if err != nil { + return fmt.Errorf("read backups dir: %w", err) + } + + files := make([]os.DirEntry, 0, len(entries)) + for _, entry := range entries { + if entry.IsDir() { + continue + } + if strings.HasSuffix(entry.Name(), ".zip") { + files = append(files, entry) + } + } + + if len(files) <= keep { + return nil + } + + sort.Slice(files, func(i, j int) bool { + infoI, errI := files[i].Info() + infoJ, errJ := files[j].Info() + if errI != nil || errJ != nil { + return files[i].Name() < files[j].Name() + } + return infoI.ModTime().Before(infoJ.ModTime()) + }) + + for i := 0; i < len(files)-keep; i++ { + path := filepath.Join(periodDir, files[i].Name()) + if err := os.Remove(path); err != nil { + return fmt.Errorf("remove old backup %s: %w", path, err) + } + } + + return nil +} + +func createBackupArchive(destPath, dbPath, configPath string) error { + file, err := os.Create(destPath) + if err != nil { + return err + } + defer file.Close() + + zipWriter := zip.NewWriter(file) + if err := addZipFile(zipWriter, dbPath); err != nil { + _ = zipWriter.Close() + return err + } + _ = addZipOptionalFile(zipWriter, dbPath+"-wal") + _ = addZipOptionalFile(zipWriter, dbPath+"-shm") + + if strings.TrimSpace(configPath) != "" 
{ + _ = addZipOptionalFile(zipWriter, configPath) + } + + if err := zipWriter.Close(); err != nil { + return err + } + return file.Sync() +} + +func addZipOptionalFile(writer *zip.Writer, path string) error { + if _, err := os.Stat(path); err != nil { + return nil + } + return addZipFile(writer, path) +} + +func addZipFile(writer *zip.Writer, path string) error { + in, err := os.Open(path) + if err != nil { + return err + } + defer in.Close() + + info, err := in.Stat() + if err != nil { + return err + } + + header, err := zip.FileInfoHeader(info) + if err != nil { + return err + } + header.Name = filepath.Base(path) + header.Method = zip.Deflate + + out, err := writer.CreateHeader(header) + if err != nil { + return err + } + + _, err = io.Copy(out, in) + return err +} diff --git a/internal/appstate/backup_test.go b/internal/appstate/backup_test.go new file mode 100644 index 0000000..b07406f --- /dev/null +++ b/internal/appstate/backup_test.go @@ -0,0 +1,83 @@ +package appstate + +import ( + "os" + "path/filepath" + "testing" + "time" +) + +func TestEnsureRotatingLocalBackupCreatesAndRotates(t *testing.T) { + temp := t.TempDir() + dbPath := filepath.Join(temp, "qfs.db") + cfgPath := filepath.Join(temp, "config.yaml") + + if err := os.WriteFile(dbPath, []byte("db"), 0644); err != nil { + t.Fatalf("write db: %v", err) + } + if err := os.WriteFile(cfgPath, []byte("cfg"), 0644); err != nil { + t.Fatalf("write config: %v", err) + } + + prevNow := backupNow + defer func() { backupNow = prevNow }() + backupNow = func() time.Time { return time.Date(2026, 2, 11, 10, 0, 0, 0, time.UTC) } + + created, err := EnsureRotatingLocalBackup(dbPath, cfgPath) + if err != nil { + t.Fatalf("backup: %v", err) + } + if len(created) == 0 { + t.Fatalf("expected backup to be created") + } + + dailyArchive := filepath.Join(temp, "backups", "daily", "qfs-backp-2026-02-11.zip") + if _, err := os.Stat(dailyArchive); err != nil { + t.Fatalf("daily archive missing: %v", err) + } + + backupNow = 
func() time.Time { return time.Date(2026, 2, 12, 10, 0, 0, 0, time.UTC) }
+	created, err = EnsureRotatingLocalBackup(dbPath, cfgPath)
+	if err != nil {
+		t.Fatalf("backup rotate: %v", err)
+	}
+	if len(created) == 0 {
+		t.Fatalf("expected backup to be created for new day")
+	}
+
+	dailyArchive = filepath.Join(temp, "backups", "daily", "qfs-backp-2026-02-12.zip")
+	if _, err := os.Stat(dailyArchive); err != nil {
+		t.Fatalf("daily archive missing after rotate: %v", err)
+	}
+}
+
+func TestEnsureRotatingLocalBackupEnvControls(t *testing.T) {
+	temp := t.TempDir()
+	dbPath := filepath.Join(temp, "qfs.db")
+	cfgPath := filepath.Join(temp, "config.yaml")
+
+	if err := os.WriteFile(dbPath, []byte("db"), 0644); err != nil {
+		t.Fatalf("write db: %v", err)
+	}
+	if err := os.WriteFile(cfgPath, []byte("cfg"), 0644); err != nil {
+		t.Fatalf("write config: %v", err)
+	}
+
+	backupRoot := filepath.Join(temp, "custom_backups")
+	t.Setenv(envBackupDir, backupRoot)
+
+	if _, err := EnsureRotatingLocalBackup(dbPath, cfgPath); err != nil {
+		t.Fatalf("backup with env: %v", err)
+	}
+	if _, err := os.Stat(filepath.Join(backupRoot, "daily", ".period.json")); err != nil {
+		t.Fatalf("expected backup in custom dir: %v", err)
+	}
+
+	t.Setenv(envBackupDisable, "1")
+	if _, err := EnsureRotatingLocalBackup(dbPath, cfgPath); err != nil {
+		t.Fatalf("backup disabled: %v", err)
+	}
+	if _, err := os.Stat(filepath.Join(backupRoot, "daily", ".period.json")); err != nil {
+		t.Fatalf("backup should remain from previous run: %v", err)
+	}
+}
diff --git a/internal/config/config.go b/internal/config/config.go
index d0cf829..b0fe72b 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -20,6 +20,7 @@ type Config struct {
 	Alerts        AlertsConfig        `yaml:"alerts"`
 	Notifications NotificationsConfig `yaml:"notifications"`
 	Logging       LoggingConfig       `yaml:"logging"`
+	Backup        BackupConfig        `yaml:"backup"`
 }
 
 type ServerConfig struct {
@@ -101,6 +102,10 @@ type LoggingConfig struct {
 	FilePath string
`yaml:"file_path"` } +type BackupConfig struct { + Time string `yaml:"time"` +} + func Load(path string) (*Config, error) { data, err := os.ReadFile(path) if err != nil { @@ -182,6 +187,10 @@ func (c *Config) setDefaults() { if c.Logging.Output == "" { c.Logging.Output = "stdout" } + + if c.Backup.Time == "" { + c.Backup.Time = "00:00" + } } func (c *Config) Address() string { diff --git a/internal/localdb/localdb.go b/internal/localdb/localdb.go index 756e218..8972a95 100644 --- a/internal/localdb/localdb.go +++ b/internal/localdb/localdb.go @@ -11,6 +11,7 @@ import ( "strings" "time" + "git.mchus.pro/mchus/quoteforge/internal/appstate" "git.mchus.pro/mchus/quoteforge/internal/appmeta" "github.com/glebarez/sqlite" mysqlDriver "github.com/go-sql-driver/mysql" @@ -49,6 +50,14 @@ func New(dbPath string) (*LocalDB, error) { return nil, fmt.Errorf("creating data directory: %w", err) } + if cfgPath, err := appstate.ResolveConfigPathNearDB("", dbPath); err == nil { + if _, err := appstate.EnsureRotatingLocalBackup(dbPath, cfgPath); err != nil { + return nil, fmt.Errorf("backup local data: %w", err) + } + } else { + return nil, fmt.Errorf("resolve config path: %w", err) + } + db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{ Logger: logger.Default.LogMode(logger.Silent), }) diff --git a/man/backup.md b/man/backup.md new file mode 100644 index 0000000..0406c46 --- /dev/null +++ b/man/backup.md @@ -0,0 +1,77 @@ +# Backup Policy (QuoteForge) + +## Overview +QuoteForge performs automatic backups of local runtime data on a daily schedule. Backups are stored as ZIP archives and rotated per period (daily/weekly/monthly/yearly). + +The policy is designed to be easily replicated across installations and user environments without additional dependencies. 
+
+## What Gets Backed Up
+Each backup archive contains:
+- Local SQLite database file (`qfs.db`)
+- SQLite sidecar files (`qfs.db-wal`, `qfs.db-shm`) if present
+- Runtime config file (`config.yaml`) if present
+
+## Schedule
+Backups run once per day at a configured time.
+- Config key: `backup.time`
+- Default: `00:00`
+- Format: `HH:MM` (24-hour local time)
+
+## Storage Location
+Default location is next to the local DB:
+```
+<db_dir>/backups/
+```
+Period-specific subdirectories:
+```
+backups/daily
+backups/weekly
+backups/monthly
+backups/yearly
+```
+
+Optional override:
+- `QFS_BACKUP_DIR` — absolute or relative path to the backup root
+
+## Naming Convention
+Backup files are ZIP archives named by creation date:
+```
+qfs-backp-YYYY-MM-DD.zip
+```
+Each period keeps its own copy under its directory.
+
+## Retention
+Rotation keeps a fixed number of archives per period:
+- Daily: 7 archives
+- Weekly: 4 archives
+- Monthly: 12 archives
+- Yearly: 10 archives
+
+Older archives beyond these limits are deleted automatically.
+
+## Period Dedupe
+A marker file stored inside each period directory tracks the last backup key to avoid duplicate backups within the same period.
+- File: `.period.json`
+- Content: `{ "key": "<period-key>" }`
+
+## Logs
+Successful backup creation is logged with the archive path:
+```
+local backup completed archive=/path/to/.../qfs-backp-YYYY-MM-DD.zip duration=...
+```
+
+Failures are logged with:
+```
+local backup failed error=... duration=...
+```
+
+## Disable Backups
+Set environment variable:
+```
+QFS_BACKUP_DISABLE=1
+```
+Accepted values: `1`, `true`, `yes` (case-insensitive).
+
+## Notes
+- Backups are performed on startup if the current period has no backup.
+- All paths are resolved relative to runtime DB/config paths.