394 lines
8.6 KiB
Go
394 lines
8.6 KiB
Go
package appstate
|
|
|
|
import (
|
|
"archive/zip"
|
|
"encoding/json"
|
|
"fmt"
|
|
"io"
|
|
"os"
|
|
"path/filepath"
|
|
"sort"
|
|
"strings"
|
|
"time"
|
|
|
|
"github.com/glebarez/sqlite"
|
|
"gorm.io/gorm"
|
|
"gorm.io/gorm/logger"
|
|
)
|
|
|
|
// backupPeriod describes one rotation tier (daily/weekly/monthly/yearly):
// how many archives to retain and how to derive the deduplication key and
// the archive date label from a timestamp.
type backupPeriod struct {
// name labels the tier and doubles as the subdirectory name under the
// backup root.
name string
// retention is the maximum number of .zip archives kept in the tier's
// directory; older ones are pruned.
retention int
// key maps a time to the period's identity (e.g. "2006-01-02" or
// "2024-W05"); a new backup is skipped while the stored marker still
// equals this key.
key func(time.Time) string
// date maps a time to the date label embedded in the archive filename.
date func(time.Time) string
}
|
|
|
|
// backupPeriods defines the rotation schedule, ordered shortest to longest
// period: 7 dailies, 4 weeklies, 12 monthlies, 10 yearlies. Every tier
// labels its archive with the full calendar date on which it was created.
var backupPeriods = []backupPeriod{
{
name: "daily",
retention: 7,
// One backup per calendar day.
key: func(t time.Time) string {
return t.Format("2006-01-02")
},
date: func(t time.Time) string {
return t.Format("2006-01-02")
},
},
{
name: "weekly",
retention: 4,
// One backup per ISO-8601 week, keyed like "2024-W05".
key: func(t time.Time) string {
y, w := t.ISOWeek()
return fmt.Sprintf("%04d-W%02d", y, w)
},
date: func(t time.Time) string {
return t.Format("2006-01-02")
},
},
{
name: "monthly",
retention: 12,
// One backup per calendar month.
key: func(t time.Time) string {
return t.Format("2006-01")
},
date: func(t time.Time) string {
return t.Format("2006-01-02")
},
},
{
name: "yearly",
retention: 10,
// One backup per calendar year.
key: func(t time.Time) string {
return t.Format("2006")
},
date: func(t time.Time) string {
return t.Format("2006-01-02")
},
},
}
|
|
|
|
const (
// envBackupDisable, when set to "1", "true", or "yes" (any case),
// turns the whole backup mechanism off.
envBackupDisable = "QFS_BACKUP_DISABLE"
// envBackupDir overrides the default backup root of
// "<db directory>/backups".
envBackupDir = "QFS_BACKUP_DIR"
)
|
|
|
|
// backupNow supplies the current time for period calculations; it is a
// package variable so tests can substitute a fixed clock.
var backupNow = time.Now
|
|
|
|
// EnsureRotatingLocalBackup creates or refreshes daily/weekly/monthly/yearly backups
|
|
// for the local database and config. It keeps a limited number per period.
|
|
func EnsureRotatingLocalBackup(dbPath, configPath string) ([]string, error) {
|
|
if isBackupDisabled() {
|
|
return nil, nil
|
|
}
|
|
if dbPath == "" {
|
|
return nil, nil
|
|
}
|
|
|
|
if _, err := os.Stat(dbPath); err != nil {
|
|
if os.IsNotExist(err) {
|
|
return nil, nil
|
|
}
|
|
return nil, fmt.Errorf("stat db: %w", err)
|
|
}
|
|
|
|
root := resolveBackupRoot(dbPath)
|
|
if err := validateBackupRoot(root); err != nil {
|
|
return nil, err
|
|
}
|
|
now := backupNow()
|
|
|
|
created := make([]string, 0)
|
|
for _, period := range backupPeriods {
|
|
newFiles, err := ensurePeriodBackup(root, period, now, dbPath, configPath)
|
|
if err != nil {
|
|
return created, err
|
|
}
|
|
if len(newFiles) > 0 {
|
|
created = append(created, newFiles...)
|
|
}
|
|
}
|
|
|
|
return created, nil
|
|
}
|
|
|
|
func resolveBackupRoot(dbPath string) string {
|
|
if fromEnv := strings.TrimSpace(os.Getenv(envBackupDir)); fromEnv != "" {
|
|
return filepath.Clean(fromEnv)
|
|
}
|
|
return filepath.Join(filepath.Dir(dbPath), "backups")
|
|
}
|
|
|
|
func validateBackupRoot(root string) error {
|
|
absRoot, err := filepath.Abs(root)
|
|
if err != nil {
|
|
return fmt.Errorf("resolve backup root: %w", err)
|
|
}
|
|
|
|
if gitRoot, ok := findGitWorktreeRoot(absRoot); ok {
|
|
return fmt.Errorf("backup root must stay outside git worktree: %s is inside %s", absRoot, gitRoot)
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// findGitWorktreeRoot walks from path up toward the filesystem root and
// returns the first ancestor directory containing a ".git" entry, plus true.
// If no ancestor has one it returns ("", false). A file path is treated as
// its parent directory.
func findGitWorktreeRoot(path string) (string, bool) {
	dir := filepath.Clean(path)
	if info, err := os.Stat(dir); err == nil && !info.IsDir() {
		dir = filepath.Dir(dir)
	}

	for {
		if _, err := os.Stat(filepath.Join(dir, ".git")); err == nil {
			return dir, true
		}

		next := filepath.Dir(dir)
		if next == dir {
			// Reached the filesystem root without finding a .git entry.
			return "", false
		}
		dir = next
	}
}
|
|
|
|
func isBackupDisabled() bool {
|
|
val := strings.ToLower(strings.TrimSpace(os.Getenv(envBackupDisable)))
|
|
return val == "1" || val == "true" || val == "yes"
|
|
}
|
|
|
|
func ensurePeriodBackup(root string, period backupPeriod, now time.Time, dbPath, configPath string) ([]string, error) {
|
|
key := period.key(now)
|
|
periodDir := filepath.Join(root, period.name)
|
|
if err := os.MkdirAll(periodDir, 0755); err != nil {
|
|
return nil, fmt.Errorf("create %s backup dir: %w", period.name, err)
|
|
}
|
|
|
|
if hasBackupForKey(periodDir, key) {
|
|
return nil, nil
|
|
}
|
|
|
|
archiveName := fmt.Sprintf("qfs-backp-%s.zip", period.date(now))
|
|
archivePath := filepath.Join(periodDir, archiveName)
|
|
|
|
if err := createBackupArchive(archivePath, dbPath, configPath); err != nil {
|
|
return nil, fmt.Errorf("create %s backup archive: %w", period.name, err)
|
|
}
|
|
|
|
if err := writePeriodMarker(periodDir, key); err != nil {
|
|
return []string{archivePath}, err
|
|
}
|
|
|
|
if err := pruneOldBackups(periodDir, period.retention); err != nil {
|
|
return []string{archivePath}, err
|
|
}
|
|
|
|
return []string{archivePath}, nil
|
|
}
|
|
|
|
func hasBackupForKey(periodDir, key string) bool {
|
|
marker := periodMarker{Key: ""}
|
|
data, err := os.ReadFile(periodMarkerPath(periodDir))
|
|
if err != nil {
|
|
return false
|
|
}
|
|
if err := json.Unmarshal(data, &marker); err != nil {
|
|
return false
|
|
}
|
|
return marker.Key == key
|
|
}
|
|
|
|
// periodMarker is the JSON payload of a period directory's marker file; Key
// holds the most recent period key for which a backup was taken.
type periodMarker struct {
Key string `json:"key"`
}
|
|
|
|
// periodMarkerPath returns the location of the hidden JSON marker file that
// records the most recent backup key for a period directory.
func periodMarkerPath(periodDir string) string {
	const markerName = ".period.json"
	return filepath.Join(periodDir, markerName)
}
|
|
|
|
func writePeriodMarker(periodDir, key string) error {
|
|
data, err := json.MarshalIndent(periodMarker{Key: key}, "", " ")
|
|
if err != nil {
|
|
return err
|
|
}
|
|
return os.WriteFile(periodMarkerPath(periodDir), data, 0644)
|
|
}
|
|
|
|
func pruneOldBackups(periodDir string, keep int) error {
|
|
entries, err := os.ReadDir(periodDir)
|
|
if err != nil {
|
|
return fmt.Errorf("read backups dir: %w", err)
|
|
}
|
|
|
|
files := make([]os.DirEntry, 0, len(entries))
|
|
for _, entry := range entries {
|
|
if entry.IsDir() {
|
|
continue
|
|
}
|
|
if strings.HasSuffix(entry.Name(), ".zip") {
|
|
files = append(files, entry)
|
|
}
|
|
}
|
|
|
|
if len(files) <= keep {
|
|
return nil
|
|
}
|
|
|
|
sort.Slice(files, func(i, j int) bool {
|
|
infoI, errI := files[i].Info()
|
|
infoJ, errJ := files[j].Info()
|
|
if errI != nil || errJ != nil {
|
|
return files[i].Name() < files[j].Name()
|
|
}
|
|
return infoI.ModTime().Before(infoJ.ModTime())
|
|
})
|
|
|
|
for i := 0; i < len(files)-keep; i++ {
|
|
path := filepath.Join(periodDir, files[i].Name())
|
|
if err := os.Remove(path); err != nil {
|
|
return fmt.Errorf("remove old backup %s: %w", path, err)
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func createBackupArchive(destPath, dbPath, configPath string) error {
|
|
snapshotPath, cleanup, err := createSQLiteSnapshot(dbPath)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer cleanup()
|
|
|
|
file, err := os.Create(destPath)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer file.Close()
|
|
|
|
zipWriter := zip.NewWriter(file)
|
|
if err := addZipFileAs(zipWriter, snapshotPath, filepath.Base(dbPath)); err != nil {
|
|
_ = zipWriter.Close()
|
|
return err
|
|
}
|
|
|
|
if strings.TrimSpace(configPath) != "" {
|
|
_ = addZipOptionalFile(zipWriter, configPath)
|
|
}
|
|
|
|
if err := zipWriter.Close(); err != nil {
|
|
return err
|
|
}
|
|
return file.Sync()
|
|
}
|
|
|
|
// createSQLiteSnapshot produces a consistent point-in-time copy of the
// SQLite database at dbPath using "VACUUM INTO", returning the snapshot's
// temp-file path and a cleanup func that deletes it. On error the returned
// cleanup is a no-op and any temp file already created is removed here.
func createSQLiteSnapshot(dbPath string) (string, func(), error) {
// Reserve a unique temp filename, then delete the file itself: VACUUM
// INTO refuses to overwrite an existing file, so only the name is kept.
// (There is a small window between Remove and VACUUM where another
// process could take the name — acceptable for a local backup path.)
tempFile, err := os.CreateTemp("", "qfs-backup-*.db")
if err != nil {
return "", func() {}, err
}
tempPath := tempFile.Name()
if err := tempFile.Close(); err != nil {
_ = os.Remove(tempPath)
return "", func() {}, err
}
if err := os.Remove(tempPath); err != nil && !os.IsNotExist(err) {
return "", func() {}, err
}

// cleanup removes the snapshot; returned to the caller on success and
// invoked directly on every failure path below.
cleanup := func() {
_ = os.Remove(tempPath)
}

// Open the live database read path silently (no gorm query logging).
db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{
Logger: logger.Default.LogMode(logger.Silent),
})
if err != nil {
cleanup()
return "", func() {}, err
}

sqlDB, err := db.DB()
if err != nil {
cleanup()
return "", func() {}, err
}
// Close the connection when done so the snapshot isn't held open.
defer sqlDB.Close()

// Wait up to 5s on a locked database instead of failing immediately.
if err := db.Exec("PRAGMA busy_timeout = 5000").Error; err != nil {
cleanup()
return "", func() {}, fmt.Errorf("configure sqlite busy_timeout: %w", err)
}

// Escape single quotes so the path can be embedded in the SQL literal
// consumed by vacuumIntoWithRetry.
literalPath := strings.ReplaceAll(tempPath, "'", "''")
if err := vacuumIntoWithRetry(db, literalPath); err != nil {
cleanup()
return "", func() {}, err
}

return tempPath, cleanup, nil
}
|
|
|
|
func vacuumIntoWithRetry(db *gorm.DB, literalPath string) error {
|
|
var lastErr error
|
|
for attempt := 0; attempt < 3; attempt++ {
|
|
if err := db.Exec("VACUUM INTO '" + literalPath + "'").Error; err != nil {
|
|
lastErr = err
|
|
if !isSQLiteBusyError(err) {
|
|
return fmt.Errorf("create sqlite snapshot: %w", err)
|
|
}
|
|
time.Sleep(time.Duration(attempt+1) * 250 * time.Millisecond)
|
|
continue
|
|
}
|
|
return nil
|
|
}
|
|
return fmt.Errorf("create sqlite snapshot after retries: %w", lastErr)
|
|
}
|
|
|
|
// isSQLiteBusyError reports whether err looks like SQLite lock contention
// ("database is locked" / "database is busy"), i.e. a failure worth retrying.
func isSQLiteBusyError(err error) bool {
	if err == nil {
		return false
	}
	msg := strings.ToLower(err.Error())
	for _, needle := range []string{"database is locked", "database is busy"} {
		if strings.Contains(msg, needle) {
			return true
		}
	}
	return false
}
|
|
|
|
func addZipOptionalFile(writer *zip.Writer, path string) error {
|
|
if _, err := os.Stat(path); err != nil {
|
|
return nil
|
|
}
|
|
return addZipFile(writer, path)
|
|
}
|
|
|
|
func addZipFile(writer *zip.Writer, path string) error {
|
|
return addZipFileAs(writer, path, filepath.Base(path))
|
|
}
|
|
|
|
func addZipFileAs(writer *zip.Writer, path string, archiveName string) error {
|
|
in, err := os.Open(path)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer in.Close()
|
|
|
|
info, err := in.Stat()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
header, err := zip.FileInfoHeader(info)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
header.Name = archiveName
|
|
header.Method = zip.Deflate
|
|
|
|
out, err := writer.CreateHeader(header)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
_, err = io.Copy(out, in)
|
|
return err
|
|
}
|