Compare commits
21 Commits
v1.6.2
...
65871a8b04
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
65871a8b04 | ||
|
|
b27152b353 | ||
|
|
2e69089bd5 | ||
|
|
be1c962fec | ||
|
|
57215cb7b3 | ||
|
|
31dce9c721 | ||
|
|
06d0e8b14b | ||
|
|
b1b50ce2ef | ||
|
|
6ab1e9899e | ||
|
|
a1d21927a3 | ||
|
|
a90c07c879 | ||
|
|
e9307c4bad | ||
|
|
1b48401828 | ||
|
|
4a86f7b7ba | ||
|
|
955467fbea | ||
|
|
9ddffe48e9 | ||
|
|
4732605925 | ||
|
|
d318a7f462 | ||
|
|
1bec110d91 | ||
|
|
6392e4b4a9 | ||
|
|
8f7defdb8a |
@@ -1,5 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
repo_root="$(git rev-parse --show-toplevel)"
|
|
||||||
"$repo_root/scripts/check-secrets.sh"
|
|
||||||
22
.gitignore
vendored
22
.gitignore
vendored
@@ -1,16 +1,5 @@
|
|||||||
# QuoteForge
|
# QuoteForge
|
||||||
config.yaml
|
config.yaml
|
||||||
.env
|
|
||||||
.env.*
|
|
||||||
*.pem
|
|
||||||
*.key
|
|
||||||
*.p12
|
|
||||||
*.pfx
|
|
||||||
*.crt
|
|
||||||
id_rsa
|
|
||||||
id_rsa.*
|
|
||||||
secrets.yaml
|
|
||||||
secrets.yml
|
|
||||||
|
|
||||||
# Local SQLite database (contains encrypted credentials)
|
# Local SQLite database (contains encrypted credentials)
|
||||||
/data/*.db
|
/data/*.db
|
||||||
@@ -23,7 +12,6 @@ secrets.yml
|
|||||||
/importer
|
/importer
|
||||||
/cron
|
/cron
|
||||||
/bin/
|
/bin/
|
||||||
qfs
|
|
||||||
|
|
||||||
# Local Go build cache used in sandboxed runs
|
# Local Go build cache used in sandboxed runs
|
||||||
.gocache/
|
.gocache/
|
||||||
@@ -75,12 +63,4 @@ Network Trash Folder
|
|||||||
Temporary Items
|
Temporary Items
|
||||||
.apdisk
|
.apdisk
|
||||||
|
|
||||||
# Release artifacts (binaries, archives, checksums), but keep markdown notes tracked
|
releases/
|
||||||
releases/*
|
|
||||||
!releases/README.md
|
|
||||||
!releases/memory/
|
|
||||||
!releases/memory/**
|
|
||||||
!releases/**/
|
|
||||||
releases/**/*
|
|
||||||
!releases/README.md
|
|
||||||
!releases/*/RELEASE_NOTES.md
|
|
||||||
|
|||||||
3
.gitmodules
vendored
3
.gitmodules
vendored
@@ -1,3 +0,0 @@
|
|||||||
[submodule "bible"]
|
|
||||||
path = bible
|
|
||||||
url = https://git.mchus.pro/mchus/bible.git
|
|
||||||
11
AGENTS.md
11
AGENTS.md
@@ -1,11 +0,0 @@
|
|||||||
# QuoteForge — Instructions for Codex
|
|
||||||
|
|
||||||
## Shared Engineering Rules
|
|
||||||
Read `bible/` — shared rules for all projects (CSV, logging, DB, tables, background tasks, code style).
|
|
||||||
Start with `bible/rules/patterns/` for specific contracts.
|
|
||||||
|
|
||||||
## Project Architecture
|
|
||||||
Read `bible-local/` — QuoteForge specific architecture.
|
|
||||||
Read order: `bible-local/README.md` → relevant files for the task.
|
|
||||||
|
|
||||||
Every architectural decision specific to this project must be recorded in `bible-local/`.
|
|
||||||
168
CLAUDE.md
168
CLAUDE.md
@@ -1,17 +1,163 @@
|
|||||||
# QuoteForge — Instructions for Claude
|
# QuoteForge - Claude Code Instructions
|
||||||
|
|
||||||
## Shared Engineering Rules
|
## Overview
|
||||||
Read `bible/` — shared rules for all projects (CSV, logging, DB, tables, background tasks, code style).
|
Корпоративный конфигуратор серверов и формирование КП. MariaDB (RFQ_LOG) + SQLite для оффлайн.
|
||||||
Start with `bible/rules/patterns/` for specific contracts.
|
|
||||||
|
|
||||||
## Project Architecture
|
## Development Phases
|
||||||
Read `bible-local/` — QuoteForge specific architecture.
|
|
||||||
Read order: `bible-local/README.md` → relevant files for the task.
|
|
||||||
|
|
||||||
Every architectural decision specific to this project must be recorded in `bible-local/`.
|
### Phase 1: Pricelists in MariaDB ✅ DONE
|
||||||
|
### Phase 2: Local SQLite Database ✅ DONE
|
||||||
|
|
||||||
|
### Phase 2.5: Full Offline Mode 🔶 IN PROGRESS
|
||||||
|
**Local-first architecture:** приложение ВСЕГДА работает с SQLite, MariaDB только для синхронизации.
|
||||||
|
|
||||||
|
**Принцип работы:**
|
||||||
|
- ВСЕ операции (CRUD) выполняются в SQLite
|
||||||
|
- При создании конфигурации:
|
||||||
|
1. Если online → проверить новые прайслисты на сервере → скачать если есть
|
||||||
|
2. Далее работаем с local_pricelists (и online, и offline одинаково)
|
||||||
|
- Background sync: push pending_changes → pull updates
|
||||||
|
|
||||||
|
**DONE:**
|
||||||
|
- ✅ Sync queue table (pending_changes) - `internal/localdb/models.go`
|
||||||
|
- ✅ Model converters: MariaDB ↔ SQLite - `internal/localdb/converters.go`
|
||||||
|
- ✅ LocalConfigurationService: все CRUD через SQLite - `internal/services/local_configuration.go`
|
||||||
|
- ✅ Pre-create pricelist check: `SyncPricelistsIfNeeded()` - `internal/services/sync/service.go`
|
||||||
|
- ✅ Push pending changes: `PushPendingChanges()` - sync service + handlers
|
||||||
|
- ✅ Sync API endpoints: `/api/sync/push`, `/pending/count`, `/pending`
|
||||||
|
- ✅ Integrate LocalConfigurationService in main.go (replace ConfigurationService)
|
||||||
|
- ✅ Add routes for new sync endpoints (`/api/sync/push`, `/pending/count`, `/pending`)
|
||||||
|
- ✅ ConfigurationGetter interface for handler compatibility
|
||||||
|
- ✅ Background sync worker: auto-sync every 5min (push + pull) - `internal/services/sync/worker.go`
|
||||||
|
- ✅ UI: sync status indicator (pending badge + sync button + offline/online dot) - `web/templates/partials/sync_status.html`
|
||||||
|
- ✅ RefreshPrices for local mode:
|
||||||
|
- `RefreshPrices()` / `RefreshPricesNoAuth()` в `local_configuration.go`
|
||||||
|
- Берёт цены из `local_components.current_price`
|
||||||
|
- Graceful degradation при отсутствии компонента
|
||||||
|
- Добавлено поле `price_updated_at` в `LocalConfiguration` (models.go:72)
|
||||||
|
- Обновлены converters для PriceUpdatedAt
|
||||||
|
- UI кнопка "Пересчитать цену" работает offline/online
|
||||||
|
- ✅ Fixed sync bugs:
|
||||||
|
- Duplicate entry error при update конфигураций (`sync/service.go:334-365`)
|
||||||
|
- pushConfigurationUpdate теперь проверяет наличие server_id перед update
|
||||||
|
- Если нет ID → получает из LocalConfiguration.ServerID или ищет на сервере
|
||||||
|
- Fixed setup.go: `settings.Password` → `settings.PasswordEncrypted`
|
||||||
|
|
||||||
|
**TODO:**
|
||||||
|
- ❌ Conflict resolution (Phase 4, last-write-wins default)
|
||||||
|
|
||||||
|
### UI Improvements ✅ MOSTLY DONE
|
||||||
|
|
||||||
|
**1. Sync UI + pricelist badge: ✅ DONE**
|
||||||
|
- ✅ `sync_status.html`: SVG иконки Online/Offline (кликабельные → открывают модал)
|
||||||
|
- ✅ Кнопка sync → иконка circular arrows (только full sync)
|
||||||
|
- ✅ Модальное окно "Статус системы" в `base.html` (info о БД, ошибки синхронизации)
|
||||||
|
- ✅ `configs.html`: badge с версией активного прайслиста
|
||||||
|
- ✅ Загрузка через `/api/pricelists/latest` при DOMContentLoaded
|
||||||
|
- ✅ Удалён dropdown с Push changes (упрощение UI)
|
||||||
|
|
||||||
|
**2. Прайслисты → вкладка в "Администратор цен": ✅ DONE**
|
||||||
|
- ✅ `base.html`: убрана ссылка "Прайслисты" из навигации
|
||||||
|
- ✅ `admin_pricing.html`: добавлена вкладка "Прайслисты"
|
||||||
|
- ✅ Логика перенесена из `pricelists.html` (table, create modal, CRUD)
|
||||||
|
- ✅ Route `/pricelists` → редирект на `/admin/pricing?tab=pricelists`
|
||||||
|
- ✅ Поддержка URL param `?tab=pricelists`
|
||||||
|
|
||||||
|
**3. Модал "Настройка цены" - кол-во котировок с учётом периода: ❌ TODO**
|
||||||
|
- Текущее: показывает только общее кол-во котировок
|
||||||
|
- Новое: показывать `N (всего: M)` где N - за выбранный период, M - всего
|
||||||
|
- ❌ `admin_pricing.html`: обновить `#modal-quote-count`
|
||||||
|
- ❌ `admin_pricing_handler.go`: в `/api/admin/pricing/preview` возвращать `quote_count_period` + `quote_count_total`
|
||||||
|
|
||||||
|
**4. Страница настроек: ❌ ОТЛОЖЕНО**
|
||||||
|
- Перенесено в Phase 3 (после основных UI улучшений)
|
||||||
|
|
||||||
|
### Phase 3: Projects and Specifications
|
||||||
|
- qt_projects, qt_specifications tables (MariaDB)
|
||||||
|
- Replace qt_configurations → Project/Specification hierarchy
|
||||||
|
- Fields: opty, customer_requirement, variant, qty, rev
|
||||||
|
- Local projects/specs with server sync
|
||||||
|
|
||||||
|
### Phase 4: Price Versioning
|
||||||
|
- Bind specifications to pricelist versions
|
||||||
|
- Price diff comparison
|
||||||
|
- Auto-cleanup expired pricelists (>1 year, usage_count=0)
|
||||||
|
|
||||||
|
## Tech Stack
|
||||||
|
Go 1.22+ | Gin | GORM | MariaDB 11 | SQLite (glebarez/sqlite) | htmx + Tailwind CDN | excelize
|
||||||
|
|
||||||
|
## Key Tables
|
||||||
|
|
||||||
|
### READ-ONLY (external systems)
|
||||||
|
- `lot` (lot_name PK, lot_description)
|
||||||
|
- `lot_log` (lot, supplier, date, price, quality, comments)
|
||||||
|
- `supplier` (supplier_name PK)
|
||||||
|
|
||||||
|
### MariaDB (qt_* prefix)
|
||||||
|
- `qt_lot_metadata` - component prices, methods, popularity
|
||||||
|
- `qt_categories` - category codes and names
|
||||||
|
- `qt_pricelists` - version snapshots (YYYY-MM-DD-NNN format)
|
||||||
|
- `qt_pricelist_items` - prices per pricelist
|
||||||
|
- `qt_projects` - uuid, opty, customer_requirement, name (Phase 3)
|
||||||
|
- `qt_specifications` - project_id, pricelist_id, variant, rev, qty, items JSON (Phase 3)
|
||||||
|
|
||||||
|
### SQLite (data/quoteforge.db)
|
||||||
|
- `connection_settings` - encrypted DB credentials (PasswordEncrypted field)
|
||||||
|
- `local_pricelists/items` - cached from server
|
||||||
|
- `local_components` - lot cache for offline search (with current_price)
|
||||||
|
- `local_configurations` - UUID, items, price_updated_at, sync_status (pending/synced/conflict), server_id
|
||||||
|
- `local_projects/specifications` - Phase 3
|
||||||
|
- `pending_changes` - sync queue (entity_type, uuid, op, payload, created_at, attempts, last_error)
|
||||||
|
|
||||||
|
## Business Logic
|
||||||
|
|
||||||
|
**Part number parsing:** `CPU_AMD_9654` → category=`CPU`, model=`AMD_9654`
|
||||||
|
|
||||||
|
**Price methods:** manual | median | average | weighted_median
|
||||||
|
|
||||||
|
**Price freshness:** fresh (<30d, ≥3 quotes) | normal (<60d) | stale (<90d) | critical
|
||||||
|
|
||||||
|
**Pricelist version:** `YYYY-MM-DD-NNN` (e.g., `2024-01-31-001`)
|
||||||
|
|
||||||
|
## API Endpoints
|
||||||
|
|
||||||
|
| Group | Endpoints |
|
||||||
|
|-------|-----------|
|
||||||
|
| Setup | GET/POST /setup, POST /setup/test |
|
||||||
|
| Components | GET /api/components, /api/categories |
|
||||||
|
| Pricelists | CRUD /api/pricelists, GET /latest, POST /compare |
|
||||||
|
| Projects | CRUD /api/projects/:uuid (Phase 3) |
|
||||||
|
| Specs | CRUD /api/specs/:uuid, POST /upgrade, GET /diff (Phase 3) |
|
||||||
|
| Configs | POST /:uuid/refresh-prices (обновить цены из local_components) |
|
||||||
|
| Sync | GET /status, POST /components, /pricelists, /push, /pull, /resolve-conflict |
|
||||||
|
| Export | GET /api/specs/:uuid/export, /api/projects/:uuid/export |
|
||||||
|
|
||||||
|
## Commands
|
||||||
```bash
|
```bash
|
||||||
go build ./cmd/qfs && go vet ./... # verify
|
# Development
|
||||||
go run ./cmd/qfs # run
|
go run ./cmd/qfs # Dev server
|
||||||
make build-release # release build
|
make run # Dev server (via Makefile)
|
||||||
|
|
||||||
|
# Production build
|
||||||
|
make build-release # Optimized build with version (recommended)
|
||||||
|
VERSION=$(git describe --tags --always --dirty)
|
||||||
|
CGO_ENABLED=0 go build -ldflags="-s -w -X main.Version=$VERSION" -o bin/qfs ./cmd/qfs
|
||||||
|
|
||||||
|
# Cron jobs
|
||||||
|
go run ./cmd/cron -job=cleanup-pricelists # Remove old unused pricelists
|
||||||
|
go run ./cmd/cron -job=update-prices # Recalculate all prices
|
||||||
|
go run ./cmd/cron -job=update-popularity # Update popularity scores
|
||||||
|
|
||||||
|
# Check version
|
||||||
|
./bin/qfs -version
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Code Style
|
||||||
|
- gofmt, structured logging (slog), wrap errors with context
|
||||||
|
- snake_case files, PascalCase types
|
||||||
|
- RBAC disabled: DB username = user_id via `models.EnsureDBUser()`
|
||||||
|
|
||||||
|
## UI Guidelines
|
||||||
|
- htmx (hx-get/post/target/swap), Tailwind CDN
|
||||||
|
- Freshness colors: green (fresh) → yellow → orange → red (critical)
|
||||||
|
- Sync status + offline indicator in header
|
||||||
|
|||||||
178
LOCAL_FIRST_INTEGRATION.md
Normal file
178
LOCAL_FIRST_INTEGRATION.md
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
# Local-First Architecture Integration Guide
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
QuoteForge теперь поддерживает local-first архитектуру: приложение ВСЕГДА работает с SQLite (localdb), MariaDB используется только для синхронизации.
|
||||||
|
|
||||||
|
## Реализованные компоненты
|
||||||
|
|
||||||
|
### 1. Конвертеры моделей (`internal/localdb/converters.go`)
|
||||||
|
|
||||||
|
Конвертеры между MariaDB и SQLite моделями:
|
||||||
|
- `ConfigurationToLocal()` / `LocalToConfiguration()`
|
||||||
|
- `PricelistToLocal()` / `LocalToPricelist()`
|
||||||
|
- `ComponentToLocal()` / `LocalToComponent()`
|
||||||
|
|
||||||
|
### 2. LocalDB методы (`internal/localdb/localdb.go`)
|
||||||
|
|
||||||
|
Добавлены методы для работы с pending changes:
|
||||||
|
- `MarkChangesSynced(ids []int64)` - помечает изменения как синхронизированные
|
||||||
|
- `GetPendingCount()` - возвращает количество несинхронизированных изменений
|
||||||
|
|
||||||
|
### 3. Sync Service расширения (`internal/services/sync/service.go`)
|
||||||
|
|
||||||
|
Новые методы:
|
||||||
|
- `SyncPricelistsIfNeeded()` - проверяет и скачивает новые прайслисты при необходимости
|
||||||
|
- `PushPendingChanges()` - отправляет все pending changes на сервер
|
||||||
|
- `pushSingleChange()` - обрабатывает один pending change
|
||||||
|
- `pushConfigurationCreate/Update/Delete()` - специфичные методы для конфигураций
|
||||||
|
|
||||||
|
**ВАЖНО**: Конструктор изменен - теперь требует `ConfigurationRepository`:
|
||||||
|
```go
|
||||||
|
syncService := sync.NewService(pricelistRepo, configRepo, local)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. LocalConfigurationService (`internal/services/local_configuration.go`)
|
||||||
|
|
||||||
|
Новый сервис для работы с конфигурациями в local-first режиме:
|
||||||
|
- Все операции CRUD работают через SQLite
|
||||||
|
- Автоматически добавляет изменения в pending_changes
|
||||||
|
- При создании конфигурации (если online) проверяет новые прайслисты
|
||||||
|
|
||||||
|
```go
|
||||||
|
localConfigService := services.NewLocalConfigurationService(
|
||||||
|
localDB,
|
||||||
|
syncService,
|
||||||
|
quoteService,
|
||||||
|
isOnlineFunc,
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Sync Handler расширения (`internal/handlers/sync.go`)
|
||||||
|
|
||||||
|
Новые endpoints:
|
||||||
|
- `POST /api/sync/push` - отправить pending changes на сервер
|
||||||
|
- `GET /api/sync/pending/count` - получить количество pending changes
|
||||||
|
- `GET /api/sync/pending` - получить список pending changes
|
||||||
|
|
||||||
|
## Интеграция
|
||||||
|
|
||||||
|
### Шаг 1: Обновить main.go
|
||||||
|
|
||||||
|
```go
|
||||||
|
// В cmd/qfs/main.go
|
||||||
|
syncService := sync.NewService(pricelistRepo, configRepo, local)
|
||||||
|
|
||||||
|
// Создать isOnline функцию
|
||||||
|
isOnlineFunc := func() bool {
|
||||||
|
sqlDB, err := db.DB()
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return sqlDB.Ping() == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Создать LocalConfigurationService
|
||||||
|
localConfigService := services.NewLocalConfigurationService(
|
||||||
|
local,
|
||||||
|
syncService,
|
||||||
|
quoteService,
|
||||||
|
isOnlineFunc,
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Шаг 2: Обновить ConfigurationHandler
|
||||||
|
|
||||||
|
Заменить `ConfigurationService` на `LocalConfigurationService` в handlers:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Было:
|
||||||
|
configHandler := handlers.NewConfigurationHandler(configService, exportService)
|
||||||
|
|
||||||
|
// Стало:
|
||||||
|
configHandler := handlers.NewConfigurationHandler(localConfigService, exportService)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Шаг 3: Добавить endpoints для sync
|
||||||
|
|
||||||
|
В роутере добавить:
|
||||||
|
```go
|
||||||
|
syncGroup := router.Group("/api/sync")
|
||||||
|
{
|
||||||
|
syncGroup.POST("/push", syncHandler.PushPendingChanges)
|
||||||
|
syncGroup.GET("/pending/count", syncHandler.GetPendingCount)
|
||||||
|
syncGroup.GET("/pending", syncHandler.GetPendingChanges)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Как это работает
|
||||||
|
|
||||||
|
### Создание конфигурации
|
||||||
|
|
||||||
|
1. Пользователь создает конфигурацию
|
||||||
|
2. `LocalConfigurationService.Create()`:
|
||||||
|
- Если online → `SyncPricelistsIfNeeded()` проверяет новые прайслисты
|
||||||
|
- Сохраняет конфигурацию в SQLite
|
||||||
|
- Добавляет в `pending_changes` с operation="create"
|
||||||
|
3. Конфигурация доступна локально сразу
|
||||||
|
|
||||||
|
### Синхронизация с сервером
|
||||||
|
|
||||||
|
**Manual sync:**
|
||||||
|
```bash
|
||||||
|
POST /api/sync/push
|
||||||
|
```
|
||||||
|
|
||||||
|
**Background sync (TODO):**
|
||||||
|
- Периодический worker вызывает `syncService.PushPendingChanges()`
|
||||||
|
- Проверяет online статус
|
||||||
|
- Отправляет все pending changes на сервер
|
||||||
|
- Удаляет успешно синхронизированные записи
|
||||||
|
|
||||||
|
### Offline режим
|
||||||
|
|
||||||
|
1. Все операции работают нормально через SQLite
|
||||||
|
2. Изменения копятся в `pending_changes`
|
||||||
|
3. При восстановлении соединения автоматически синхронизируются
|
||||||
|
|
||||||
|
## Pending Changes Queue
|
||||||
|
|
||||||
|
Таблица `pending_changes`:
|
||||||
|
```go
|
||||||
|
type PendingChange struct {
|
||||||
|
ID int64 // Auto-increment
|
||||||
|
EntityType string // "configuration", "project", "specification"
|
||||||
|
EntityUUID string // UUID сущности
|
||||||
|
Operation string // "create", "update", "delete"
|
||||||
|
Payload string // JSON snapshot сущности
|
||||||
|
CreatedAt time.Time
|
||||||
|
Attempts int // Счетчик попыток синхронизации
|
||||||
|
LastError string // Последняя ошибка синхронизации
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## TODO для Phase 2.5
|
||||||
|
|
||||||
|
- [ ] Background sync worker (автоматическая синхронизация каждые N минут)
|
||||||
|
- [ ] Conflict resolution (при конфликтах обновления)
|
||||||
|
- [ ] UI: pending counter в header
|
||||||
|
- [ ] UI: manual sync button
|
||||||
|
- [ ] UI: conflict alerts
|
||||||
|
- [ ] Retry logic для failed pending changes
|
||||||
|
- [ ] RefreshPrices для local mode (через local_components)
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Compile
|
||||||
|
go build ./cmd/qfs
|
||||||
|
|
||||||
|
# Run
|
||||||
|
./quoteforge
|
||||||
|
|
||||||
|
# Check pending changes
|
||||||
|
curl http://localhost:8080/api/sync/pending/count
|
||||||
|
|
||||||
|
# Manual sync
|
||||||
|
curl -X POST http://localhost:8080/api/sync/push
|
||||||
|
```
|
||||||
121
MIGRATION_PRICE_REFRESH.md
Normal file
121
MIGRATION_PRICE_REFRESH.md
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
# Миграция: Функционал пересчета цен в конфигураторе
|
||||||
|
|
||||||
|
## Описание изменений
|
||||||
|
|
||||||
|
Добавлен функционал автоматического обновления цен компонентов в сохраненных конфигурациях.
|
||||||
|
|
||||||
|
### Новые возможности
|
||||||
|
|
||||||
|
1. **Кнопка "Пересчитать цену"** на странице конфигуратора
|
||||||
|
- Обновляет цены всех компонентов в конфигурации до актуальных значений из базы данных
|
||||||
|
- Сохраняет количество компонентов, обновляя только цены
|
||||||
|
- Отображает время последнего обновления цен
|
||||||
|
|
||||||
|
2. **Поле `price_updated_at`** в таблице конфигураций
|
||||||
|
- Хранит дату и время последнего обновления цен
|
||||||
|
- Отображается на странице конфигуратора в удобном формате ("5 мин. назад", "2 ч. назад" и т.д.)
|
||||||
|
|
||||||
|
### Изменения в базе данных
|
||||||
|
|
||||||
|
Добавлено новое поле в таблицу `qt_configurations`:
|
||||||
|
```sql
|
||||||
|
ALTER TABLE qt_configurations
|
||||||
|
ADD COLUMN price_updated_at TIMESTAMP NULL DEFAULT NULL
|
||||||
|
AFTER server_count;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Новый API endpoint
|
||||||
|
|
||||||
|
```
|
||||||
|
POST /api/configs/:uuid/refresh-prices
|
||||||
|
```
|
||||||
|
|
||||||
|
**Требования:**
|
||||||
|
- Авторизация: Bearer Token
|
||||||
|
- Роль: editor или выше
|
||||||
|
|
||||||
|
**Ответ:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"uuid": "...",
|
||||||
|
"name": "Конфигурация 1",
|
||||||
|
"items": [
|
||||||
|
{
|
||||||
|
"lot_name": "CPU_AMD_9654",
|
||||||
|
"quantity": 2,
|
||||||
|
"unit_price": 11500.00
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"total_price": 23000.00,
|
||||||
|
"price_updated_at": "2026-01-31T12:34:56Z",
|
||||||
|
...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Применение изменений
|
||||||
|
|
||||||
|
### 1. Обновление базы данных
|
||||||
|
|
||||||
|
Запустите сервер с флагом миграции:
|
||||||
|
```bash
|
||||||
|
./quoteforge -migrate -config config.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
Или выполните SQL миграцию вручную:
|
||||||
|
```bash
|
||||||
|
mysql -u user -p RFQ_LOG < migrations/004_add_price_updated_at.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Перезапуск сервера
|
||||||
|
|
||||||
|
После применения миграции перезапустите сервер:
|
||||||
|
```bash
|
||||||
|
./quoteforge -config config.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
## Использование
|
||||||
|
|
||||||
|
1. Откройте любую сохраненную конфигурацию в конфигураторе
|
||||||
|
2. Нажмите кнопку **"Пересчитать цену"** рядом с кнопкой "Сохранить"
|
||||||
|
3. Все цены компонентов будут обновлены до актуальных значений
|
||||||
|
4. Конфигурация автоматически сохраняется с обновленными ценами
|
||||||
|
5. Под кнопками отображается время последнего обновления цен
|
||||||
|
|
||||||
|
## Технические детали
|
||||||
|
|
||||||
|
### Измененные файлы
|
||||||
|
|
||||||
|
- `internal/models/configuration.go` - добавлено поле `PriceUpdatedAt`
|
||||||
|
- `internal/services/configuration.go` - добавлен метод `RefreshPrices()`
|
||||||
|
- `internal/handlers/configuration.go` - добавлен обработчик `RefreshPrices()`
|
||||||
|
- `cmd/qfs/main.go` - добавлен маршрут `/api/configs/:uuid/refresh-prices`
|
||||||
|
- `web/templates/index.html` - добавлена кнопка и JavaScript функции
|
||||||
|
- `migrations/004_add_price_updated_at.sql` - SQL миграция
|
||||||
|
- `CLAUDE.md` - обновлена документация
|
||||||
|
|
||||||
|
### Логика обновления цен
|
||||||
|
|
||||||
|
1. Получение конфигурации по UUID
|
||||||
|
2. Проверка прав доступа (пользователь должен быть владельцем)
|
||||||
|
3. Для каждого компонента в конфигурации:
|
||||||
|
- Получение актуальной цены из `qt_lot_metadata.current_price`
|
||||||
|
- Обновление `unit_price` в items
|
||||||
|
4. Пересчет `total_price` с учетом `server_count`
|
||||||
|
5. Установка `price_updated_at` на текущее время
|
||||||
|
6. Сохранение конфигурации
|
||||||
|
|
||||||
|
### Обработка ошибок
|
||||||
|
|
||||||
|
- Если компонент не найден или у него нет цены - сохраняется старая цена
|
||||||
|
- При ошибках доступа возвращается 403 Forbidden
|
||||||
|
- При отсутствии конфигурации возвращается 404 Not Found
|
||||||
|
|
||||||
|
## Отмена изменений (Rollback)
|
||||||
|
|
||||||
|
Для отмены миграции выполните:
|
||||||
|
```sql
|
||||||
|
ALTER TABLE qt_configurations DROP COLUMN price_updated_at;
|
||||||
|
```
|
||||||
|
|
||||||
|
**Внимание:** После отмены миграции функционал пересчета цен перестанет работать корректно.
|
||||||
9
Makefile
9
Makefile
@@ -1,4 +1,4 @@
|
|||||||
.PHONY: build build-release clean test run version install-hooks
|
.PHONY: build build-release clean test run version
|
||||||
|
|
||||||
# Get version from git
|
# Get version from git
|
||||||
VERSION := $(shell git describe --tags --always --dirty 2>/dev/null || echo "dev")
|
VERSION := $(shell git describe --tags --always --dirty 2>/dev/null || echo "dev")
|
||||||
@@ -72,12 +72,6 @@ deps:
|
|||||||
go mod download
|
go mod download
|
||||||
go mod tidy
|
go mod tidy
|
||||||
|
|
||||||
# Install local git hooks
|
|
||||||
install-hooks:
|
|
||||||
git config core.hooksPath .githooks
|
|
||||||
chmod +x .githooks/pre-commit scripts/check-secrets.sh
|
|
||||||
@echo "Installed git hooks from .githooks/"
|
|
||||||
|
|
||||||
# Help
|
# Help
|
||||||
help:
|
help:
|
||||||
@echo "QuoteForge Server (qfs) - Build Commands"
|
@echo "QuoteForge Server (qfs) - Build Commands"
|
||||||
@@ -98,7 +92,6 @@ help:
|
|||||||
@echo " run Run development server"
|
@echo " run Run development server"
|
||||||
@echo " watch Run with auto-restart (requires entr)"
|
@echo " watch Run with auto-restart (requires entr)"
|
||||||
@echo " deps Install/update dependencies"
|
@echo " deps Install/update dependencies"
|
||||||
@echo " install-hooks Install local git hooks (secret scan on commit)"
|
|
||||||
@echo " help Show this help"
|
@echo " help Show this help"
|
||||||
@echo ""
|
@echo ""
|
||||||
@echo "Current version: $(VERSION)"
|
@echo "Current version: $(VERSION)"
|
||||||
|
|||||||
425
README.md
425
README.md
@@ -1,53 +1,414 @@
|
|||||||
# QuoteForge
|
# QuoteForge
|
||||||
|
|
||||||
Local-first desktop web app for server configuration, quotation, and project work.
|
**Server Configuration & Quotation Tool**
|
||||||
|
|
||||||
Runtime model:
|
QuoteForge — корпоративный инструмент для конфигурирования серверов и формирования коммерческих предложений (КП). Приложение интегрируется с существующей базой данных RFQ_LOG.
|
||||||
- user work is stored in local SQLite;
|
|
||||||
- MariaDB is used only for setup checks and background sync;
|
|
||||||
- HTTP server binds to loopback only.
|
|
||||||
|
|
||||||
## What the app does
|

|
||||||
|

|
||||||
|

|
||||||
|
|
||||||
- configuration editor with price refresh from synced pricelists;
|
## Возможности
|
||||||
- projects with variants and ordered configurations;
|
|
||||||
- vendor BOM import and PN -> LOT resolution;
|
|
||||||
- revision history with rollback;
|
|
||||||
- rotating local backups.
|
|
||||||
|
|
||||||
## Run
|
### Для пользователей
|
||||||
|
- 📱 **Mobile-first интерфейс** — удобная работа с телефона и планшета
|
||||||
|
- 🖥️ **Конфигуратор серверов** — пошаговый выбор компонентов с проверкой совместимости
|
||||||
|
- 💰 **Автоматический расчёт цен** — актуальные цены на основе истории закупок
|
||||||
|
- 📊 **Экспорт в CSV/XLSX** — готовые спецификации для клиентов
|
||||||
|
- 💾 **Сохранение конфигураций** — история и шаблоны для повторного использования
|
||||||
|
|
||||||
|
### Для ценовых администраторов
|
||||||
|
- 📈 **Умный расчёт цен** — медиана, взвешенная медиана, среднее
|
||||||
|
- 🎯 **Система алертов** — уведомления о популярных компонентах с устаревшими ценами
|
||||||
|
- 📉 **Аналитика использования** — какие компоненты востребованы в КП
|
||||||
|
- ⚙️ **Гибкие настройки** — периоды расчёта, методы, ручные переопределения
|
||||||
|
|
||||||
|
### Индикация актуальности цен
|
||||||
|
| Цвет | Статус | Условие |
|
||||||
|
|------|--------|---------|
|
||||||
|
| 🟢 Зелёный | Свежая | < 30 дней, ≥ 3 источника |
|
||||||
|
| 🟡 Жёлтый | Нормальная | 30-60 дней |
|
||||||
|
| 🟠 Оранжевый | Устаревающая | 60-90 дней |
|
||||||
|
| 🔴 Красный | Устаревшая | > 90 дней или нет данных |
|
||||||
|
|
||||||
|
## Технологии
|
||||||
|
|
||||||
|
- **Backend:** Go 1.22+, Gin, GORM
|
||||||
|
- **Frontend:** HTML, Tailwind CSS, htmx
|
||||||
|
- **Database:** MariaDB 11+
|
||||||
|
- **Export:** excelize (XLSX), encoding/csv
|
||||||
|
|
||||||
|
## Требования
|
||||||
|
|
||||||
|
- Go 1.22 или выше
|
||||||
|
- MariaDB 11.x (или MySQL 8.x)
|
||||||
|
- ~50 MB дискового пространства
|
||||||
|
|
||||||
|
## Установка
|
||||||
|
|
||||||
|
### 1. Клонирование репозитория
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
go run ./cmd/qfs
|
git clone https://github.com/your-company/quoteforge.git
|
||||||
|
cd quoteforge
|
||||||
```
|
```
|
||||||
|
|
||||||
Useful commands:
|
### 2. Настройка конфигурации
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cp config.example.yaml config.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
Отредактируйте `config.yaml`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
server:
|
||||||
|
host: "0.0.0.0"
|
||||||
|
port: 8080
|
||||||
|
mode: "release"
|
||||||
|
|
||||||
|
database:
|
||||||
|
host: "localhost"
|
||||||
|
port: 3306
|
||||||
|
name: "RFQ_LOG"
|
||||||
|
user: "quoteforge"
|
||||||
|
password: "your-secure-password"
|
||||||
|
|
||||||
|
auth:
|
||||||
|
jwt_secret: "your-jwt-secret-min-32-chars"
|
||||||
|
token_expiry: "24h"
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Миграции базы данных
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
go run ./cmd/qfs -migrate
|
go run ./cmd/qfs -migrate
|
||||||
|
```
|
||||||
|
|
||||||
|
### Мигратор OPS -> проекты (preview/apply)
|
||||||
|
|
||||||
|
Переносит квоты, чьи названия начинаются с `OPS-xxxx` (где `x` — цифра), в проект `OPS-xxxx`.
|
||||||
|
Если проекта нет, он будет создан; если архивный — реактивирован.
|
||||||
|
|
||||||
|
Сначала всегда смотрите preview:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go run ./cmd/migrate_ops_projects -config config.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
Применение изменений:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go run ./cmd/migrate_ops_projects -config config.yaml -apply
|
||||||
|
```
|
||||||
|
|
||||||
|
Без интерактивного подтверждения:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go run ./cmd/migrate_ops_projects -config config.yaml -apply -yes
|
||||||
|
```
|
||||||
|
|
||||||
|
### Минимальные права БД для пользователя квотаций
|
||||||
|
|
||||||
|
Если нужен пользователь, который может работать с конфигурациями, но не может создавать/удалять прайслисты:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- 1) Создать пользователя (если его ещё нет)
|
||||||
|
CREATE USER IF NOT EXISTS 'quote_user'@'%' IDENTIFIED BY 'StrongPassword!';
|
||||||
|
|
||||||
|
-- 2) Если пользователь уже существовал, принудительно обновить пароль
|
||||||
|
ALTER USER 'quote_user'@'%' IDENTIFIED BY 'StrongPassword!';
|
||||||
|
|
||||||
|
-- 3) (Опционально, но рекомендуется) удалить дубли пользователя с другими host,
|
||||||
|
-- чтобы не возникало конфликтов вида user@localhost vs user@'%'
|
||||||
|
DROP USER IF EXISTS 'quote_user'@'localhost';
|
||||||
|
DROP USER IF EXISTS 'quote_user'@'127.0.0.1';
|
||||||
|
DROP USER IF EXISTS 'quote_user'@'::1';
|
||||||
|
|
||||||
|
-- 4) Сбросить лишние права
|
||||||
|
REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'quote_user'@'%';
|
||||||
|
|
||||||
|
-- 5) Чтение данных для конфигуратора и синка
|
||||||
|
GRANT SELECT ON RFQ_LOG.lot TO 'quote_user'@'%';
|
||||||
|
GRANT SELECT ON RFQ_LOG.qt_lot_metadata TO 'quote_user'@'%';
|
||||||
|
GRANT SELECT ON RFQ_LOG.qt_categories TO 'quote_user'@'%';
|
||||||
|
GRANT SELECT ON RFQ_LOG.qt_pricelists TO 'quote_user'@'%';
|
||||||
|
GRANT SELECT ON RFQ_LOG.qt_pricelist_items TO 'quote_user'@'%';
|
||||||
|
|
||||||
|
-- 6) Работа с конфигурациями
|
||||||
|
GRANT SELECT, INSERT, UPDATE ON RFQ_LOG.qt_configurations TO 'quote_user'@'%';
|
||||||
|
|
||||||
|
FLUSH PRIVILEGES;
|
||||||
|
|
||||||
|
SHOW GRANTS FOR 'quote_user'@'%';
|
||||||
|
SHOW CREATE USER 'quote_user'@'%';
|
||||||
|
```
|
||||||
|
|
||||||
|
Полный набор прав для пользователя квотаций:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
GRANT USAGE ON *.* TO 'quote_user'@'%' IDENTIFIED BY 'StrongPassword!';
|
||||||
|
GRANT SELECT ON RFQ_LOG.lot TO 'quote_user'@'%';
|
||||||
|
GRANT SELECT ON RFQ_LOG.qt_lot_metadata TO 'quote_user'@'%';
|
||||||
|
GRANT SELECT ON RFQ_LOG.qt_categories TO 'quote_user'@'%';
|
||||||
|
GRANT SELECT ON RFQ_LOG.qt_pricelists TO 'quote_user'@'%';
|
||||||
|
GRANT SELECT ON RFQ_LOG.qt_pricelist_items TO 'quote_user'@'%';
|
||||||
|
GRANT SELECT, INSERT, UPDATE ON RFQ_LOG.qt_configurations TO 'quote_user'@'%';
|
||||||
|
```
|
||||||
|
|
||||||
|
Важно:
|
||||||
|
- не выдавайте `INSERT/UPDATE/DELETE` на `qt_pricelists` и `qt_pricelist_items`, если пользователь не должен управлять прайслистами;
|
||||||
|
- если видите ошибку `Access denied for user ...@'<ip>'`, проверьте, что не осталось других записей `quote_user@host` кроме `quote_user@'%'`;
|
||||||
|
- после смены DB-настроек через `/setup` приложение перезапускается автоматически и подхватывает нового пользователя.
|
||||||
|
|
||||||
|
### 4. Импорт метаданных компонентов
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go run ./cmd/importer
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5. Запуск
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Development
|
||||||
|
go run ./cmd/qfs
|
||||||
|
|
||||||
|
# Production (with Makefile - recommended)
|
||||||
|
make build-release # Builds with version info
|
||||||
|
./bin/qfs -version # Check version
|
||||||
|
|
||||||
|
# Production (manual)
|
||||||
|
VERSION=$(git describe --tags --always --dirty)
|
||||||
|
CGO_ENABLED=0 go build -ldflags="-s -w -X main.Version=$VERSION" -o bin/qfs ./cmd/qfs
|
||||||
|
./bin/qfs -version
|
||||||
|
```
|
||||||
|
|
||||||
|
**Makefile команды:**
|
||||||
|
```bash
|
||||||
|
make build-release # Оптимизированная сборка с версией
|
||||||
|
make build-all # Сборка для всех платформ (Linux, macOS, Windows)
|
||||||
|
make build-windows # Только для Windows
|
||||||
|
make run # Запуск dev сервера
|
||||||
|
make test # Запуск тестов
|
||||||
|
make clean # Очистка bin/
|
||||||
|
make help # Показать все команды
|
||||||
|
```
|
||||||
|
|
||||||
|
Приложение будет доступно по адресу: http://localhost:8080
|
||||||
|
|
||||||
|
### Локальная SQLite база (state)
|
||||||
|
|
||||||
|
Локальная база приложения хранится в профиле пользователя и не зависит от расположения бинарника.
|
||||||
|
Имя файла: `qfs.db`.
|
||||||
|
|
||||||
|
- macOS: `~/Library/Application Support/QuoteForge/qfs.db`
|
||||||
|
- Linux: `$XDG_STATE_HOME/quoteforge/qfs.db` (или `~/.local/state/quoteforge/qfs.db`)
|
||||||
|
- Windows: `%LOCALAPPDATA%\QuoteForge\qfs.db`
|
||||||
|
|
||||||
|
Можно переопределить путь через `-localdb` или переменную окружения `QFS_DB_PATH`.
|
||||||
|
|
||||||
|
### Версионность конфигураций (local-first)
|
||||||
|
|
||||||
|
Для `local_configurations` используется append-only versioning через полные snapshot-версии:
|
||||||
|
|
||||||
|
- таблица: `local_configuration_versions`
|
||||||
|
- для каждого изменения создаётся новая версия (`version_no = max + 1`)
|
||||||
|
- `local_configurations.current_version_id` указывает на активную версию
|
||||||
|
- старые версии не изменяются и не удаляются в обычном потоке
|
||||||
|
- rollback не "перематывает" историю, а создаёт новую версию из выбранного snapshot
|
||||||
|
|
||||||
|
При backfill (миграция `006_add_local_configuration_versions.sql`) для существующих конфигураций создаётся `v1` и проставляется `current_version_id`.
|
||||||
|
|
||||||
|
#### Rollback
|
||||||
|
|
||||||
|
Rollback выполняется API-методом:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
POST /api/configs/:uuid/rollback
|
||||||
|
{
|
||||||
|
"target_version": 3,
|
||||||
|
"note": "optional"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Результат:
|
||||||
|
- создаётся новая версия `vN` с `data` из целевой версии
|
||||||
|
- `change_note = "rollback to v{target_version}"` (+ note, если передан)
|
||||||
|
- `current_version_id` переключается на новую версию
|
||||||
|
- конфигурация уходит в `sync_status = pending`
|
||||||
|
|
||||||
|
### Локальный config.yaml
|
||||||
|
|
||||||
|
По умолчанию `qfs` ищет `config.yaml` в той же user-state папке, где лежит `qfs.db` (а не рядом с бинарником).
|
||||||
|
Можно переопределить путь через `-config` или `QFS_CONFIG_PATH`.
|
||||||
|
|
||||||
|
## Docker
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Сборка образа
|
||||||
|
docker build -t quoteforge .
|
||||||
|
|
||||||
|
# Запуск с docker-compose
|
||||||
|
docker-compose up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
## Структура проекта
|
||||||
|
|
||||||
|
```
|
||||||
|
quoteforge/
|
||||||
|
├── cmd/
|
||||||
|
│ ├── server/main.go # Main HTTP server
|
||||||
|
│ └── importer/main.go # Import metadata from lot table
|
||||||
|
├── internal/
|
||||||
|
│ ├── config/ # Конфигурация
|
||||||
|
│ ├── models/ # GORM модели
|
||||||
|
│ ├── handlers/ # HTTP handlers
|
||||||
|
│ ├── services/ # Бизнес-логика
|
||||||
|
│ ├── middleware/ # Auth, CORS, etc.
|
||||||
|
│ └── repository/ # Работа с БД
|
||||||
|
├── web/
|
||||||
|
│ ├── templates/ # HTML шаблоны
|
||||||
|
│ └── static/ # CSS, JS, изображения
|
||||||
|
├── migrations/ # SQL миграции
|
||||||
|
├── config.yaml # Конфигурация
|
||||||
|
├── Dockerfile
|
||||||
|
├── docker-compose.yml
|
||||||
|
└── go.mod
|
||||||
|
```
|
||||||
|
|
||||||
|
## Роли пользователей
|
||||||
|
|
||||||
|
| Роль | Описание |
|
||||||
|
|------|----------|
|
||||||
|
| `viewer` | Просмотр, создание квот, экспорт |
|
||||||
|
| `editor` | + сохранение конфигураций |
|
||||||
|
| `pricing_admin` | + управление ценами и алертами |
|
||||||
|
| `admin` | Полный доступ, управление пользователями |
|
||||||
|
|
||||||
|
## API
|
||||||
|
|
||||||
|
Документация API доступна по адресу `/api/docs` (в разработке).
|
||||||
|
|
||||||
|
Основные endpoints:
|
||||||
|
|
||||||
|
```
|
||||||
|
POST /api/auth/login # Авторизация
|
||||||
|
GET /api/components # Список компонентов
|
||||||
|
POST /api/quote/calculate # Расчёт цены
|
||||||
|
POST /api/export/xlsx # Экспорт в Excel
|
||||||
|
GET /api/configs # Сохранённые конфигурации
|
||||||
|
GET /api/configs/:uuid/versions # Список версий конфигурации
|
||||||
|
GET /api/configs/:uuid/versions/:version # Получить конкретную версию
|
||||||
|
POST /api/configs/:uuid/rollback # Rollback на указанную версию
|
||||||
|
POST /api/configs/:uuid/reactivate # Вернуть архивную конфигурацию в активные
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Sync payload для versioning
|
||||||
|
|
||||||
|
События в `pending_changes` для конфигураций содержат:
|
||||||
|
- `configuration_uuid`
|
||||||
|
- `operation` (`create` / `update` / `rollback`)
|
||||||
|
- `current_version_id` и `current_version_no`
|
||||||
|
- `snapshot` (текущее состояние конфигурации)
|
||||||
|
- `idempotency_key` и `conflict_policy` (`last_write_wins`)
|
||||||
|
|
||||||
|
Это позволяет push-слою отправлять на сервер актуальное состояние и готовит основу для будущего conflict resolution.
|
||||||
|
|
||||||
|
## Cron Jobs
|
||||||
|
|
||||||
|
QuoteForge includes automated cron jobs for maintenance tasks. They can be run using the built-in cron functionality in the Docker container.
|
||||||
|
|
||||||
|
### Docker Compose Setup
|
||||||
|
|
||||||
|
The Docker setup includes a dedicated cron service that runs the following jobs:
|
||||||
|
|
||||||
|
- **Alerts check**: Every hour (0 * * * *)
|
||||||
|
- **Price updates**: Daily at 2 AM (0 2 * * *)
|
||||||
|
- **Usage counter reset**: Weekly on Sunday at 1 AM (0 1 * * 0)
|
||||||
|
- **Popularity score updates**: Daily at 3 AM (0 3 * * *)
|
||||||
|
|
||||||
|
To enable cron jobs in Docker, run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker-compose up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
### Manual Cron Job Execution
|
||||||
|
|
||||||
|
You can also run cron jobs manually using the quoteforge-cron binary:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check and generate alerts
|
||||||
|
go run ./cmd/cron -job=alerts
|
||||||
|
|
||||||
|
# Recalculate all prices
|
||||||
|
go run ./cmd/cron -job=update-prices
|
||||||
|
|
||||||
|
# Reset usage counters
|
||||||
|
go run ./cmd/cron -job=reset-counters
|
||||||
|
|
||||||
|
# Update popularity scores
|
||||||
|
go run ./cmd/cron -job=update-popularity
|
||||||
|
```
|
||||||
|
|
||||||
|
### Cron Job Details
|
||||||
|
|
||||||
|
- **Alerts check**: Generates alerts for components with high demand and stale prices, trending components without prices, and components with no recent quotes
|
||||||
|
- **Price updates**: Recalculates prices for all components using configured methods (median, weighted median, average)
|
||||||
|
- **Usage counter reset**: Resets weekly and monthly usage counters for components
|
||||||
|
- **Popularity score updates**: Recalculates popularity scores based on supplier quote activity
|
||||||
|
|
||||||
|
## Разработка
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Запуск в режиме разработки (hot reload)
|
||||||
|
go run ./cmd/qfs
|
||||||
|
|
||||||
|
# Запуск тестов
|
||||||
go test ./...
|
go test ./...
|
||||||
go vet ./...
|
|
||||||
make build-release
|
# Сборка для Linux
|
||||||
|
CGO_ENABLED=0 go build -ldflags="-s -w" -o bin/qfs ./cmd/qfs
|
||||||
```
|
```
|
||||||
|
|
||||||
On first run the app creates a minimal `config.yaml`, starts on `http://127.0.0.1:8080`, and opens `/setup` if DB credentials were not saved yet.
|
## Переменные окружения
|
||||||
|
|
||||||
## Documentation
|
| Переменная | Описание | По умолчанию |
|
||||||
|
|------------|----------|--------------|
|
||||||
|
| `QF_DB_HOST` | Хост базы данных | localhost |
|
||||||
|
| `QF_DB_PORT` | Порт базы данных | 3306 |
|
||||||
|
| `QF_DB_NAME` | Имя базы данных | RFQ_LOG |
|
||||||
|
| `QF_DB_USER` | Пользователь БД | — |
|
||||||
|
| `QF_DB_PASSWORD` | Пароль БД | — |
|
||||||
|
| `QF_JWT_SECRET` | Секрет для JWT | — |
|
||||||
|
| `QF_SERVER_PORT` | Порт сервера | 8080 |
|
||||||
|
| `QFS_DB_PATH` | Полный путь к локальной SQLite БД | OS-specific user state dir |
|
||||||
|
| `QFS_STATE_DIR` | Каталог state (если `QFS_DB_PATH` не задан) | OS-specific user state dir |
|
||||||
|
| `QFS_CONFIG_PATH` | Полный путь к `config.yaml` | OS-specific user state dir |
|
||||||
|
|
||||||
- Shared engineering rules: [bible/README.md](bible/README.md)
|
## Интеграция с существующей БД
|
||||||
- Project architecture: [bible-local/README.md](bible-local/README.md)
|
|
||||||
- Release notes: `releases/<version>/RELEASE_NOTES.md`
|
|
||||||
|
|
||||||
`bible-local/` is the source of truth for QuoteForge-specific architecture. If code changes behavior, update the matching file there in the same commit.
|
QuoteForge интегрируется с существующей базой RFQ_LOG:
|
||||||
|
|
||||||
## Repository map
|
- `lot` — справочник компонентов (только чтение)
|
||||||
|
- `lot_log` — история цен от поставщиков (только чтение)
|
||||||
|
- `supplier` — справочник поставщиков (только чтение)
|
||||||
|
|
||||||
```text
|
Новые таблицы QuoteForge имеют префикс `qt_`:
|
||||||
cmd/ entry points and migration tools
|
|
||||||
internal/ application code
|
- `qt_users` — пользователи приложения
|
||||||
web/ templates and static assets
|
- `qt_lot_metadata` — расширенные данные компонентов
|
||||||
bible/ shared engineering rules
|
- `qt_configurations` — сохранённые конфигурации
|
||||||
bible-local/ project architecture and contracts
|
- `qt_pricing_alerts` — алерты для администраторов
|
||||||
releases/ packaged release artifacts and release notes
|
|
||||||
config.example.yaml runtime config reference
|
## Поддержка
|
||||||
```
|
|
||||||
|
По вопросам работы приложения обращайтесь:
|
||||||
|
- Email: mike@mchus.pro
|
||||||
|
- Internal: @mchus
|
||||||
|
|
||||||
|
## Лицензия
|
||||||
|
|
||||||
|
Данное программное обеспечение является собственностью компании и предназначено исключительно для внутреннего использования. Распространение, копирование или модификация без письменного разрешения запрещены.
|
||||||
|
|
||||||
|
См. файл [LICENSE](LICENSE) для подробностей.
|
||||||
|
|||||||
@@ -1,33 +0,0 @@
|
|||||||
-- Generated from /Users/mchusavitin/Downloads/acc.csv
|
|
||||||
-- Unambiguous rows only. Rows from headers without a date were skipped.
|
|
||||||
INSERT INTO lot_log (`lot`, `supplier`, `date`, `price`, `quality`, `comments`) VALUES
|
|
||||||
('ACC_RMK_L_Type', '', '2024-04-01', 19, NULL, 'header supplier missing in source (45383)'),
|
|
||||||
('ACC_RMK_SLIDE', '', '2024-04-01', 31, NULL, 'header supplier missing in source (45383)'),
|
|
||||||
('NVLINK_2S_Bridge', '', '2023-01-01', 431, NULL, 'header supplier missing in source (44927)'),
|
|
||||||
('NVLINK_2S_Bridge', 'Jevy Yang', '2025-01-15', 139, NULL, NULL),
|
|
||||||
('NVLINK_2S_Bridge', 'Wendy', '2025-01-15', 143, NULL, NULL),
|
|
||||||
('NVLINK_2S_Bridge', 'HONCH (Darian)', '2025-05-06', 155, NULL, NULL),
|
|
||||||
('NVLINK_2S_Bridge', 'HONCH (Sunny)', '2025-06-17', 155, NULL, NULL),
|
|
||||||
('NVLINK_2S_Bridge', 'Wendy', '2025-07-02', 145, NULL, NULL),
|
|
||||||
('NVLINK_2S_Bridge', 'Honch (Sunny)', '2025-07-10', 155, NULL, NULL),
|
|
||||||
('NVLINK_2S_Bridge', 'Honch (Yan)', '2025-08-07', 155, NULL, NULL),
|
|
||||||
('NVLINK_2S_Bridge', 'Jevy', '2025-09-09', 155, NULL, NULL),
|
|
||||||
('NVLINK_2S_Bridge', 'Honch (Darian)', '2025-11-17', 102, NULL, NULL),
|
|
||||||
('NVLINK_2W_Bridge(H200)', '', '2023-01-01', 405, NULL, 'header supplier missing in source (44927)'),
|
|
||||||
('NVLINK_2W_Bridge(H200)', 'network logic / Stephen', '2025-02-10', 305, NULL, NULL),
|
|
||||||
('NVLINK_2W_Bridge(H200)', 'JEVY', '2025-02-18', 411, NULL, NULL),
|
|
||||||
('NVLINK_4W_Bridge(H200)', '', '2023-01-01', 820, NULL, 'header supplier missing in source (44927)'),
|
|
||||||
('NVLINK_4W_Bridge(H200)', 'network logic / Stephen', '2025-02-10', 610, NULL, NULL),
|
|
||||||
('NVLINK_4W_Bridge(H200)', 'JEVY', '2025-02-18', 754, NULL, NULL),
|
|
||||||
('25G_SFP28_MMA2P00-AS', 'HONCH (Doris)', '2025-02-19', 65, NULL, NULL),
|
|
||||||
('ACC_SuperCap', '', '2024-04-01', 59, NULL, 'header supplier missing in source (45383)'),
|
|
||||||
('ACC_SuperCap', 'Chiphome', '2025-02-28', 48, NULL, NULL);
|
|
||||||
|
|
||||||
-- Skipped source values due to missing date in header:
|
|
||||||
-- lot=ACC_RMK_L_Type; header=FOB; price=19; reason=header has supplier but no date
|
|
||||||
-- lot=ACC_RMK_SLIDE; header=FOB; price=31; reason=header has supplier but no date
|
|
||||||
-- lot=NVLINK_2S_Bridge; header=FOB; price=155; reason=header has supplier but no date
|
|
||||||
-- lot=NVLINK_2W_Bridge(H200); header=FOB; price=405; reason=header has supplier but no date
|
|
||||||
-- lot=NVLINK_4W_Bridge(H200); header=FOB; price=754; reason=header has supplier but no date
|
|
||||||
-- lot=25G_SFP28_MMA2P00-AS; header=FOB; price=65; reason=header has supplier but no date
|
|
||||||
-- lot=ACC_SuperCap; header=FOB; price=48; reason=header has supplier but no date
|
|
||||||
1
bible
1
bible
Submodule bible deleted from 52444350c1
@@ -1,70 +0,0 @@
|
|||||||
# 01 - Overview
|
|
||||||
|
|
||||||
## Product
|
|
||||||
|
|
||||||
QuoteForge is a local-first tool for server configuration, quotation, and project tracking.
|
|
||||||
|
|
||||||
Core user flows:
|
|
||||||
- create and edit configurations locally;
|
|
||||||
- calculate prices from synced pricelists;
|
|
||||||
- group configurations into projects and variants;
|
|
||||||
- import vendor workspaces and map vendor PNs to internal LOTs;
|
|
||||||
- review revision history and roll back safely.
|
|
||||||
|
|
||||||
## Runtime model
|
|
||||||
|
|
||||||
QuoteForge is a single-user thick client.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- runtime HTTP binds to loopback only;
|
|
||||||
- browser requests are treated as part of the same local user session;
|
|
||||||
- MariaDB is not a live dependency for normal CRUD;
|
|
||||||
- if non-loopback deployment is ever introduced, auth/RBAC must be added first.
|
|
||||||
|
|
||||||
## Product scope
|
|
||||||
|
|
||||||
In scope:
|
|
||||||
- configurator and quote calculation;
|
|
||||||
- projects, variants, and configuration ordering;
|
|
||||||
- local revision history;
|
|
||||||
- read-only pricelist browsing from SQLite cache;
|
|
||||||
- background sync with MariaDB;
|
|
||||||
- rotating local backups.
|
|
||||||
|
|
||||||
Out of scope and intentionally removed:
|
|
||||||
- admin pricing UI/API;
|
|
||||||
- alerts and notification workflows;
|
|
||||||
- stock import tooling;
|
|
||||||
- cron jobs and importer utilities.
|
|
||||||
|
|
||||||
## Tech stack
|
|
||||||
|
|
||||||
| Layer | Stack |
|
|
||||||
| --- | --- |
|
|
||||||
| Backend | Go, Gin, GORM |
|
|
||||||
| Frontend | HTML templates, htmx, Tailwind CSS |
|
|
||||||
| Local storage | SQLite |
|
|
||||||
| Sync transport | MariaDB |
|
|
||||||
| Export | CSV and XLSX generation |
|
|
||||||
|
|
||||||
## Repository map
|
|
||||||
|
|
||||||
```text
|
|
||||||
cmd/
|
|
||||||
qfs/ main HTTP runtime
|
|
||||||
migrate/ server migration tool
|
|
||||||
migrate_ops_projects/ OPS project migration helper
|
|
||||||
internal/
|
|
||||||
appstate/ backup and runtime state
|
|
||||||
config/ runtime config parsing
|
|
||||||
handlers/ HTTP handlers
|
|
||||||
localdb/ SQLite models and migrations
|
|
||||||
repository/ repositories
|
|
||||||
services/ business logic and sync
|
|
||||||
web/
|
|
||||||
templates/ HTML templates
|
|
||||||
static/ static assets
|
|
||||||
bible/ shared engineering rules
|
|
||||||
bible-local/ project-specific architecture
|
|
||||||
releases/ release artifacts and notes
|
|
||||||
```
|
|
||||||
@@ -1,127 +0,0 @@
|
|||||||
# 02 - Architecture
|
|
||||||
|
|
||||||
## Local-first rule
|
|
||||||
|
|
||||||
SQLite is the runtime source of truth.
|
|
||||||
MariaDB is sync transport plus setup and migration tooling.
|
|
||||||
|
|
||||||
```text
|
|
||||||
browser -> Gin handlers -> SQLite
|
|
||||||
-> pending_changes
|
|
||||||
background sync <------> MariaDB
|
|
||||||
```
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- user CRUD must continue when MariaDB is offline;
|
|
||||||
- runtime handlers and pages must read and write SQLite only;
|
|
||||||
- MariaDB access in runtime code is allowed only inside sync and setup flows;
|
|
||||||
- no live MariaDB fallback for reads that already exist in local cache.
|
|
||||||
|
|
||||||
## Sync contract
|
|
||||||
|
|
||||||
Bidirectional:
|
|
||||||
- projects;
|
|
||||||
- configurations;
|
|
||||||
- `vendor_spec`;
|
|
||||||
- pending change metadata.
|
|
||||||
|
|
||||||
Pull-only:
|
|
||||||
- components;
|
|
||||||
- pricelists and pricelist items;
|
|
||||||
- partnumber books and partnumber book items.
|
|
||||||
|
|
||||||
Readiness guard:
|
|
||||||
- every sync push/pull runs a preflight check;
|
|
||||||
- blocked sync returns `423 Locked` with a machine-readable reason;
|
|
||||||
- local work continues even when sync is blocked.
|
|
||||||
- sync metadata updates must preserve project `updated_at`; sync time belongs in `synced_at`, not in the user-facing last-modified timestamp.
|
|
||||||
- pricelist pull must persist a new local snapshot atomically: header and items appear together, and `last_pricelist_sync` advances only after item download succeeds.
|
|
||||||
- UI sync status must distinguish "last sync failed" from "up to date"; if the app can prove newer server pricelist data exists, the indicator must say local cache is incomplete.
|
|
||||||
|
|
||||||
## Pricing contract
|
|
||||||
|
|
||||||
Prices come only from `local_pricelist_items`.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- `local_components` is metadata-only;
|
|
||||||
- quote calculation must not read prices from components;
|
|
||||||
- latest pricelist selection ignores snapshots without items;
|
|
||||||
- auto pricelist mode stays auto and must not be persisted as an explicit resolved ID.
|
|
||||||
|
|
||||||
## Pricing tab layout
|
|
||||||
|
|
||||||
The Pricing tab (Ценообразование) has two tables: Buy (Цена покупки) and Sale (Цена продажи).
|
|
||||||
|
|
||||||
Column order (both tables):
|
|
||||||
|
|
||||||
```
|
|
||||||
PN вендора | Описание | LOT | Кол-во | Estimate | Склад | Конкуренты | Ручная цена
|
|
||||||
```
|
|
||||||
|
|
||||||
Per-LOT row expansion rules:
|
|
||||||
- each `lot_mappings` entry in a BOM row becomes its own table row with its own quantity and prices;
|
|
||||||
- `baseLot` (resolved LOT without an explicit mapping) is treated as the first sub-row with `quantity_per_pn` from `_getRowLotQtyPerPN`;
|
|
||||||
- when one vendor PN expands into N LOT sub-rows, PN вендора and Описание cells use `rowspan="N"` and appear only on the first sub-row;
|
|
||||||
- a visual top border (`border-t border-gray-200`) separates each vendor PN group.
|
|
||||||
|
|
||||||
Vendor price attachment:
|
|
||||||
- `vendorOrig` and `vendorOrigUnit` (BOM unit/total price) are attached to the first LOT sub-row only;
|
|
||||||
- subsequent sub-rows carry empty `data-vendor-orig` so `setPricingCustomPriceFromVendor` counts each vendor PN exactly once.
|
|
||||||
|
|
||||||
Controls terminology:
|
|
||||||
- custom price input is labeled **Ручная цена** (not "Своя цена");
|
|
||||||
- the button that fills custom price from BOM totals is labeled **BOM Цена** (not "Проставить цены BOM").
|
|
||||||
|
|
||||||
CSV export reads PN вендора, Описание, and LOT from `data-vendor-pn`, `data-desc`, `data-lot` row attributes to bypass the rowspan cell offset problem.
|
|
||||||
|
|
||||||
## Configuration versioning
|
|
||||||
|
|
||||||
Configuration revisions are append-only snapshots stored in `local_configuration_versions`.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- the editable working configuration is always the implicit head named `main`; UI must not switch the user to a numbered revision after save;
|
|
||||||
- create a new revision when spec, BOM, or pricing content changes;
|
|
||||||
- revision history is retrospective: the revisions page shows past snapshots, not the current `main` state;
|
|
||||||
- rollback creates a new head revision from an old snapshot;
|
|
||||||
- rename, reorder, project move, and similar operational edits do not create a new revision snapshot;
|
|
||||||
- revision deduplication includes `items`, `server_count`, `total_price`, `custom_price`, `vendor_spec`, pricelist selectors, `disable_price_refresh`, and `only_in_stock`;
|
|
||||||
- BOM updates must use version-aware save flow, not a direct SQL field update;
|
|
||||||
- current revision pointer must be recoverable if legacy or damaged rows are found locally.
|
|
||||||
|
|
||||||
## Sync UX
|
|
||||||
|
|
||||||
UI-facing sync status must never block on live MariaDB calls.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- navbar sync indicator and sync info modal read only local cached state from SQLite/app settings;
|
|
||||||
- background/manual sync may talk to MariaDB, but polling endpoints must stay fast even on slow or broken connections;
|
|
||||||
- any MariaDB timeout/invalid-connection during sync must invalidate the cached remote handle immediately so UI stops treating the connection as healthy.
|
|
||||||
|
|
||||||
## Naming collisions
|
|
||||||
|
|
||||||
UI-driven rename and copy flows use one suffix convention for conflicts.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- configuration and variant names must auto-resolve collisions with `_копия`, then `_копия2`, `_копия3`, and so on;
|
|
||||||
- copy checkboxes and copy modals must prefill `_копия`, not ` (копия)`;
|
|
||||||
- the literal variant name `main` is reserved and must not be allowed for non-main variants.
|
|
||||||
|
|
||||||
## Configuration types
|
|
||||||
|
|
||||||
Configurations have a `config_type` field: `"server"` (default) or `"storage"`.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- `config_type` defaults to `"server"` for all existing and new configurations unless explicitly set;
|
|
||||||
- the configurator page is shared for both types; the SW tab is always visible regardless of type;
|
|
||||||
- storage configurations use the same vendor_spec + PN→LOT + pricing flow as server configurations;
|
|
||||||
- storage component categories map to existing tabs: `ENC`/`DKC`/`CTL` → Base, `HIC` → PCI (HIC-карты СХД; `HBA`/`NIC` — серверные, не смешивать), `SSD`/`HDD` → Storage (используют существующие серверные LOT), `ACC` → Accessories (используют существующие серверные LOT), `SW` → SW.
|
|
||||||
- `DKC` = контроллерная полка (модель СХД + тип дисков + кол-во слотов + кол-во контроллеров); `CTL` = контроллер (кэш + встроенные порты); `ENC` = дисковая полка без контроллера.
|
|
||||||
|
|
||||||
## Vendor BOM contract
|
|
||||||
|
|
||||||
Vendor BOM is stored in `vendor_spec` on the configuration row.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- PN to LOT resolution uses the active local partnumber book;
|
|
||||||
- canonical persisted mapping is `lot_mappings[]`;
|
|
||||||
- QuoteForge does not use legacy BOM tables such as `qt_bom`, `qt_lot_bundles`, or `qt_lot_bundle_items`.
|
|
||||||
@@ -1,405 +0,0 @@
|
|||||||
# 03 - Database
|
|
||||||
|
|
||||||
## SQLite
|
|
||||||
|
|
||||||
SQLite is the local runtime database.
|
|
||||||
|
|
||||||
Main tables:
|
|
||||||
|
|
||||||
| Table | Purpose |
|
|
||||||
| --- | --- |
|
|
||||||
| `local_components` | synced component metadata |
|
|
||||||
| `local_pricelists` | local pricelist headers |
|
|
||||||
| `local_pricelist_items` | local pricelist rows, the only runtime price source |
|
|
||||||
| `local_projects` | user projects |
|
|
||||||
| `local_configurations` | user configurations |
|
|
||||||
| `local_configuration_versions` | immutable revision snapshots |
|
|
||||||
| `local_partnumber_books` | partnumber book headers |
|
|
||||||
| `local_partnumber_book_items` | PN -> LOT catalog payload |
|
|
||||||
| `pending_changes` | sync queue |
|
|
||||||
| `connection_settings` | encrypted MariaDB connection settings |
|
|
||||||
| `app_settings` | local app state |
|
|
||||||
| `local_schema_migrations` | applied local migration markers |
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- cache tables may be rebuilt if local migration recovery requires it;
|
|
||||||
- user-authored tables must not be dropped as a recovery shortcut;
|
|
||||||
- `local_pricelist_items` is the only valid runtime source of prices;
|
|
||||||
- configuration `items` and `vendor_spec` are stored as JSON payloads inside configuration rows.
|
|
||||||
|
|
||||||
## MariaDB
|
|
||||||
|
|
||||||
MariaDB is the central sync database (`RFQ_LOG`). Final schema as of 2026-04-15.
|
|
||||||
|
|
||||||
### QuoteForge tables (qt_*)
|
|
||||||
|
|
||||||
Runtime read:
|
|
||||||
- `qt_categories` — pricelist categories
|
|
||||||
- `qt_lot_metadata` — component metadata, price settings
|
|
||||||
- `qt_pricelists` — pricelist headers (source: estimate / warehouse / competitor)
|
|
||||||
- `qt_pricelist_items` — pricelist rows
|
|
||||||
- `qt_partnumber_books` — partnumber book headers
|
|
||||||
- `qt_partnumber_book_items` — PN→LOT catalog payload
|
|
||||||
|
|
||||||
Runtime read/write:
|
|
||||||
- `qt_projects` — projects
|
|
||||||
- `qt_configurations` — configurations
|
|
||||||
- `qt_client_schema_state` — per-client sync status and version tracking
|
|
||||||
- `qt_pricelist_sync_status` — pricelist sync timestamps per user
|
|
||||||
|
|
||||||
Insert-only tracking:
|
|
||||||
- `qt_vendor_partnumber_seen` — vendor partnumbers encountered during sync
|
|
||||||
|
|
||||||
Server-side only (not queried by client runtime):
|
|
||||||
- `qt_component_usage_stats` — aggregated component popularity stats (written by server jobs)
|
|
||||||
- `qt_pricing_alerts` — price anomaly alerts (models exist in Go; feature disabled in runtime)
|
|
||||||
- `qt_schema_migrations` — server migration history (applied via `go run ./cmd/qfs -migrate`)
|
|
||||||
- `qt_scheduler_runs` — server background job tracking (no Go code references it in this repo)
|
|
||||||
|
|
||||||
### Competitor subsystem (server-side only, not used by QuoteForge Go code)
|
|
||||||
|
|
||||||
- `qt_competitors` — competitor registry
|
|
||||||
- `partnumber_log_competitors` — competitor price log (FK → qt_competitors)
|
|
||||||
|
|
||||||
These tables exist in the schema and are maintained by another tool or workflow.
|
|
||||||
QuoteForge references competitor pricelists only via `qt_pricelists` (source='competitor').
|
|
||||||
|
|
||||||
### Legacy RFQ tables (pre-QuoteForge, no Go code references)
|
|
||||||
|
|
||||||
- `lot` — original component registry (data preserved; superseded by `qt_lot_metadata`)
|
|
||||||
- `lot_log` — original supplier price log
|
|
||||||
- `supplier` — supplier registry (FK target for lot_log and machine_log)
|
|
||||||
- `machine` — device model registry
|
|
||||||
- `machine_log` — device price/quote log
|
|
||||||
- `parts_log` — supplier partnumber log used by server-side import/pricing workflows, not by QuoteForge runtime
|
|
||||||
|
|
||||||
These tables are retained for historical data. QuoteForge does not read or write them at runtime.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- QuoteForge runtime must not depend on any legacy RFQ tables;
|
|
||||||
- QuoteForge sync reads prices and categories from `qt_pricelists` / `qt_pricelist_items` only;
|
|
||||||
- QuoteForge does not enrich local pricelist rows from `parts_log` or any other raw supplier log table;
|
|
||||||
- normal UI requests must not query MariaDB tables directly;
|
|
||||||
- `qt_client_local_migrations` exists in the 2026-04-15 schema dump, but runtime sync does not depend on it.
|
|
||||||
|
|
||||||
## MariaDB Table Structures
|
|
||||||
|
|
||||||
Full column reference as of 2026-04-15 (`RFQ_LOG` final schema).
|
|
||||||
|
|
||||||
### qt_categories
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
|
||||||
| code | varchar(20) UNIQUE NOT NULL | |
|
|
||||||
| name | varchar(100) NOT NULL | |
|
|
||||||
| name_ru | varchar(100) | |
|
|
||||||
| display_order | bigint DEFAULT 0 | |
|
|
||||||
| is_required | tinyint(1) DEFAULT 0 | |
|
|
||||||
|
|
||||||
### qt_client_schema_state
|
|
||||||
PK: (username, hostname)
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| username | varchar(100) | |
|
|
||||||
| hostname | varchar(255) DEFAULT '' | |
|
|
||||||
| last_applied_migration_id | varchar(128) | |
|
|
||||||
| app_version | varchar(64) | |
|
|
||||||
| last_sync_at | datetime | |
|
|
||||||
| last_sync_status | varchar(32) | |
|
|
||||||
| pending_changes_count | int DEFAULT 0 | |
|
|
||||||
| pending_errors_count | int DEFAULT 0 | |
|
|
||||||
| configurations_count | int DEFAULT 0 | |
|
|
||||||
| projects_count | int DEFAULT 0 | |
|
|
||||||
| estimate_pricelist_version | varchar(128) | |
|
|
||||||
| warehouse_pricelist_version | varchar(128) | |
|
|
||||||
| competitor_pricelist_version | varchar(128) | |
|
|
||||||
| last_sync_error_code | varchar(128) | |
|
|
||||||
| last_sync_error_text | text | |
|
|
||||||
| last_checked_at | datetime NOT NULL | |
|
|
||||||
| updated_at | datetime NOT NULL | |
|
|
||||||
|
|
||||||
### qt_component_usage_stats
|
|
||||||
PK: lot_name
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| lot_name | varchar(255) | |
|
|
||||||
| quotes_total | bigint DEFAULT 0 | |
|
|
||||||
| quotes_last30d | bigint DEFAULT 0 | |
|
|
||||||
| quotes_last7d | bigint DEFAULT 0 | |
|
|
||||||
| total_quantity | bigint DEFAULT 0 | |
|
|
||||||
| total_revenue | decimal(14,2) DEFAULT 0 | |
|
|
||||||
| trend_direction | enum('up','stable','down') DEFAULT 'stable' | |
|
|
||||||
| trend_percent | decimal(5,2) DEFAULT 0 | |
|
|
||||||
| last_used_at | datetime(3) | |
|
|
||||||
|
|
||||||
### qt_competitors
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
|
||||||
| name | varchar(255) NOT NULL | |
|
|
||||||
| code | varchar(100) UNIQUE NOT NULL | |
|
|
||||||
| delivery_basis | varchar(50) DEFAULT 'DDP' | |
|
|
||||||
| currency | varchar(10) DEFAULT 'USD' | |
|
|
||||||
| column_mapping | longtext JSON | |
|
|
||||||
| is_active | tinyint(1) DEFAULT 1 | |
|
|
||||||
| created_at | timestamp | |
|
|
||||||
| updated_at | timestamp ON UPDATE | |
|
|
||||||
| price_uplift | decimal(8,4) DEFAULT 1.3 | effective_price = price / price_uplift |
|
|
||||||
|
|
||||||
### qt_configurations
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
|
||||||
| uuid | varchar(36) UNIQUE NOT NULL | |
|
|
||||||
| user_id | bigint UNSIGNED | |
|
|
||||||
| owner_username | varchar(100) NOT NULL | |
|
|
||||||
| app_version | varchar(64) | |
|
|
||||||
| project_uuid | char(36) | FK → qt_projects.uuid ON DELETE SET NULL |
|
|
||||||
| name | varchar(200) NOT NULL | |
|
|
||||||
| items | longtext JSON NOT NULL | component list |
|
|
||||||
| total_price | decimal(12,2) | |
|
|
||||||
| notes | text | |
|
|
||||||
| is_template | tinyint(1) DEFAULT 0 | |
|
|
||||||
| created_at | datetime(3) | |
|
|
||||||
| custom_price | decimal(12,2) | |
|
|
||||||
| server_count | bigint DEFAULT 1 | |
|
|
||||||
| server_model | varchar(100) | |
|
|
||||||
| support_code | varchar(20) | |
|
|
||||||
| article | varchar(80) | |
|
|
||||||
| pricelist_id | bigint UNSIGNED | FK → qt_pricelists.id |
|
|
||||||
| warehouse_pricelist_id | bigint UNSIGNED | FK → qt_pricelists.id |
|
|
||||||
| competitor_pricelist_id | bigint UNSIGNED | FK → qt_pricelists.id |
|
|
||||||
| disable_price_refresh | tinyint(1) DEFAULT 0 | |
|
|
||||||
| only_in_stock | tinyint(1) DEFAULT 0 | |
|
|
||||||
| line_no | int | position within project |
|
|
||||||
| price_updated_at | timestamp | |
|
|
||||||
| vendor_spec | longtext JSON | |
|
|
||||||
|
|
||||||
### qt_lot_metadata
|
|
||||||
PK: lot_name
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| lot_name | varchar(255) | |
|
|
||||||
| category_id | bigint UNSIGNED | FK → qt_categories.id |
|
|
||||||
| vendor | varchar(50) | |
|
|
||||||
| model | varchar(100) | |
|
|
||||||
| specs | longtext JSON | |
|
|
||||||
| current_price | decimal(12,2) | cached computed price |
|
|
||||||
| price_method | enum('manual','median','average','weighted_median') DEFAULT 'median' | |
|
|
||||||
| price_period_days | bigint DEFAULT 90 | |
|
|
||||||
| price_updated_at | datetime(3) | |
|
|
||||||
| request_count | bigint DEFAULT 0 | |
|
|
||||||
| last_request_date | date | |
|
|
||||||
| popularity_score | decimal(10,4) DEFAULT 0 | |
|
|
||||||
| price_coefficient | decimal(5,2) DEFAULT 0 | markup % |
|
|
||||||
| manual_price | decimal(12,2) | |
|
|
||||||
| meta_prices | varchar(1000) | raw price samples JSON |
|
|
||||||
| meta_method | varchar(20) | method used for last compute |
|
|
||||||
| meta_period_days | bigint DEFAULT 90 | |
|
|
||||||
| is_hidden | tinyint(1) DEFAULT 0 | |
|
|
||||||
|
|
||||||
### qt_partnumber_books
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
|
||||||
| version | varchar(30) UNIQUE NOT NULL | |
|
|
||||||
| created_at | timestamp | |
|
|
||||||
| created_by | varchar(100) | |
|
|
||||||
| is_active | tinyint(1) DEFAULT 0 | only one active at a time |
|
|
||||||
| partnumbers_json | longtext DEFAULT '[]' | flat list of partnumbers |
|
|
||||||
|
|
||||||
### qt_partnumber_book_items
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
|
||||||
| partnumber | varchar(255) UNIQUE NOT NULL | |
|
|
||||||
| lots_json | longtext NOT NULL | JSON array of lot_names |
|
|
||||||
| description | varchar(10000) | |
|
|
||||||
|
|
||||||
### qt_pricelists
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
|
||||||
| source | varchar(20) DEFAULT 'estimate' | 'estimate' / 'warehouse' / 'competitor' |
|
|
||||||
| version | varchar(20) NOT NULL | UNIQUE with source |
|
|
||||||
| created_at | datetime(3) | |
|
|
||||||
| created_by | varchar(100) | |
|
|
||||||
| is_active | tinyint(1) DEFAULT 1 | |
|
|
||||||
| usage_count | bigint DEFAULT 0 | |
|
|
||||||
| expires_at | datetime(3) | |
|
|
||||||
| notification | varchar(500) | shown to clients on sync |
|
|
||||||
|
|
||||||
### qt_pricelist_items
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
|
||||||
| pricelist_id | bigint UNSIGNED NOT NULL | FK → qt_pricelists.id |
|
|
||||||
| lot_name | varchar(255) NOT NULL | INDEX with pricelist_id |
|
|
||||||
| lot_category | varchar(50) | |
|
|
||||||
| price | decimal(12,2) NOT NULL | |
|
|
||||||
| price_method | varchar(20) | |
|
|
||||||
| price_period_days | bigint DEFAULT 90 | |
|
|
||||||
| price_coefficient | decimal(5,2) DEFAULT 0 | |
|
|
||||||
| manual_price | decimal(12,2) | |
|
|
||||||
| meta_prices | varchar(1000) | |
|
|
||||||
|
|
||||||
### qt_pricelist_sync_status
|
|
||||||
PK: username
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| username | varchar(100) | |
|
|
||||||
| last_sync_at | datetime NOT NULL | |
|
|
||||||
| updated_at | datetime NOT NULL | |
|
|
||||||
| app_version | varchar(64) | |
|
|
||||||
|
|
||||||
### qt_pricing_alerts
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
|
||||||
| lot_name | varchar(255) NOT NULL | |
|
|
||||||
| alert_type | enum('high_demand_stale_price','price_spike','price_drop','no_recent_quotes','trending_no_price') | |
|
|
||||||
| severity | enum('low','medium','high','critical') DEFAULT 'medium' | |
|
|
||||||
| message | text NOT NULL | |
|
|
||||||
| details | longtext JSON | |
|
|
||||||
| status | enum('new','acknowledged','resolved','ignored') DEFAULT 'new' | |
|
|
||||||
| created_at | datetime(3) | |
|
|
||||||
|
|
||||||
### qt_projects
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
|
||||||
| uuid | char(36) UNIQUE NOT NULL | |
|
|
||||||
| owner_username | varchar(100) NOT NULL | |
|
|
||||||
| code | varchar(100) NOT NULL | UNIQUE with variant |
|
|
||||||
| variant | varchar(100) DEFAULT '' | UNIQUE with code |
|
|
||||||
| name | varchar(200) | |
|
|
||||||
| tracker_url | varchar(500) | |
|
|
||||||
| is_active | tinyint(1) DEFAULT 1 | |
|
|
||||||
| is_system | tinyint(1) DEFAULT 0 | |
|
|
||||||
| created_at | timestamp | |
|
|
||||||
| updated_at | timestamp ON UPDATE | |
|
|
||||||
|
|
||||||
### qt_schema_migrations
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
|
||||||
| filename | varchar(255) UNIQUE NOT NULL | |
|
|
||||||
| applied_at | datetime(3) | |
|
|
||||||
|
|
||||||
### qt_scheduler_runs
|
|
||||||
PK: job_name
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| job_name | varchar(100) | |
|
|
||||||
| last_started_at | datetime | |
|
|
||||||
| last_finished_at | datetime | |
|
|
||||||
| last_status | varchar(20) DEFAULT 'idle' | |
|
|
||||||
| last_error | text | |
|
|
||||||
| updated_at | timestamp ON UPDATE | |
|
|
||||||
|
|
||||||
### qt_vendor_partnumber_seen
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
|
||||||
| source_type | varchar(32) NOT NULL | |
|
|
||||||
| vendor | varchar(255) DEFAULT '' | |
|
|
||||||
| partnumber | varchar(255) UNIQUE NOT NULL | |
|
|
||||||
| description | varchar(10000) | |
|
|
||||||
| last_seen_at | datetime(3) NOT NULL | |
|
|
||||||
| is_ignored | tinyint(1) DEFAULT 0 | |
|
|
||||||
| is_pattern | tinyint(1) DEFAULT 0 | |
|
|
||||||
| ignored_at | datetime(3) | |
|
|
||||||
| ignored_by | varchar(100) | |
|
|
||||||
| created_at | datetime(3) | |
|
|
||||||
| updated_at | datetime(3) | |
|
|
||||||
|
|
||||||
### stock_ignore_rules
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
|
||||||
| target | varchar(20) NOT NULL | UNIQUE with match_type+pattern |
|
|
||||||
| match_type | varchar(20) NOT NULL | |
|
|
||||||
| pattern | varchar(500) NOT NULL | |
|
|
||||||
| created_at | timestamp | |
|
|
||||||
|
|
||||||
### stock_log
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| stock_log_id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
|
||||||
| partnumber | varchar(255) NOT NULL | INDEX with date |
|
|
||||||
| supplier | varchar(255) | |
|
|
||||||
| date | date NOT NULL | |
|
|
||||||
| price | decimal(12,2) NOT NULL | |
|
|
||||||
| quality | varchar(255) | |
|
|
||||||
| comments | text | |
|
|
||||||
| vendor | varchar(255) | INDEX |
|
|
||||||
| qty | decimal(14,3) | |
|
|
||||||
|
|
||||||
### partnumber_log_competitors
|
|
||||||
| Column | Type | Notes |
|
|
||||||
|--------|------|-------|
|
|
||||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
|
||||||
| competitor_id | bigint UNSIGNED NOT NULL | FK → qt_competitors.id |
|
|
||||||
| partnumber | varchar(255) NOT NULL | |
|
|
||||||
| description | varchar(500) | |
|
|
||||||
| vendor | varchar(255) | |
|
|
||||||
| price | decimal(12,2) NOT NULL | |
|
|
||||||
| price_loccur | decimal(12,2) | local currency price |
|
|
||||||
| currency | varchar(10) | |
|
|
||||||
| qty | decimal(12,4) DEFAULT 1 | |
|
|
||||||
| date | date NOT NULL | |
|
|
||||||
| created_at | timestamp | |
|
|
||||||
|
|
||||||
### Legacy tables (lot / lot_log / machine / machine_log / supplier)
|
|
||||||
|
|
||||||
Retained for historical data only. Not queried by QuoteForge.
|
|
||||||
|
|
||||||
**lot**: lot_name (PK, char 255), lot_category, lot_description
|
|
||||||
**lot_log**: lot_log_id AUTO_INCREMENT, lot (FK→lot), supplier (FK→supplier), date, price double, quality, comments
|
|
||||||
**supplier**: supplier_name (PK, char 255), supplier_comment
|
|
||||||
**machine**: machine_name (PK, char 255), machine_description
|
|
||||||
**machine_log**: machine_log_id AUTO_INCREMENT, date, supplier (FK→supplier), country, opty, type, machine (FK→machine), customer_requirement, variant, price_gpl, price_estimate, qty, quality, carepack, lead_time_weeks, prepayment_percent, price_got, Comment
|
|
||||||
|
|
||||||
## MariaDB User Permissions
|
|
||||||
|
|
||||||
The application user needs read-only access to reference tables and read/write access to runtime tables.
|
|
||||||
|
|
||||||
```sql
|
|
||||||
-- Read-only: reference and pricing data
|
|
||||||
GRANT SELECT ON RFQ_LOG.qt_categories TO 'qfs_user'@'%';
|
|
||||||
GRANT SELECT ON RFQ_LOG.qt_lot_metadata TO 'qfs_user'@'%';
|
|
||||||
GRANT SELECT ON RFQ_LOG.qt_pricelists TO 'qfs_user'@'%';
|
|
||||||
GRANT SELECT ON RFQ_LOG.qt_pricelist_items TO 'qfs_user'@'%';
|
|
||||||
GRANT SELECT ON RFQ_LOG.stock_log TO 'qfs_user'@'%';
|
|
||||||
GRANT SELECT ON RFQ_LOG.stock_ignore_rules TO 'qfs_user'@'%';
|
|
||||||
GRANT SELECT ON RFQ_LOG.qt_partnumber_books TO 'qfs_user'@'%';
|
|
||||||
GRANT SELECT ON RFQ_LOG.qt_partnumber_book_items TO 'qfs_user'@'%';
|
|
||||||
GRANT SELECT ON RFQ_LOG.lot TO 'qfs_user'@'%';
|
|
||||||
|
|
||||||
-- Read/write: runtime sync and user data
|
|
||||||
GRANT SELECT, INSERT, UPDATE, DELETE ON RFQ_LOG.qt_projects TO 'qfs_user'@'%';
|
|
||||||
GRANT SELECT, INSERT, UPDATE, DELETE ON RFQ_LOG.qt_configurations TO 'qfs_user'@'%';
|
|
||||||
GRANT SELECT, INSERT, UPDATE ON RFQ_LOG.qt_client_schema_state TO 'qfs_user'@'%';
|
|
||||||
GRANT SELECT, INSERT, UPDATE ON RFQ_LOG.qt_pricelist_sync_status TO 'qfs_user'@'%';
|
|
||||||
GRANT SELECT, INSERT, UPDATE ON RFQ_LOG.qt_vendor_partnumber_seen TO 'qfs_user'@'%';
|
|
||||||
|
|
||||||
FLUSH PRIVILEGES;
|
|
||||||
```
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- `qt_client_schema_state` requires INSERT + UPDATE for sync status tracking (uses `ON DUPLICATE KEY UPDATE`);
|
|
||||||
- `qt_vendor_partnumber_seen` requires INSERT + UPDATE (vendor PN discovery during sync);
|
|
||||||
- no DELETE is needed on sync/tracking tables — rows are never removed by the client;
|
|
||||||
- `lot` SELECT is required for the connection validation probe in `/setup`;
|
|
||||||
- the setup page shows `can_write: true` only when `qt_client_schema_state` INSERT succeeds.
|
|
||||||
|
|
||||||
## Migrations
|
|
||||||
|
|
||||||
SQLite:
|
|
||||||
- schema creation and additive changes go through GORM `AutoMigrate`;
|
|
||||||
- data fixes, index repair, and one-off rewrites go through `runLocalMigrations`;
|
|
||||||
- local migration state is tracked in `local_schema_migrations`.
|
|
||||||
|
|
||||||
MariaDB:
|
|
||||||
- SQL files live in `migrations/`;
|
|
||||||
- they are applied by `go run ./cmd/qfs -migrate`.
|
|
||||||
@@ -1,125 +0,0 @@
|
|||||||
# 04 - API
|
|
||||||
|
|
||||||
## Public web routes
|
|
||||||
|
|
||||||
| Route | Purpose |
|
|
||||||
| --- | --- |
|
|
||||||
| `/` | configurator |
|
|
||||||
| `/configs` | configuration list |
|
|
||||||
| `/configs/:uuid/revisions` | revision history page |
|
|
||||||
| `/projects` | project list |
|
|
||||||
| `/projects/:uuid` | project detail |
|
|
||||||
| `/pricelists` | pricelist list |
|
|
||||||
| `/pricelists/:id` | pricelist detail |
|
|
||||||
| `/partnumber-books` | partnumber book page |
|
|
||||||
| `/setup` | DB setup page |
|
|
||||||
|
|
||||||
## Setup and health
|
|
||||||
|
|
||||||
| Method | Path | Purpose |
|
|
||||||
| --- | --- | --- |
|
|
||||||
| `GET` | `/health` | process health |
|
|
||||||
| `GET` | `/setup` | setup page |
|
|
||||||
| `POST` | `/setup` | save tested DB settings |
|
|
||||||
| `POST` | `/setup/test` | test DB connection |
|
|
||||||
| `GET` | `/setup/status` | setup status |
|
|
||||||
| `GET` | `/api/db-status` | current DB/sync status |
|
|
||||||
| `GET` | `/api/current-user` | local user identity |
|
|
||||||
| `GET` | `/api/ping` | lightweight API ping |
|
|
||||||
|
|
||||||
`POST /api/restart` exists only in `debug` mode.
|
|
||||||
|
|
||||||
## Reference data
|
|
||||||
|
|
||||||
| Method | Path | Purpose |
|
|
||||||
| --- | --- | --- |
|
|
||||||
| `GET` | `/api/components` | list component metadata |
|
|
||||||
| `GET` | `/api/components/:lot_name` | one component |
|
|
||||||
| `GET` | `/api/categories` | list categories |
|
|
||||||
| `GET` | `/api/pricelists` | list local pricelists |
|
|
||||||
| `GET` | `/api/pricelists/latest` | latest pricelist by source |
|
|
||||||
| `GET` | `/api/pricelists/:id` | pricelist header |
|
|
||||||
| `GET` | `/api/pricelists/:id/items` | pricelist rows |
|
|
||||||
| `GET` | `/api/pricelists/:id/lots` | lot names in a pricelist |
|
|
||||||
| `GET` | `/api/partnumber-books` | local partnumber books |
|
|
||||||
| `GET` | `/api/partnumber-books/:id` | book items by `server_id` |
|
|
||||||
|
|
||||||
## Quote and export
|
|
||||||
|
|
||||||
| Method | Path | Purpose |
|
|
||||||
| --- | --- | --- |
|
|
||||||
| `POST` | `/api/quote/validate` | validate config items |
|
|
||||||
| `POST` | `/api/quote/calculate` | calculate quote totals |
|
|
||||||
| `POST` | `/api/quote/price-levels` | resolve estimate/warehouse/competitor prices |
|
|
||||||
| `POST` | `/api/export/csv` | export a single configuration |
|
|
||||||
| `GET` | `/api/configs/:uuid/export` | export a stored configuration |
|
|
||||||
| `GET` | `/api/projects/:uuid/export` | legacy project BOM export |
|
|
||||||
| `POST` | `/api/projects/:uuid/export` | pricing-tab project export |
|
|
||||||
|
|
||||||
## Configurations
|
|
||||||
|
|
||||||
| Method | Path | Purpose |
|
|
||||||
| --- | --- | --- |
|
|
||||||
| `GET` | `/api/configs` | list configurations |
|
|
||||||
| `POST` | `/api/configs/import` | import configurations from server |
|
|
||||||
| `POST` | `/api/configs` | create configuration |
|
|
||||||
| `POST` | `/api/configs/preview-article` | preview article |
|
|
||||||
| `GET` | `/api/configs/:uuid` | get configuration |
|
|
||||||
| `PUT` | `/api/configs/:uuid` | update configuration |
|
|
||||||
| `DELETE` | `/api/configs/:uuid` | archive configuration |
|
|
||||||
| `POST` | `/api/configs/:uuid/reactivate` | reactivate configuration |
|
|
||||||
| `PATCH` | `/api/configs/:uuid/rename` | rename configuration |
|
|
||||||
| `POST` | `/api/configs/:uuid/clone` | clone configuration |
|
|
||||||
| `POST` | `/api/configs/:uuid/refresh-prices` | refresh prices |
|
|
||||||
| `PATCH` | `/api/configs/:uuid/project` | move configuration to project |
|
|
||||||
| `GET` | `/api/configs/:uuid/versions` | list revisions |
|
|
||||||
| `GET` | `/api/configs/:uuid/versions/:version` | get one revision |
|
|
||||||
| `POST` | `/api/configs/:uuid/rollback` | rollback by creating a new head revision |
|
|
||||||
| `PATCH` | `/api/configs/:uuid/server-count` | update server count |
|
|
||||||
| `GET` | `/api/configs/:uuid/vendor-spec` | read vendor BOM |
|
|
||||||
| `PUT` | `/api/configs/:uuid/vendor-spec` | replace vendor BOM |
|
|
||||||
| `POST` | `/api/configs/:uuid/vendor-spec/resolve` | resolve PN -> LOT |
|
|
||||||
| `POST` | `/api/configs/:uuid/vendor-spec/apply` | apply BOM to cart |
|
|
||||||
|
|
||||||
## Projects
|
|
||||||
|
|
||||||
| Method | Path | Purpose |
|
|
||||||
| --- | --- | --- |
|
|
||||||
| `GET` | `/api/projects` | paginated project list |
|
|
||||||
| `GET` | `/api/projects/all` | lightweight list for dropdowns |
|
|
||||||
| `POST` | `/api/projects` | create project |
|
|
||||||
| `GET` | `/api/projects/:uuid` | get project |
|
|
||||||
| `PUT` | `/api/projects/:uuid` | update project |
|
|
||||||
| `POST` | `/api/projects/:uuid/archive` | archive project |
|
|
||||||
| `POST` | `/api/projects/:uuid/reactivate` | reactivate project |
|
|
||||||
| `DELETE` | `/api/projects/:uuid` | delete project variant only |
|
|
||||||
| `GET` | `/api/projects/:uuid/configs` | list project configurations |
|
|
||||||
| `PATCH` | `/api/projects/:uuid/configs/reorder` | persist line order |
|
|
||||||
| `POST` | `/api/projects/:uuid/configs` | create configuration inside project |
|
|
||||||
| `POST` | `/api/projects/:uuid/configs/:config_uuid/clone` | clone config into project |
|
|
||||||
| `POST` | `/api/projects/:uuid/vendor-import` | import CFXML workspace into project |
|
|
||||||
|
|
||||||
Vendor import contract:
|
|
||||||
- multipart field name is `file`;
|
|
||||||
- file limit is `1 GiB`;
|
|
||||||
- oversized payloads are rejected before XML parsing.
|
|
||||||
|
|
||||||
## Sync
|
|
||||||
|
|
||||||
| Method | Path | Purpose |
|
|
||||||
| --- | --- | --- |
|
|
||||||
| `GET` | `/api/sync/status` | sync status |
|
|
||||||
| `GET` | `/api/sync/readiness` | sync readiness |
|
|
||||||
| `GET` | `/api/sync/info` | sync modal data |
|
|
||||||
| `GET` | `/api/sync/users-status` | remote user status |
|
|
||||||
| `GET` | `/api/sync/pending/count` | pending queue count |
|
|
||||||
| `GET` | `/api/sync/pending` | pending queue rows |
|
|
||||||
| `POST` | `/api/sync/components` | pull components |
|
|
||||||
| `POST` | `/api/sync/pricelists` | pull pricelists |
|
|
||||||
| `POST` | `/api/sync/partnumber-books` | pull partnumber books |
|
|
||||||
| `POST` | `/api/sync/partnumber-seen` | report unresolved vendor PN |
|
|
||||||
| `POST` | `/api/sync/all` | push and pull full sync |
|
|
||||||
| `POST` | `/api/sync/push` | push pending changes |
|
|
||||||
| `POST` | `/api/sync/repair` | repair broken pending rows |
|
|
||||||
|
|
||||||
When readiness is blocked, sync write endpoints return `423 Locked`.
|
|
||||||
@@ -1,74 +0,0 @@
|
|||||||
# 05 - Config
|
|
||||||
|
|
||||||
## Runtime files
|
|
||||||
|
|
||||||
| Artifact | Default location |
|
|
||||||
| --- | --- |
|
|
||||||
| `qfs.db` | OS-specific user state directory |
|
|
||||||
| `config.yaml` | same state directory as `qfs.db` |
|
|
||||||
| `local_encryption.key` | same state directory as `qfs.db` |
|
|
||||||
| `backups/` | next to `qfs.db` unless overridden |
|
|
||||||
|
|
||||||
The runtime state directory can be overridden with `QFS_STATE_DIR`.
|
|
||||||
Direct paths can be overridden with `QFS_DB_PATH` and `QFS_CONFIG_PATH`.
|
|
||||||
|
|
||||||
## Runtime config shape
|
|
||||||
|
|
||||||
Runtime keeps `config.yaml` intentionally small:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
server:
|
|
||||||
host: "127.0.0.1"
|
|
||||||
port: 8080
|
|
||||||
mode: "release"
|
|
||||||
read_timeout: 30s
|
|
||||||
write_timeout: 30s
|
|
||||||
|
|
||||||
backup:
|
|
||||||
time: "00:00"
|
|
||||||
|
|
||||||
logging:
|
|
||||||
level: "info"
|
|
||||||
format: "json"
|
|
||||||
output: "stdout"
|
|
||||||
```
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- QuoteForge creates this file automatically if it does not exist;
|
|
||||||
- startup rewrites legacy config files into this minimal runtime shape;
|
|
||||||
- startup normalizes any `server.host` value to `127.0.0.1` before saving the runtime config;
|
|
||||||
- `server.host` must stay on loopback.
|
|
||||||
|
|
||||||
Saved MariaDB credentials do not live in `config.yaml`.
|
|
||||||
They are stored in SQLite and encrypted with `local_encryption.key` unless `QUOTEFORGE_ENCRYPTION_KEY` overrides the key material.
|
|
||||||
|
|
||||||
## Environment variables
|
|
||||||
|
|
||||||
| Variable | Purpose |
|
|
||||||
| --- | --- |
|
|
||||||
| `QFS_STATE_DIR` | override runtime state directory |
|
|
||||||
| `QFS_DB_PATH` | explicit SQLite path |
|
|
||||||
| `QFS_CONFIG_PATH` | explicit config path |
|
|
||||||
| `QFS_BACKUP_DIR` | explicit backup root |
|
|
||||||
| `QFS_BACKUP_DISABLE` | disable rotating backups |
|
|
||||||
| `QUOTEFORGE_ENCRYPTION_KEY` | override encryption key |
|
|
||||||
| `QF_SERVER_PORT` | override HTTP port |
|
|
||||||
|
|
||||||
`QFS_BACKUP_DISABLE` accepts `1`, `true`, or `yes`.
|
|
||||||
|
|
||||||
## CLI flags
|
|
||||||
|
|
||||||
| Flag | Purpose |
|
|
||||||
| --- | --- |
|
|
||||||
| `-config <path>` | config file path |
|
|
||||||
| `-localdb <path>` | SQLite path |
|
|
||||||
| `-reset-localdb` | destructive local DB reset |
|
|
||||||
| `-migrate` | apply server migrations and exit |
|
|
||||||
| `-version` | print app version and exit |
|
|
||||||
|
|
||||||
## First run
|
|
||||||
|
|
||||||
1. runtime ensures `config.yaml` exists;
|
|
||||||
2. runtime opens the local SQLite database;
|
|
||||||
3. if no stored MariaDB credentials exist, `/setup` is served;
|
|
||||||
4. after setup, runtime works locally and sync uses saved DB settings in the background.
|
|
||||||
@@ -1,55 +0,0 @@
|
|||||||
# 06 - Backup
|
|
||||||
|
|
||||||
## Scope
|
|
||||||
|
|
||||||
QuoteForge creates rotating local ZIP backups of:
|
|
||||||
- a consistent SQLite snapshot saved as `qfs.db`;
|
|
||||||
- `config.yaml` when present.
|
|
||||||
|
|
||||||
The backup intentionally does not include `local_encryption.key`.
|
|
||||||
|
|
||||||
## Location and naming
|
|
||||||
|
|
||||||
Default root:
|
|
||||||
- `<db dir>/backups`
|
|
||||||
|
|
||||||
Subdirectories:
|
|
||||||
- `daily/`
|
|
||||||
- `weekly/`
|
|
||||||
- `monthly/`
|
|
||||||
- `yearly/`
|
|
||||||
|
|
||||||
Archive name:
|
|
||||||
- `qfs-backp-YYYY-MM-DD.zip`
|
|
||||||
|
|
||||||
## Retention
|
|
||||||
|
|
||||||
| Period | Keep |
|
|
||||||
| --- | --- |
|
|
||||||
| Daily | 7 |
|
|
||||||
| Weekly | 4 |
|
|
||||||
| Monthly | 12 |
|
|
||||||
| Yearly | 10 |
|
|
||||||
|
|
||||||
## Behavior
|
|
||||||
|
|
||||||
- on startup, QuoteForge creates a backup if the current period has none yet;
|
|
||||||
- a daily scheduler creates the next backup at `backup.time`;
|
|
||||||
- duplicate snapshots inside the same period are prevented by a period marker file;
|
|
||||||
- old archives are pruned automatically.
|
|
||||||
|
|
||||||
## Safety rules
|
|
||||||
|
|
||||||
- backup root must be outside the git worktree;
|
|
||||||
- backup creation is blocked if the resolved backup root sits inside the repository;
|
|
||||||
- SQLite snapshot must be created from a consistent database copy, not by copying live WAL files directly;
|
|
||||||
- restore to another machine requires re-entering DB credentials unless the encryption key is migrated separately.
|
|
||||||
|
|
||||||
## Restore
|
|
||||||
|
|
||||||
1. stop QuoteForge;
|
|
||||||
2. unpack the chosen archive outside the repository;
|
|
||||||
3. replace `qfs.db`;
|
|
||||||
4. replace `config.yaml` if needed;
|
|
||||||
5. restart the app;
|
|
||||||
6. re-enter MariaDB credentials if the original encryption key is unavailable.
|
|
||||||
@@ -1,35 +0,0 @@
|
|||||||
# 07 - Development
|
|
||||||
|
|
||||||
## Common commands
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go run ./cmd/qfs
|
|
||||||
go run ./cmd/qfs -migrate
|
|
||||||
go run ./cmd/migrate_project_updated_at
|
|
||||||
go test ./...
|
|
||||||
go vet ./...
|
|
||||||
make build-release
|
|
||||||
make install-hooks
|
|
||||||
```
|
|
||||||
|
|
||||||
## Guardrails
|
|
||||||
|
|
||||||
- run `gofmt` before commit;
|
|
||||||
- use `slog` for server logging;
|
|
||||||
- keep runtime business logic SQLite-only;
|
|
||||||
- limit MariaDB access to sync, setup, and migration tooling;
|
|
||||||
- keep `config.yaml` out of git and use `config.example.yaml` only as a template;
|
|
||||||
- update `bible-local/` in the same commit as architecture changes.
|
|
||||||
|
|
||||||
## Removed features that must not return
|
|
||||||
|
|
||||||
- admin pricing UI/API;
|
|
||||||
- alerts and notification workflows;
|
|
||||||
- stock import tooling;
|
|
||||||
- cron jobs;
|
|
||||||
- standalone importer utility.
|
|
||||||
|
|
||||||
## Release notes
|
|
||||||
|
|
||||||
Release history belongs under `releases/<version>/RELEASE_NOTES.md`.
|
|
||||||
Do not keep temporary change summaries in the repository root.
|
|
||||||
@@ -1,64 +0,0 @@
|
|||||||
# 09 - Vendor BOM
|
|
||||||
|
|
||||||
## Storage contract
|
|
||||||
|
|
||||||
Vendor BOM is stored in `local_configurations.vendor_spec` and synced with `qt_configurations.vendor_spec`.
|
|
||||||
|
|
||||||
Each row uses this canonical shape:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"sort_order": 10,
|
|
||||||
"vendor_partnumber": "ABC-123",
|
|
||||||
"quantity": 2,
|
|
||||||
"description": "row description",
|
|
||||||
"unit_price": 4500.0,
|
|
||||||
"total_price": 9000.0,
|
|
||||||
"lot_mappings": [
|
|
||||||
{ "lot_name": "LOT_A", "quantity_per_pn": 1 }
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- `lot_mappings[]` is the only persisted PN -> LOT mapping contract;
|
|
||||||
- QuoteForge does not use legacy BOM tables;
|
|
||||||
- apply flow rebuilds cart rows from `lot_mappings[]`.
|
|
||||||
|
|
||||||
## Partnumber books
|
|
||||||
|
|
||||||
Partnumber books are pull-only snapshots from PriceForge.
|
|
||||||
|
|
||||||
Local tables:
|
|
||||||
- `local_partnumber_books`
|
|
||||||
- `local_partnumber_book_items`
|
|
||||||
|
|
||||||
Server tables:
|
|
||||||
- `qt_partnumber_books`
|
|
||||||
- `qt_partnumber_book_items`
|
|
||||||
|
|
||||||
Resolution flow:
|
|
||||||
1. load the active local book;
|
|
||||||
2. find `vendor_partnumber`;
|
|
||||||
3. copy `lots_json` into `lot_mappings[]`;
|
|
||||||
4. keep unresolved rows editable in the UI.
|
|
||||||
|
|
||||||
## CFXML import
|
|
||||||
|
|
||||||
`POST /api/projects/:uuid/vendor-import` imports one vendor workspace into an existing project.
|
|
||||||
|
|
||||||
Rules:
|
|
||||||
- accepted file field is `file`;
|
|
||||||
- maximum file size is `1 GiB`;
|
|
||||||
- one `ProprietaryGroupIdentifier` becomes one QuoteForge configuration;
|
|
||||||
- software rows stay inside their hardware group and never become standalone configurations;
|
|
||||||
- primary group row is selected structurally, without vendor-specific SKU hardcoding;
|
|
||||||
- imported configuration order follows workspace order.
|
|
||||||
|
|
||||||
Imported configuration fields:
|
|
||||||
- `name` from primary row `ProductName`
|
|
||||||
- `server_count` from primary row `Quantity`
|
|
||||||
- `server_model` from primary row `ProductDescription`
|
|
||||||
- `article` or `support_code` from `ProprietaryProductIdentifier`
|
|
||||||
|
|
||||||
Imported BOM rows become `vendor_spec` rows and are resolved through the active local partnumber book when possible.
|
|
||||||
@@ -1,30 +0,0 @@
|
|||||||
# QuoteForge Bible
|
|
||||||
|
|
||||||
Project-specific architecture and operational contracts.
|
|
||||||
|
|
||||||
## Files
|
|
||||||
|
|
||||||
| File | Scope |
|
|
||||||
| --- | --- |
|
|
||||||
| [01-overview.md](01-overview.md) | Product scope, runtime model, repository map |
|
|
||||||
| [02-architecture.md](02-architecture.md) | Local-first rules, sync, pricing, versioning |
|
|
||||||
| [03-database.md](03-database.md) | SQLite and MariaDB data model, permissions, migrations |
|
|
||||||
| [04-api.md](04-api.md) | HTTP routes and API contract |
|
|
||||||
| [05-config.md](05-config.md) | Runtime config, paths, env vars, startup behavior |
|
|
||||||
| [06-backup.md](06-backup.md) | Backup contract and restore workflow |
|
|
||||||
| [07-dev.md](07-dev.md) | Development commands and guardrails |
|
|
||||||
| [09-vendor-spec.md](09-vendor-spec.md) | Vendor BOM and CFXML import contract |
|
|
||||||
|
|
||||||
## Rules
|
|
||||||
|
|
||||||
- `bible-local/` is the source of truth for QuoteForge-specific behavior.
|
|
||||||
- Keep these files in English.
|
|
||||||
- Update the matching file in the same commit as any architectural change.
|
|
||||||
- Remove stale documentation instead of preserving history in place.
|
|
||||||
|
|
||||||
## Quick reference
|
|
||||||
|
|
||||||
- Local DB path: see [05-config.md](05-config.md)
|
|
||||||
- Runtime bind: loopback only
|
|
||||||
- Local backups: see [06-backup.md](06-backup.md)
|
|
||||||
- Release notes: `releases/<version>/RELEASE_NOTES.md`
|
|
||||||
84
cmd/cron/main.go
Normal file
84
cmd/cron/main.go
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/services/alerts"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/services/pricing"
|
||||||
|
"gorm.io/driver/mysql"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
"gorm.io/gorm/logger"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
configPath := flag.String("config", "config.yaml", "path to config file")
|
||||||
|
cronJob := flag.String("job", "", "type of cron job to run (alerts, update-prices)")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
cfg, err := config.Load(*configPath)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to load config: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
db, err := gorm.Open(mysql.Open(cfg.Database.DSN()), &gorm.Config{
|
||||||
|
Logger: logger.Default.LogMode(logger.Silent),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to connect to database: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure tables exist
|
||||||
|
if err := models.Migrate(db); err != nil {
|
||||||
|
log.Fatalf("Migration failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize repositories
|
||||||
|
statsRepo := repository.NewStatsRepository(db)
|
||||||
|
alertRepo := repository.NewAlertRepository(db)
|
||||||
|
componentRepo := repository.NewComponentRepository(db)
|
||||||
|
priceRepo := repository.NewPriceRepository(db)
|
||||||
|
|
||||||
|
// Initialize services
|
||||||
|
alertService := alerts.NewService(alertRepo, componentRepo, priceRepo, statsRepo, cfg.Alerts, cfg.Pricing)
|
||||||
|
pricingService := pricing.NewService(componentRepo, priceRepo, cfg.Pricing)
|
||||||
|
|
||||||
|
switch *cronJob {
|
||||||
|
case "alerts":
|
||||||
|
log.Println("Running alerts check...")
|
||||||
|
if err := alertService.CheckAndGenerateAlerts(); err != nil {
|
||||||
|
log.Printf("Error running alerts check: %v", err)
|
||||||
|
} else {
|
||||||
|
log.Println("Alerts check completed successfully")
|
||||||
|
}
|
||||||
|
case "update-prices":
|
||||||
|
log.Println("Recalculating all prices...")
|
||||||
|
updated, errors := pricingService.RecalculateAllPrices()
|
||||||
|
log.Printf("Prices recalculated: %d updated, %d errors", updated, errors)
|
||||||
|
case "reset-counters":
|
||||||
|
log.Println("Resetting usage counters...")
|
||||||
|
if err := statsRepo.ResetWeeklyCounters(); err != nil {
|
||||||
|
log.Printf("Error resetting weekly counters: %v", err)
|
||||||
|
}
|
||||||
|
if err := statsRepo.ResetMonthlyCounters(); err != nil {
|
||||||
|
log.Printf("Error resetting monthly counters: %v", err)
|
||||||
|
}
|
||||||
|
log.Println("Usage counters reset completed")
|
||||||
|
case "update-popularity":
|
||||||
|
log.Println("Updating popularity scores...")
|
||||||
|
if err := statsRepo.UpdatePopularityScores(); err != nil {
|
||||||
|
log.Printf("Error updating popularity scores: %v", err)
|
||||||
|
} else {
|
||||||
|
log.Println("Popularity scores updated successfully")
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
log.Println("No valid cron job specified. Available jobs:")
|
||||||
|
log.Println(" - alerts: Check and generate alerts")
|
||||||
|
log.Println(" - update-prices: Recalculate all prices")
|
||||||
|
log.Println(" - reset-counters: Reset usage counters")
|
||||||
|
log.Println(" - update-popularity: Update popularity scores")
|
||||||
|
}
|
||||||
|
}
|
||||||
160
cmd/importer/main.go
Normal file
160
cmd/importer/main.go
Normal file
@@ -0,0 +1,160 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
	"errors"
	"flag"
	"log"
	"strings"

	"git.mchus.pro/mchus/quoteforge/internal/config"
	"git.mchus.pro/mchus/quoteforge/internal/models"
	"gorm.io/driver/mysql"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
configPath := flag.String("config", "config.yaml", "path to config file")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
cfg, err := config.Load(*configPath)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to load config: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
db, err := gorm.Open(mysql.Open(cfg.Database.DSN()), &gorm.Config{
|
||||||
|
Logger: logger.Default.LogMode(logger.Silent),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to connect to database: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Println("Connected to database")
|
||||||
|
|
||||||
|
// Ensure tables exist
|
||||||
|
if err := models.Migrate(db); err != nil {
|
||||||
|
log.Fatalf("Migration failed: %v", err)
|
||||||
|
}
|
||||||
|
if err := models.SeedCategories(db); err != nil {
|
||||||
|
log.Fatalf("Seeding categories failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load categories for lookup
|
||||||
|
var categories []models.Category
|
||||||
|
db.Find(&categories)
|
||||||
|
categoryMap := make(map[string]uint)
|
||||||
|
for _, c := range categories {
|
||||||
|
categoryMap[c.Code] = c.ID
|
||||||
|
}
|
||||||
|
log.Printf("Loaded %d categories", len(categories))
|
||||||
|
|
||||||
|
// Get all lots
|
||||||
|
var lots []models.Lot
|
||||||
|
if err := db.Find(&lots).Error; err != nil {
|
||||||
|
log.Fatalf("Failed to load lots: %v", err)
|
||||||
|
}
|
||||||
|
log.Printf("Found %d lots to import", len(lots))
|
||||||
|
|
||||||
|
// Import each lot
|
||||||
|
var imported, skipped, updated int
|
||||||
|
for _, lot := range lots {
|
||||||
|
category, model := ParsePartNumber(lot.LotName)
|
||||||
|
|
||||||
|
var categoryID *uint
|
||||||
|
if id, ok := categoryMap[category]; ok && id > 0 {
|
||||||
|
categoryID = &id
|
||||||
|
} else {
|
||||||
|
// Try to find by prefix match
|
||||||
|
for code, id := range categoryMap {
|
||||||
|
if strings.HasPrefix(category, code) {
|
||||||
|
categoryID = &id
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if already exists
|
||||||
|
var existing models.LotMetadata
|
||||||
|
result := db.Where("lot_name = ?", lot.LotName).First(&existing)
|
||||||
|
|
||||||
|
if result.Error == gorm.ErrRecordNotFound {
|
||||||
|
// Check if there are prices in the last 90 days
|
||||||
|
var recentPriceCount int64
|
||||||
|
db.Model(&models.LotLog{}).
|
||||||
|
Where("lot = ? AND date >= DATE_SUB(NOW(), INTERVAL 90 DAY)", lot.LotName).
|
||||||
|
Count(&recentPriceCount)
|
||||||
|
|
||||||
|
// Default to 90 days, but use "all time" (0) if no recent prices
|
||||||
|
periodDays := 90
|
||||||
|
if recentPriceCount == 0 {
|
||||||
|
periodDays = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create new
|
||||||
|
metadata := models.LotMetadata{
|
||||||
|
LotName: lot.LotName,
|
||||||
|
CategoryID: categoryID,
|
||||||
|
Model: model,
|
||||||
|
PricePeriodDays: periodDays,
|
||||||
|
}
|
||||||
|
if err := db.Create(&metadata).Error; err != nil {
|
||||||
|
log.Printf("Failed to create metadata for %s: %v", lot.LotName, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
imported++
|
||||||
|
} else if result.Error == nil {
|
||||||
|
// Update if needed
|
||||||
|
needsUpdate := false
|
||||||
|
|
||||||
|
if existing.Model == "" {
|
||||||
|
existing.Model = model
|
||||||
|
needsUpdate = true
|
||||||
|
}
|
||||||
|
if existing.CategoryID == nil {
|
||||||
|
existing.CategoryID = categoryID
|
||||||
|
needsUpdate = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if using default period (90 days) but no recent prices
|
||||||
|
if existing.PricePeriodDays == 90 {
|
||||||
|
var recentPriceCount int64
|
||||||
|
db.Model(&models.LotLog{}).
|
||||||
|
Where("lot = ? AND date >= DATE_SUB(NOW(), INTERVAL 90 DAY)", lot.LotName).
|
||||||
|
Count(&recentPriceCount)
|
||||||
|
|
||||||
|
if recentPriceCount == 0 {
|
||||||
|
existing.PricePeriodDays = 0
|
||||||
|
needsUpdate = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if needsUpdate {
|
||||||
|
db.Save(&existing)
|
||||||
|
updated++
|
||||||
|
} else {
|
||||||
|
skipped++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Import complete: %d imported, %d updated, %d skipped", imported, updated, skipped)
|
||||||
|
|
||||||
|
// Show final counts
|
||||||
|
var metadataCount int64
|
||||||
|
db.Model(&models.LotMetadata{}).Count(&metadataCount)
|
||||||
|
log.Printf("Total metadata records: %d", metadataCount)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParsePartNumber splits a lot name into its category prefix and the
// remaining model portion at the first underscore. A name with no
// underscore yields the whole string as the category and an empty model.
//
// Examples:
//
//	"CPU_AMD_9654"           -> category "CPU", model "AMD_9654"
//	"MB_INTEL_4.Sapphire_2S" -> category "MB",  model "INTEL_4.Sapphire_2S"
func ParsePartNumber(lotName string) (category, model string) {
	if idx := strings.Index(lotName, "_"); idx >= 0 {
		return lotName[:idx], lotName[idx+1:]
	}
	return lotName, ""
}
|
||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/appstate"
|
"git.mchus.pro/mchus/quoteforge/internal/appstate"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||||
"gorm.io/driver/mysql"
|
"gorm.io/driver/mysql"
|
||||||
@@ -15,6 +16,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
configPath := flag.String("config", "config.yaml", "path to config file")
|
||||||
defaultLocalDBPath, err := appstate.ResolveDBPath("")
|
defaultLocalDBPath, err := appstate.ResolveDBPath("")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Failed to resolve default local SQLite path: %v", err)
|
log.Fatalf("Failed to resolve default local SQLite path: %v", err)
|
||||||
@@ -26,6 +28,22 @@ func main() {
|
|||||||
log.Println("QuoteForge Configuration Migration Tool")
|
log.Println("QuoteForge Configuration Migration Tool")
|
||||||
log.Println("========================================")
|
log.Println("========================================")
|
||||||
|
|
||||||
|
// Load config for MariaDB connection
|
||||||
|
cfg, err := config.Load(*configPath)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to load config: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Connect to MariaDB
|
||||||
|
log.Printf("Connecting to MariaDB at %s:%d...", cfg.Database.Host, cfg.Database.Port)
|
||||||
|
mariaDB, err := gorm.Open(mysql.Open(cfg.Database.DSN()), &gorm.Config{
|
||||||
|
Logger: logger.Default.LogMode(logger.Silent),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to connect to MariaDB: %v", err)
|
||||||
|
}
|
||||||
|
log.Println("Connected to MariaDB")
|
||||||
|
|
||||||
// Initialize local SQLite
|
// Initialize local SQLite
|
||||||
log.Printf("Opening local SQLite at %s...", *localDBPath)
|
log.Printf("Opening local SQLite at %s...", *localDBPath)
|
||||||
local, err := localdb.New(*localDBPath)
|
local, err := localdb.New(*localDBPath)
|
||||||
@@ -33,28 +51,6 @@ func main() {
|
|||||||
log.Fatalf("Failed to initialize local database: %v", err)
|
log.Fatalf("Failed to initialize local database: %v", err)
|
||||||
}
|
}
|
||||||
log.Println("Local SQLite initialized")
|
log.Println("Local SQLite initialized")
|
||||||
if !local.HasSettings() {
|
|
||||||
log.Fatalf("SQLite connection settings are not configured. Run qfs setup first.")
|
|
||||||
}
|
|
||||||
|
|
||||||
settings, err := local.GetSettings()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to load SQLite connection settings: %v", err)
|
|
||||||
}
|
|
||||||
dsn, err := local.GetDSN()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to build DSN from SQLite settings: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Connect to MariaDB
|
|
||||||
log.Printf("Connecting to MariaDB at %s:%d...", settings.Host, settings.Port)
|
|
||||||
mariaDB, err := gorm.Open(mysql.Open(dsn), &gorm.Config{
|
|
||||||
Logger: logger.Default.LogMode(logger.Silent),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed to connect to MariaDB: %v", err)
|
|
||||||
}
|
|
||||||
log.Println("Connected to MariaDB")
|
|
||||||
|
|
||||||
// Count configurations in MariaDB
|
// Count configurations in MariaDB
|
||||||
var serverCount int64
|
var serverCount int64
|
||||||
@@ -153,7 +149,23 @@ func main() {
|
|||||||
log.Printf(" Skipped: %d", skipped)
|
log.Printf(" Skipped: %d", skipped)
|
||||||
log.Printf(" Errors: %d", errors)
|
log.Printf(" Errors: %d", errors)
|
||||||
|
|
||||||
fmt.Println("\nDone! You can now run the server with: go run ./cmd/qfs")
|
// Save connection settings to local SQLite if not exists
|
||||||
|
if !local.HasSettings() {
|
||||||
|
log.Println("\nSaving connection settings to local SQLite...")
|
||||||
|
if err := local.SaveSettings(
|
||||||
|
cfg.Database.Host,
|
||||||
|
cfg.Database.Port,
|
||||||
|
cfg.Database.Name,
|
||||||
|
cfg.Database.User,
|
||||||
|
cfg.Database.Password,
|
||||||
|
); err != nil {
|
||||||
|
log.Printf("Warning: Failed to save settings: %v", err)
|
||||||
|
} else {
|
||||||
|
log.Println("Connection settings saved")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("\nDone! You can now run the server with: go run ./cmd/server")
|
||||||
}
|
}
|
||||||
|
|
||||||
func derefUint(v *uint) uint {
|
func derefUint(v *uint) uint {
|
||||||
|
|||||||
@@ -10,8 +10,7 @@ import (
|
|||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/appstate"
|
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"gorm.io/driver/mysql"
|
"gorm.io/driver/mysql"
|
||||||
@@ -39,29 +38,17 @@ type migrationAction struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
defaultLocalDBPath, err := appstate.ResolveDBPath("")
|
configPath := flag.String("config", "config.yaml", "path to config file")
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("failed to resolve default local SQLite path: %v", err)
|
|
||||||
}
|
|
||||||
localDBPath := flag.String("localdb", defaultLocalDBPath, "path to local SQLite database (default: user state dir or QFS_DB_PATH)")
|
|
||||||
apply := flag.Bool("apply", false, "apply migration (default is preview only)")
|
apply := flag.Bool("apply", false, "apply migration (default is preview only)")
|
||||||
yes := flag.Bool("yes", false, "skip interactive confirmation (works only with -apply)")
|
yes := flag.Bool("yes", false, "skip interactive confirmation (works only with -apply)")
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
local, err := localdb.New(*localDBPath)
|
cfg, err := config.Load(*configPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("failed to initialize local database: %v", err)
|
log.Fatalf("failed to load config: %v", err)
|
||||||
}
|
}
|
||||||
if !local.HasSettings() {
|
|
||||||
log.Fatalf("SQLite connection settings are not configured. Run qfs setup first.")
|
|
||||||
}
|
|
||||||
dsn, err := local.GetDSN()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("failed to build DSN from SQLite settings: %v", err)
|
|
||||||
}
|
|
||||||
dbUser := strings.TrimSpace(local.GetDBUser())
|
|
||||||
|
|
||||||
db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{
|
db, err := gorm.Open(mysql.Open(cfg.Database.DSN()), &gorm.Config{
|
||||||
Logger: logger.Default.LogMode(logger.Silent),
|
Logger: logger.Default.LogMode(logger.Silent),
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -72,7 +59,7 @@ func main() {
|
|||||||
log.Fatalf("precheck failed: %v", err)
|
log.Fatalf("precheck failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
actions, existingProjects, err := buildPlan(db, dbUser)
|
actions, existingProjects, err := buildPlan(db, cfg.Database.User)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("failed to build migration plan: %v", err)
|
log.Fatalf("failed to build migration plan: %v", err)
|
||||||
}
|
}
|
||||||
@@ -163,7 +150,7 @@ func buildPlan(db *gorm.DB, fallbackOwner string) ([]migrationAction, map[string
|
|||||||
}
|
}
|
||||||
for i := range projects {
|
for i := range projects {
|
||||||
p := projects[i]
|
p := projects[i]
|
||||||
existingProjects[projectKey(p.OwnerUsername, derefString(p.Name))] = &p
|
existingProjects[projectKey(p.OwnerUsername, p.Name)] = &p
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -253,13 +240,12 @@ func executePlan(db *gorm.DB, actions []migrationAction, existingProjects map[st
|
|||||||
|
|
||||||
for _, action := range actions {
|
for _, action := range actions {
|
||||||
key := projectKey(action.OwnerUsername, action.TargetProjectName)
|
key := projectKey(action.OwnerUsername, action.TargetProjectName)
|
||||||
project := projectCache[key]
|
project := projectCache[key]
|
||||||
if project == nil {
|
if project == nil {
|
||||||
project = &models.Project{
|
project = &models.Project{
|
||||||
UUID: uuid.NewString(),
|
UUID: uuid.NewString(),
|
||||||
OwnerUsername: action.OwnerUsername,
|
OwnerUsername: action.OwnerUsername,
|
||||||
Code: action.TargetProjectName,
|
Name: action.TargetProjectName,
|
||||||
Name: ptrString(action.TargetProjectName),
|
|
||||||
IsActive: true,
|
IsActive: true,
|
||||||
IsSystem: false,
|
IsSystem: false,
|
||||||
}
|
}
|
||||||
@@ -269,7 +255,7 @@ func executePlan(db *gorm.DB, actions []migrationAction, existingProjects map[st
|
|||||||
projectCache[key] = project
|
projectCache[key] = project
|
||||||
} else if !project.IsActive {
|
} else if !project.IsActive {
|
||||||
if err := tx.Model(&models.Project{}).Where("uuid = ?", project.UUID).Update("is_active", true).Error; err != nil {
|
if err := tx.Model(&models.Project{}).Where("uuid = ?", project.UUID).Update("is_active", true).Error; err != nil {
|
||||||
return fmt.Errorf("reactivate project %s (%s): %w", derefString(project.Name), project.UUID, err)
|
return fmt.Errorf("reactivate project %s (%s): %w", project.Name, project.UUID, err)
|
||||||
}
|
}
|
||||||
project.IsActive = true
|
project.IsActive = true
|
||||||
}
|
}
|
||||||
@@ -295,14 +281,3 @@ func setKeys(set map[string]struct{}) []string {
|
|||||||
func projectKey(owner, name string) string {
|
func projectKey(owner, name string) string {
|
||||||
return owner + "||" + name
|
return owner + "||" + name
|
||||||
}
|
}
|
||||||
|
|
||||||
func derefString(value *string) string {
|
|
||||||
if value == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return *value
|
|
||||||
}
|
|
||||||
|
|
||||||
func ptrString(value string) *string {
|
|
||||||
return &value
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,173 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"sort"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/appstate"
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
|
||||||
"gorm.io/driver/mysql"
|
|
||||||
"gorm.io/gorm"
|
|
||||||
"gorm.io/gorm/logger"
|
|
||||||
)
|
|
||||||
|
|
||||||
type projectTimestampRow struct {
|
|
||||||
UUID string
|
|
||||||
UpdatedAt time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type updatePlanRow struct {
|
|
||||||
UUID string
|
|
||||||
Code string
|
|
||||||
Variant string
|
|
||||||
LocalUpdatedAt time.Time
|
|
||||||
ServerUpdatedAt time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
defaultLocalDBPath, err := appstate.ResolveDBPath("")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("failed to resolve default local SQLite path: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
localDBPath := flag.String("localdb", defaultLocalDBPath, "path to local SQLite database (default: user state dir or QFS_DB_PATH)")
|
|
||||||
apply := flag.Bool("apply", false, "apply updates to local SQLite (default is preview only)")
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
local, err := localdb.New(*localDBPath)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("failed to initialize local database: %v", err)
|
|
||||||
}
|
|
||||||
defer local.Close()
|
|
||||||
|
|
||||||
if !local.HasSettings() {
|
|
||||||
log.Fatalf("SQLite connection settings are not configured. Run qfs setup first.")
|
|
||||||
}
|
|
||||||
|
|
||||||
dsn, err := local.GetDSN()
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("failed to build DSN from SQLite settings: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{
|
|
||||||
Logger: logger.Default.LogMode(logger.Silent),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("failed to connect to MariaDB: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
serverRows, err := loadServerProjects(db)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("failed to load server projects: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
localProjects, err := local.GetAllProjects(true)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("failed to load local projects: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
plan := buildUpdatePlan(localProjects, serverRows)
|
|
||||||
printPlan(plan, *apply)
|
|
||||||
|
|
||||||
if !*apply || len(plan) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
updated := 0
|
|
||||||
for i := range plan {
|
|
||||||
project, err := local.GetProjectByUUID(plan[i].UUID)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("skip %s: load local project: %v", plan[i].UUID, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
project.UpdatedAt = plan[i].ServerUpdatedAt
|
|
||||||
if err := local.SaveProjectPreservingUpdatedAt(project); err != nil {
|
|
||||||
log.Printf("skip %s: save local project: %v", plan[i].UUID, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
updated++
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("updated %d local project timestamps", updated)
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadServerProjects(db *gorm.DB) (map[string]time.Time, error) {
|
|
||||||
var rows []projectTimestampRow
|
|
||||||
if err := db.Model(&models.Project{}).
|
|
||||||
Select("uuid, updated_at").
|
|
||||||
Find(&rows).Error; err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
out := make(map[string]time.Time, len(rows))
|
|
||||||
for _, row := range rows {
|
|
||||||
if row.UUID == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
out[row.UUID] = row.UpdatedAt
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildUpdatePlan(localProjects []localdb.LocalProject, serverRows map[string]time.Time) []updatePlanRow {
|
|
||||||
plan := make([]updatePlanRow, 0)
|
|
||||||
for i := range localProjects {
|
|
||||||
project := localProjects[i]
|
|
||||||
serverUpdatedAt, ok := serverRows[project.UUID]
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if project.UpdatedAt.Equal(serverUpdatedAt) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
plan = append(plan, updatePlanRow{
|
|
||||||
UUID: project.UUID,
|
|
||||||
Code: project.Code,
|
|
||||||
Variant: project.Variant,
|
|
||||||
LocalUpdatedAt: project.UpdatedAt,
|
|
||||||
ServerUpdatedAt: serverUpdatedAt,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Slice(plan, func(i, j int) bool {
|
|
||||||
if plan[i].Code != plan[j].Code {
|
|
||||||
return plan[i].Code < plan[j].Code
|
|
||||||
}
|
|
||||||
return plan[i].Variant < plan[j].Variant
|
|
||||||
})
|
|
||||||
|
|
||||||
return plan
|
|
||||||
}
|
|
||||||
|
|
||||||
func printPlan(plan []updatePlanRow, apply bool) {
|
|
||||||
mode := "preview"
|
|
||||||
if apply {
|
|
||||||
mode = "apply"
|
|
||||||
}
|
|
||||||
log.Printf("project updated_at resync mode=%s changes=%d", mode, len(plan))
|
|
||||||
if len(plan) == 0 {
|
|
||||||
log.Printf("no local project timestamps need resync")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, row := range plan {
|
|
||||||
variant := row.Variant
|
|
||||||
if variant == "" {
|
|
||||||
variant = "main"
|
|
||||||
}
|
|
||||||
log.Printf("%s [%s] local=%s server=%s", row.Code, variant, formatStamp(row.LocalUpdatedAt), formatStamp(row.ServerUpdatedAt))
|
|
||||||
}
|
|
||||||
if !apply {
|
|
||||||
fmt.Println("Re-run with -apply to write server updated_at into local SQLite.")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func formatStamp(value time.Time) string {
|
|
||||||
if value.IsZero() {
|
|
||||||
return "zero"
|
|
||||||
}
|
|
||||||
return value.Format(time.RFC3339)
|
|
||||||
}
|
|
||||||
@@ -1,106 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestMigrateConfigFileToRuntimeShapeDropsDeprecatedSections(t *testing.T) {
|
|
||||||
t.Helper()
|
|
||||||
dir := t.TempDir()
|
|
||||||
path := filepath.Join(dir, "config.yaml")
|
|
||||||
|
|
||||||
legacy := `server:
|
|
||||||
host: "0.0.0.0"
|
|
||||||
port: 9191
|
|
||||||
database:
|
|
||||||
host: "legacy-db"
|
|
||||||
port: 3306
|
|
||||||
name: "RFQ_LOG"
|
|
||||||
user: "old"
|
|
||||||
password: "REDACTED_TEST_PASSWORD"
|
|
||||||
pricing:
|
|
||||||
default_method: "median"
|
|
||||||
logging:
|
|
||||||
level: "debug"
|
|
||||||
format: "text"
|
|
||||||
output: "stdout"
|
|
||||||
`
|
|
||||||
if err := os.WriteFile(path, []byte(legacy), 0644); err != nil {
|
|
||||||
t.Fatalf("write legacy config: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg, err := config.Load(path)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("load legacy config: %v", err)
|
|
||||||
}
|
|
||||||
setConfigDefaults(cfg)
|
|
||||||
cfg.Server.Host, _, err = normalizeLoopbackServerHost(cfg.Server.Host)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("normalize server host: %v", err)
|
|
||||||
}
|
|
||||||
if err := migrateConfigFileToRuntimeShape(path, cfg); err != nil {
|
|
||||||
t.Fatalf("migrate config: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
got, err := os.ReadFile(path)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("read migrated config: %v", err)
|
|
||||||
}
|
|
||||||
text := string(got)
|
|
||||||
if strings.Contains(text, "database:") {
|
|
||||||
t.Fatalf("migrated config still contains deprecated database section:\n%s", text)
|
|
||||||
}
|
|
||||||
if strings.Contains(text, "pricing:") {
|
|
||||||
t.Fatalf("migrated config still contains deprecated pricing section:\n%s", text)
|
|
||||||
}
|
|
||||||
if !strings.Contains(text, "server:") || !strings.Contains(text, "logging:") {
|
|
||||||
t.Fatalf("migrated config missing required sections:\n%s", text)
|
|
||||||
}
|
|
||||||
if !strings.Contains(text, "port: 9191") {
|
|
||||||
t.Fatalf("migrated config did not preserve server port:\n%s", text)
|
|
||||||
}
|
|
||||||
if !strings.Contains(text, "host: 127.0.0.1") {
|
|
||||||
t.Fatalf("migrated config did not normalize server host:\n%s", text)
|
|
||||||
}
|
|
||||||
if !strings.Contains(text, "level: debug") {
|
|
||||||
t.Fatalf("migrated config did not preserve logging level:\n%s", text)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNormalizeLoopbackServerHost(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
cases := []struct {
|
|
||||||
host string
|
|
||||||
want string
|
|
||||||
wantChanged bool
|
|
||||||
wantErr bool
|
|
||||||
}{
|
|
||||||
{host: "127.0.0.1", want: "127.0.0.1", wantChanged: false, wantErr: false},
|
|
||||||
{host: "localhost", want: "127.0.0.1", wantChanged: true, wantErr: false},
|
|
||||||
{host: "::1", want: "127.0.0.1", wantChanged: true, wantErr: false},
|
|
||||||
{host: "0.0.0.0", want: "127.0.0.1", wantChanged: true, wantErr: false},
|
|
||||||
{host: "192.168.1.10", want: "127.0.0.1", wantChanged: true, wantErr: false},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range cases {
|
|
||||||
got, changed, err := normalizeLoopbackServerHost(tc.host)
|
|
||||||
if tc.wantErr && err == nil {
|
|
||||||
t.Fatalf("expected error for host %q", tc.host)
|
|
||||||
}
|
|
||||||
if !tc.wantErr && err != nil {
|
|
||||||
t.Fatalf("unexpected error for host %q: %v", tc.host, err)
|
|
||||||
}
|
|
||||||
if got != tc.want {
|
|
||||||
t.Fatalf("unexpected normalized host for %q: got %q want %q", tc.host, got, tc.want)
|
|
||||||
}
|
|
||||||
if changed != tc.wantChanged {
|
|
||||||
t.Fatalf("unexpected changed flag for %q: got %t want %t", tc.host, changed, tc.wantChanged)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
978
cmd/qfs/main.go
978
cmd/qfs/main.go
File diff suppressed because it is too large
Load Diff
@@ -1,48 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"log/slog"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestRequestLoggerDoesNotLogResponseBody(t *testing.T) {
|
|
||||||
gin.SetMode(gin.TestMode)
|
|
||||||
|
|
||||||
var logBuffer bytes.Buffer
|
|
||||||
previousLogger := slog.Default()
|
|
||||||
slog.SetDefault(slog.New(slog.NewTextHandler(&logBuffer, &slog.HandlerOptions{})))
|
|
||||||
defer slog.SetDefault(previousLogger)
|
|
||||||
|
|
||||||
router := gin.New()
|
|
||||||
router.Use(requestLogger())
|
|
||||||
router.GET("/fail", func(c *gin.Context) {
|
|
||||||
_ = c.Error(errors.New("root cause"))
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "do not log this body"})
|
|
||||||
})
|
|
||||||
|
|
||||||
rec := httptest.NewRecorder()
|
|
||||||
req := httptest.NewRequest(http.MethodGet, "/fail?debug=1", nil)
|
|
||||||
router.ServeHTTP(rec, req)
|
|
||||||
|
|
||||||
if rec.Code != http.StatusBadRequest {
|
|
||||||
t.Fatalf("expected 400, got %d", rec.Code)
|
|
||||||
}
|
|
||||||
|
|
||||||
logOutput := logBuffer.String()
|
|
||||||
if !strings.Contains(logOutput, "request failed") {
|
|
||||||
t.Fatalf("expected request failure log, got %q", logOutput)
|
|
||||||
}
|
|
||||||
if strings.Contains(logOutput, "do not log this body") {
|
|
||||||
t.Fatalf("response body leaked into logs: %q", logOutput)
|
|
||||||
}
|
|
||||||
if !strings.Contains(logOutput, "root cause") {
|
|
||||||
t.Fatalf("expected error details in logs, got %q", logOutput)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -3,12 +3,10 @@ package main
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"mime/multipart"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||||
@@ -39,7 +37,7 @@ func TestConfigurationVersioningAPI(t *testing.T) {
|
|||||||
|
|
||||||
cfg := &config.Config{}
|
cfg := &config.Config{}
|
||||||
setConfigDefaults(cfg)
|
setConfigDefaults(cfg)
|
||||||
router, _, err := setupRouter(cfg, local, connMgr, "tester", nil)
|
router, _, err := setupRouter(cfg, local, connMgr, nil, "tester", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("setup router: %v", err)
|
t.Fatalf("setup router: %v", err)
|
||||||
}
|
}
|
||||||
@@ -79,7 +77,7 @@ func TestConfigurationVersioningAPI(t *testing.T) {
|
|||||||
if err := json.Unmarshal(rbRec.Body.Bytes(), &rbResp); err != nil {
|
if err := json.Unmarshal(rbRec.Body.Bytes(), &rbResp); err != nil {
|
||||||
t.Fatalf("unmarshal rollback response: %v", err)
|
t.Fatalf("unmarshal rollback response: %v", err)
|
||||||
}
|
}
|
||||||
if rbResp.Message == "" || rbResp.CurrentVersion.VersionNo != 2 {
|
if rbResp.Message == "" || rbResp.CurrentVersion.VersionNo != 3 {
|
||||||
t.Fatalf("unexpected rollback response: %+v", rbResp)
|
t.Fatalf("unexpected rollback response: %+v", rbResp)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -146,12 +144,12 @@ func TestProjectArchiveHidesConfigsAndCloneIntoProject(t *testing.T) {
|
|||||||
|
|
||||||
cfg := &config.Config{}
|
cfg := &config.Config{}
|
||||||
setConfigDefaults(cfg)
|
setConfigDefaults(cfg)
|
||||||
router, _, err := setupRouter(cfg, local, connMgr, "tester", nil)
|
router, _, err := setupRouter(cfg, local, connMgr, nil, "tester", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("setup router: %v", err)
|
t.Fatalf("setup router: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
createProjectReq := httptest.NewRequest(http.MethodPost, "/api/projects", bytes.NewReader([]byte(`{"name":"P1","code":"P1"}`)))
|
createProjectReq := httptest.NewRequest(http.MethodPost, "/api/projects", bytes.NewReader([]byte(`{"name":"P1"}`)))
|
||||||
createProjectReq.Header.Set("Content-Type", "application/json")
|
createProjectReq.Header.Set("Content-Type", "application/json")
|
||||||
createProjectRec := httptest.NewRecorder()
|
createProjectRec := httptest.NewRecorder()
|
||||||
router.ServeHTTP(createProjectRec, createProjectReq)
|
router.ServeHTTP(createProjectRec, createProjectReq)
|
||||||
@@ -240,12 +238,12 @@ func TestConfigMoveToProjectEndpoint(t *testing.T) {
|
|||||||
local, connMgr, _ := newAPITestStack(t)
|
local, connMgr, _ := newAPITestStack(t)
|
||||||
cfg := &config.Config{}
|
cfg := &config.Config{}
|
||||||
setConfigDefaults(cfg)
|
setConfigDefaults(cfg)
|
||||||
router, _, err := setupRouter(cfg, local, connMgr, "tester", nil)
|
router, _, err := setupRouter(cfg, local, connMgr, nil, "tester", nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("setup router: %v", err)
|
t.Fatalf("setup router: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
createProjectReq := httptest.NewRequest(http.MethodPost, "/api/projects", bytes.NewReader([]byte(`{"name":"Move Project","code":"MOVE"}`)))
|
createProjectReq := httptest.NewRequest(http.MethodPost, "/api/projects", bytes.NewReader([]byte(`{"name":"Move Project"}`)))
|
||||||
createProjectReq.Header.Set("Content-Type", "application/json")
|
createProjectReq.Header.Set("Content-Type", "application/json")
|
||||||
createProjectRec := httptest.NewRecorder()
|
createProjectRec := httptest.NewRecorder()
|
||||||
router.ServeHTTP(createProjectRec, createProjectReq)
|
router.ServeHTTP(createProjectRec, createProjectReq)
|
||||||
@@ -292,88 +290,6 @@ func TestConfigMoveToProjectEndpoint(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestVendorImportRejectsOversizedUpload(t *testing.T) {
|
|
||||||
moveToRepoRoot(t)
|
|
||||||
|
|
||||||
prevLimit := vendorImportMaxBytes
|
|
||||||
vendorImportMaxBytes = 128
|
|
||||||
defer func() { vendorImportMaxBytes = prevLimit }()
|
|
||||||
|
|
||||||
local, connMgr, _ := newAPITestStack(t)
|
|
||||||
cfg := &config.Config{}
|
|
||||||
setConfigDefaults(cfg)
|
|
||||||
router, _, err := setupRouter(cfg, local, connMgr, "tester", nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("setup router: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
createProjectReq := httptest.NewRequest(http.MethodPost, "/api/projects", bytes.NewReader([]byte(`{"name":"Import Project","code":"IMP"}`)))
|
|
||||||
createProjectReq.Header.Set("Content-Type", "application/json")
|
|
||||||
createProjectRec := httptest.NewRecorder()
|
|
||||||
router.ServeHTTP(createProjectRec, createProjectReq)
|
|
||||||
if createProjectRec.Code != http.StatusCreated {
|
|
||||||
t.Fatalf("create project status=%d body=%s", createProjectRec.Code, createProjectRec.Body.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
var project models.Project
|
|
||||||
if err := json.Unmarshal(createProjectRec.Body.Bytes(), &project); err != nil {
|
|
||||||
t.Fatalf("unmarshal project: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var body bytes.Buffer
|
|
||||||
writer := multipart.NewWriter(&body)
|
|
||||||
part, err := writer.CreateFormFile("file", "huge.xml")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("create form file: %v", err)
|
|
||||||
}
|
|
||||||
payload := "<CFXML>" + strings.Repeat("A", int(vendorImportMaxBytes)+1) + "</CFXML>"
|
|
||||||
if _, err := part.Write([]byte(payload)); err != nil {
|
|
||||||
t.Fatalf("write multipart payload: %v", err)
|
|
||||||
}
|
|
||||||
if err := writer.Close(); err != nil {
|
|
||||||
t.Fatalf("close multipart writer: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
req := httptest.NewRequest(http.MethodPost, "/api/projects/"+project.UUID+"/vendor-import", &body)
|
|
||||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
|
||||||
rec := httptest.NewRecorder()
|
|
||||||
router.ServeHTTP(rec, req)
|
|
||||||
|
|
||||||
if rec.Code != http.StatusBadRequest {
|
|
||||||
t.Fatalf("expected 400 for oversized upload, got %d body=%s", rec.Code, rec.Body.String())
|
|
||||||
}
|
|
||||||
if !strings.Contains(rec.Body.String(), "1 GiB") {
|
|
||||||
t.Fatalf("expected size limit message, got %s", rec.Body.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreateConfigMalformedJSONReturnsGenericError(t *testing.T) {
|
|
||||||
moveToRepoRoot(t)
|
|
||||||
|
|
||||||
local, connMgr, _ := newAPITestStack(t)
|
|
||||||
cfg := &config.Config{}
|
|
||||||
setConfigDefaults(cfg)
|
|
||||||
router, _, err := setupRouter(cfg, local, connMgr, "tester", nil)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("setup router: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
req := httptest.NewRequest(http.MethodPost, "/api/configs", bytes.NewReader([]byte(`{"name":`)))
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
rec := httptest.NewRecorder()
|
|
||||||
router.ServeHTTP(rec, req)
|
|
||||||
|
|
||||||
if rec.Code != http.StatusBadRequest {
|
|
||||||
t.Fatalf("expected 400 for malformed json, got %d body=%s", rec.Code, rec.Body.String())
|
|
||||||
}
|
|
||||||
if strings.Contains(strings.ToLower(rec.Body.String()), "unexpected eof") {
|
|
||||||
t.Fatalf("expected sanitized error body, got %s", rec.Body.String())
|
|
||||||
}
|
|
||||||
if !strings.Contains(rec.Body.String(), "invalid request") {
|
|
||||||
t.Fatalf("expected generic invalid request message, got %s", rec.Body.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newAPITestStack(t *testing.T) (*localdb.LocalDB, *db.ConnectionManager, *services.LocalConfigurationService) {
|
func newAPITestStack(t *testing.T) (*localdb.LocalDB, *db.ConnectionManager, *services.LocalConfigurationService) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
|
|||||||
@@ -1,18 +1,58 @@
|
|||||||
# QuoteForge runtime config
|
# QuoteForge Configuration
|
||||||
# Runtime creates a minimal config automatically on first start.
|
# Copy this file to config.yaml and update values
|
||||||
# This file is only a reference template.
|
|
||||||
|
|
||||||
server:
|
server:
|
||||||
host: "127.0.0.1" # Loopback only; remote HTTP binding is unsupported
|
host: "127.0.0.1" # Use 0.0.0.0 to listen on all interfaces
|
||||||
port: 8080
|
port: 8080
|
||||||
mode: "release" # debug | release
|
mode: "release" # debug | release
|
||||||
read_timeout: "30s"
|
read_timeout: "30s"
|
||||||
write_timeout: "30s"
|
write_timeout: "30s"
|
||||||
|
|
||||||
backup:
|
database:
|
||||||
time: "00:00"
|
host: "localhost"
|
||||||
|
port: 3306
|
||||||
|
name: "RFQ_LOG"
|
||||||
|
user: "quoteforge"
|
||||||
|
password: "CHANGE_ME"
|
||||||
|
max_open_conns: 25
|
||||||
|
max_idle_conns: 5
|
||||||
|
conn_max_lifetime: "5m"
|
||||||
|
|
||||||
|
auth:
|
||||||
|
jwt_secret: "CHANGE_ME_MIN_32_CHARACTERS_LONG"
|
||||||
|
token_expiry: "24h"
|
||||||
|
refresh_expiry: "168h" # 7 days
|
||||||
|
|
||||||
|
pricing:
|
||||||
|
default_method: "weighted_median" # median | average | weighted_median
|
||||||
|
default_period_days: 90
|
||||||
|
freshness_green_days: 30
|
||||||
|
freshness_yellow_days: 60
|
||||||
|
freshness_red_days: 90
|
||||||
|
min_quotes_for_median: 3
|
||||||
|
popularity_decay_days: 180
|
||||||
|
|
||||||
|
export:
|
||||||
|
temp_dir: "/tmp/quoteforge-exports"
|
||||||
|
max_file_age: "1h"
|
||||||
|
company_name: "Your Company Name"
|
||||||
|
|
||||||
|
alerts:
|
||||||
|
enabled: true
|
||||||
|
check_interval: "1h"
|
||||||
|
high_demand_threshold: 5 # КП за 30 дней
|
||||||
|
trending_threshold_percent: 50 # % роста для алерта
|
||||||
|
|
||||||
|
notifications:
|
||||||
|
email_enabled: false
|
||||||
|
smtp_host: "smtp.example.com"
|
||||||
|
smtp_port: 587
|
||||||
|
smtp_user: ""
|
||||||
|
smtp_password: ""
|
||||||
|
from_address: "quoteforge@example.com"
|
||||||
|
|
||||||
logging:
|
logging:
|
||||||
level: "info" # debug | info | warn | error
|
level: "info" # debug | info | warn | error
|
||||||
format: "json" # json | text
|
format: "json" # json | text
|
||||||
output: "stdout" # stdout | stderr | /path/to/file
|
output: "stdout" # stdout | file
|
||||||
|
file_path: "/var/log/quoteforge/app.log"
|
||||||
|
|||||||
15
crontab
Normal file
15
crontab
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# Cron jobs for QuoteForge
|
||||||
|
# Run alerts check every hour
|
||||||
|
0 * * * * /app/quoteforge-cron -job=alerts
|
||||||
|
|
||||||
|
# Run price updates daily at 2 AM
|
||||||
|
0 2 * * * /app/quoteforge-cron -job=update-prices
|
||||||
|
|
||||||
|
# Reset weekly counters every Sunday at 1 AM
|
||||||
|
0 1 * * 0 /app/quoteforge-cron -job=reset-counters
|
||||||
|
|
||||||
|
# Update popularity scores daily at 3 AM
|
||||||
|
0 3 * * * /app/quoteforge-cron -job=update-popularity
|
||||||
|
|
||||||
|
# Log rotation (optional)
|
||||||
|
# 0 0 * * * /usr/bin/logrotate /etc/logrotate.conf
|
||||||
BIN
dist/qfs-darwin-amd64
vendored
BIN
dist/qfs-darwin-amd64
vendored
Binary file not shown.
BIN
dist/qfs-darwin-arm64
vendored
BIN
dist/qfs-darwin-arm64
vendored
Binary file not shown.
BIN
dist/qfs-linux-amd64
vendored
BIN
dist/qfs-linux-amd64
vendored
Binary file not shown.
BIN
dist/qfs-windows-amd64.exe
vendored
BIN
dist/qfs-windows-amd64.exe
vendored
Binary file not shown.
@@ -1,213 +0,0 @@
|
|||||||
# Руководство по составлению каталога лотов СХД
|
|
||||||
|
|
||||||
## Что такое LOT и зачем он нужен
|
|
||||||
|
|
||||||
LOT — это внутренний идентификатор типа компонента в системе QuoteForge.
|
|
||||||
|
|
||||||
Каждый LOT представляет одну рыночную позицию и хранит **средневзвешенную рыночную цену**, рассчитанную по историческим данным от поставщиков. Это позволяет получать актуальную оценку стоимости независимо от конкретного поставщика или прайс-листа.
|
|
||||||
|
|
||||||
Партномера вендора (Part Number, Feature Code) сами по себе не имеют цены в системе — они **переводятся в LOT** через книгу партномеров. Именно через LOT происходит расценка конфигурации.
|
|
||||||
|
|
||||||
**Пример:** Feature Code `B4B9` и Part Number `4C57A14368` — это два разных обозначения одной и той же HIC-карты от Lenovo. Оба маппируются на один LOT `HIC_4pFC32`, у которого есть рыночная цена.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Категории и вкладки конфигуратора
|
|
||||||
|
|
||||||
Категория LOT определяет, в какой вкладке конфигуратора он появится.
|
|
||||||
|
|
||||||
| Код категории | Название | Вкладка | Что сюда относится |
|
|
||||||
|---|---|---|---|
|
|
||||||
| `ENC` | Storage Enclosure | **Base** | Дисковая полка без контроллера |
|
|
||||||
| `DKC` | Disk/Controller Enclosure | **Base** | Контроллерная полка: модель СХД + тип дисков + кол-во слотов + кол-во контроллеров |
|
|
||||||
| `CTL` | Storage Controller | **Base** | Контроллер СХД: объём кэша + встроенные хост-порты |
|
|
||||||
| `HIC` | Host Interface Card | **PCI** | HIC-карты СХД: интерфейсы подключения (FC, iSCSI, SAS) |
|
|
||||||
| `HDD` | HDD | **Storage** | Жёсткие диски (HDD) |
|
|
||||||
| `SSD` | SSD | **Storage** | Твердотельные диски (SSD, NVMe) |
|
|
||||||
| `ACC` | Accessories | **Accessories** | Кабели подключения, кабели питания |
|
|
||||||
| `SW` | Software | **SW** | Программные лицензии |
|
|
||||||
| *(прочее)* | — | **Other** | Гарантийные опции, инсталляция |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Правила именования LOT
|
|
||||||
|
|
||||||
Формат: `КАТЕГОРИЯ_МОДЕЛЬСХД_СПЕЦИФИКА`
|
|
||||||
|
|
||||||
- только латиница, цифры и знак `_`
|
|
||||||
- регистр — ВЕРХНИЙ
|
|
||||||
- без пробелов, дефисов, точек
|
|
||||||
- каждый LOT уникален — два разных компонента не могут иметь одинаковое имя
|
|
||||||
|
|
||||||
### DKC — контроллерная полка
|
|
||||||
|
|
||||||
Специфика: `ТИПДИСКА_СЛОТЫ_NCTRL`
|
|
||||||
|
|
||||||
| Пример | Расшифровка |
|
|
||||||
|---|---|
|
|
||||||
| `DKC_DE4000H_SFF_24_2CTRL` | DE4000H, 24 слота SFF (2.5"), 2 контроллера |
|
|
||||||
| `DKC_DE4000H_LFF_12_2CTRL` | DE4000H, 12 слотов LFF (3.5"), 2 контроллера |
|
|
||||||
| `DKC_DE4000H_SFF_24_1CTRL` | DE4000H, 24 слота SFF, 1 контроллер (симплекс) |
|
|
||||||
|
|
||||||
Обозначения типа диска: `SFF` — 2.5", `LFF` — 3.5", `NVMe` — U.2/U.3.
|
|
||||||
|
|
||||||
### CTL — контроллер
|
|
||||||
|
|
||||||
Специфика: `КЭШГБ_ПОРТЫТИП` (если встроенные порты есть) или `КЭШГБ_BASE` (если без портов, добавляются через HIC)
|
|
||||||
|
|
||||||
| Пример | Расшифровка |
|
|
||||||
|---|---|
|
|
||||||
| `CTL_DE4000H_32GB_BASE` | 32GB кэш, без встроенных хост-портов |
|
|
||||||
| `CTL_DE4000H_8GB_BASE` | 8GB кэш, без встроенных хост-портов |
|
|
||||||
| `CTL_MSA2060_8GB_ISCSI10G_4P` | 8GB кэш, встроенные 4× iSCSI 10GbE |
|
|
||||||
|
|
||||||
### HIC — HIC-карты (интерфейс подключения)
|
|
||||||
|
|
||||||
Специфика: `NpПРОТОКОЛ` — без привязки к модели СХД, по аналогии с серверными `HBA_2pFC16`, `HBA_4pFC32_Gen6`.
|
|
||||||
|
|
||||||
| Пример | Расшифровка |
|
|
||||||
|---|---|
|
|
||||||
| `HIC_4pFC32` | 4 порта FC 32Gb |
|
|
||||||
| `HIC_4pFC16` | 4 порта FC 16G/10GbE |
|
|
||||||
| `HIC_4p25G_iSCSI` | 4 порта 25G iSCSI |
|
|
||||||
| `HIC_4p12G_SAS` | 4 порта SAS 12Gb |
|
|
||||||
| `HIC_2p10G_BaseT` | 2 порта 10G Base-T |
|
|
||||||
|
|
||||||
### HDD / SSD / NVMe — диски
|
|
||||||
|
|
||||||
Диски **не привязываются к модели СХД** — используются существующие LOT из серверного каталога (`HDD_...`, `SSD_...`, `NVME_...`). Новые LOT для дисков СХД не создаются; партномера дисков маппируются на уже существующие серверные LOT.
|
|
||||||
|
|
||||||
### ACC — кабели
|
|
||||||
|
|
||||||
Кабели **не привязываются к модели СХД**. Формат: `ACC_CABLE_{ТИП}_{ДЛИНА}` — универсальные LOT, одинаковые для серверов и СХД.
|
|
||||||
|
|
||||||
| Пример | Расшифровка |
|
|
||||||
|---|---|
|
|
||||||
| `ACC_CABLE_CAT6_10M` | Кабель CAT6 10м |
|
|
||||||
| `ACC_CABLE_FC_OM4_3M` | Кабель FC LC-LC OM4 до 3м |
|
|
||||||
| `ACC_CABLE_PWR_C13C14_15M` | Кабель питания C13–C14 1.5м |
|
|
||||||
|
|
||||||
### SW — программные лицензии
|
|
||||||
|
|
||||||
Специфика: краткое название функции.
|
|
||||||
|
|
||||||
| Пример | Расшифровка |
|
|
||||||
|---|---|
|
|
||||||
| `SW_DE4000H_ASYNC_MIRROR` | Async Mirroring |
|
|
||||||
| `SW_DE4000H_SNAPSHOT_512` | Snapshot 512 |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Таблица лотов: DE4000H (пример заполнения)
|
|
||||||
|
|
||||||
### DKC — контроллерная полка
|
|
||||||
|
|
||||||
| lot_name | vendor | model | description | disk_slots | disk_type | controllers |
|
|
||||||
|---|---|---|---|---|---|---|
|
|
||||||
| `DKC_DE4000H_SFF_24_2CTRL` | Lenovo | DE4000H 2U24 | DE4000H, 24× SFF, 2 контроллера | 24 | SFF | 2 |
|
|
||||||
| `DKC_DE4000H_LFF_12_2CTRL` | Lenovo | DE4000H 2U12 | DE4000H, 12× LFF, 2 контроллера | 12 | LFF | 2 |
|
|
||||||
|
|
||||||
### CTL — контроллер
|
|
||||||
|
|
||||||
| lot_name | vendor | model | description | cache_gb | host_ports |
|
|
||||||
|---|---|---|---|---|---|
|
|
||||||
| `CTL_DE4000H_32GB_BASE` | Lenovo | DE4000 Controller 32GB Gen2 | Контроллер DE4000, 32GB кэш, без встроенных портов | 32 | — |
|
|
||||||
| `CTL_DE4000H_8GB_BASE` | Lenovo | DE4000 Controller 8GB Gen2 | Контроллер DE4000, 8GB кэш, без встроенных портов | 8 | — |
|
|
||||||
|
|
||||||
### HIC — HIC-карты
|
|
||||||
|
|
||||||
| lot_name | vendor | model | description |
|
|
||||||
|---|---|---|---|
|
|
||||||
| `HIC_2p10G_BaseT` | Lenovo | HIC 10GBASE-T 2-Ports | HIC 10GBASE-T, 2 порта |
|
|
||||||
| `HIC_4p25G_iSCSI` | Lenovo | HIC 10/25GbE iSCSI 4-ports | HIC iSCSI 10/25GbE, 4 порта |
|
|
||||||
| `HIC_4p12G_SAS` | Lenovo | HIC 12Gb SAS 4-ports | HIC SAS 12Gb, 4 порта |
|
|
||||||
| `HIC_4pFC32` | Lenovo | HIC 32Gb FC 4-ports | HIC FC 32Gb, 4 порта |
|
|
||||||
| `HIC_4pFC16` | Lenovo | HIC 16G FC/10GbE 4-ports | HIC FC 16G/10GbE, 4 порта |
|
|
||||||
|
|
||||||
### HDD / SSD / NVMe / ACC — диски и кабели
|
|
||||||
|
|
||||||
Для дисков и кабелей новые LOT не создаются. Партномера маппируются на существующие серверные LOT из каталога.
|
|
||||||
|
|
||||||
### SW — программные лицензии
|
|
||||||
|
|
||||||
| lot_name | vendor | model | description |
|
|
||||||
|---|---|---|---|
|
|
||||||
| `SW_DE4000H_ASYNC_MIRROR` | Lenovo | DE4000H Asynchronous Mirroring | Лицензия Async Mirroring |
|
|
||||||
| `SW_DE4000H_SNAPSHOT_512` | Lenovo | DE4000H Snapshot Upgrade 512 | Лицензия Snapshot 512 |
|
|
||||||
| `SW_DE4000H_SYNC_MIRROR` | Lenovo | DE4000 Synchronous Mirroring | Лицензия Sync Mirroring |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Таблица партномеров: DE4000H (пример заполнения)
|
|
||||||
|
|
||||||
Каждый Feature Code и Part Number должен быть привязан к своему LOT.
|
|
||||||
Если у компонента есть оба — добавить две строки.
|
|
||||||
|
|
||||||
| partnumber | lot_name | описание |
|
|
||||||
|---|---|---|
|
|
||||||
| `BEY7` | `ENC_2U24_CHASSIS` | Lenovo ThinkSystem Storage 2U24 Chassis |
|
|
||||||
| `BQA0` | `CTL_DE4000H_32GB_BASE` | DE4000 Controller 32GB Gen2 |
|
|
||||||
| `BQ9Z` | `CTL_DE4000H_8GB_BASE` | DE4000 Controller 8GB Gen2 |
|
|
||||||
| `B4B1` | `HIC_2p10G_BaseT` | HIC 10GBASE-T 2-Ports |
|
|
||||||
| `4C57A14376` | `HIC_2p10G_BaseT` | HIC 10GBASE-T 2-Ports |
|
|
||||||
| `B4BA` | `HIC_4p25G_iSCSI` | HIC 10/25GbE iSCSI 4-ports |
|
|
||||||
| `4C57A14369` | `HIC_4p25G_iSCSI` | HIC 10/25GbE iSCSI 4-ports |
|
|
||||||
| `B4B8` | `HIC_4p12G_SAS` | HIC 12Gb SAS 4-ports |
|
|
||||||
| `4C57A14367` | `HIC_4p12G_SAS` | HIC 12Gb SAS 4-ports |
|
|
||||||
| `B4B9` | `HIC_4pFC32` | HIC 32Gb FC 4-ports |
|
|
||||||
| `4C57A14368` | `HIC_4pFC32` | HIC 32Gb FC 4-ports |
|
|
||||||
| `B4B7` | `HIC_4pFC16` | HIC 16G FC/10GbE 4-ports |
|
|
||||||
| `4C57A14366` | `HIC_4pFC16` | HIC 16G FC/10GbE 4-ports |
|
|
||||||
| `BW12` | `HDD_SAS_02.4TB` | 2.4TB 10K 2.5" HDD 2U24 |
|
|
||||||
| `4XB7A88046` | `HDD_SAS_02.4TB` | 2.4TB 10K 2.5" HDD 2U24 |
|
|
||||||
| `B4C0` | `HDD_SAS_01.8TB` | 1.8TB 10K 2.5" HDD SED FIPS |
|
|
||||||
| `4XB7A14114` | `HDD_SAS_01.8TB` | 1.8TB 10K 2.5" HDD SED FIPS |
|
|
||||||
| `BW13` | `HDD_SAS_02.4TB` | 2.4TB 10K 2.5" HDD FIPS |
|
|
||||||
| `4XB7A88048` | `HDD_SAS_02.4TB` | 2.4TB 10K 2.5" HDD FIPS |
|
|
||||||
| `BKUQ` | `SSD_SAS_0.960T` | 960GB 1DWD 2.5" SSD |
|
|
||||||
| `4XB7A74948` | `SSD_SAS_0.960T` | 960GB 1DWD 2.5" SSD |
|
|
||||||
| `BKUT` | `SSD_SAS_01.92T` | 1.92TB 1DWD 2.5" SSD |
|
|
||||||
| `4XB7A74951` | `SSD_SAS_01.92T` | 1.92TB 1DWD 2.5" SSD |
|
|
||||||
| `BKUK` | `SSD_SAS_03.84T` | 3.84TB 1DWD 2.5" SSD |
|
|
||||||
| `4XB7A74955` | `SSD_SAS_03.84T` | 3.84TB 1DWD 2.5" SSD |
|
|
||||||
| `B4RY` | `SSD_SAS_07.68T` | 7.68TB 1DWD 2.5" SSD |
|
|
||||||
| `4XB7A14176` | `SSD_SAS_07.68T` | 7.68TB 1DWD 2.5" SSD |
|
|
||||||
| `B4CD` | `SSD_SAS_15.36T` | 15.36TB 1DWD 2.5" SSD |
|
|
||||||
| `4XB7A14110` | `SSD_SAS_15.36T` | 15.36TB 1DWD 2.5" SSD |
|
|
||||||
| `BWCJ` | `SSD_SAS_03.84T` | 3.84TB 1DWD 2.5" SSD FIPS |
|
|
||||||
| `4XB7A88469` | `SSD_SAS_03.84T` | 3.84TB 1DWD 2.5" SSD FIPS |
|
|
||||||
| `BW2B` | `SSD_SAS_15.36T` | 15.36TB 1DWD 2.5" SSD SED |
|
|
||||||
| `4XB7A88466` | `SSD_SAS_15.36T` | 15.36TB 1DWD 2.5" SSD SED |
|
|
||||||
| `AVFW` | `ACC_CABLE_CAT6_1M` | CAT6 0.75-1.5m |
|
|
||||||
| `A1MT` | `ACC_CABLE_CAT6_10M` | CAT6 10m |
|
|
||||||
| `90Y3718` | `ACC_CABLE_CAT6_10M` | CAT6 10m |
|
|
||||||
| `A1MW` | `ACC_CABLE_CAT6_25M` | CAT6 25m |
|
|
||||||
| `90Y3727` | `ACC_CABLE_CAT6_25M` | CAT6 25m |
|
|
||||||
| `39Y7937` | `ACC_CABLE_PWR_C13C14_15M` | C13–C14 1.5m |
|
|
||||||
| `39Y7938` | `ACC_CABLE_PWR_C13C20_28M` | C13–C20 2.8m |
|
|
||||||
| `4L67A08371` | `ACC_CABLE_PWR_C13C14_43M` | C13–C14 4.3m |
|
|
||||||
| `C932` | `SW_DE4000H_ASYNC_MIRROR` | DE4000H Asynchronous Mirroring |
|
|
||||||
| `00WE123` | `SW_DE4000H_ASYNC_MIRROR` | DE4000H Asynchronous Mirroring |
|
|
||||||
| `C930` | `SW_DE4000H_SNAPSHOT_512` | DE4000H Snapshot Upgrade 512 |
|
|
||||||
| `C931` | `SW_DE4000H_SYNC_MIRROR` | DE4000 Synchronous Mirroring |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Шаблон для новых моделей СХД
|
|
||||||
|
|
||||||
```
|
|
||||||
DKC_МОДЕЛЬ_ТИПДИСКА_СЛОТЫ_NCTRL — контроллерная полка
|
|
||||||
CTL_МОДЕЛЬ_КЭШГБ_ПОРТЫ — контроллер
|
|
||||||
HIC_МОДЕЛЬ_ПРОТОКОЛ_СКОРОСТЬ_ПОРТЫ — HIC-карта (интерфейс подключения)
|
|
||||||
SW_МОДЕЛЬ_ФУНКЦИЯ — лицензия
|
|
||||||
```
|
|
||||||
|
|
||||||
Диски (HDD/SSD/NVMe) и кабели (ACC) — маппируются на существующие серверные LOT, новые не создаются.
|
|
||||||
|
|
||||||
Пример для HPE MSA 2060:
|
|
||||||
```
|
|
||||||
DKC_MSA2060_SFF_24_2CTRL
|
|
||||||
CTL_MSA2060_8GB_ISCSI10G_4P
|
|
||||||
HIC_MSA2060_FC32G_2P
|
|
||||||
SW_MSA2060_REMOTE_SNAP
|
|
||||||
```
|
|
||||||
5
go.mod
5
go.mod
@@ -5,8 +5,9 @@ go 1.24.0
|
|||||||
require (
|
require (
|
||||||
github.com/gin-gonic/gin v1.9.1
|
github.com/gin-gonic/gin v1.9.1
|
||||||
github.com/glebarez/sqlite v1.11.0
|
github.com/glebarez/sqlite v1.11.0
|
||||||
github.com/go-sql-driver/mysql v1.7.1
|
github.com/golang-jwt/jwt/v5 v5.3.0
|
||||||
github.com/google/uuid v1.6.0
|
github.com/google/uuid v1.6.0
|
||||||
|
golang.org/x/crypto v0.43.0
|
||||||
gopkg.in/yaml.v3 v3.0.1
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
gorm.io/driver/mysql v1.5.2
|
gorm.io/driver/mysql v1.5.2
|
||||||
gorm.io/gorm v1.25.7
|
gorm.io/gorm v1.25.7
|
||||||
@@ -22,6 +23,7 @@ require (
|
|||||||
github.com/go-playground/locales v0.14.1 // indirect
|
github.com/go-playground/locales v0.14.1 // indirect
|
||||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||||
github.com/go-playground/validator/v10 v10.14.0 // indirect
|
github.com/go-playground/validator/v10 v10.14.0 // indirect
|
||||||
|
github.com/go-sql-driver/mysql v1.7.1 // indirect
|
||||||
github.com/goccy/go-json v0.10.2 // indirect
|
github.com/goccy/go-json v0.10.2 // indirect
|
||||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||||
github.com/jinzhu/now v1.1.5 // indirect
|
github.com/jinzhu/now v1.1.5 // indirect
|
||||||
@@ -37,7 +39,6 @@ require (
|
|||||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||||
golang.org/x/arch v0.3.0 // indirect
|
golang.org/x/arch v0.3.0 // indirect
|
||||||
golang.org/x/crypto v0.43.0 // indirect
|
|
||||||
golang.org/x/net v0.46.0 // indirect
|
golang.org/x/net v0.46.0 // indirect
|
||||||
golang.org/x/sys v0.37.0 // indirect
|
golang.org/x/sys v0.37.0 // indirect
|
||||||
golang.org/x/text v0.30.0 // indirect
|
golang.org/x/text v0.30.0 // indirect
|
||||||
|
|||||||
2
go.sum
2
go.sum
@@ -32,6 +32,8 @@ github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrt
|
|||||||
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
|
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
|
||||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||||
|
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
|
||||||
|
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
|||||||
@@ -1,393 +0,0 @@
|
|||||||
package appstate
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/zip"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/glebarez/sqlite"
|
|
||||||
"gorm.io/gorm"
|
|
||||||
"gorm.io/gorm/logger"
|
|
||||||
)
|
|
||||||
|
|
||||||
type backupPeriod struct {
|
|
||||||
name string
|
|
||||||
retention int
|
|
||||||
key func(time.Time) string
|
|
||||||
date func(time.Time) string
|
|
||||||
}
|
|
||||||
|
|
||||||
var backupPeriods = []backupPeriod{
|
|
||||||
{
|
|
||||||
name: "daily",
|
|
||||||
retention: 7,
|
|
||||||
key: func(t time.Time) string {
|
|
||||||
return t.Format("2006-01-02")
|
|
||||||
},
|
|
||||||
date: func(t time.Time) string {
|
|
||||||
return t.Format("2006-01-02")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "weekly",
|
|
||||||
retention: 4,
|
|
||||||
key: func(t time.Time) string {
|
|
||||||
y, w := t.ISOWeek()
|
|
||||||
return fmt.Sprintf("%04d-W%02d", y, w)
|
|
||||||
},
|
|
||||||
date: func(t time.Time) string {
|
|
||||||
return t.Format("2006-01-02")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "monthly",
|
|
||||||
retention: 12,
|
|
||||||
key: func(t time.Time) string {
|
|
||||||
return t.Format("2006-01")
|
|
||||||
},
|
|
||||||
date: func(t time.Time) string {
|
|
||||||
return t.Format("2006-01-02")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "yearly",
|
|
||||||
retention: 10,
|
|
||||||
key: func(t time.Time) string {
|
|
||||||
return t.Format("2006")
|
|
||||||
},
|
|
||||||
date: func(t time.Time) string {
|
|
||||||
return t.Format("2006-01-02")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
envBackupDisable = "QFS_BACKUP_DISABLE"
|
|
||||||
envBackupDir = "QFS_BACKUP_DIR"
|
|
||||||
)
|
|
||||||
|
|
||||||
var backupNow = time.Now
|
|
||||||
|
|
||||||
// EnsureRotatingLocalBackup creates or refreshes daily/weekly/monthly/yearly backups
// for the local database and config. It keeps a limited number per period.
// It returns the paths of archives created during this call. Backups are
// skipped (nil, nil) when QFS_BACKUP_DISABLE is truthy, when dbPath is
// empty, or when the database file does not exist yet.
func EnsureRotatingLocalBackup(dbPath, configPath string) ([]string, error) {
	if isBackupDisabled() {
		return nil, nil
	}
	if dbPath == "" {
		return nil, nil
	}

	// A missing database is not an error: there is nothing to back up yet.
	if _, err := os.Stat(dbPath); err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, fmt.Errorf("stat db: %w", err)
	}

	root := resolveBackupRoot(dbPath)
	// Refuse to write backups inside a git worktree (see validateBackupRoot).
	if err := validateBackupRoot(root); err != nil {
		return nil, err
	}
	now := backupNow()

	created := make([]string, 0)
	for _, period := range backupPeriods {
		newFiles, err := ensurePeriodBackup(root, period, now, dbPath, configPath)
		if err != nil {
			// Report archives that were already created before the failure.
			return created, err
		}
		if len(newFiles) > 0 {
			created = append(created, newFiles...)
		}
	}

	return created, nil
}
|
|
||||||
|
|
||||||
func resolveBackupRoot(dbPath string) string {
|
|
||||||
if fromEnv := strings.TrimSpace(os.Getenv(envBackupDir)); fromEnv != "" {
|
|
||||||
return filepath.Clean(fromEnv)
|
|
||||||
}
|
|
||||||
return filepath.Join(filepath.Dir(dbPath), "backups")
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateBackupRoot(root string) error {
|
|
||||||
absRoot, err := filepath.Abs(root)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("resolve backup root: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if gitRoot, ok := findGitWorktreeRoot(absRoot); ok {
|
|
||||||
return fmt.Errorf("backup root must stay outside git worktree: %s is inside %s", absRoot, gitRoot)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// findGitWorktreeRoot walks upward from path and reports the first
// ancestor directory containing a ".git" entry, if any. A file path is
// first resolved to its containing directory.
func findGitWorktreeRoot(path string) (string, bool) {
	dir := filepath.Clean(path)
	if st, err := os.Stat(dir); err == nil && !st.IsDir() {
		dir = filepath.Dir(dir)
	}

	for {
		if _, err := os.Stat(filepath.Join(dir, ".git")); err == nil {
			return dir, true
		}
		next := filepath.Dir(dir)
		if next == dir {
			// Reached the filesystem root without finding a .git entry.
			return "", false
		}
		dir = next
	}
}
|
|
||||||
|
|
||||||
func isBackupDisabled() bool {
|
|
||||||
val := strings.ToLower(strings.TrimSpace(os.Getenv(envBackupDisable)))
|
|
||||||
return val == "1" || val == "true" || val == "yes"
|
|
||||||
}
|
|
||||||
|
|
||||||
// ensurePeriodBackup creates one archive for the given period unless the
// marker file shows the current period key is already covered. After a
// successful archive it records the marker and prunes old archives down
// to the period's retention limit. Returns the created archive path even
// when marker writing or pruning fails afterwards.
func ensurePeriodBackup(root string, period backupPeriod, now time.Time, dbPath, configPath string) ([]string, error) {
	key := period.key(now)
	periodDir := filepath.Join(root, period.name)
	if err := os.MkdirAll(periodDir, 0755); err != nil {
		return nil, fmt.Errorf("create %s backup dir: %w", period.name, err)
	}

	// Already backed up for this day/week/month/year: nothing to do.
	if hasBackupForKey(periodDir, key) {
		return nil, nil
	}

	// NOTE(review): "backp" looks like a typo for "backup", but the test
	// suite asserts this exact filename, so renaming it would break
	// callers and orphan existing archives; kept as-is.
	archiveName := fmt.Sprintf("qfs-backp-%s.zip", period.date(now))
	archivePath := filepath.Join(periodDir, archiveName)

	if err := createBackupArchive(archivePath, dbPath, configPath); err != nil {
		return nil, fmt.Errorf("create %s backup archive: %w", period.name, err)
	}

	// From here on the archive exists, so report it alongside any error.
	if err := writePeriodMarker(periodDir, key); err != nil {
		return []string{archivePath}, err
	}

	if err := pruneOldBackups(periodDir, period.retention); err != nil {
		return []string{archivePath}, err
	}

	return []string{archivePath}, nil
}
|
|
||||||
|
|
||||||
func hasBackupForKey(periodDir, key string) bool {
|
|
||||||
marker := periodMarker{Key: ""}
|
|
||||||
data, err := os.ReadFile(periodMarkerPath(periodDir))
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal(data, &marker); err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return marker.Key == key
|
|
||||||
}
|
|
||||||
|
|
||||||
// periodMarker is the JSON payload of the per-period marker file; it
// records which period key the most recent backup covers.
type periodMarker struct {
	Key string `json:"key"` // e.g. "2026-02-11", "2026-W07", "2026-02", "2026"
}
|
|
||||||
|
|
||||||
// periodMarkerPath returns the location of the marker file that records
// which period key was last backed up in periodDir.
func periodMarkerPath(periodDir string) string {
	const markerName = ".period.json"
	return filepath.Join(periodDir, markerName)
}
|
|
||||||
|
|
||||||
func writePeriodMarker(periodDir, key string) error {
|
|
||||||
data, err := json.MarshalIndent(periodMarker{Key: key}, "", " ")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return os.WriteFile(periodMarkerPath(periodDir), data, 0644)
|
|
||||||
}
|
|
||||||
|
|
||||||
// pruneOldBackups deletes the oldest ".zip" archives in periodDir so at
// most keep archives remain. Subdirectories and non-zip files are left
// untouched.
func pruneOldBackups(periodDir string, keep int) error {
	entries, err := os.ReadDir(periodDir)
	if err != nil {
		return fmt.Errorf("read backups dir: %w", err)
	}

	var archives []os.DirEntry
	for _, e := range entries {
		if !e.IsDir() && strings.HasSuffix(e.Name(), ".zip") {
			archives = append(archives, e)
		}
	}

	excess := len(archives) - keep
	if excess <= 0 {
		return nil
	}

	// Oldest first by modification time; fall back to lexical name order
	// when file info cannot be read.
	sort.Slice(archives, func(i, j int) bool {
		fi, errI := archives[i].Info()
		fj, errJ := archives[j].Info()
		if errI != nil || errJ != nil {
			return archives[i].Name() < archives[j].Name()
		}
		return fi.ModTime().Before(fj.ModTime())
	})

	for _, victim := range archives[:excess] {
		full := filepath.Join(periodDir, victim.Name())
		if err := os.Remove(full); err != nil {
			return fmt.Errorf("remove old backup %s: %w", full, err)
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
// createBackupArchive writes a zip at destPath containing a consistent
// SQLite snapshot of dbPath (stored under the database's base name) and,
// when present, the config file. The archive file is fsynced before
// returning.
func createBackupArchive(destPath, dbPath, configPath string) error {
	snapshotPath, cleanup, err := createSQLiteSnapshot(dbPath)
	if err != nil {
		return err
	}
	defer cleanup()

	file, err := os.Create(destPath)
	if err != nil {
		return err
	}
	defer file.Close()

	zipWriter := zip.NewWriter(file)
	// Store the snapshot under the original database filename, not the
	// temp-file name.
	if err := addZipFileAs(zipWriter, snapshotPath, filepath.Base(dbPath)); err != nil {
		_ = zipWriter.Close()
		return err
	}

	// The config is best-effort: a missing file is silently skipped.
	if strings.TrimSpace(configPath) != "" {
		_ = addZipOptionalFile(zipWriter, configPath)
	}

	if err := zipWriter.Close(); err != nil {
		return err
	}
	// Flush the archive to disk before reporting success.
	return file.Sync()
}
|
|
||||||
|
|
||||||
// createSQLiteSnapshot produces a consistent copy of the database at a
// temporary path using SQLite's VACUUM INTO. It returns the snapshot
// path and a cleanup func that removes it; the caller must invoke
// cleanup when done with the snapshot.
func createSQLiteSnapshot(dbPath string) (string, func(), error) {
	// Reserve a unique temp path, then delete the empty file: VACUUM INTO
	// requires that the target file does not exist.
	tempFile, err := os.CreateTemp("", "qfs-backup-*.db")
	if err != nil {
		return "", func() {}, err
	}
	tempPath := tempFile.Name()
	if err := tempFile.Close(); err != nil {
		_ = os.Remove(tempPath)
		return "", func() {}, err
	}
	if err := os.Remove(tempPath); err != nil && !os.IsNotExist(err) {
		return "", func() {}, err
	}

	cleanup := func() {
		_ = os.Remove(tempPath)
	}

	db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{
		// Suppress query logging while backing up.
		Logger: logger.Default.LogMode(logger.Silent),
	})
	if err != nil {
		cleanup()
		return "", func() {}, err
	}

	sqlDB, err := db.DB()
	if err != nil {
		cleanup()
		return "", func() {}, err
	}
	defer sqlDB.Close()

	// Wait up to 5s for locks rather than failing immediately.
	if err := db.Exec("PRAGMA busy_timeout = 5000").Error; err != nil {
		cleanup()
		return "", func() {}, fmt.Errorf("configure sqlite busy_timeout: %w", err)
	}

	// Escape single quotes so the path is safe inside a SQL string literal.
	literalPath := strings.ReplaceAll(tempPath, "'", "''")
	if err := vacuumIntoWithRetry(db, literalPath); err != nil {
		cleanup()
		return "", func() {}, err
	}

	return tempPath, cleanup, nil
}
|
|
||||||
|
|
||||||
// vacuumIntoWithRetry runs VACUUM INTO up to three times, sleeping
// 250ms/500ms between attempts when SQLite reports the database as
// locked or busy. literalPath must already be quote-escaped by the
// caller (see createSQLiteSnapshot).
func vacuumIntoWithRetry(db *gorm.DB, literalPath string) error {
	var lastErr error
	for attempt := 0; attempt < 3; attempt++ {
		if err := db.Exec("VACUUM INTO '" + literalPath + "'").Error; err != nil {
			lastErr = err
			// Only busy/locked errors are transient; anything else is fatal.
			if !isSQLiteBusyError(err) {
				return fmt.Errorf("create sqlite snapshot: %w", err)
			}
			time.Sleep(time.Duration(attempt+1) * 250 * time.Millisecond)
			continue
		}
		return nil
	}
	return fmt.Errorf("create sqlite snapshot after retries: %w", lastErr)
}
|
|
||||||
|
|
||||||
// isSQLiteBusyError reports whether err looks like a transient SQLite
// locking error that is worth retrying.
func isSQLiteBusyError(err error) bool {
	if err == nil {
		return false
	}
	msg := strings.ToLower(err.Error())
	for _, needle := range []string{"database is locked", "database is busy"} {
		if strings.Contains(msg, needle) {
			return true
		}
	}
	return false
}
|
|
||||||
|
|
||||||
func addZipOptionalFile(writer *zip.Writer, path string) error {
|
|
||||||
if _, err := os.Stat(path); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return addZipFile(writer, path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// addZipFile adds path to the archive under its base filename.
func addZipFile(writer *zip.Writer, path string) error {
	return addZipFileAs(writer, path, filepath.Base(path))
}
|
|
||||||
|
|
||||||
// addZipFileAs copies the file at path into the archive under
// archiveName, carrying over file metadata and using Deflate
// compression.
func addZipFileAs(writer *zip.Writer, path string, archiveName string) error {
	in, err := os.Open(path)
	if err != nil {
		return err
	}
	defer in.Close()

	info, err := in.Stat()
	if err != nil {
		return err
	}

	// Base the header on the source file's metadata (mod time, mode),
	// then override the entry name and force compression.
	header, err := zip.FileInfoHeader(info)
	if err != nil {
		return err
	}
	header.Name = archiveName
	header.Method = zip.Deflate

	out, err := writer.CreateHeader(header)
	if err != nil {
		return err
	}

	_, err = io.Copy(out, in)
	return err
}
|
|
||||||
@@ -1,157 +0,0 @@
|
|||||||
package appstate
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/zip"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/glebarez/sqlite"
|
|
||||||
"gorm.io/gorm"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestEnsureRotatingLocalBackupCreatesAndRotates verifies that a fresh
// run creates a daily archive containing the db and config, and that a
// later day produces a second daily archive.
func TestEnsureRotatingLocalBackupCreatesAndRotates(t *testing.T) {
	temp := t.TempDir()
	dbPath := filepath.Join(temp, "qfs.db")
	cfgPath := filepath.Join(temp, "config.yaml")

	if err := writeTestSQLiteDB(dbPath); err != nil {
		t.Fatalf("write sqlite db: %v", err)
	}
	if err := os.WriteFile(cfgPath, []byte("cfg"), 0644); err != nil {
		t.Fatalf("write config: %v", err)
	}

	// Pin the clock so archive names are deterministic.
	prevNow := backupNow
	defer func() { backupNow = prevNow }()
	backupNow = func() time.Time { return time.Date(2026, 2, 11, 10, 0, 0, 0, time.UTC) }

	created, err := EnsureRotatingLocalBackup(dbPath, cfgPath)
	if err != nil {
		t.Fatalf("backup: %v", err)
	}
	if len(created) == 0 {
		t.Fatalf("expected backup to be created")
	}

	dailyArchive := filepath.Join(temp, "backups", "daily", "qfs-backp-2026-02-11.zip")
	if _, err := os.Stat(dailyArchive); err != nil {
		t.Fatalf("daily archive missing: %v", err)
	}
	assertZipContains(t, dailyArchive, "qfs.db", "config.yaml")

	// Advance the clock one day: a new daily archive must appear.
	backupNow = func() time.Time { return time.Date(2026, 2, 12, 10, 0, 0, 0, time.UTC) }
	created, err = EnsureRotatingLocalBackup(dbPath, cfgPath)
	if err != nil {
		t.Fatalf("backup rotate: %v", err)
	}
	if len(created) == 0 {
		t.Fatalf("expected backup to be created for new day")
	}

	dailyArchive = filepath.Join(temp, "backups", "daily", "qfs-backp-2026-02-12.zip")
	if _, err := os.Stat(dailyArchive); err != nil {
		t.Fatalf("daily archive missing after rotate: %v", err)
	}
}
|
|
||||||
|
|
||||||
// TestEnsureRotatingLocalBackupEnvControls verifies that QFS_BACKUP_DIR
// redirects the backup root and that QFS_BACKUP_DISABLE=1 makes the run
// a no-op while leaving earlier backups in place.
func TestEnsureRotatingLocalBackupEnvControls(t *testing.T) {
	temp := t.TempDir()
	dbPath := filepath.Join(temp, "qfs.db")
	cfgPath := filepath.Join(temp, "config.yaml")

	if err := writeTestSQLiteDB(dbPath); err != nil {
		t.Fatalf("write sqlite db: %v", err)
	}
	if err := os.WriteFile(cfgPath, []byte("cfg"), 0644); err != nil {
		t.Fatalf("write config: %v", err)
	}

	backupRoot := filepath.Join(temp, "custom_backups")
	t.Setenv(envBackupDir, backupRoot)

	if _, err := EnsureRotatingLocalBackup(dbPath, cfgPath); err != nil {
		t.Fatalf("backup with env: %v", err)
	}
	if _, err := os.Stat(filepath.Join(backupRoot, "daily", ".period.json")); err != nil {
		t.Fatalf("expected backup in custom dir: %v", err)
	}

	// Disabling must not touch previously created backups.
	t.Setenv(envBackupDisable, "1")
	if _, err := EnsureRotatingLocalBackup(dbPath, cfgPath); err != nil {
		t.Fatalf("backup disabled: %v", err)
	}
	if _, err := os.Stat(filepath.Join(backupRoot, "daily", ".period.json")); err != nil {
		t.Fatalf("backup should remain from previous run: %v", err)
	}
}
|
|
||||||
|
|
||||||
// TestEnsureRotatingLocalBackupRejectsGitWorktree verifies that a backup
// root resolved inside a directory tree containing ".git" is refused.
func TestEnsureRotatingLocalBackupRejectsGitWorktree(t *testing.T) {
	temp := t.TempDir()
	repoRoot := filepath.Join(temp, "repo")
	// A bare ".git" directory is enough to mark a worktree.
	if err := os.MkdirAll(filepath.Join(repoRoot, ".git"), 0755); err != nil {
		t.Fatalf("mkdir git dir: %v", err)
	}

	dbPath := filepath.Join(repoRoot, "data", "qfs.db")
	cfgPath := filepath.Join(repoRoot, "data", "config.yaml")
	if err := os.MkdirAll(filepath.Dir(dbPath), 0755); err != nil {
		t.Fatalf("mkdir data dir: %v", err)
	}
	if err := writeTestSQLiteDB(dbPath); err != nil {
		t.Fatalf("write sqlite db: %v", err)
	}
	if err := os.WriteFile(cfgPath, []byte("cfg"), 0644); err != nil {
		t.Fatalf("write cfg: %v", err)
	}

	_, err := EnsureRotatingLocalBackup(dbPath, cfgPath)
	if err == nil {
		t.Fatal("expected git worktree backup root to be rejected")
	}
	if !strings.Contains(err.Error(), "outside git worktree") {
		t.Fatalf("unexpected error: %v", err)
	}
}
|
|
||||||
|
|
||||||
// writeTestSQLiteDB creates a minimal SQLite database at path with one
// table and one row, for use as a backup fixture.
func writeTestSQLiteDB(path string) error {
	db, err := gorm.Open(sqlite.Open(path), &gorm.Config{})
	if err != nil {
		return err
	}
	sqlDB, err := db.DB()
	if err != nil {
		return err
	}
	defer sqlDB.Close()

	return db.Exec(`
	CREATE TABLE sample_items (
	  id INTEGER PRIMARY KEY AUTOINCREMENT,
	  name TEXT NOT NULL
	);
	INSERT INTO sample_items(name) VALUES ('backup');
	`).Error
}
|
|
||||||
|
|
||||||
// assertZipContains fails the test unless the archive at archivePath
// contains every entry named in expected.
func assertZipContains(t *testing.T, archivePath string, expected ...string) {
	t.Helper()

	reader, err := zip.OpenReader(archivePath)
	if err != nil {
		t.Fatalf("open archive: %v", err)
	}
	defer reader.Close()

	// Collect entry names once, then check membership.
	found := make(map[string]bool, len(reader.File))
	for _, file := range reader.File {
		found[file.Name] = true
	}
	for _, name := range expected {
		if !found[name] {
			t.Fatalf("archive %s missing %s", archivePath, name)
		}
	}
}
|
|
||||||
@@ -6,7 +6,6 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -56,25 +55,6 @@ func ResolveConfigPath(explicitPath string) (string, error) {
|
|||||||
return filepath.Join(dir, defaultCfg), nil
|
return filepath.Join(dir, defaultCfg), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ResolveConfigPathNearDB returns config path using priority:
// explicit CLI path > QFS_CONFIG_PATH > directory of resolved local DB path.
// Falls back to ResolveConfigPath when dbPath is empty.
func ResolveConfigPathNearDB(explicitPath, dbPath string) (string, error) {
	// Explicit CLI flag wins unconditionally.
	if explicitPath != "" {
		return filepath.Clean(explicitPath), nil
	}

	if fromEnv := os.Getenv(envCfgPath); fromEnv != "" {
		return filepath.Clean(fromEnv), nil
	}

	// Co-locate the config next to the database file when one is known.
	if strings.TrimSpace(dbPath) != "" {
		return filepath.Join(filepath.Dir(filepath.Clean(dbPath)), defaultCfg), nil
	}

	return ResolveConfigPath("")
}
|
|
||||||
|
|
||||||
// MigrateLegacyDB copies an existing legacy DB (and optional SQLite sidecars)
|
// MigrateLegacyDB copies an existing legacy DB (and optional SQLite sidecars)
|
||||||
// to targetPath if targetPath does not already exist.
|
// to targetPath if targetPath does not already exist.
|
||||||
// Returns source path if migration happened.
|
// Returns source path if migration happened.
|
||||||
|
|||||||
@@ -1,124 +0,0 @@
|
|||||||
package article
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ErrMissingCategoryForLot is returned when a lot has no category in local_pricelist_items.lot_category.
var ErrMissingCategoryForLot = errors.New("missing_category_for_lot")

// MissingCategoryForLotError wraps ErrMissingCategoryForLot with the
// offending lot name for diagnostics.
type MissingCategoryForLotError struct {
	LotName string // lot that lacks a category; may be empty
}
|
|
||||||
|
|
||||||
func (e *MissingCategoryForLotError) Error() string {
|
|
||||||
if e == nil || strings.TrimSpace(e.LotName) == "" {
|
|
||||||
return ErrMissingCategoryForLot.Error()
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s: %s", ErrMissingCategoryForLot.Error(), e.LotName)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unwrap lets errors.Is(err, ErrMissingCategoryForLot) succeed for
// wrapped instances.
func (e *MissingCategoryForLotError) Unwrap() error {
	return ErrMissingCategoryForLot
}
|
|
||||||
|
|
||||||
// Group is a coarse hardware category used to assemble article segments.
type Group string

// The article groups recognized by the builder.
const (
	GroupCPU  Group = "CPU"
	GroupMEM  Group = "MEM"
	GroupGPU  Group = "GPU"
	GroupDISK Group = "DISK"
	GroupNET  Group = "NET"
	GroupPSU  Group = "PSU"
)
|
|
||||||
|
|
||||||
// GroupForLotCategory maps pricelist lot_category codes into article groups.
|
|
||||||
// Unknown/unrelated categories return ok=false.
|
|
||||||
func GroupForLotCategory(cat string) (group Group, ok bool) {
|
|
||||||
c := strings.ToUpper(strings.TrimSpace(cat))
|
|
||||||
switch c {
|
|
||||||
case "CPU":
|
|
||||||
return GroupCPU, true
|
|
||||||
case "MEM":
|
|
||||||
return GroupMEM, true
|
|
||||||
case "GPU":
|
|
||||||
return GroupGPU, true
|
|
||||||
case "M2", "SSD", "HDD", "EDSFF", "HHHL":
|
|
||||||
return GroupDISK, true
|
|
||||||
case "NIC", "HCA", "DPU":
|
|
||||||
return GroupNET, true
|
|
||||||
case "HBA":
|
|
||||||
return GroupNET, true
|
|
||||||
case "PSU", "PS":
|
|
||||||
return GroupPSU, true
|
|
||||||
default:
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResolveLotCategoriesStrict resolves categories for lotNames using local_pricelist_items.lot_category
// for a given server pricelist id. If any lot is missing or has empty category, returns an error.
// Lots without a pricelist category are first looked up in the local
// components table; only lots still unresolved after that fallback
// produce a *MissingCategoryForLotError.
func ResolveLotCategoriesStrict(local *localdb.LocalDB, serverPricelistID uint, lotNames []string) (map[string]string, error) {
	if local == nil {
		return nil, fmt.Errorf("local db is nil")
	}
	cats, err := local.GetLocalLotCategoriesByServerPricelistID(serverPricelistID, lotNames)
	if err != nil {
		return nil, err
	}
	// First pass: collect lots whose pricelist category is empty and
	// normalize the rest to trimmed values.
	missing := make([]string, 0)
	for _, lot := range lotNames {
		cat := strings.TrimSpace(cats[lot])
		if cat == "" {
			missing = append(missing, lot)
			continue
		}
		cats[lot] = cat
	}
	if len(missing) > 0 {
		// NOTE(review): writing into cats assumes the helper returns a
		// non-nil map on success — confirm against
		// GetLocalLotCategoriesByServerPricelistID, as a nil map would panic.
		fallback, err := local.GetLocalComponentCategoriesByLotNames(missing)
		if err != nil {
			return nil, err
		}
		for _, lot := range missing {
			if cat := strings.TrimSpace(fallback[lot]); cat != "" {
				cats[lot] = cat
			}
		}
		// Any lot still uncategorized is a hard error.
		for _, lot := range missing {
			if strings.TrimSpace(cats[lot]) == "" {
				return nil, &MissingCategoryForLotError{LotName: lot}
			}
		}
	}
	return cats, nil
}
|
|
||||||
|
|
||||||
// NormalizeServerModel produces a stable article segment for the server
// model: trimmed, uppercased, keeping only A-Z, 0-9 and '.'.
func NormalizeServerModel(model string) string {
	var out strings.Builder
	for _, r := range strings.ToUpper(strings.TrimSpace(model)) {
		switch {
		case r >= 'A' && r <= 'Z',
			r >= '0' && r <= '9',
			r == '.':
			out.WriteRune(r)
		}
	}
	return out.String()
}
|
|
||||||
@@ -1,98 +0,0 @@
|
|||||||
package article
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestResolveLotCategoriesStrict_MissingCategoryReturnsError verifies
// that an item with an empty lot_category (and no fallback component)
// yields an error wrapping ErrMissingCategoryForLot.
func TestResolveLotCategoriesStrict_MissingCategoryReturnsError(t *testing.T) {
	local, err := localdb.New(filepath.Join(t.TempDir(), "local.db"))
	if err != nil {
		t.Fatalf("init local db: %v", err)
	}
	t.Cleanup(func() { _ = local.Close() })

	if err := local.SaveLocalPricelist(&localdb.LocalPricelist{
		ServerID:  1,
		Source:    "estimate",
		Version:   "S-2026-02-11-001",
		Name:      "test",
		CreatedAt: time.Now(),
		SyncedAt:  time.Now(),
	}); err != nil {
		t.Fatalf("save local pricelist: %v", err)
	}
	localPL, err := local.GetLocalPricelistByServerID(1)
	if err != nil {
		t.Fatalf("get local pricelist: %v", err)
	}
	// Item deliberately has an empty LotCategory.
	if err := local.SaveLocalPricelistItems([]localdb.LocalPricelistItem{
		{PricelistID: localPL.ID, LotName: "CPU_A", LotCategory: "", Price: 10},
	}); err != nil {
		t.Fatalf("save local items: %v", err)
	}

	_, err = ResolveLotCategoriesStrict(local, 1, []string{"CPU_A"})
	if err == nil {
		t.Fatalf("expected error")
	}
	if !errors.Is(err, ErrMissingCategoryForLot) {
		t.Fatalf("expected ErrMissingCategoryForLot, got %v", err)
	}
}
|
|
||||||
|
|
||||||
// TestResolveLotCategoriesStrict_FallbackToLocalComponents verifies that
// an empty pricelist category is filled from the local components table.
func TestResolveLotCategoriesStrict_FallbackToLocalComponents(t *testing.T) {
	local, err := localdb.New(filepath.Join(t.TempDir(), "local.db"))
	if err != nil {
		t.Fatalf("init local db: %v", err)
	}
	t.Cleanup(func() { _ = local.Close() })

	if err := local.SaveLocalPricelist(&localdb.LocalPricelist{
		ServerID:  2,
		Source:    "estimate",
		Version:   "S-2026-02-11-002",
		Name:      "test",
		CreatedAt: time.Now(),
		SyncedAt:  time.Now(),
	}); err != nil {
		t.Fatalf("save local pricelist: %v", err)
	}
	localPL, err := local.GetLocalPricelistByServerID(2)
	if err != nil {
		t.Fatalf("get local pricelist: %v", err)
	}
	// Pricelist item lacks a category...
	if err := local.SaveLocalPricelistItems([]localdb.LocalPricelistItem{
		{PricelistID: localPL.ID, LotName: "CPU_B", LotCategory: "", Price: 10},
	}); err != nil {
		t.Fatalf("save local items: %v", err)
	}
	// ...but a matching local component supplies "CPU".
	if err := local.DB().Create(&localdb.LocalComponent{
		LotName:        "CPU_B",
		Category:       "CPU",
		LotDescription: "cpu",
	}).Error; err != nil {
		t.Fatalf("save local components: %v", err)
	}

	cats, err := ResolveLotCategoriesStrict(local, 2, []string{"CPU_B"})
	if err != nil {
		t.Fatalf("expected fallback, got error: %v", err)
	}
	if cats["CPU_B"] != "CPU" {
		t.Fatalf("expected CPU, got %q", cats["CPU_B"])
	}
}
|
|
||||||
|
|
||||||
// TestGroupForLotCategory spot-checks case-insensitive matching and the
// exclusion of unrelated categories such as SFP.
func TestGroupForLotCategory(t *testing.T) {
	if g, ok := GroupForLotCategory("cpu"); !ok || g != GroupCPU {
		t.Fatalf("expected cpu -> GroupCPU")
	}
	if g, ok := GroupForLotCategory("SFP"); ok || g != "" {
		t.Fatalf("expected SFP to be excluded")
	}
}
|
|
||||||
@@ -1,632 +0,0 @@
|
|||||||
package article
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BuildOptions carries the inputs needed to assemble an article string.
type BuildOptions struct {
	ServerModel     string // raw server model; normalized before use
	SupportCode     string // optional support code; validated when non-empty
	ServerPricelist *uint  // server pricelist id; required and must be non-zero
}

// BuildResult is the assembled article plus any non-fatal warnings.
type BuildResult struct {
	Article  string
	Warnings []string
}
|
|
||||||
|
|
||||||
// Precompiled patterns (all case-insensitive) for parsing capacities,
// port counts and wattage out of lot names.
var (
	reMemGiB    = regexp.MustCompile(`(?i)(\d+)\s*(GB|G)`)
	reMemTiB    = regexp.MustCompile(`(?i)(\d+)\s*(TB|T)`)
	reCapacityT = regexp.MustCompile(`(?i)(\d+(?:[.,]\d+)?)T`)
	reCapacityG = regexp.MustCompile(`(?i)(\d+(?:[.,]\d+)?)G`)
	rePortSpeed = regexp.MustCompile(`(?i)(\d+)p(\d+)(GbE|G)`)
	rePortFC    = regexp.MustCompile(`(?i)(\d+)pFC(\d+)`)
	reWatts     = regexp.MustCompile(`(?i)(\d{3,5})\s*W`)
)
|
|
||||||
|
|
||||||
// Build assembles the article string for a configuration: normalized
// server model, then CPU/MEM/GPU/DISK/NET/PSU segments, then an optional
// support code, joined with '-'. Articles longer than 80 runes are
// compressed once; if still too long, article_overflow is returned.
// Non-fatal issues are surfaced via BuildResult.Warnings.
func Build(local *localdb.LocalDB, items []models.ConfigItem, opts BuildOptions) (BuildResult, error) {
	segments := make([]string, 0, 8)
	warnings := make([]string, 0)

	model := NormalizeServerModel(opts.ServerModel)
	if model == "" {
		return BuildResult{}, fmt.Errorf("server_model required")
	}
	segments = append(segments, model)

	lotNames := make([]string, 0, len(items))
	for _, it := range items {
		lotNames = append(lotNames, it.LotName)
	}

	if opts.ServerPricelist == nil || *opts.ServerPricelist == 0 {
		return BuildResult{}, fmt.Errorf("pricelist_id required for article")
	}

	// Strict: any lot without a resolvable category aborts the build.
	cats, err := ResolveLotCategoriesStrict(local, *opts.ServerPricelist, lotNames)
	if err != nil {
		return BuildResult{}, err
	}

	// Each segment builder returns "" when its group has no items.
	cpuSeg := buildCPUSegment(items, cats)
	if cpuSeg != "" {
		segments = append(segments, cpuSeg)
	}
	memSeg, memWarn := buildMemSegment(items, cats)
	if memWarn != "" {
		warnings = append(warnings, memWarn)
	}
	if memSeg != "" {
		segments = append(segments, memSeg)
	}
	gpuSeg := buildGPUSegment(items, cats)
	if gpuSeg != "" {
		segments = append(segments, gpuSeg)
	}
	diskSeg, diskWarn := buildDiskSegment(items, cats)
	if diskWarn != "" {
		warnings = append(warnings, diskWarn)
	}
	if diskSeg != "" {
		segments = append(segments, diskSeg)
	}
	netSeg, netWarn := buildNetSegment(items, cats)
	if netWarn != "" {
		warnings = append(warnings, netWarn)
	}
	if netSeg != "" {
		segments = append(segments, netSeg)
	}
	psuSeg, psuWarn := buildPSUSegment(items, cats)
	if psuWarn != "" {
		warnings = append(warnings, psuWarn)
	}
	if psuSeg != "" {
		segments = append(segments, psuSeg)
	}

	if strings.TrimSpace(opts.SupportCode) != "" {
		code := strings.TrimSpace(opts.SupportCode)
		if !isSupportCodeValid(code) {
			return BuildResult{}, fmt.Errorf("invalid_support_code")
		}
		segments = append(segments, code)
	}

	// Enforce the 80-rune limit, compressing once before giving up.
	article := strings.Join(segments, "-")
	if len([]rune(article)) > 80 {
		article = compressArticle(segments)
		warnings = append(warnings, "compressed")
	}
	if len([]rune(article)) > 80 {
		return BuildResult{}, fmt.Errorf("article_overflow")
	}

	return BuildResult{Article: article, Warnings: warnings}, nil
}
|
|
||||||
|
|
||||||
// isSupportCodeValid reports whether code has the "<digits>y<tier>"
// support-code shape, where tier is exactly one of W, B, S or P and the
// code contains exactly one 'y'.
func isSupportCodeValid(code string) bool {
	if len(code) < 3 {
		return false
	}
	idx := strings.IndexByte(code, 'y')
	if idx <= 0 || idx == len(code)-1 {
		// No 'y', nothing before it, or nothing after it.
		return false
	}
	years, tier := code[:idx], code[idx+1:]
	for _, digit := range years {
		if digit < '0' || digit > '9' {
			return false
		}
	}
	// A tier containing a second 'y' can never equal one of these, so
	// the "exactly one y" rule of the original Split check is preserved.
	return tier == "W" || tier == "B" || tier == "S" || tier == "P"
}
|
|
||||||
|
|
||||||
func buildCPUSegment(items []models.ConfigItem, cats map[string]string) string {
|
|
||||||
type agg struct {
|
|
||||||
qty int
|
|
||||||
}
|
|
||||||
models := map[string]*agg{}
|
|
||||||
for _, it := range items {
|
|
||||||
group, ok := GroupForLotCategory(cats[it.LotName])
|
|
||||||
if !ok || group != GroupCPU {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
model := parseCPUModel(it.LotName)
|
|
||||||
if model == "" {
|
|
||||||
model = "UNK"
|
|
||||||
}
|
|
||||||
if _, ok := models[model]; !ok {
|
|
||||||
models[model] = &agg{}
|
|
||||||
}
|
|
||||||
models[model].qty += it.Quantity
|
|
||||||
}
|
|
||||||
if len(models) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
parts := make([]string, 0, len(models))
|
|
||||||
for model, a := range models {
|
|
||||||
parts = append(parts, fmt.Sprintf("%dx%s", a.qty, model))
|
|
||||||
}
|
|
||||||
sort.Strings(parts)
|
|
||||||
return strings.Join(parts, "+")
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildMemSegment(items []models.ConfigItem, cats map[string]string) (string, string) {
|
|
||||||
totalGiB := 0
|
|
||||||
for _, it := range items {
|
|
||||||
group, ok := GroupForLotCategory(cats[it.LotName])
|
|
||||||
if !ok || group != GroupMEM {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
per := parseMemGiB(it.LotName)
|
|
||||||
if per <= 0 {
|
|
||||||
return "", "mem_unknown"
|
|
||||||
}
|
|
||||||
totalGiB += per * it.Quantity
|
|
||||||
}
|
|
||||||
if totalGiB == 0 {
|
|
||||||
return "", ""
|
|
||||||
}
|
|
||||||
if totalGiB%1024 == 0 {
|
|
||||||
return fmt.Sprintf("%dT", totalGiB/1024), ""
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%dG", totalGiB), ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildGPUSegment(items []models.ConfigItem, cats map[string]string) string {
|
|
||||||
models := map[string]int{}
|
|
||||||
for _, it := range items {
|
|
||||||
group, ok := GroupForLotCategory(cats[it.LotName])
|
|
||||||
if !ok || group != GroupGPU {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(strings.ToUpper(it.LotName), "MB_") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
model := parseGPUModel(it.LotName)
|
|
||||||
if model == "" {
|
|
||||||
model = "UNK"
|
|
||||||
}
|
|
||||||
models[model] += it.Quantity
|
|
||||||
}
|
|
||||||
if len(models) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
parts := make([]string, 0, len(models))
|
|
||||||
for model, qty := range models {
|
|
||||||
parts = append(parts, fmt.Sprintf("%dx%s", qty, model))
|
|
||||||
}
|
|
||||||
sort.Strings(parts)
|
|
||||||
return strings.Join(parts, "+")
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildDiskSegment(items []models.ConfigItem, cats map[string]string) (string, string) {
|
|
||||||
type key struct {
|
|
||||||
t string
|
|
||||||
c string
|
|
||||||
}
|
|
||||||
groupQty := map[key]int{}
|
|
||||||
warn := ""
|
|
||||||
for _, it := range items {
|
|
||||||
group, ok := GroupForLotCategory(cats[it.LotName])
|
|
||||||
if !ok || group != GroupDISK {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
capToken := parseCapacity(it.LotName)
|
|
||||||
if capToken == "" {
|
|
||||||
warn = "disk_unknown"
|
|
||||||
}
|
|
||||||
typeCode := diskTypeCode(cats[it.LotName], it.LotName)
|
|
||||||
k := key{t: typeCode, c: capToken}
|
|
||||||
groupQty[k] += it.Quantity
|
|
||||||
}
|
|
||||||
if len(groupQty) == 0 {
|
|
||||||
return "", ""
|
|
||||||
}
|
|
||||||
parts := make([]string, 0, len(groupQty))
|
|
||||||
for k, qty := range groupQty {
|
|
||||||
if k.c == "" {
|
|
||||||
parts = append(parts, fmt.Sprintf("%dx%s", qty, k.t))
|
|
||||||
} else {
|
|
||||||
parts = append(parts, fmt.Sprintf("%dx%s%s", qty, k.c, k.t))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sort.Strings(parts)
|
|
||||||
return strings.Join(parts, "+"), warn
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildNetSegment(items []models.ConfigItem, cats map[string]string) (string, string) {
|
|
||||||
groupQty := map[string]int{}
|
|
||||||
warn := ""
|
|
||||||
for _, it := range items {
|
|
||||||
group, ok := GroupForLotCategory(cats[it.LotName])
|
|
||||||
if !ok || group != GroupNET {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
profile := parsePortSpeed(it.LotName)
|
|
||||||
if profile == "" {
|
|
||||||
profile = "UNKNET"
|
|
||||||
warn = "net_unknown"
|
|
||||||
}
|
|
||||||
groupQty[profile] += it.Quantity
|
|
||||||
}
|
|
||||||
if len(groupQty) == 0 {
|
|
||||||
return "", ""
|
|
||||||
}
|
|
||||||
parts := make([]string, 0, len(groupQty))
|
|
||||||
for profile, qty := range groupQty {
|
|
||||||
parts = append(parts, fmt.Sprintf("%dx%s", qty, profile))
|
|
||||||
}
|
|
||||||
sort.Strings(parts)
|
|
||||||
return strings.Join(parts, "+"), warn
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildPSUSegment(items []models.ConfigItem, cats map[string]string) (string, string) {
|
|
||||||
groupQty := map[string]int{}
|
|
||||||
warn := ""
|
|
||||||
for _, it := range items {
|
|
||||||
group, ok := GroupForLotCategory(cats[it.LotName])
|
|
||||||
if !ok || group != GroupPSU {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
rating := parseWatts(it.LotName)
|
|
||||||
if rating == "" {
|
|
||||||
rating = "UNKPSU"
|
|
||||||
warn = "psu_unknown"
|
|
||||||
}
|
|
||||||
groupQty[rating] += it.Quantity
|
|
||||||
}
|
|
||||||
if len(groupQty) == 0 {
|
|
||||||
return "", ""
|
|
||||||
}
|
|
||||||
parts := make([]string, 0, len(groupQty))
|
|
||||||
for rating, qty := range groupQty {
|
|
||||||
parts = append(parts, fmt.Sprintf("%dx%s", qty, rating))
|
|
||||||
}
|
|
||||||
sort.Strings(parts)
|
|
||||||
return strings.Join(parts, "+"), warn
|
|
||||||
}
|
|
||||||
|
|
||||||
// normalizeModelToken derives a fallback model token from a lot name:
// the prefix up to the first underscore is dropped (when something
// follows it), and the last underscore-separated piece of what remains
// is returned upper-cased and whitespace-trimmed.
func normalizeModelToken(lotName string) string {
	if _, rest, ok := strings.Cut(lotName, "_"); ok && rest != "" {
		lotName = rest
	}
	segs := strings.Split(lotName, "_")
	return strings.ToUpper(strings.TrimSpace(segs[len(segs)-1]))
}
|
|
||||||
|
|
||||||
func parseCPUModel(lotName string) string {
|
|
||||||
parts := strings.Split(lotName, "_")
|
|
||||||
if len(parts) >= 2 {
|
|
||||||
last := strings.ToUpper(strings.TrimSpace(parts[len(parts)-1]))
|
|
||||||
if last != "" {
|
|
||||||
return last
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return normalizeModelToken(lotName)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseGPUModel extracts a compact GPU model token from a lot name
// (e.g. "GPU_NV_A100_80GB" -> "A100_80GB"): vendor/form-factor words
// are dropped, an architecture word or a trailing number is appended to
// the model, and a "...GB" token becomes a memory suffix. Anything
// unparseable falls back to normalizeModelToken.
func parseGPUModel(lotName string) string {
	upper := strings.ToUpper(lotName)
	// Drop everything up to and including the first "GPU_" marker.
	if idx := strings.Index(upper, "GPU_"); idx >= 0 {
		upper = upper[idx+4:]
	}
	parts := strings.Split(upper, "_")
	model := ""
	numSuffix := ""
	mem := ""
	for i, p := range parts {
		if p == "" {
			continue
		}
		switch p {
		// Vendor and form-factor tokens carry no model information.
		case "NV", "NVIDIA", "INTEL", "AMD", "RADEON", "PCIE", "PCI", "SXM", "SXMX", "SFF", "LOVELACE":
			continue
		// Architecture words are abbreviated and glued onto the model,
		// but only after a model token has already been captured.
		case "ADA", "AMPERE", "HOPPER", "BLACKWELL":
			if model != "" {
				archAbbr := map[string]string{
					"ADA": "ADA", "AMPERE": "AMP", "HOPPER": "HOP", "BLACKWELL": "BWL",
				}
				numSuffix += archAbbr[p]
			}
			continue
		default:
			// A token containing "GB" is treated as the memory size.
			if strings.Contains(p, "GB") {
				mem = p
				continue
			}
			// NOTE(review): the i > 0 guard means a candidate model at
			// index 0 (i.e. a lot name with no vendor token right after
			// "GPU_") is never captured, forcing the fallback below —
			// confirm this is intentional.
			if model == "" && i > 0 {
				model = p
			} else if model != "" && numSuffix == "" && isNumeric(p) {
				numSuffix = p
			}
		}
	}
	full := model
	if numSuffix != "" {
		full = model + numSuffix
	}
	if full != "" && mem != "" {
		return full + "_" + mem
	}
	if full != "" {
		return full
	}
	return normalizeModelToken(lotName)
}
|
|
||||||
|
|
||||||
// isNumeric reports whether s is non-empty and consists solely of ASCII
// digits.
func isNumeric(s string) bool {
	if s == "" {
		return false
	}
	for _, r := range s {
		if !('0' <= r && r <= '9') {
			return false
		}
	}
	return true
}
|
|
||||||
|
|
||||||
// parseMemGiB returns the per-module memory capacity in GiB parsed from
// a lot name via the package-level reMemTiB / reMemGiB patterns, or 0
// when neither matches. TiB matches are converted to GiB (* 1024).
// Both regexes are expected to expose the number in capture group 1
// (len(m) == 3 implies two capture groups — TODO confirm the second is
// a unit suffix).
func parseMemGiB(lotName string) int {
	if m := reMemTiB.FindStringSubmatch(lotName); len(m) == 3 {
		return atoi(m[1]) * 1024
	}
	if m := reMemGiB.FindStringSubmatch(lotName); len(m) == 3 {
		return atoi(m[1])
	}
	return 0
}
|
|
||||||
|
|
||||||
// parseCapacity extracts a storage-capacity token from a lot name:
// "<n>T" for terabyte matches (canonicalized by normalizeTToken) or
// "<n>G" for gigabyte matches (canonicalized by normalizeNumberToken);
// "" when neither of the package-level regexes matches. Decimal commas
// are converted to dots before normalization.
func parseCapacity(lotName string) string {
	if m := reCapacityT.FindStringSubmatch(lotName); len(m) == 2 {
		return normalizeTToken(strings.ReplaceAll(m[1], ",", ".")) + "T"
	}
	if m := reCapacityG.FindStringSubmatch(lotName); len(m) == 2 {
		return normalizeNumberToken(strings.ReplaceAll(m[1], ",", ".")) + "G"
	}
	return ""
}
|
|
||||||
|
|
||||||
// diskTypeCode maps a disk item to a short type token: "M2" for the M2
// category; otherwise "NV" / "SAS" / "SAT" when the lot name mentions
// NVME / SAS / SATA respectively; otherwise the upper-cased, trimmed
// category itself.
func diskTypeCode(cat string, lotName string) string {
	category := strings.ToUpper(strings.TrimSpace(cat))
	if category == "M2" {
		return "M2"
	}
	name := strings.ToUpper(lotName)
	switch {
	case strings.Contains(name, "NVME"):
		return "NV"
	case strings.Contains(name, "SAS"):
		return "SAS"
	case strings.Contains(name, "SATA"):
		return "SAT"
	}
	return category
}
|
|
||||||
|
|
||||||
// parsePortSpeed renders a network port profile from a lot name:
// "<ports>p<speed>G" for Ethernet-style matches of rePortSpeed, or
// "<ports>pFC<rate>" for Fibre Channel matches of rePortFC; "" when
// neither package-level pattern matches.
func parsePortSpeed(lotName string) string {
	if m := rePortSpeed.FindStringSubmatch(lotName); len(m) == 4 {
		return fmt.Sprintf("%sp%sG", m[1], m[2])
	}
	if m := rePortFC.FindStringSubmatch(lotName); len(m) == 3 {
		return fmt.Sprintf("%spFC%s", m[1], m[2])
	}
	return ""
}
|
|
||||||
|
|
||||||
func parseWatts(lotName string) string {
|
|
||||||
if m := reWatts.FindStringSubmatch(lotName); len(m) == 2 {
|
|
||||||
w := atoi(m[1])
|
|
||||||
if w >= 1000 {
|
|
||||||
kw := fmt.Sprintf("%.1f", float64(w)/1000.0)
|
|
||||||
kw = strings.TrimSuffix(kw, ".0")
|
|
||||||
return fmt.Sprintf("%skW", kw)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%dW", w)
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// normalizeNumberToken trims whitespace and leading zeros from a decimal
// token, restoring a single leading "0" when the result would be empty
// or would start with the decimal point ("007" -> "7", "0.5" -> "0.5",
// "000" -> "0").
func normalizeNumberToken(raw string) string {
	trimmed := strings.TrimLeft(strings.TrimSpace(raw), "0")
	if trimmed == "" || strings.HasPrefix(trimmed, ".") {
		return "0" + trimmed
	}
	return trimmed
}
|
|
||||||
|
|
||||||
func normalizeTToken(raw string) string {
|
|
||||||
raw = normalizeNumberToken(raw)
|
|
||||||
parts := strings.SplitN(raw, ".", 2)
|
|
||||||
intPart := parts[0]
|
|
||||||
frac := ""
|
|
||||||
if len(parts) == 2 {
|
|
||||||
frac = parts[1]
|
|
||||||
}
|
|
||||||
if frac == "" {
|
|
||||||
frac = "0"
|
|
||||||
}
|
|
||||||
if len(intPart) >= 2 {
|
|
||||||
return intPart + "." + frac
|
|
||||||
}
|
|
||||||
if len(frac) > 1 {
|
|
||||||
frac = frac[:1]
|
|
||||||
}
|
|
||||||
return intPart + "." + frac
|
|
||||||
}
|
|
||||||
|
|
||||||
// atoi folds the ASCII digits of v into an int, silently skipping every
// non-digit character (so "1,024" parses as 1024). It never fails; a
// string with no digits yields 0. Note: no overflow protection.
func atoi(v string) int {
	result := 0
	for _, r := range v {
		if '0' <= r && r <= '9' {
			result = result*10 + int(r-'0')
		}
	}
	return result
}
|
|
||||||
|
|
||||||
// compressArticle progressively shortens an over-long article (limit:
// 80 runes) by degrading the least important segments until it fits:
// drop the PSU segment, then genericize NET, then DISK, and finally GPU.
// The caller is expected to pass segments in the fixed order
// model, cpu, mem, gpu, disk, net, psu, support.
func compressArticle(segments []string) string {
	if len(segments) == 0 {
		return ""
	}
	// Normalize "GbE" spellings to the shorter "G" before measuring.
	normalized := make([]string, 0, len(segments))
	for _, s := range segments {
		normalized = append(normalized, strings.ReplaceAll(s, "GbE", "G"))
	}
	segments = normalized
	article := strings.Join(segments, "-")
	if len([]rune(article)) <= 80 {
		return article
	}

	// segment order: model, cpu, mem, gpu, disk, net, psu, support
	// NOTE(review): the hard-coded positions below assume every segment
	// is present; if the caller omits empty segments these indices would
	// address the wrong segment — confirm against Build.
	index := func(i int) (int, bool) {
		if i >= 0 && i < len(segments) {
			return i, true
		}
		return -1, false
	}

	// 1) remove PSU
	// (Removing index 6 shifts only "support"; the later indices 5/4/3
	// still address net/disk/gpu.)
	if i, ok := index(6); ok {
		segments = append(segments[:i], segments[i+1:]...)
		article = strings.Join(segments, "-")
		if len([]rune(article)) <= 80 {
			return article
		}
	}

	// 2) compress NET/HBA/HCA
	if i, ok := index(5); ok {
		segments[i] = compressNetSegment(segments[i])
		article = strings.Join(segments, "-")
		if len([]rune(article)) <= 80 {
			return article
		}
	}

	// 3) compress DISK
	if i, ok := index(4); ok {
		segments[i] = compressDiskSegment(segments[i])
		article = strings.Join(segments, "-")
		if len([]rune(article)) <= 80 {
			return article
		}
	}

	// 4) compress GPU to vendor only (GPU_NV)
	// Last resort: the result may still exceed the limit; the caller
	// re-checks the length and errors out.
	if i, ok := index(3); ok {
		segments[i] = compressGPUSegment(segments[i])
	}
	return strings.Join(segments, "-")
}
|
|
||||||
|
|
||||||
// compressNetSegment shortens a network segment by replacing each port
// profile with a generic label: profiles containing "FC" become "HBA",
// ones containing "HCA" or "IB" become "HCA", everything else becomes
// "NIC". Quantities are preserved; the result is sorted and re-joined
// with "+". An empty or all-blank segment is returned unchanged.
func compressNetSegment(seg string) string {
	if seg == "" {
		return seg
	}
	compressed := make([]string, 0, strings.Count(seg, "+")+1)
	for _, part := range strings.Split(seg, "+") {
		part = strings.TrimSpace(part)
		if part == "" {
			continue
		}
		qty, profile := "1", part
		if before, after, ok := strings.Cut(part, "x"); ok {
			qty, profile = before, after
		}
		upper := strings.ToUpper(profile)
		label := "NIC"
		switch {
		case strings.Contains(upper, "FC"):
			label = "HBA"
		case strings.Contains(upper, "HCA"), strings.Contains(upper, "IB"):
			label = "HCA"
		}
		compressed = append(compressed, qty+"x"+label)
	}
	if len(compressed) == 0 {
		return seg
	}
	sort.Strings(compressed)
	return strings.Join(compressed, "+")
}
|
|
||||||
|
|
||||||
// compressDiskSegment shortens a disk segment by replacing each spec
// with the first matching type label out of M2/NV/SAS/SAT/SSD/HDD/EDS/
// HHH (falling back to "DSK"), keeping quantities. The result is sorted
// and re-joined with "+"; an empty or all-blank segment is returned
// unchanged.
func compressDiskSegment(seg string) string {
	if seg == "" {
		return seg
	}
	compressed := make([]string, 0, strings.Count(seg, "+")+1)
	for _, part := range strings.Split(seg, "+") {
		part = strings.TrimSpace(part)
		if part == "" {
			continue
		}
		qty, spec := "1", part
		if before, after, ok := strings.Cut(part, "x"); ok {
			qty, spec = before, after
		}
		upper := strings.ToUpper(spec)
		label := "DSK"
		for _, candidate := range []string{"M2", "NV", "SAS", "SAT", "SSD", "HDD", "EDS", "HHH"} {
			if strings.Contains(upper, candidate) {
				label = candidate
				break
			}
		}
		compressed = append(compressed, qty+"x"+label)
	}
	if len(compressed) == 0 {
		return seg
	}
	sort.Strings(compressed)
	return strings.Join(compressed, "+")
}
|
|
||||||
|
|
||||||
// compressGPUSegment collapses every GPU entry to the generic vendor
// token "GPU_NV", keeping only per-entry quantities. The result is
// sorted and re-joined with "+"; an empty or all-blank segment is
// returned unchanged.
func compressGPUSegment(seg string) string {
	if seg == "" {
		return seg
	}
	compressed := make([]string, 0, strings.Count(seg, "+")+1)
	for _, part := range strings.Split(seg, "+") {
		part = strings.TrimSpace(part)
		if part == "" {
			continue
		}
		qty := "1"
		if before, _, ok := strings.Cut(part, "x"); ok {
			qty = before
		}
		compressed = append(compressed, qty+"xGPU_NV")
	}
	if len(compressed) == 0 {
		return seg
	}
	sort.Strings(compressed)
	return strings.Join(compressed, "+")
}
|
|
||||||
@@ -1,66 +0,0 @@
|
|||||||
package article
|
|
||||||
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestBuild_ParsesNetAndPSU seeds a local pricelist with NIC, FC-HBA and
// PSU lots whose names follow the parser conventions and verifies that
// Build produces a non-empty article with both the network and PSU
// segments parsed (no "UNKNET"/"UNKPSU" fallback tokens).
func TestBuild_ParsesNetAndPSU(t *testing.T) {
	local, err := localdb.New(filepath.Join(t.TempDir(), "local.db"))
	if err != nil {
		t.Fatalf("init local db: %v", err)
	}
	t.Cleanup(func() { _ = local.Close() })

	// Seed one pricelist so the items below have a parent record.
	if err := local.SaveLocalPricelist(&localdb.LocalPricelist{
		ServerID:  1,
		Source:    "estimate",
		Version:   "S-2026-02-11-001",
		Name:      "test",
		CreatedAt: time.Now(),
		SyncedAt:  time.Now(),
	}); err != nil {
		t.Fatalf("save local pricelist: %v", err)
	}
	localPL, err := local.GetLocalPricelistByServerID(1)
	if err != nil {
		t.Fatalf("get local pricelist: %v", err)
	}

	// Lot names encode "<ports>p<speed>G" for NICs, "<ports>pFC<rate>"
	// for FC HBAs and "<watts>W" for PSUs.
	if err := local.SaveLocalPricelistItems([]localdb.LocalPricelistItem{
		{PricelistID: localPL.ID, LotName: "NIC_2p25G_MCX512A-AC", LotCategory: "NIC", Price: 1},
		{PricelistID: localPL.ID, LotName: "HBA_2pFC32_Gen6", LotCategory: "HBA", Price: 1},
		{PricelistID: localPL.ID, LotName: "PS_1000W_Platinum", LotCategory: "PS", Price: 1},
	}); err != nil {
		t.Fatalf("save local items: %v", err)
	}

	items := models.ConfigItems{
		{LotName: "NIC_2p25G_MCX512A-AC", Quantity: 1},
		{LotName: "HBA_2pFC32_Gen6", Quantity: 1},
		{LotName: "PS_1000W_Platinum", Quantity: 2},
	}
	result, err := Build(local, items, BuildOptions{
		ServerModel:     "DL380GEN11",
		SupportCode:     "1yW",
		ServerPricelist: &localPL.ServerID,
	})
	if err != nil {
		t.Fatalf("build article: %v", err)
	}
	if result.Article == "" {
		t.Fatalf("expected article to be non-empty")
	}
	if contains(result.Article, "UNKNET") || contains(result.Article, "UNKPSU") {
		t.Fatalf("unexpected UNK in article: %s", result.Article)
	}
}
|
|
||||||
|
|
||||||
// contains is a tiny readability alias for a substring check used by the
// assertions in this file.
func contains(s, sub string) bool {
	return strings.Index(s, sub) >= 0
}
|
|
||||||
@@ -7,14 +7,19 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
mysqlDriver "github.com/go-sql-driver/mysql"
|
||||||
"gopkg.in/yaml.v3"
|
"gopkg.in/yaml.v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
Server ServerConfig `yaml:"server"`
|
Server ServerConfig `yaml:"server"`
|
||||||
Export ExportConfig `yaml:"export"`
|
Database DatabaseConfig `yaml:"database"`
|
||||||
Logging LoggingConfig `yaml:"logging"`
|
Auth AuthConfig `yaml:"auth"`
|
||||||
Backup BackupConfig `yaml:"backup"`
|
Pricing PricingConfig `yaml:"pricing"`
|
||||||
|
Export ExportConfig `yaml:"export"`
|
||||||
|
Alerts AlertsConfig `yaml:"alerts"`
|
||||||
|
Notifications NotificationsConfig `yaml:"notifications"`
|
||||||
|
Logging LoggingConfig `yaml:"logging"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type ServerConfig struct {
|
type ServerConfig struct {
|
||||||
@@ -25,6 +30,70 @@ type ServerConfig struct {
|
|||||||
WriteTimeout time.Duration `yaml:"write_timeout"`
|
WriteTimeout time.Duration `yaml:"write_timeout"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type DatabaseConfig struct {
|
||||||
|
Host string `yaml:"host"`
|
||||||
|
Port int `yaml:"port"`
|
||||||
|
Name string `yaml:"name"`
|
||||||
|
User string `yaml:"user"`
|
||||||
|
Password string `yaml:"password"`
|
||||||
|
MaxOpenConns int `yaml:"max_open_conns"`
|
||||||
|
MaxIdleConns int `yaml:"max_idle_conns"`
|
||||||
|
ConnMaxLifetime time.Duration `yaml:"conn_max_lifetime"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *DatabaseConfig) DSN() string {
|
||||||
|
cfg := mysqlDriver.NewConfig()
|
||||||
|
cfg.User = d.User
|
||||||
|
cfg.Passwd = d.Password
|
||||||
|
cfg.Net = "tcp"
|
||||||
|
cfg.Addr = net.JoinHostPort(d.Host, strconv.Itoa(d.Port))
|
||||||
|
cfg.DBName = d.Name
|
||||||
|
cfg.ParseTime = true
|
||||||
|
cfg.Loc = time.Local
|
||||||
|
cfg.Params = map[string]string{
|
||||||
|
"charset": "utf8mb4",
|
||||||
|
}
|
||||||
|
return cfg.FormatDSN()
|
||||||
|
}
|
||||||
|
|
||||||
|
type AuthConfig struct {
|
||||||
|
JWTSecret string `yaml:"jwt_secret"`
|
||||||
|
TokenExpiry time.Duration `yaml:"token_expiry"`
|
||||||
|
RefreshExpiry time.Duration `yaml:"refresh_expiry"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type PricingConfig struct {
|
||||||
|
DefaultMethod string `yaml:"default_method"`
|
||||||
|
DefaultPeriodDays int `yaml:"default_period_days"`
|
||||||
|
FreshnessGreenDays int `yaml:"freshness_green_days"`
|
||||||
|
FreshnessYellowDays int `yaml:"freshness_yellow_days"`
|
||||||
|
FreshnessRedDays int `yaml:"freshness_red_days"`
|
||||||
|
MinQuotesForMedian int `yaml:"min_quotes_for_median"`
|
||||||
|
PopularityDecayDays int `yaml:"popularity_decay_days"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ExportConfig struct {
|
||||||
|
TempDir string `yaml:"temp_dir"`
|
||||||
|
MaxFileAge time.Duration `yaml:"max_file_age"`
|
||||||
|
CompanyName string `yaml:"company_name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type AlertsConfig struct {
|
||||||
|
Enabled bool `yaml:"enabled"`
|
||||||
|
CheckInterval time.Duration `yaml:"check_interval"`
|
||||||
|
HighDemandThreshold int `yaml:"high_demand_threshold"`
|
||||||
|
TrendingThresholdPercent int `yaml:"trending_threshold_percent"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type NotificationsConfig struct {
|
||||||
|
EmailEnabled bool `yaml:"email_enabled"`
|
||||||
|
SMTPHost string `yaml:"smtp_host"`
|
||||||
|
SMTPPort int `yaml:"smtp_port"`
|
||||||
|
SMTPUser string `yaml:"smtp_user"`
|
||||||
|
SMTPPassword string `yaml:"smtp_password"`
|
||||||
|
FromAddress string `yaml:"from_address"`
|
||||||
|
}
|
||||||
|
|
||||||
type LoggingConfig struct {
|
type LoggingConfig struct {
|
||||||
Level string `yaml:"level"`
|
Level string `yaml:"level"`
|
||||||
Format string `yaml:"format"`
|
Format string `yaml:"format"`
|
||||||
@@ -32,14 +101,6 @@ type LoggingConfig struct {
|
|||||||
FilePath string `yaml:"file_path"`
|
FilePath string `yaml:"file_path"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExportConfig is kept for constructor compatibility in export services.
|
|
||||||
// Runtime no longer persists an export section in config.yaml.
|
|
||||||
type ExportConfig struct{}
|
|
||||||
|
|
||||||
type BackupConfig struct {
|
|
||||||
Time string `yaml:"time"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func Load(path string) (*Config, error) {
|
func Load(path string) (*Config, error) {
|
||||||
data, err := os.ReadFile(path)
|
data, err := os.ReadFile(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -73,6 +134,45 @@ func (c *Config) setDefaults() {
|
|||||||
c.Server.WriteTimeout = 30 * time.Second
|
c.Server.WriteTimeout = 30 * time.Second
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if c.Database.Port == 0 {
|
||||||
|
c.Database.Port = 3306
|
||||||
|
}
|
||||||
|
if c.Database.MaxOpenConns == 0 {
|
||||||
|
c.Database.MaxOpenConns = 25
|
||||||
|
}
|
||||||
|
if c.Database.MaxIdleConns == 0 {
|
||||||
|
c.Database.MaxIdleConns = 5
|
||||||
|
}
|
||||||
|
if c.Database.ConnMaxLifetime == 0 {
|
||||||
|
c.Database.ConnMaxLifetime = 5 * time.Minute
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.Auth.TokenExpiry == 0 {
|
||||||
|
c.Auth.TokenExpiry = 24 * time.Hour
|
||||||
|
}
|
||||||
|
if c.Auth.RefreshExpiry == 0 {
|
||||||
|
c.Auth.RefreshExpiry = 7 * 24 * time.Hour
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.Pricing.DefaultMethod == "" {
|
||||||
|
c.Pricing.DefaultMethod = "weighted_median"
|
||||||
|
}
|
||||||
|
if c.Pricing.DefaultPeriodDays == 0 {
|
||||||
|
c.Pricing.DefaultPeriodDays = 90
|
||||||
|
}
|
||||||
|
if c.Pricing.FreshnessGreenDays == 0 {
|
||||||
|
c.Pricing.FreshnessGreenDays = 30
|
||||||
|
}
|
||||||
|
if c.Pricing.FreshnessYellowDays == 0 {
|
||||||
|
c.Pricing.FreshnessYellowDays = 60
|
||||||
|
}
|
||||||
|
if c.Pricing.FreshnessRedDays == 0 {
|
||||||
|
c.Pricing.FreshnessRedDays = 90
|
||||||
|
}
|
||||||
|
if c.Pricing.MinQuotesForMedian == 0 {
|
||||||
|
c.Pricing.MinQuotesForMedian = 3
|
||||||
|
}
|
||||||
|
|
||||||
if c.Logging.Level == "" {
|
if c.Logging.Level == "" {
|
||||||
c.Logging.Level = "info"
|
c.Logging.Level = "info"
|
||||||
}
|
}
|
||||||
@@ -82,12 +182,8 @@ func (c *Config) setDefaults() {
|
|||||||
if c.Logging.Output == "" {
|
if c.Logging.Output == "" {
|
||||||
c.Logging.Output = "stdout"
|
c.Logging.Output = "stdout"
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.Backup.Time == "" {
|
|
||||||
c.Backup.Time = "00:00"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Config) Address() string {
|
func (c *Config) Address() string {
|
||||||
return net.JoinHostPort(c.Server.Host, strconv.Itoa(c.Server.Port))
|
return fmt.Sprintf("%s:%d", c.Server.Host, c.Server.Port)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -238,22 +238,6 @@ func (cm *ConnectionManager) Disconnect() {
|
|||||||
cm.lastError = nil
|
cm.lastError = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MarkOffline closes the current connection and preserves the last observed error.
// It takes the write lock, best-effort closes the underlying database
// handle, clears cm.db, and records the supplied error plus the time of
// the transition so later health checks can report it.
func (cm *ConnectionManager) MarkOffline(err error) {
	cm.mu.Lock()
	defer cm.mu.Unlock()

	if cm.db != nil {
		// cm.db.DB() looks like the gorm accessor for the underlying
		// *sql.DB — TODO confirm. The Close error is deliberately
		// ignored: we are already tearing the connection down.
		sqlDB, dbErr := cm.db.DB()
		if dbErr == nil {
			sqlDB.Close()
		}
	}
	cm.db = nil
	cm.lastError = err
	cm.lastCheck = time.Now()
}
|
|
||||||
|
|
||||||
// GetLastError returns the last connection error (thread-safe)
|
// GetLastError returns the last connection error (thread-safe)
|
||||||
func (cm *ConnectionManager) GetLastError() error {
|
func (cm *ConnectionManager) GetLastError() error {
|
||||||
cm.mu.RLock()
|
cm.mu.RLock()
|
||||||
|
|||||||
113
internal/handlers/auth.go
Normal file
113
internal/handlers/auth.go
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
package handlers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/middleware"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AuthHandler serves the authentication HTTP endpoints: login, token
// refresh, current-user lookup and logout.
type AuthHandler struct {
	authService *services.AuthService      // issues/validates token pairs
	userRepo    *repository.UserRepository // user lookups for /me
}
|
||||||
|
|
||||||
|
func NewAuthHandler(authService *services.AuthService, userRepo *repository.UserRepository) *AuthHandler {
|
||||||
|
return &AuthHandler{
|
||||||
|
authService: authService,
|
||||||
|
userRepo: userRepo,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoginRequest is the JSON body accepted by Login; both fields are
// required by gin binding.
type LoginRequest struct {
	Username string `json:"username" binding:"required"`
	Password string `json:"password" binding:"required"`
}

// LoginResponse is the successful Login payload: a token pair, its
// expiry (unix timestamp, per ExpiresAt's int64 type — TODO confirm
// units) and a sanitized view of the authenticated user.
type LoginResponse struct {
	AccessToken  string       `json:"access_token"`
	RefreshToken string       `json:"refresh_token"`
	ExpiresAt    int64        `json:"expires_at"`
	User         UserResponse `json:"user"`
}

// UserResponse is the public projection of a user record (no password
// hash or other sensitive fields).
type UserResponse struct {
	ID       uint   `json:"id"`
	Username string `json:"username"`
	Email    string `json:"email"`
	Role     string `json:"role"`
}
|
||||||
|
|
||||||
|
// Login authenticates username/password credentials and returns an
// access/refresh token pair together with a sanitized user view.
// Responses: 400 on malformed JSON, 401 when the auth service rejects
// the credentials, 200 with LoginResponse on success.
func (h *AuthHandler) Login(c *gin.Context) {
	var req LoginRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	tokens, user, err := h.authService.Login(req.Username, req.Password)
	if err != nil {
		// NOTE(review): echoing the service error verbatim may reveal
		// whether the username exists — consider a generic message.
		c.JSON(http.StatusUnauthorized, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, LoginResponse{
		AccessToken:  tokens.AccessToken,
		RefreshToken: tokens.RefreshToken,
		ExpiresAt:    tokens.ExpiresAt,
		User: UserResponse{
			ID:       user.ID,
			Username: user.Username,
			Email:    user.Email,
			Role:     string(user.Role),
		},
	})
}
|
||||||
|
|
||||||
|
// RefreshRequest is the JSON body accepted by Refresh; the refresh token
// is required by gin binding.
type RefreshRequest struct {
	RefreshToken string `json:"refresh_token" binding:"required"`
}
|
||||||
|
|
||||||
|
// Refresh exchanges a valid refresh token for a new token pair.
// Responses: 400 on malformed JSON, 401 when the token is rejected,
// 200 with the new tokens (the auth service's token struct, serialized
// as-is) on success.
func (h *AuthHandler) Refresh(c *gin.Context) {
	var req RefreshRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	tokens, err := h.authService.RefreshTokens(req.RefreshToken)
	if err != nil {
		c.JSON(http.StatusUnauthorized, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, tokens)
}
|
||||||
|
|
||||||
|
// Me returns the authenticated caller's profile, resolved from the JWT
// claims placed on the context by the auth middleware.
// Responses: 401 when no claims are present, 404 when the user record
// no longer exists, 200 with UserResponse otherwise.
func (h *AuthHandler) Me(c *gin.Context) {
	claims := middleware.GetClaims(c)
	if claims == nil {
		c.JSON(http.StatusUnauthorized, gin.H{"error": "not authenticated"})
		return
	}

	user, err := h.userRepo.GetByID(claims.UserID)
	if err != nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "user not found"})
		return
	}

	c.JSON(http.StatusOK, UserResponse{
		ID:       user.ID,
		Username: user.Username,
		Email:    user.Email,
		Role:     string(user.Role),
	})
}
|
||||||
|
|
||||||
|
// Logout acknowledges a logout request. No server-side state is
// invalidated because the JWTs are stateless; clients discard their
// tokens.
func (h *AuthHandler) Logout(c *gin.Context) {
	// JWT is stateless, logout is handled on client by discarding tokens
	c.JSON(http.StatusOK, gin.H{"message": "logged out"})
}
|
||||||
@@ -3,10 +3,8 @@ package handlers
|
|||||||
import (
|
import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/services"
|
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
@@ -27,12 +25,6 @@ func NewComponentHandler(componentService *services.ComponentService, localDB *l
|
|||||||
func (h *ComponentHandler) List(c *gin.Context) {
|
func (h *ComponentHandler) List(c *gin.Context) {
|
||||||
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
||||||
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "20"))
|
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "20"))
|
||||||
if page < 1 {
|
|
||||||
page = 1
|
|
||||||
}
|
|
||||||
if perPage < 1 {
|
|
||||||
perPage = 20
|
|
||||||
}
|
|
||||||
|
|
||||||
filter := repository.ComponentFilter{
|
filter := repository.ComponentFilter{
|
||||||
Category: c.Query("category"),
|
Category: c.Query("category"),
|
||||||
@@ -41,68 +33,73 @@ func (h *ComponentHandler) List(c *gin.Context) {
|
|||||||
ExcludeHidden: c.Query("include_hidden") != "true", // По умолчанию скрытые не показываются
|
ExcludeHidden: c.Query("include_hidden") != "true", // По умолчанию скрытые не показываются
|
||||||
}
|
}
|
||||||
|
|
||||||
localFilter := localdb.ComponentFilter{
|
result, err := h.componentService.List(filter, page, perPage)
|
||||||
Category: filter.Category,
|
|
||||||
Search: filter.Search,
|
|
||||||
HasPrice: filter.HasPrice,
|
|
||||||
}
|
|
||||||
offset := (page - 1) * perPage
|
|
||||||
localComps, total, err := h.localDB.ListComponents(localFilter, offset, perPage)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
components := make([]services.ComponentView, len(localComps))
|
// If offline mode (empty result), fallback to local components
|
||||||
for i, lc := range localComps {
|
isOffline := false
|
||||||
components[i] = services.ComponentView{
|
if v, ok := c.Get("is_offline"); ok {
|
||||||
LotName: lc.LotName,
|
if b, ok := v.(bool); ok {
|
||||||
Description: lc.LotDescription,
|
isOffline = b
|
||||||
Category: lc.Category,
|
}
|
||||||
CategoryName: lc.Category,
|
}
|
||||||
Model: lc.Model,
|
if isOffline && result.Total == 0 && h.localDB != nil {
|
||||||
|
localFilter := localdb.ComponentFilter{
|
||||||
|
Category: filter.Category,
|
||||||
|
Search: filter.Search,
|
||||||
|
HasPrice: filter.HasPrice,
|
||||||
|
}
|
||||||
|
|
||||||
|
offset := (page - 1) * perPage
|
||||||
|
localComps, total, err := h.localDB.ListComponents(localFilter, offset, perPage)
|
||||||
|
if err == nil && len(localComps) > 0 {
|
||||||
|
// Convert local components to ComponentView format
|
||||||
|
components := make([]services.ComponentView, len(localComps))
|
||||||
|
for i, lc := range localComps {
|
||||||
|
components[i] = services.ComponentView{
|
||||||
|
LotName: lc.LotName,
|
||||||
|
Description: lc.LotDescription,
|
||||||
|
Category: lc.Category,
|
||||||
|
CategoryName: lc.Category, // No translation in local mode
|
||||||
|
Model: lc.Model,
|
||||||
|
CurrentPrice: lc.CurrentPrice,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, &services.ComponentListResult{
|
||||||
|
Components: components,
|
||||||
|
Total: total,
|
||||||
|
Page: page,
|
||||||
|
PerPage: perPage,
|
||||||
|
})
|
||||||
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
c.JSON(http.StatusOK, &services.ComponentListResult{
|
c.JSON(http.StatusOK, result)
|
||||||
Components: components,
|
|
||||||
Total: total,
|
|
||||||
Page: page,
|
|
||||||
PerPage: perPage,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *ComponentHandler) Get(c *gin.Context) {
|
func (h *ComponentHandler) Get(c *gin.Context) {
|
||||||
lotName := c.Param("lot_name")
|
lotName := c.Param("lot_name")
|
||||||
component, err := h.localDB.GetLocalComponent(lotName)
|
|
||||||
|
component, err := h.componentService.GetByLotName(lotName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "component not found"})
|
c.JSON(http.StatusNotFound, gin.H{"error": "component not found"})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
c.JSON(http.StatusOK, services.ComponentView{
|
c.JSON(http.StatusOK, component)
|
||||||
LotName: component.LotName,
|
|
||||||
Description: component.LotDescription,
|
|
||||||
Category: component.Category,
|
|
||||||
CategoryName: component.Category,
|
|
||||||
Model: component.Model,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *ComponentHandler) GetCategories(c *gin.Context) {
|
func (h *ComponentHandler) GetCategories(c *gin.Context) {
|
||||||
codes, err := h.localDB.GetLocalComponentCategories()
|
categories, err := h.componentService.GetCategories()
|
||||||
if err == nil && len(codes) > 0 {
|
if err != nil {
|
||||||
categories := make([]models.Category, 0, len(codes))
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
for _, code := range codes {
|
|
||||||
trimmed := strings.TrimSpace(code)
|
|
||||||
if trimmed == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
categories = append(categories, models.Category{Code: trimmed, Name: trimmed})
|
|
||||||
}
|
|
||||||
c.JSON(http.StatusOK, categories)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
c.JSON(http.StatusOK, models.DefaultCategories)
|
c.JSON(http.StatusOK, categories)
|
||||||
}
|
}
|
||||||
|
|||||||
239
internal/handlers/configuration.go
Normal file
239
internal/handlers/configuration.go
Normal file
@@ -0,0 +1,239 @@
|
|||||||
|
package handlers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/middleware"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ConfigurationHandler struct {
|
||||||
|
configService *services.ConfigurationService
|
||||||
|
exportService *services.ExportService
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewConfigurationHandler(
|
||||||
|
configService *services.ConfigurationService,
|
||||||
|
exportService *services.ExportService,
|
||||||
|
) *ConfigurationHandler {
|
||||||
|
return &ConfigurationHandler{
|
||||||
|
configService: configService,
|
||||||
|
exportService: exportService,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *ConfigurationHandler) List(c *gin.Context) {
|
||||||
|
username := middleware.GetUsername(c)
|
||||||
|
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
||||||
|
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "20"))
|
||||||
|
|
||||||
|
configs, total, err := h.configService.ListByUser(username, page, perPage)
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"configurations": configs,
|
||||||
|
"total": total,
|
||||||
|
"page": page,
|
||||||
|
"per_page": perPage,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *ConfigurationHandler) Create(c *gin.Context) {
|
||||||
|
username := middleware.GetUsername(c)
|
||||||
|
|
||||||
|
var req services.CreateConfigRequest
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := h.configService.Create(username, &req)
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusCreated, config)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *ConfigurationHandler) Get(c *gin.Context) {
|
||||||
|
username := middleware.GetUsername(c)
|
||||||
|
uuid := c.Param("uuid")
|
||||||
|
|
||||||
|
config, err := h.configService.GetByUUID(uuid, username)
|
||||||
|
if err != nil {
|
||||||
|
status := http.StatusNotFound
|
||||||
|
if err == services.ErrConfigForbidden {
|
||||||
|
status = http.StatusForbidden
|
||||||
|
}
|
||||||
|
c.JSON(status, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, config)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *ConfigurationHandler) Update(c *gin.Context) {
|
||||||
|
username := middleware.GetUsername(c)
|
||||||
|
uuid := c.Param("uuid")
|
||||||
|
|
||||||
|
var req services.CreateConfigRequest
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := h.configService.Update(uuid, username, &req)
|
||||||
|
if err != nil {
|
||||||
|
status := http.StatusInternalServerError
|
||||||
|
if err == services.ErrConfigNotFound {
|
||||||
|
status = http.StatusNotFound
|
||||||
|
} else if err == services.ErrConfigForbidden {
|
||||||
|
status = http.StatusForbidden
|
||||||
|
}
|
||||||
|
c.JSON(status, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, config)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *ConfigurationHandler) Delete(c *gin.Context) {
|
||||||
|
username := middleware.GetUsername(c)
|
||||||
|
uuid := c.Param("uuid")
|
||||||
|
|
||||||
|
err := h.configService.Delete(uuid, username)
|
||||||
|
if err != nil {
|
||||||
|
status := http.StatusInternalServerError
|
||||||
|
if err == services.ErrConfigNotFound {
|
||||||
|
status = http.StatusNotFound
|
||||||
|
} else if err == services.ErrConfigForbidden {
|
||||||
|
status = http.StatusForbidden
|
||||||
|
}
|
||||||
|
c.JSON(status, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{"message": "deleted"})
|
||||||
|
}
|
||||||
|
|
||||||
|
type RenameConfigRequest struct {
|
||||||
|
Name string `json:"name" binding:"required"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *ConfigurationHandler) Rename(c *gin.Context) {
|
||||||
|
username := middleware.GetUsername(c)
|
||||||
|
uuid := c.Param("uuid")
|
||||||
|
|
||||||
|
var req RenameConfigRequest
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := h.configService.Rename(uuid, username, req.Name)
|
||||||
|
if err != nil {
|
||||||
|
status := http.StatusInternalServerError
|
||||||
|
if err == services.ErrConfigNotFound {
|
||||||
|
status = http.StatusNotFound
|
||||||
|
} else if err == services.ErrConfigForbidden {
|
||||||
|
status = http.StatusForbidden
|
||||||
|
}
|
||||||
|
c.JSON(status, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, config)
|
||||||
|
}
|
||||||
|
|
||||||
|
type CloneConfigRequest struct {
|
||||||
|
Name string `json:"name" binding:"required"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *ConfigurationHandler) Clone(c *gin.Context) {
|
||||||
|
username := middleware.GetUsername(c)
|
||||||
|
uuid := c.Param("uuid")
|
||||||
|
|
||||||
|
var req CloneConfigRequest
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := h.configService.Clone(uuid, username, req.Name)
|
||||||
|
if err != nil {
|
||||||
|
status := http.StatusInternalServerError
|
||||||
|
if err == services.ErrConfigNotFound {
|
||||||
|
status = http.StatusNotFound
|
||||||
|
} else if err == services.ErrConfigForbidden {
|
||||||
|
status = http.StatusForbidden
|
||||||
|
}
|
||||||
|
c.JSON(status, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusCreated, config)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *ConfigurationHandler) RefreshPrices(c *gin.Context) {
|
||||||
|
username := middleware.GetUsername(c)
|
||||||
|
uuid := c.Param("uuid")
|
||||||
|
|
||||||
|
config, err := h.configService.RefreshPrices(uuid, username)
|
||||||
|
if err != nil {
|
||||||
|
status := http.StatusInternalServerError
|
||||||
|
if err == services.ErrConfigNotFound {
|
||||||
|
status = http.StatusNotFound
|
||||||
|
} else if err == services.ErrConfigForbidden {
|
||||||
|
status = http.StatusForbidden
|
||||||
|
}
|
||||||
|
c.JSON(status, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, config)
|
||||||
|
}
|
||||||
|
|
||||||
|
// func (h *ConfigurationHandler) ExportJSON(c *gin.Context) {
|
||||||
|
// userID := middleware.GetUserID(c)
|
||||||
|
// uuid := c.Param("uuid")
|
||||||
|
//
|
||||||
|
// config, err := h.configService.GetByUUID(uuid, userID)
|
||||||
|
// if err != nil {
|
||||||
|
// c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
|
||||||
|
// return
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// data, err := h.configService.ExportJSON(uuid, userID)
|
||||||
|
// if err != nil {
|
||||||
|
// c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
|
||||||
|
// return
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// filename := fmt.Sprintf("%s %s SPEC.json", config.CreatedAt.Format("2006-01-02"), config.Name)
|
||||||
|
// c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
|
||||||
|
// c.Data(http.StatusOK, "application/json", data)
|
||||||
|
// }
|
||||||
|
|
||||||
|
// func (h *ConfigurationHandler) ImportJSON(c *gin.Context) {
|
||||||
|
// userID := middleware.GetUserID(c)
|
||||||
|
//
|
||||||
|
// data, err := io.ReadAll(c.Request.Body)
|
||||||
|
// if err != nil {
|
||||||
|
// c.JSON(http.StatusBadRequest, gin.H{"error": "failed to read body"})
|
||||||
|
// return
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// config, err := h.configService.ImportJSON(userID, data)
|
||||||
|
// if err != nil {
|
||||||
|
// c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
|
// return
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// c.JSON(http.StatusCreated, config)
|
||||||
|
// }
|
||||||
@@ -3,43 +3,34 @@ package handlers
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
"git.mchus.pro/mchus/quoteforge/internal/middleware"
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/services"
|
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
)
|
)
|
||||||
|
|
||||||
type ExportHandler struct {
|
type ExportHandler struct {
|
||||||
exportService *services.ExportService
|
exportService *services.ExportService
|
||||||
configService services.ConfigurationGetter
|
configService services.ConfigurationGetter
|
||||||
projectService *services.ProjectService
|
componentService *services.ComponentService
|
||||||
dbUsername string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewExportHandler(
|
func NewExportHandler(
|
||||||
exportService *services.ExportService,
|
exportService *services.ExportService,
|
||||||
configService services.ConfigurationGetter,
|
configService services.ConfigurationGetter,
|
||||||
projectService *services.ProjectService,
|
componentService *services.ComponentService,
|
||||||
dbUsername string,
|
|
||||||
) *ExportHandler {
|
) *ExportHandler {
|
||||||
return &ExportHandler{
|
return &ExportHandler{
|
||||||
exportService: exportService,
|
exportService: exportService,
|
||||||
configService: configService,
|
configService: configService,
|
||||||
projectService: projectService,
|
componentService: componentService,
|
||||||
dbUsername: dbUsername,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type ExportRequest struct {
|
type ExportRequest struct {
|
||||||
Name string `json:"name" binding:"required"`
|
Name string `json:"name" binding:"required"`
|
||||||
ProjectName string `json:"project_name"`
|
Items []struct {
|
||||||
ProjectUUID string `json:"project_uuid"`
|
|
||||||
Article string `json:"article"`
|
|
||||||
ServerCount int `json:"server_count"`
|
|
||||||
PricelistID *uint `json:"pricelist_id"`
|
|
||||||
Items []struct {
|
|
||||||
LotName string `json:"lot_name" binding:"required"`
|
LotName string `json:"lot_name" binding:"required"`
|
||||||
Quantity int `json:"quantity" binding:"required,min=1"`
|
Quantity int `json:"quantity" binding:"required,min=1"`
|
||||||
UnitPrice float64 `json:"unit_price"`
|
UnitPrice float64 `json:"unit_price"`
|
||||||
@@ -47,237 +38,84 @@ type ExportRequest struct {
|
|||||||
Notes string `json:"notes"`
|
Notes string `json:"notes"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type ProjectExportOptionsRequest struct {
|
|
||||||
IncludeLOT bool `json:"include_lot"`
|
|
||||||
IncludeBOM bool `json:"include_bom"`
|
|
||||||
IncludeEstimate bool `json:"include_estimate"`
|
|
||||||
IncludeStock bool `json:"include_stock"`
|
|
||||||
IncludeCompetitor bool `json:"include_competitor"`
|
|
||||||
Basis string `json:"basis"` // "fob" or "ddp"
|
|
||||||
SaleMarkup float64 `json:"sale_markup"` // DDP multiplier; 0 defaults to 1.3
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *ExportHandler) ExportCSV(c *gin.Context) {
|
func (h *ExportHandler) ExportCSV(c *gin.Context) {
|
||||||
var req ExportRequest
|
var req ExportRequest
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
data := h.buildExportData(&req)
|
data := h.buildExportData(&req)
|
||||||
|
|
||||||
// Validate before streaming (can return JSON error)
|
csvData, err := h.exportService.ToCSV(data)
|
||||||
if len(data.Configs) == 0 || len(data.Configs[0].Items) == 0 {
|
if err != nil {
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "no items to export"})
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get project code for filename
|
filename := fmt.Sprintf("%s %s SPEC.csv", time.Now().Format("2006-01-02"), req.Name)
|
||||||
projectCode := req.ProjectName // legacy field: may contain code from frontend
|
|
||||||
if projectCode == "" && req.ProjectUUID != "" {
|
|
||||||
if project, err := h.projectService.GetByUUID(req.ProjectUUID, h.dbUsername); err == nil && project != nil {
|
|
||||||
projectCode = project.Code
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if projectCode == "" {
|
|
||||||
projectCode = req.Name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set headers before streaming
|
|
||||||
exportDate := data.CreatedAt
|
|
||||||
articleSegment := sanitizeFilenameSegment(req.Article)
|
|
||||||
if articleSegment == "" {
|
|
||||||
articleSegment = "BOM"
|
|
||||||
}
|
|
||||||
filename := fmt.Sprintf("%s (%s) %s %s.csv", exportDate.Format("2006-01-02"), projectCode, req.Name, articleSegment)
|
|
||||||
c.Header("Content-Type", "text/csv; charset=utf-8")
|
|
||||||
c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
|
c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
|
||||||
|
c.Data(http.StatusOK, "text/csv; charset=utf-8", csvData)
|
||||||
// Stream CSV (cannot return JSON after this point)
|
|
||||||
if err := h.exportService.ToCSV(c.Writer, data); err != nil {
|
|
||||||
c.Error(err) // Log only
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// buildExportData converts an ExportRequest into a ProjectExportData using a temporary Configuration model
|
func (h *ExportHandler) buildExportData(req *ExportRequest) *services.ExportData {
|
||||||
// so that ExportService.ConfigToExportData can resolve categories via localDB.
|
items := make([]services.ExportItem, len(req.Items))
|
||||||
func (h *ExportHandler) buildExportData(req *ExportRequest) *services.ProjectExportData {
|
var total float64
|
||||||
configItems := make(models.ConfigItems, len(req.Items))
|
|
||||||
for i, item := range req.Items {
|
for i, item := range req.Items {
|
||||||
configItems[i] = models.ConfigItem{
|
itemTotal := item.UnitPrice * float64(item.Quantity)
|
||||||
LotName: item.LotName,
|
|
||||||
Quantity: item.Quantity,
|
// Получаем информацию о компоненте для заполнения категории и описания
|
||||||
UnitPrice: item.UnitPrice,
|
componentView, err := h.componentService.GetByLotName(item.LotName)
|
||||||
|
if err != nil {
|
||||||
|
// Если не удалось получить информацию о компоненте, используем только основные данные
|
||||||
|
items[i] = services.ExportItem{
|
||||||
|
LotName: item.LotName,
|
||||||
|
Quantity: item.Quantity,
|
||||||
|
UnitPrice: item.UnitPrice,
|
||||||
|
TotalPrice: itemTotal,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
items[i] = services.ExportItem{
|
||||||
|
LotName: item.LotName,
|
||||||
|
Description: componentView.Description,
|
||||||
|
Category: componentView.Category,
|
||||||
|
Quantity: item.Quantity,
|
||||||
|
UnitPrice: item.UnitPrice,
|
||||||
|
TotalPrice: itemTotal,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
total += itemTotal
|
||||||
}
|
}
|
||||||
|
|
||||||
serverCount := req.ServerCount
|
return &services.ExportData{
|
||||||
if serverCount < 1 {
|
Name: req.Name,
|
||||||
serverCount = 1
|
Items: items,
|
||||||
|
Total: total,
|
||||||
|
Notes: req.Notes,
|
||||||
|
CreatedAt: time.Now(),
|
||||||
}
|
}
|
||||||
|
|
||||||
cfg := &models.Configuration{
|
|
||||||
Article: req.Article,
|
|
||||||
ServerCount: serverCount,
|
|
||||||
PricelistID: req.PricelistID,
|
|
||||||
Items: configItems,
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
}
|
|
||||||
|
|
||||||
return h.exportService.ConfigToExportData(cfg)
|
|
||||||
}
|
|
||||||
|
|
||||||
func sanitizeFilenameSegment(value string) string {
|
|
||||||
if strings.TrimSpace(value) == "" {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
replacer := strings.NewReplacer(
|
|
||||||
"/", "_",
|
|
||||||
"\\", "_",
|
|
||||||
":", "_",
|
|
||||||
"*", "_",
|
|
||||||
"?", "_",
|
|
||||||
"\"", "_",
|
|
||||||
"<", "_",
|
|
||||||
">", "_",
|
|
||||||
"|", "_",
|
|
||||||
)
|
|
||||||
return strings.TrimSpace(replacer.Replace(value))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *ExportHandler) ExportConfigCSV(c *gin.Context) {
|
func (h *ExportHandler) ExportConfigCSV(c *gin.Context) {
|
||||||
|
username := middleware.GetUsername(c)
|
||||||
uuid := c.Param("uuid")
|
uuid := c.Param("uuid")
|
||||||
|
|
||||||
// Get config before streaming (can return JSON error)
|
config, err := h.configService.GetByUUID(uuid, username)
|
||||||
config, err := h.configService.GetByUUID(uuid, h.dbUsername)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
RespondError(c, http.StatusNotFound, "resource not found", err)
|
c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
data := h.exportService.ConfigToExportData(config)
|
data := h.exportService.ConfigToExportData(config, h.componentService)
|
||||||
|
|
||||||
// Validate before streaming (can return JSON error)
|
csvData, err := h.exportService.ToCSV(data)
|
||||||
if len(data.Configs) == 0 || len(data.Configs[0].Items) == 0 {
|
if err != nil {
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "no items to export"})
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get project code for filename
|
filename := fmt.Sprintf("%s %s SPEC.csv", config.CreatedAt.Format("2006-01-02"), config.Name)
|
||||||
projectCode := config.Name // fallback: use config name if no project
|
|
||||||
if config.ProjectUUID != nil && *config.ProjectUUID != "" {
|
|
||||||
if project, err := h.projectService.GetByUUID(*config.ProjectUUID, h.dbUsername); err == nil && project != nil {
|
|
||||||
projectCode = project.Code
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set headers before streaming
|
|
||||||
// Use price update time if available, otherwise creation time
|
|
||||||
exportDate := config.CreatedAt
|
|
||||||
if config.PriceUpdatedAt != nil {
|
|
||||||
exportDate = *config.PriceUpdatedAt
|
|
||||||
}
|
|
||||||
filename := fmt.Sprintf("%s (%s) %s BOM.csv", exportDate.Format("2006-01-02"), projectCode, config.Name)
|
|
||||||
c.Header("Content-Type", "text/csv; charset=utf-8")
|
|
||||||
c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
|
c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
|
||||||
|
c.Data(http.StatusOK, "text/csv; charset=utf-8", csvData)
|
||||||
// Stream CSV (cannot return JSON after this point)
|
|
||||||
if err := h.exportService.ToCSV(c.Writer, data); err != nil {
|
|
||||||
c.Error(err) // Log only
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExportProjectCSV exports all active configurations of a project as a single CSV file.
|
|
||||||
func (h *ExportHandler) ExportProjectCSV(c *gin.Context) {
|
|
||||||
projectUUID := c.Param("uuid")
|
|
||||||
|
|
||||||
project, err := h.projectService.GetByUUID(projectUUID, h.dbUsername)
|
|
||||||
if err != nil {
|
|
||||||
RespondError(c, http.StatusNotFound, "resource not found", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
result, err := h.projectService.ListConfigurations(projectUUID, h.dbUsername, "active")
|
|
||||||
if err != nil {
|
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(result.Configs) == 0 {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "no configurations to export"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
data := h.exportService.ProjectToExportData(result.Configs)
|
|
||||||
|
|
||||||
// Filename: YYYY-MM-DD (ProjectCode) BOM.csv
|
|
||||||
filename := fmt.Sprintf("%s (%s) BOM.csv", time.Now().Format("2006-01-02"), project.Code)
|
|
||||||
c.Header("Content-Type", "text/csv; charset=utf-8")
|
|
||||||
c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
|
|
||||||
|
|
||||||
if err := h.exportService.ToCSV(c.Writer, data); err != nil {
|
|
||||||
c.Error(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *ExportHandler) ExportProjectPricingCSV(c *gin.Context) {
|
|
||||||
projectUUID := c.Param("uuid")
|
|
||||||
|
|
||||||
var req ProjectExportOptionsRequest
|
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
|
||||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
project, err := h.projectService.GetByUUID(projectUUID, h.dbUsername)
|
|
||||||
if err != nil {
|
|
||||||
RespondError(c, http.StatusNotFound, "resource not found", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
result, err := h.projectService.ListConfigurations(projectUUID, h.dbUsername, "active")
|
|
||||||
if err != nil {
|
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if len(result.Configs) == 0 {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "no configurations to export"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
opts := services.ProjectPricingExportOptions{
|
|
||||||
IncludeLOT: req.IncludeLOT,
|
|
||||||
IncludeBOM: req.IncludeBOM,
|
|
||||||
IncludeEstimate: req.IncludeEstimate,
|
|
||||||
IncludeStock: req.IncludeStock,
|
|
||||||
IncludeCompetitor: req.IncludeCompetitor,
|
|
||||||
Basis: req.Basis,
|
|
||||||
SaleMarkup: req.SaleMarkup,
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := h.exportService.ProjectToPricingExportData(result.Configs, opts)
|
|
||||||
if err != nil {
|
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
basisLabel := "FOB"
|
|
||||||
if strings.EqualFold(strings.TrimSpace(req.Basis), "ddp") {
|
|
||||||
basisLabel = "DDP"
|
|
||||||
}
|
|
||||||
variantLabel := strings.TrimSpace(project.Variant)
|
|
||||||
if variantLabel == "" {
|
|
||||||
variantLabel = "main"
|
|
||||||
}
|
|
||||||
filename := fmt.Sprintf("%s (%s) %s %s.csv", time.Now().Format("2006-01-02"), project.Code, basisLabel, variantLabel)
|
|
||||||
c.Header("Content-Type", "text/csv; charset=utf-8")
|
|
||||||
c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
|
|
||||||
|
|
||||||
if err := h.exportService.ToPricingCSV(c.Writer, data, opts); err != nil {
|
|
||||||
c.Error(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,303 +0,0 @@
|
|||||||
package handlers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/csv"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/services"
|
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Mock services for testing
|
|
||||||
type mockConfigService struct {
|
|
||||||
config *models.Configuration
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *mockConfigService) GetByUUID(uuid string, ownerUsername string) (*models.Configuration, error) {
|
|
||||||
return m.config, m.err
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExportCSV_Success(t *testing.T) {
|
|
||||||
gin.SetMode(gin.TestMode)
|
|
||||||
|
|
||||||
// Create handler with mocks
|
|
||||||
exportSvc := services.NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
handler := NewExportHandler(
|
|
||||||
exportSvc,
|
|
||||||
&mockConfigService{},
|
|
||||||
nil,
|
|
||||||
"testuser",
|
|
||||||
)
|
|
||||||
|
|
||||||
// Create JSON request body
|
|
||||||
jsonBody := `{
|
|
||||||
"name": "Test Export",
|
|
||||||
"items": [
|
|
||||||
{
|
|
||||||
"lot_name": "LOT-001",
|
|
||||||
"quantity": 2,
|
|
||||||
"unit_price": 100.50
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"notes": "Test notes"
|
|
||||||
}`
|
|
||||||
|
|
||||||
// Create HTTP request
|
|
||||||
req, _ := http.NewRequest("POST", "/api/export/csv", bytes.NewBufferString(jsonBody))
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
|
|
||||||
// Create response recorder
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
|
|
||||||
// Create Gin context
|
|
||||||
c, _ := gin.CreateTestContext(w)
|
|
||||||
c.Request = req
|
|
||||||
|
|
||||||
// Call handler
|
|
||||||
handler.ExportCSV(c)
|
|
||||||
|
|
||||||
// Check status code
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("Expected status 200, got %d", w.Code)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check Content-Type header
|
|
||||||
contentType := w.Header().Get("Content-Type")
|
|
||||||
if contentType != "text/csv; charset=utf-8" {
|
|
||||||
t.Errorf("Expected Content-Type 'text/csv; charset=utf-8', got %q", contentType)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check for BOM
|
|
||||||
responseBody := w.Body.Bytes()
|
|
||||||
if len(responseBody) < 3 {
|
|
||||||
t.Fatalf("Response too short to contain BOM")
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedBOM := []byte{0xEF, 0xBB, 0xBF}
|
|
||||||
actualBOM := responseBody[:3]
|
|
||||||
if !bytes.Equal(actualBOM, expectedBOM) {
|
|
||||||
t.Errorf("UTF-8 BOM mismatch. Expected %v, got %v", expectedBOM, actualBOM)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check semicolon delimiter in CSV
|
|
||||||
reader := csv.NewReader(bytes.NewReader(responseBody[3:]))
|
|
||||||
reader.Comma = ';'
|
|
||||||
|
|
||||||
header, err := reader.Read()
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed to parse CSV header: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(header) != 8 {
|
|
||||||
t.Errorf("Expected 8 columns, got %d", len(header))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExportCSV_InvalidRequest(t *testing.T) {
|
|
||||||
gin.SetMode(gin.TestMode)
|
|
||||||
|
|
||||||
exportSvc := services.NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
handler := NewExportHandler(
|
|
||||||
exportSvc,
|
|
||||||
&mockConfigService{},
|
|
||||||
nil,
|
|
||||||
"testuser",
|
|
||||||
)
|
|
||||||
|
|
||||||
// Create invalid request (missing required field)
|
|
||||||
req, _ := http.NewRequest("POST", "/api/export/csv", bytes.NewBufferString(`{"name": "Test"}`))
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
c, _ := gin.CreateTestContext(w)
|
|
||||||
c.Request = req
|
|
||||||
|
|
||||||
handler.ExportCSV(c)
|
|
||||||
|
|
||||||
// Should return 400 Bad Request
|
|
||||||
if w.Code != http.StatusBadRequest {
|
|
||||||
t.Errorf("Expected status 400, got %d", w.Code)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should return JSON error
|
|
||||||
var errResp map[string]interface{}
|
|
||||||
json.Unmarshal(w.Body.Bytes(), &errResp)
|
|
||||||
if _, hasError := errResp["error"]; !hasError {
|
|
||||||
t.Errorf("Expected error in JSON response")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExportCSV_EmptyItems(t *testing.T) {
|
|
||||||
gin.SetMode(gin.TestMode)
|
|
||||||
|
|
||||||
exportSvc := services.NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
handler := NewExportHandler(
|
|
||||||
exportSvc,
|
|
||||||
&mockConfigService{},
|
|
||||||
nil,
|
|
||||||
"testuser",
|
|
||||||
)
|
|
||||||
|
|
||||||
// Create request with empty items array - should fail binding validation
|
|
||||||
req, _ := http.NewRequest("POST", "/api/export/csv", bytes.NewBufferString(`{"name":"Empty Export","items":[],"notes":""}`))
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
c, _ := gin.CreateTestContext(w)
|
|
||||||
c.Request = req
|
|
||||||
|
|
||||||
handler.ExportCSV(c)
|
|
||||||
|
|
||||||
// Should return 400 Bad Request (validation error from gin binding)
|
|
||||||
if w.Code != http.StatusBadRequest {
|
|
||||||
t.Logf("Status code: %d (expected 400 for empty items)", w.Code)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExportConfigCSV_Success(t *testing.T) {
|
|
||||||
gin.SetMode(gin.TestMode)
|
|
||||||
|
|
||||||
// Mock configuration
|
|
||||||
mockConfig := &models.Configuration{
|
|
||||||
UUID: "test-uuid",
|
|
||||||
Name: "Test Config",
|
|
||||||
OwnerUsername: "testuser",
|
|
||||||
Items: models.ConfigItems{
|
|
||||||
{
|
|
||||||
LotName: "LOT-001",
|
|
||||||
Quantity: 1,
|
|
||||||
UnitPrice: 100.0,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
}
|
|
||||||
|
|
||||||
exportSvc := services.NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
handler := NewExportHandler(
|
|
||||||
exportSvc,
|
|
||||||
&mockConfigService{config: mockConfig},
|
|
||||||
nil,
|
|
||||||
"testuser",
|
|
||||||
)
|
|
||||||
|
|
||||||
// Create HTTP request
|
|
||||||
req, _ := http.NewRequest("GET", "/api/configs/test-uuid/export", nil)
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
|
|
||||||
c, _ := gin.CreateTestContext(w)
|
|
||||||
c.Request = req
|
|
||||||
c.Params = gin.Params{
|
|
||||||
{Key: "uuid", Value: "test-uuid"},
|
|
||||||
}
|
|
||||||
|
|
||||||
handler.ExportConfigCSV(c)
|
|
||||||
|
|
||||||
// Check status code
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Errorf("Expected status 200, got %d", w.Code)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check Content-Type header
|
|
||||||
contentType := w.Header().Get("Content-Type")
|
|
||||||
if contentType != "text/csv; charset=utf-8" {
|
|
||||||
t.Errorf("Expected Content-Type 'text/csv; charset=utf-8', got %q", contentType)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check for BOM
|
|
||||||
responseBody := w.Body.Bytes()
|
|
||||||
if len(responseBody) < 3 {
|
|
||||||
t.Fatalf("Response too short to contain BOM")
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedBOM := []byte{0xEF, 0xBB, 0xBF}
|
|
||||||
actualBOM := responseBody[:3]
|
|
||||||
if !bytes.Equal(actualBOM, expectedBOM) {
|
|
||||||
t.Errorf("UTF-8 BOM mismatch")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExportConfigCSV_NotFound(t *testing.T) {
|
|
||||||
gin.SetMode(gin.TestMode)
|
|
||||||
|
|
||||||
exportSvc := services.NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
handler := NewExportHandler(
|
|
||||||
exportSvc,
|
|
||||||
&mockConfigService{err: errors.New("config not found")},
|
|
||||||
nil,
|
|
||||||
"testuser",
|
|
||||||
)
|
|
||||||
|
|
||||||
req, _ := http.NewRequest("GET", "/api/configs/nonexistent-uuid/export", nil)
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
|
|
||||||
c, _ := gin.CreateTestContext(w)
|
|
||||||
c.Request = req
|
|
||||||
c.Params = gin.Params{
|
|
||||||
{Key: "uuid", Value: "nonexistent-uuid"},
|
|
||||||
}
|
|
||||||
handler.ExportConfigCSV(c)
|
|
||||||
|
|
||||||
// Should return 404 Not Found
|
|
||||||
if w.Code != http.StatusNotFound {
|
|
||||||
t.Errorf("Expected status 404, got %d", w.Code)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should return JSON error
|
|
||||||
var errResp map[string]interface{}
|
|
||||||
json.Unmarshal(w.Body.Bytes(), &errResp)
|
|
||||||
if _, hasError := errResp["error"]; !hasError {
|
|
||||||
t.Errorf("Expected error in JSON response")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestExportConfigCSV_EmptyItems(t *testing.T) {
|
|
||||||
gin.SetMode(gin.TestMode)
|
|
||||||
|
|
||||||
// Mock configuration with empty items
|
|
||||||
mockConfig := &models.Configuration{
|
|
||||||
UUID: "test-uuid",
|
|
||||||
Name: "Empty Config",
|
|
||||||
OwnerUsername: "testuser",
|
|
||||||
Items: models.ConfigItems{},
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
}
|
|
||||||
|
|
||||||
exportSvc := services.NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
handler := NewExportHandler(
|
|
||||||
exportSvc,
|
|
||||||
&mockConfigService{config: mockConfig},
|
|
||||||
nil,
|
|
||||||
"testuser",
|
|
||||||
)
|
|
||||||
|
|
||||||
req, _ := http.NewRequest("GET", "/api/configs/test-uuid/export", nil)
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
|
|
||||||
c, _ := gin.CreateTestContext(w)
|
|
||||||
c.Request = req
|
|
||||||
c.Params = gin.Params{
|
|
||||||
{Key: "uuid", Value: "test-uuid"},
|
|
||||||
}
|
|
||||||
handler.ExportConfigCSV(c)
|
|
||||||
|
|
||||||
// Should return 400 Bad Request
|
|
||||||
if w.Code != http.StatusBadRequest {
|
|
||||||
t.Errorf("Expected status 400, got %d", w.Code)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should return JSON error
|
|
||||||
var errResp map[string]interface{}
|
|
||||||
json.Unmarshal(w.Body.Bytes(), &errResp)
|
|
||||||
if _, hasError := errResp["error"]; !hasError {
|
|
||||||
t.Errorf("Expected error in JSON response")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,106 +0,0 @@
|
|||||||
package handlers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PartnumberBooksHandler provides read-only access to local partnumber book snapshots.
|
|
||||||
type PartnumberBooksHandler struct {
|
|
||||||
localDB *localdb.LocalDB
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewPartnumberBooksHandler(localDB *localdb.LocalDB) *PartnumberBooksHandler {
|
|
||||||
return &PartnumberBooksHandler{localDB: localDB}
|
|
||||||
}
|
|
||||||
|
|
||||||
// List returns all local partnumber book snapshots.
|
|
||||||
// GET /api/partnumber-books
|
|
||||||
func (h *PartnumberBooksHandler) List(c *gin.Context) {
|
|
||||||
bookRepo := repository.NewPartnumberBookRepository(h.localDB.DB())
|
|
||||||
books, err := bookRepo.ListBooks()
|
|
||||||
if err != nil {
|
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
type bookSummary struct {
|
|
||||||
ID uint `json:"id"`
|
|
||||||
ServerID int `json:"server_id"`
|
|
||||||
Version string `json:"version"`
|
|
||||||
CreatedAt string `json:"created_at"`
|
|
||||||
IsActive bool `json:"is_active"`
|
|
||||||
ItemCount int64 `json:"item_count"`
|
|
||||||
}
|
|
||||||
|
|
||||||
summaries := make([]bookSummary, 0, len(books))
|
|
||||||
for _, b := range books {
|
|
||||||
summaries = append(summaries, bookSummary{
|
|
||||||
ID: b.ID,
|
|
||||||
ServerID: b.ServerID,
|
|
||||||
Version: b.Version,
|
|
||||||
CreatedAt: b.CreatedAt.Format("2006-01-02"),
|
|
||||||
IsActive: b.IsActive,
|
|
||||||
ItemCount: bookRepo.CountBookItems(b.ID),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
|
||||||
"books": summaries,
|
|
||||||
"total": len(summaries),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetItems returns items for a partnumber book by server ID.
|
|
||||||
// GET /api/partnumber-books/:id
|
|
||||||
func (h *PartnumberBooksHandler) GetItems(c *gin.Context) {
|
|
||||||
idStr := c.Param("id")
|
|
||||||
id, err := strconv.ParseUint(idStr, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid book ID"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
bookRepo := repository.NewPartnumberBookRepository(h.localDB.DB())
|
|
||||||
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
|
||||||
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "100"))
|
|
||||||
search := strings.TrimSpace(c.Query("search"))
|
|
||||||
if page < 1 {
|
|
||||||
page = 1
|
|
||||||
}
|
|
||||||
if perPage < 1 || perPage > 500 {
|
|
||||||
perPage = 100
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find local book by server_id
|
|
||||||
var book localdb.LocalPartnumberBook
|
|
||||||
if err := h.localDB.DB().Where("server_id = ?", id).First(&book).Error; err != nil {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "partnumber book not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
items, total, err := bookRepo.GetBookItemsPage(book.ID, search, page, perPage)
|
|
||||||
if err != nil {
|
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
|
||||||
"book_id": book.ServerID,
|
|
||||||
"version": book.Version,
|
|
||||||
"is_active": book.IsActive,
|
|
||||||
"partnumbers": book.PartnumbersJSON,
|
|
||||||
"items": items,
|
|
||||||
"total": total,
|
|
||||||
"page": page,
|
|
||||||
"per_page": perPage,
|
|
||||||
"search": search,
|
|
||||||
"book_total": bookRepo.CountBookItems(book.ID),
|
|
||||||
"lot_count": bookRepo.CountDistinctLots(book.ID),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,120 +1,99 @@
|
|||||||
package handlers
|
package handlers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"sort"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/services/pricelist"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
)
|
)
|
||||||
|
|
||||||
type PricelistHandler struct {
|
type PricelistHandler struct {
|
||||||
|
service *pricelist.Service
|
||||||
localDB *localdb.LocalDB
|
localDB *localdb.LocalDB
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewPricelistHandler(localDB *localdb.LocalDB) *PricelistHandler {
|
func NewPricelistHandler(service *pricelist.Service, localDB *localdb.LocalDB) *PricelistHandler {
|
||||||
return &PricelistHandler{localDB: localDB}
|
return &PricelistHandler{service: service, localDB: localDB}
|
||||||
}
|
}
|
||||||
|
|
||||||
// List returns all pricelists with pagination.
|
// List returns all pricelists with pagination
|
||||||
func (h *PricelistHandler) List(c *gin.Context) {
|
func (h *PricelistHandler) List(c *gin.Context) {
|
||||||
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
||||||
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "20"))
|
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "20"))
|
||||||
if page < 1 {
|
|
||||||
page = 1
|
|
||||||
}
|
|
||||||
if perPage < 1 {
|
|
||||||
perPage = 20
|
|
||||||
}
|
|
||||||
source := c.Query("source")
|
|
||||||
activeOnly := c.DefaultQuery("active_only", "false") == "true"
|
activeOnly := c.DefaultQuery("active_only", "false") == "true"
|
||||||
|
source := c.Query("source")
|
||||||
|
|
||||||
localPLs, err := h.localDB.GetLocalPricelists()
|
var (
|
||||||
|
pricelists any
|
||||||
|
total int64
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
|
if activeOnly {
|
||||||
|
pricelists, total, err = h.service.ListActiveBySource(page, perPage, source)
|
||||||
|
} else {
|
||||||
|
pricelists, total, err = h.service.ListBySource(page, perPage, source)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if source != "" {
|
|
||||||
filtered := localPLs[:0]
|
// If offline (empty list), fallback to local pricelists
|
||||||
for _, lpl := range localPLs {
|
if total == 0 && h.localDB != nil {
|
||||||
if strings.EqualFold(lpl.Source, source) {
|
localPLs, err := h.localDB.GetLocalPricelists()
|
||||||
filtered = append(filtered, lpl)
|
if err == nil && len(localPLs) > 0 {
|
||||||
|
if source != "" {
|
||||||
|
filtered := localPLs[:0]
|
||||||
|
for _, lpl := range localPLs {
|
||||||
|
if lpl.Source == source {
|
||||||
|
filtered = append(filtered, lpl)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
localPLs = filtered
|
||||||
}
|
}
|
||||||
}
|
// Convert to PricelistSummary format
|
||||||
localPLs = filtered
|
summaries := make([]map[string]interface{}, len(localPLs))
|
||||||
}
|
for i, lpl := range localPLs {
|
||||||
type pricelistWithCount struct {
|
summaries[i] = map[string]interface{}{
|
||||||
pricelist localdb.LocalPricelist
|
"id": lpl.ServerID,
|
||||||
itemCount int64
|
"source": lpl.Source,
|
||||||
usageCount int
|
"version": lpl.Version,
|
||||||
}
|
"created_by": "sync",
|
||||||
withCounts := make([]pricelistWithCount, 0, len(localPLs))
|
"item_count": 0, // Not tracked
|
||||||
for _, lpl := range localPLs {
|
"usage_count": 0, // Not tracked in local
|
||||||
itemCount := h.localDB.CountLocalPricelistItems(lpl.ID)
|
"is_active": true,
|
||||||
if activeOnly && itemCount == 0 {
|
"created_at": lpl.CreatedAt,
|
||||||
continue
|
"synced_from": "local",
|
||||||
}
|
}
|
||||||
usageCount := 0
|
|
||||||
if lpl.IsUsed {
|
|
||||||
usageCount = 1
|
|
||||||
}
|
|
||||||
withCounts = append(withCounts, pricelistWithCount{
|
|
||||||
pricelist: lpl,
|
|
||||||
itemCount: itemCount,
|
|
||||||
usageCount: usageCount,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
localPLs = localPLs[:0]
|
|
||||||
for _, row := range withCounts {
|
|
||||||
localPLs = append(localPLs, row.pricelist)
|
|
||||||
}
|
|
||||||
sort.SliceStable(localPLs, func(i, j int) bool { return localPLs[i].CreatedAt.After(localPLs[j].CreatedAt) })
|
|
||||||
total := len(localPLs)
|
|
||||||
start := (page - 1) * perPage
|
|
||||||
if start > total {
|
|
||||||
start = total
|
|
||||||
}
|
|
||||||
end := start + perPage
|
|
||||||
if end > total {
|
|
||||||
end = total
|
|
||||||
}
|
|
||||||
pageSlice := localPLs[start:end]
|
|
||||||
summaries := make([]map[string]interface{}, 0, len(pageSlice))
|
|
||||||
for _, lpl := range pageSlice {
|
|
||||||
itemCount := int64(0)
|
|
||||||
usageCount := 0
|
|
||||||
for _, row := range withCounts {
|
|
||||||
if row.pricelist.ID == lpl.ID {
|
|
||||||
itemCount = row.itemCount
|
|
||||||
usageCount = row.usageCount
|
|
||||||
break
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"pricelists": summaries,
|
||||||
|
"total": len(summaries),
|
||||||
|
"page": page,
|
||||||
|
"per_page": perPage,
|
||||||
|
"offline": true,
|
||||||
|
})
|
||||||
|
return
|
||||||
}
|
}
|
||||||
summaries = append(summaries, map[string]interface{}{
|
|
||||||
"id": lpl.ServerID,
|
|
||||||
"source": lpl.Source,
|
|
||||||
"version": lpl.Version,
|
|
||||||
"created_by": "sync",
|
|
||||||
"item_count": itemCount,
|
|
||||||
"usage_count": usageCount,
|
|
||||||
"is_active": true,
|
|
||||||
"created_at": lpl.CreatedAt,
|
|
||||||
"synced_from": "local",
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
c.JSON(http.StatusOK, gin.H{
|
||||||
"pricelists": summaries,
|
"pricelists": pricelists,
|
||||||
"total": total,
|
"total": total,
|
||||||
"page": page,
|
"page": page,
|
||||||
"per_page": perPage,
|
"per_page": perPage,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get returns a single pricelist by ID.
|
// Get returns a single pricelist by ID
|
||||||
func (h *PricelistHandler) Get(c *gin.Context) {
|
func (h *PricelistHandler) Get(c *gin.Context) {
|
||||||
idStr := c.Param("id")
|
idStr := c.Param("id")
|
||||||
id, err := strconv.ParseUint(idStr, 10, 32)
|
id, err := strconv.ParseUint(idStr, 10, 32)
|
||||||
@@ -123,25 +102,210 @@ func (h *PricelistHandler) Get(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
localPL, err := h.localDB.GetLocalPricelistByServerID(uint(id))
|
pl, err := h.service.GetByID(uint(id))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "pricelist not found"})
|
c.JSON(http.StatusNotFound, gin.H{"error": "pricelist not found"})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
c.JSON(http.StatusOK, pl)
|
||||||
"id": localPL.ServerID,
|
}
|
||||||
"source": localPL.Source,
|
|
||||||
"version": localPL.Version,
|
// Create creates a new pricelist from current prices
|
||||||
"created_by": "sync",
|
func (h *PricelistHandler) Create(c *gin.Context) {
|
||||||
"item_count": h.localDB.CountLocalPricelistItems(localPL.ID),
|
canWrite, debugInfo := h.service.CanWriteDebug()
|
||||||
"is_active": true,
|
if !canWrite {
|
||||||
"created_at": localPL.CreatedAt,
|
c.JSON(http.StatusForbidden, gin.H{
|
||||||
"synced_from": "local",
|
"error": "pricelist write is not allowed",
|
||||||
|
"debug": debugInfo,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var req struct {
|
||||||
|
Source string `json:"source"`
|
||||||
|
Items []struct {
|
||||||
|
LotName string `json:"lot_name"`
|
||||||
|
Price float64 `json:"price"`
|
||||||
|
} `json:"items"`
|
||||||
|
}
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil && !errors.Is(err, io.EOF) {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
source := string(models.NormalizePricelistSource(req.Source))
|
||||||
|
|
||||||
|
// Get the database username as the creator
|
||||||
|
createdBy := h.localDB.GetDBUser()
|
||||||
|
if createdBy == "" {
|
||||||
|
createdBy = "unknown"
|
||||||
|
}
|
||||||
|
sourceItems := make([]pricelist.CreateItemInput, 0, len(req.Items))
|
||||||
|
for _, item := range req.Items {
|
||||||
|
sourceItems = append(sourceItems, pricelist.CreateItemInput{
|
||||||
|
LotName: item.LotName,
|
||||||
|
Price: item.Price,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pl, err := h.service.CreateForSourceWithProgress(createdBy, source, sourceItems, nil)
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusCreated, pl)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateWithProgress creates a pricelist and streams progress updates over SSE.
|
||||||
|
func (h *PricelistHandler) CreateWithProgress(c *gin.Context) {
|
||||||
|
canWrite, debugInfo := h.service.CanWriteDebug()
|
||||||
|
if !canWrite {
|
||||||
|
c.JSON(http.StatusForbidden, gin.H{
|
||||||
|
"error": "pricelist write is not allowed",
|
||||||
|
"debug": debugInfo,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var req struct {
|
||||||
|
Source string `json:"source"`
|
||||||
|
Items []struct {
|
||||||
|
LotName string `json:"lot_name"`
|
||||||
|
Price float64 `json:"price"`
|
||||||
|
} `json:"items"`
|
||||||
|
}
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil && !errors.Is(err, io.EOF) {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
source := string(models.NormalizePricelistSource(req.Source))
|
||||||
|
|
||||||
|
createdBy := h.localDB.GetDBUser()
|
||||||
|
if createdBy == "" {
|
||||||
|
createdBy = "unknown"
|
||||||
|
}
|
||||||
|
sourceItems := make([]pricelist.CreateItemInput, 0, len(req.Items))
|
||||||
|
for _, item := range req.Items {
|
||||||
|
sourceItems = append(sourceItems, pricelist.CreateItemInput{
|
||||||
|
LotName: item.LotName,
|
||||||
|
Price: item.Price,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Header("Content-Type", "text/event-stream")
|
||||||
|
c.Header("Cache-Control", "no-cache")
|
||||||
|
c.Header("Connection", "keep-alive")
|
||||||
|
c.Header("X-Accel-Buffering", "no")
|
||||||
|
|
||||||
|
flusher, ok := c.Writer.(http.Flusher)
|
||||||
|
if !ok {
|
||||||
|
pl, err := h.service.CreateForSourceWithProgress(createdBy, source, sourceItems, nil)
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.JSON(http.StatusCreated, pl)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sendProgress := func(payload gin.H) {
|
||||||
|
c.SSEvent("progress", payload)
|
||||||
|
flusher.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
sendProgress(gin.H{"current": 0, "total": 4, "status": "starting", "message": "Запуск..."})
|
||||||
|
pl, err := h.service.CreateForSourceWithProgress(createdBy, source, sourceItems, func(p pricelist.CreateProgress) {
|
||||||
|
sendProgress(gin.H{
|
||||||
|
"current": p.Current,
|
||||||
|
"total": p.Total,
|
||||||
|
"status": p.Status,
|
||||||
|
"message": p.Message,
|
||||||
|
"updated": p.Updated,
|
||||||
|
"errors": p.Errors,
|
||||||
|
"lot_name": p.LotName,
|
||||||
|
})
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
sendProgress(gin.H{
|
||||||
|
"current": 0,
|
||||||
|
"total": 4,
|
||||||
|
"status": "error",
|
||||||
|
"message": fmt.Sprintf("Ошибка: %v", err),
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sendProgress(gin.H{
|
||||||
|
"current": 4,
|
||||||
|
"total": 4,
|
||||||
|
"status": "completed",
|
||||||
|
"message": "Готово",
|
||||||
|
"pricelist": pl,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetItems returns items for a pricelist with pagination.
|
// Delete deletes a pricelist by ID
|
||||||
|
func (h *PricelistHandler) Delete(c *gin.Context) {
|
||||||
|
canWrite, debugInfo := h.service.CanWriteDebug()
|
||||||
|
if !canWrite {
|
||||||
|
c.JSON(http.StatusForbidden, gin.H{
|
||||||
|
"error": "pricelist write is not allowed",
|
||||||
|
"debug": debugInfo,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
idStr := c.Param("id")
|
||||||
|
id, err := strconv.ParseUint(idStr, 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid pricelist ID"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := h.service.Delete(uint(id)); err != nil {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{"message": "pricelist deleted"})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetActive toggles active flag on a pricelist.
|
||||||
|
func (h *PricelistHandler) SetActive(c *gin.Context) {
|
||||||
|
canWrite, debugInfo := h.service.CanWriteDebug()
|
||||||
|
if !canWrite {
|
||||||
|
c.JSON(http.StatusForbidden, gin.H{
|
||||||
|
"error": "pricelist write is not allowed",
|
||||||
|
"debug": debugInfo,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
idStr := c.Param("id")
|
||||||
|
id, err := strconv.ParseUint(idStr, 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid pricelist ID"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var req struct {
|
||||||
|
IsActive bool `json:"is_active"`
|
||||||
|
}
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := h.service.SetActive(uint(id), req.IsActive); err != nil {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{"message": "updated", "is_active": req.IsActive})
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetItems returns items for a pricelist with pagination
|
||||||
func (h *PricelistHandler) GetItems(c *gin.Context) {
|
func (h *PricelistHandler) GetItems(c *gin.Context) {
|
||||||
idStr := c.Param("id")
|
idStr := c.Param("id")
|
||||||
id, err := strconv.ParseUint(idStr, 10, 32)
|
id, err := strconv.ParseUint(idStr, 10, 32)
|
||||||
@@ -154,126 +318,67 @@ func (h *PricelistHandler) GetItems(c *gin.Context) {
|
|||||||
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "50"))
|
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "50"))
|
||||||
search := c.Query("search")
|
search := c.Query("search")
|
||||||
|
|
||||||
localPL, err := h.localDB.GetLocalPricelistByServerID(uint(id))
|
items, total, err := h.service.GetItems(uint(id), page, perPage, search)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "pricelist not found"})
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if page < 1 {
|
pl, _ := h.service.GetByID(uint(id))
|
||||||
page = 1
|
source := ""
|
||||||
}
|
if pl != nil {
|
||||||
if perPage < 1 {
|
source = pl.Source
|
||||||
perPage = 50
|
|
||||||
}
|
|
||||||
var items []localdb.LocalPricelistItem
|
|
||||||
dbq := h.localDB.DB().Model(&localdb.LocalPricelistItem{}).Where("pricelist_id = ?", localPL.ID)
|
|
||||||
if strings.TrimSpace(search) != "" {
|
|
||||||
dbq = dbq.Where("lot_name LIKE ?", "%"+strings.TrimSpace(search)+"%")
|
|
||||||
}
|
|
||||||
var total int64
|
|
||||||
if err := dbq.Count(&total).Error; err != nil {
|
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
offset := (page - 1) * perPage
|
|
||||||
|
|
||||||
if err := dbq.Order("lot_name").Offset(offset).Limit(perPage).Find(&items).Error; err != nil {
|
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
lotNames := make([]string, len(items))
|
|
||||||
for i, item := range items {
|
|
||||||
lotNames[i] = item.LotName
|
|
||||||
}
|
|
||||||
type compRow struct {
|
|
||||||
LotName string
|
|
||||||
LotDescription string
|
|
||||||
}
|
|
||||||
var comps []compRow
|
|
||||||
if len(lotNames) > 0 {
|
|
||||||
h.localDB.DB().Table("local_components").
|
|
||||||
Select("lot_name, lot_description").
|
|
||||||
Where("lot_name IN ?", lotNames).
|
|
||||||
Scan(&comps)
|
|
||||||
}
|
|
||||||
descMap := make(map[string]string, len(comps))
|
|
||||||
for _, c := range comps {
|
|
||||||
descMap[c.LotName] = c.LotDescription
|
|
||||||
}
|
|
||||||
|
|
||||||
resultItems := make([]gin.H, 0, len(items))
|
|
||||||
for _, item := range items {
|
|
||||||
resultItems = append(resultItems, gin.H{
|
|
||||||
"id": item.ID,
|
|
||||||
"lot_name": item.LotName,
|
|
||||||
"lot_description": descMap[item.LotName],
|
|
||||||
"price": item.Price,
|
|
||||||
"category": item.LotCategory,
|
|
||||||
"available_qty": item.AvailableQty,
|
|
||||||
"partnumbers": []string(item.Partnumbers),
|
|
||||||
"partnumber_qtys": map[string]interface{}{},
|
|
||||||
"competitor_names": []string{},
|
|
||||||
"price_spread_pct": nil,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
c.JSON(http.StatusOK, gin.H{
|
||||||
"source": localPL.Source,
|
"source": source,
|
||||||
"items": resultItems,
|
"items": items,
|
||||||
"total": total,
|
"total": total,
|
||||||
"page": page,
|
"page": page,
|
||||||
"per_page": perPage,
|
"per_page": perPage,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *PricelistHandler) GetLotNames(c *gin.Context) {
|
// CanWrite returns whether the current user can create pricelists
|
||||||
idStr := c.Param("id")
|
func (h *PricelistHandler) CanWrite(c *gin.Context) {
|
||||||
id, err := strconv.ParseUint(idStr, 10, 32)
|
canWrite, debugInfo := h.service.CanWriteDebug()
|
||||||
if err != nil {
|
c.JSON(http.StatusOK, gin.H{"can_write": canWrite, "debug": debugInfo})
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid pricelist ID"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
localPL, err := h.localDB.GetLocalPricelistByServerID(uint(id))
|
|
||||||
if err != nil {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "pricelist not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
items, err := h.localDB.GetLocalPricelistItems(localPL.ID)
|
|
||||||
if err != nil {
|
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
lotNames := make([]string, 0, len(items))
|
|
||||||
for _, item := range items {
|
|
||||||
lotNames = append(lotNames, item.LotName)
|
|
||||||
}
|
|
||||||
sort.Strings(lotNames)
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
|
||||||
"lot_names": lotNames,
|
|
||||||
"total": len(lotNames),
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetLatest returns the most recent active pricelist.
|
// GetLatest returns the most recent active pricelist
|
||||||
func (h *PricelistHandler) GetLatest(c *gin.Context) {
|
func (h *PricelistHandler) GetLatest(c *gin.Context) {
|
||||||
source := c.DefaultQuery("source", string(models.PricelistSourceEstimate))
|
source := c.DefaultQuery("source", string(models.PricelistSourceEstimate))
|
||||||
source = string(models.NormalizePricelistSource(source))
|
source = string(models.NormalizePricelistSource(source))
|
||||||
|
|
||||||
localPL, err := h.localDB.GetLatestLocalPricelistBySource(source)
|
// Try to get from server first
|
||||||
|
pl, err := h.service.GetLatestActiveBySource(source)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "no pricelists available"})
|
// If offline or no server pricelists, try to get from local cache
|
||||||
|
if h.localDB == nil {
|
||||||
|
c.JSON(http.StatusNotFound, gin.H{"error": "no database available"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
localPL, localErr := h.localDB.GetLatestLocalPricelistBySource(source)
|
||||||
|
if localErr != nil {
|
||||||
|
// No local pricelists either
|
||||||
|
c.JSON(http.StatusNotFound, gin.H{
|
||||||
|
"error": "no pricelists available",
|
||||||
|
"local_error": localErr.Error(),
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Return local pricelist
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"id": localPL.ServerID,
|
||||||
|
"source": localPL.Source,
|
||||||
|
"version": localPL.Version,
|
||||||
|
"created_by": "sync",
|
||||||
|
"item_count": 0, // Not tracked in local pricelists
|
||||||
|
"is_active": true,
|
||||||
|
"created_at": localPL.CreatedAt,
|
||||||
|
"synced_from": "local",
|
||||||
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
c.JSON(http.StatusOK, gin.H{
|
|
||||||
"id": localPL.ServerID,
|
c.JSON(http.StatusOK, pl)
|
||||||
"source": localPL.Source,
|
|
||||||
"version": localPL.Version,
|
|
||||||
"created_by": "sync",
|
|
||||||
"item_count": h.localDB.CountLocalPricelistItems(localPL.ID),
|
|
||||||
"is_active": true,
|
|
||||||
"created_at": localPL.CreatedAt,
|
|
||||||
"synced_from": "local",
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,161 +0,0 @@
|
|||||||
package handlers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestPricelistGetItems_ReturnsLotCategoryFromLocalPricelistItems(t *testing.T) {
|
|
||||||
gin.SetMode(gin.TestMode)
|
|
||||||
|
|
||||||
local, err := localdb.New(filepath.Join(t.TempDir(), "local.db"))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("init local db: %v", err)
|
|
||||||
}
|
|
||||||
t.Cleanup(func() { _ = local.Close() })
|
|
||||||
|
|
||||||
if err := local.SaveLocalPricelist(&localdb.LocalPricelist{
|
|
||||||
ServerID: 1,
|
|
||||||
Source: "estimate",
|
|
||||||
Version: "S-2026-02-11-001",
|
|
||||||
Name: "test",
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
SyncedAt: time.Now(),
|
|
||||||
IsUsed: false,
|
|
||||||
}); err != nil {
|
|
||||||
t.Fatalf("save local pricelist: %v", err)
|
|
||||||
}
|
|
||||||
localPL, err := local.GetLocalPricelistByServerID(1)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("get local pricelist: %v", err)
|
|
||||||
}
|
|
||||||
if err := local.SaveLocalPricelistItems([]localdb.LocalPricelistItem{
|
|
||||||
{
|
|
||||||
PricelistID: localPL.ID,
|
|
||||||
LotName: "NO_UNDERSCORE_NAME",
|
|
||||||
LotCategory: "CPU",
|
|
||||||
Price: 10,
|
|
||||||
},
|
|
||||||
}); err != nil {
|
|
||||||
t.Fatalf("save local pricelist items: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
h := NewPricelistHandler(local)
|
|
||||||
|
|
||||||
req, _ := http.NewRequest("GET", "/api/pricelists/1/items?page=1&per_page=50", nil)
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
c, _ := gin.CreateTestContext(w)
|
|
||||||
c.Request = req
|
|
||||||
c.Params = gin.Params{{Key: "id", Value: "1"}}
|
|
||||||
|
|
||||||
h.GetItems(c)
|
|
||||||
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Fatalf("expected status 200, got %d: %s", w.Code, w.Body.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
var resp struct {
|
|
||||||
Items []struct {
|
|
||||||
LotName string `json:"lot_name"`
|
|
||||||
Category string `json:"category"`
|
|
||||||
UnitPrice any `json:"price"`
|
|
||||||
} `json:"items"`
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
|
||||||
t.Fatalf("unmarshal response: %v", err)
|
|
||||||
}
|
|
||||||
if len(resp.Items) != 1 {
|
|
||||||
t.Fatalf("expected 1 item, got %d", len(resp.Items))
|
|
||||||
}
|
|
||||||
if resp.Items[0].LotName != "NO_UNDERSCORE_NAME" {
|
|
||||||
t.Fatalf("expected lot_name NO_UNDERSCORE_NAME, got %q", resp.Items[0].LotName)
|
|
||||||
}
|
|
||||||
if resp.Items[0].Category != "CPU" {
|
|
||||||
t.Fatalf("expected category CPU, got %q", resp.Items[0].Category)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPricelistList_ActiveOnlyExcludesPricelistsWithoutItems(t *testing.T) {
|
|
||||||
gin.SetMode(gin.TestMode)
|
|
||||||
|
|
||||||
local, err := localdb.New(filepath.Join(t.TempDir(), "local_active_only.db"))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("init local db: %v", err)
|
|
||||||
}
|
|
||||||
t.Cleanup(func() { _ = local.Close() })
|
|
||||||
|
|
||||||
if err := local.SaveLocalPricelist(&localdb.LocalPricelist{
|
|
||||||
ServerID: 10,
|
|
||||||
Source: "estimate",
|
|
||||||
Version: "E-1",
|
|
||||||
Name: "with-items",
|
|
||||||
CreatedAt: time.Now().Add(-time.Minute),
|
|
||||||
SyncedAt: time.Now().Add(-time.Minute),
|
|
||||||
}); err != nil {
|
|
||||||
t.Fatalf("save with-items pricelist: %v", err)
|
|
||||||
}
|
|
||||||
withItems, err := local.GetLocalPricelistByServerID(10)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("load with-items pricelist: %v", err)
|
|
||||||
}
|
|
||||||
if err := local.SaveLocalPricelistItems([]localdb.LocalPricelistItem{
|
|
||||||
{
|
|
||||||
PricelistID: withItems.ID,
|
|
||||||
LotName: "CPU_X",
|
|
||||||
LotCategory: "CPU",
|
|
||||||
Price: 100,
|
|
||||||
},
|
|
||||||
}); err != nil {
|
|
||||||
t.Fatalf("save with-items pricelist items: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := local.SaveLocalPricelist(&localdb.LocalPricelist{
|
|
||||||
ServerID: 11,
|
|
||||||
Source: "estimate",
|
|
||||||
Version: "E-2",
|
|
||||||
Name: "without-items",
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
SyncedAt: time.Now(),
|
|
||||||
}); err != nil {
|
|
||||||
t.Fatalf("save without-items pricelist: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
h := NewPricelistHandler(local)
|
|
||||||
|
|
||||||
req, _ := http.NewRequest("GET", "/api/pricelists?source=estimate&active_only=true", nil)
|
|
||||||
w := httptest.NewRecorder()
|
|
||||||
c, _ := gin.CreateTestContext(w)
|
|
||||||
c.Request = req
|
|
||||||
|
|
||||||
h.List(c)
|
|
||||||
|
|
||||||
if w.Code != http.StatusOK {
|
|
||||||
t.Fatalf("expected status 200, got %d: %s", w.Code, w.Body.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
var resp struct {
|
|
||||||
Pricelists []struct {
|
|
||||||
ID uint `json:"id"`
|
|
||||||
} `json:"pricelists"`
|
|
||||||
Total int `json:"total"`
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
|
||||||
t.Fatalf("unmarshal response: %v", err)
|
|
||||||
}
|
|
||||||
if resp.Total != 1 {
|
|
||||||
t.Fatalf("expected total=1, got %d", resp.Total)
|
|
||||||
}
|
|
||||||
if len(resp.Pricelists) != 1 {
|
|
||||||
t.Fatalf("expected 1 pricelist, got %d", len(resp.Pricelists))
|
|
||||||
}
|
|
||||||
if resp.Pricelists[0].ID != 10 {
|
|
||||||
t.Fatalf("expected pricelist id=10, got %d", resp.Pricelists[0].ID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
938
internal/handlers/pricing.go
Normal file
938
internal/handlers/pricing.go
Normal file
@@ -0,0 +1,938 @@
|
|||||||
|
package handlers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/services/alerts"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/services/pricing"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
)
|
||||||
|
|
||||||
|
// calculateMedian returns the median of prices. The input does NOT need to
// be pre-sorted: the slice is sorted in place, so callers must not rely on
// the original element order afterwards. An empty slice yields 0.
func calculateMedian(prices []float64) float64 {
	if len(prices) == 0 {
		return 0
	}
	sort.Float64s(prices)
	n := len(prices)
	if n%2 == 0 {
		// Even count: mean of the two middle elements.
		return (prices[n/2-1] + prices[n/2]) / 2
	}
	return prices[n/2]
}
|
||||||
|
|
||||||
|
// calculateAverage returns the arithmetic mean of prices, or 0 for an
// empty slice.
func calculateAverage(prices []float64) float64 {
	n := len(prices)
	if n == 0 {
		return 0
	}
	total := 0.0
	for i := range prices {
		total += prices[i]
	}
	return total / float64(n)
}
|
||||||
|
|
||||||
|
// PricingHandler serves the pricing HTTP endpoints: component listings,
// price previews, price recalculation and alert management. In offline
// mode some (or all) of the fields below are nil; each handler checks for
// that and responds with an "offline" payload instead of failing.
type PricingHandler struct {
	db             *gorm.DB                        // central DB handle; nil when offline
	pricingService *pricing.Service                // price statistics (GetPriceStats)
	alertService   *alerts.Service                 // alert listing / acknowledge / resolve / ignore
	componentRepo  *repository.ComponentRepository // lot metadata lookups and listing
	priceRepo      *repository.PriceRepository     // per-lot quote counts
	statsRepo      *repository.StatsRepository     // dashboard stats and popularity scores
}
|
||||||
|
|
||||||
|
func NewPricingHandler(
|
||||||
|
db *gorm.DB,
|
||||||
|
pricingService *pricing.Service,
|
||||||
|
alertService *alerts.Service,
|
||||||
|
componentRepo *repository.ComponentRepository,
|
||||||
|
priceRepo *repository.PriceRepository,
|
||||||
|
statsRepo *repository.StatsRepository,
|
||||||
|
) *PricingHandler {
|
||||||
|
return &PricingHandler{
|
||||||
|
db: db,
|
||||||
|
pricingService: pricingService,
|
||||||
|
alertService: alertService,
|
||||||
|
componentRepo: componentRepo,
|
||||||
|
priceRepo: priceRepo,
|
||||||
|
statsRepo: statsRepo,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *PricingHandler) GetStats(c *gin.Context) {
|
||||||
|
// Check if we're in offline mode
|
||||||
|
if h.statsRepo == nil || h.alertService == nil {
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"new_alerts_count": 0,
|
||||||
|
"top_components": []interface{}{},
|
||||||
|
"trending_components": []interface{}{},
|
||||||
|
"offline": true,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
newAlerts, _ := h.alertService.GetNewAlertsCount()
|
||||||
|
topComponents, _ := h.statsRepo.GetTopComponents(10)
|
||||||
|
trendingComponents, _ := h.statsRepo.GetTrendingComponents(10)
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"new_alerts_count": newAlerts,
|
||||||
|
"top_components": topComponents,
|
||||||
|
"trending_components": trendingComponents,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComponentWithCount is a LotMetadata row augmented with usage information
// for the component-list endpoint.
type ComponentWithCount struct {
	models.LotMetadata
	QuoteCount int64    `json:"quote_count"`            // number of quotes recorded for this lot
	UsedInMeta []string `json:"used_in_meta,omitempty"` // List of meta-articles that use this component
}
|
||||||
|
|
||||||
|
func (h *PricingHandler) ListComponents(c *gin.Context) {
|
||||||
|
// Check if we're in offline mode
|
||||||
|
if h.componentRepo == nil {
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"components": []ComponentWithCount{},
|
||||||
|
"total": 0,
|
||||||
|
"page": 1,
|
||||||
|
"per_page": 20,
|
||||||
|
"offline": true,
|
||||||
|
"message": "Управление ценами доступно только в онлайн режиме",
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
||||||
|
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "20"))
|
||||||
|
|
||||||
|
filter := repository.ComponentFilter{
|
||||||
|
Category: c.Query("category"),
|
||||||
|
Search: c.Query("search"),
|
||||||
|
SortField: c.Query("sort"),
|
||||||
|
SortDir: c.Query("dir"),
|
||||||
|
}
|
||||||
|
|
||||||
|
if page < 1 {
|
||||||
|
page = 1
|
||||||
|
}
|
||||||
|
if perPage < 1 || perPage > 100 {
|
||||||
|
perPage = 20
|
||||||
|
}
|
||||||
|
offset := (page - 1) * perPage
|
||||||
|
|
||||||
|
components, total, err := h.componentRepo.List(filter, offset, perPage)
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get quote counts
|
||||||
|
lotNames := make([]string, len(components))
|
||||||
|
for i, comp := range components {
|
||||||
|
lotNames[i] = comp.LotName
|
||||||
|
}
|
||||||
|
|
||||||
|
counts, _ := h.priceRepo.GetQuoteCounts(lotNames)
|
||||||
|
|
||||||
|
// Get meta usage information
|
||||||
|
metaUsage := h.getMetaUsageMap(lotNames)
|
||||||
|
|
||||||
|
// Combine components with counts
|
||||||
|
result := make([]ComponentWithCount, len(components))
|
||||||
|
for i, comp := range components {
|
||||||
|
result[i] = ComponentWithCount{
|
||||||
|
LotMetadata: comp,
|
||||||
|
QuoteCount: counts[comp.LotName],
|
||||||
|
UsedInMeta: metaUsage[comp.LotName],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"components": result,
|
||||||
|
"total": total,
|
||||||
|
"page": page,
|
||||||
|
"per_page": perPage,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// getMetaUsageMap returns a map of lot_name -> list of meta-articles that use this component
|
||||||
|
func (h *PricingHandler) getMetaUsageMap(lotNames []string) map[string][]string {
|
||||||
|
result := make(map[string][]string)
|
||||||
|
|
||||||
|
// Get all components with meta_prices
|
||||||
|
var metaComponents []models.LotMetadata
|
||||||
|
h.db.Where("meta_prices IS NOT NULL AND meta_prices != ''").Find(&metaComponents)
|
||||||
|
|
||||||
|
// Build reverse lookup: which components are used in which meta-articles
|
||||||
|
for _, meta := range metaComponents {
|
||||||
|
sources := strings.Split(meta.MetaPrices, ",")
|
||||||
|
for _, source := range sources {
|
||||||
|
source = strings.TrimSpace(source)
|
||||||
|
if source == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle wildcard patterns
|
||||||
|
if strings.HasSuffix(source, "*") {
|
||||||
|
prefix := strings.TrimSuffix(source, "*")
|
||||||
|
for _, lotName := range lotNames {
|
||||||
|
if strings.HasPrefix(lotName, prefix) && lotName != meta.LotName {
|
||||||
|
result[lotName] = append(result[lotName], meta.LotName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Direct match
|
||||||
|
for _, lotName := range lotNames {
|
||||||
|
if lotName == source && lotName != meta.LotName {
|
||||||
|
result[lotName] = append(result[lotName], meta.LotName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// expandMetaPrices expands meta_prices string to list of actual lot names
|
||||||
|
func (h *PricingHandler) expandMetaPrices(metaPrices, excludeLot string) []string {
|
||||||
|
sources := strings.Split(metaPrices, ",")
|
||||||
|
var result []string
|
||||||
|
seen := make(map[string]bool)
|
||||||
|
|
||||||
|
for _, source := range sources {
|
||||||
|
source = strings.TrimSpace(source)
|
||||||
|
if source == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.HasSuffix(source, "*") {
|
||||||
|
// Wildcard pattern - find matching lots
|
||||||
|
prefix := strings.TrimSuffix(source, "*")
|
||||||
|
var matchingLots []string
|
||||||
|
h.db.Model(&models.LotMetadata{}).
|
||||||
|
Where("lot_name LIKE ? AND lot_name != ?", prefix+"%", excludeLot).
|
||||||
|
Pluck("lot_name", &matchingLots)
|
||||||
|
for _, lot := range matchingLots {
|
||||||
|
if !seen[lot] {
|
||||||
|
result = append(result, lot)
|
||||||
|
seen[lot] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if source != excludeLot && !seen[source] {
|
||||||
|
result = append(result, source)
|
||||||
|
seen[source] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *PricingHandler) GetComponentPricing(c *gin.Context) {
|
||||||
|
// Check if we're in offline mode
|
||||||
|
if h.componentRepo == nil || h.pricingService == nil {
|
||||||
|
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||||
|
"error": "Управление ценами доступно только в онлайн режиме",
|
||||||
|
"offline": true,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
lotName := c.Param("lot_name")
|
||||||
|
|
||||||
|
component, err := h.componentRepo.GetByLotName(lotName)
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusNotFound, gin.H{"error": "component not found"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
stats, err := h.pricingService.GetPriceStats(lotName, 0)
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"component": component,
|
||||||
|
"price_stats": stats,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatePriceRequest is the JSON payload accepted by UpdatePrice.
type UpdatePriceRequest struct {
	LotName     string             `json:"lot_name" binding:"required"` // component identifier
	Method      models.PriceMethod `json:"method"`                      // median/average; empty keeps the stored method
	PeriodDays  int                `json:"period_days"`                 // look-back window in days; 0 = all time
	Coefficient float64            `json:"coefficient"`                 // percentage adjustment applied to the computed price
	ManualPrice *float64           `json:"manual_price"`                // fixed price that overrides calculation; nil = unset
	ClearManual bool               `json:"clear_manual"`                // remove a previously set manual price
	MetaEnabled bool               `json:"meta_enabled"`                // whether meta_prices should be stored
	MetaPrices  string             `json:"meta_prices"`                 // comma-separated source lots (trailing * = prefix wildcard)
	MetaMethod  string             `json:"meta_method"`                 // NOTE(review): not read by UpdatePrice in this file — confirm intended
	MetaPeriod  int                `json:"meta_period"`                 // NOTE(review): not read by UpdatePrice in this file — confirm intended
	IsHidden    bool               `json:"is_hidden"`                   // hide the component from listings
}
|
||||||
|
|
||||||
|
func (h *PricingHandler) UpdatePrice(c *gin.Context) {
|
||||||
|
// Check if we're in offline mode
|
||||||
|
if h.db == nil {
|
||||||
|
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||||
|
"error": "Обновление цен доступно только в онлайн режиме",
|
||||||
|
"offline": true,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var req UpdatePriceRequest
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
updates := map[string]interface{}{}
|
||||||
|
|
||||||
|
// Update method if specified
|
||||||
|
if req.Method != "" {
|
||||||
|
updates["price_method"] = req.Method
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update period days
|
||||||
|
if req.PeriodDays >= 0 {
|
||||||
|
updates["price_period_days"] = req.PeriodDays
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update coefficient
|
||||||
|
updates["price_coefficient"] = req.Coefficient
|
||||||
|
|
||||||
|
// Handle meta prices
|
||||||
|
if req.MetaEnabled && req.MetaPrices != "" {
|
||||||
|
updates["meta_prices"] = req.MetaPrices
|
||||||
|
} else {
|
||||||
|
updates["meta_prices"] = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle hidden flag
|
||||||
|
updates["is_hidden"] = req.IsHidden
|
||||||
|
|
||||||
|
// Handle manual price
|
||||||
|
if req.ClearManual {
|
||||||
|
updates["manual_price"] = nil
|
||||||
|
} else if req.ManualPrice != nil {
|
||||||
|
updates["manual_price"] = *req.ManualPrice
|
||||||
|
// Also update current price immediately when setting manual
|
||||||
|
updates["current_price"] = *req.ManualPrice
|
||||||
|
updates["price_updated_at"] = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
err := h.db.Model(&models.LotMetadata{}).
|
||||||
|
Where("lot_name = ?", req.LotName).
|
||||||
|
Updates(updates).Error
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recalculate price if not using manual price
|
||||||
|
if req.ManualPrice == nil {
|
||||||
|
h.recalculateSinglePrice(req.LotName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get updated component to return new price
|
||||||
|
var comp models.LotMetadata
|
||||||
|
h.db.Where("lot_name = ?", req.LotName).First(&comp)
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"message": "price updated",
|
||||||
|
"current_price": comp.CurrentPrice,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// recalculateSinglePrice recomputes and stores current_price for one lot
// based on its stored settings (method, period, coefficient, meta price
// sources). It is a best-effort helper: DB errors are silently ignored and
// it returns without writing when there is no usable price data.
func (h *PricingHandler) recalculateSinglePrice(lotName string) {
	var comp models.LotMetadata
	if err := h.db.Where("lot_name = ?", lotName).First(&comp).Error; err != nil {
		return
	}

	// Skip if manual price is set — a manual price always wins.
	if comp.ManualPrice != nil && *comp.ManualPrice > 0 {
		return
	}

	periodDays := comp.PricePeriodDays
	method := comp.PriceMethod
	if method == "" {
		method = models.PriceMethodMedian
	}

	// Determine which lot names to use for price calculation: either the
	// lot itself, or its expanded meta price sources.
	lotNames := []string{lotName}
	if comp.MetaPrices != "" {
		lotNames = h.expandMetaPrices(comp.MetaPrices, lotName)
	}

	// Get prices based on period from all relevant lots. Entries ending in
	// "*" are treated as SQL LIKE prefix patterns.
	var prices []float64
	for _, ln := range lotNames {
		var lotPrices []float64
		if strings.HasSuffix(ln, "*") {
			pattern := strings.TrimSuffix(ln, "*") + "%"
			if periodDays > 0 {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot LIKE ? AND date >= DATE_SUB(NOW(), INTERVAL ? DAY) ORDER BY price`,
					pattern, periodDays).Pluck("price", &lotPrices)
			} else {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot LIKE ? ORDER BY price`, pattern).Pluck("price", &lotPrices)
			}
		} else {
			if periodDays > 0 {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot = ? AND date >= DATE_SUB(NOW(), INTERVAL ? DAY) ORDER BY price`,
					ln, periodDays).Pluck("price", &lotPrices)
			} else {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot = ? ORDER BY price`, ln).Pluck("price", &lotPrices)
			}
		}
		prices = append(prices, lotPrices...)
	}

	// If no prices in period, fall back to all-time data.
	if len(prices) == 0 && periodDays > 0 {
		for _, ln := range lotNames {
			var lotPrices []float64
			if strings.HasSuffix(ln, "*") {
				pattern := strings.TrimSuffix(ln, "*") + "%"
				h.db.Raw(`SELECT price FROM lot_log WHERE lot LIKE ? ORDER BY price`, pattern).Pluck("price", &lotPrices)
			} else {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot = ? ORDER BY price`, ln).Pluck("price", &lotPrices)
			}
			prices = append(prices, lotPrices...)
		}
	}

	if len(prices) == 0 {
		return
	}

	// Calculate price based on method.
	// NOTE(review): calculateMedian sorts internally, so this pre-sort is
	// redundant for the median path — confirm sortFloat64s (defined
	// elsewhere in this package) has no other effect before removing it.
	sortFloat64s(prices)
	var finalPrice float64
	switch method {
	case models.PriceMethodMedian:
		finalPrice = calculateMedian(prices)
	case models.PriceMethodAverage:
		finalPrice = calculateAverage(prices)
	default:
		finalPrice = calculateMedian(prices)
	}

	if finalPrice <= 0 {
		return
	}

	// Apply coefficient as a percentage adjustment (e.g. 10 => +10%).
	if comp.PriceCoefficient != 0 {
		finalPrice = finalPrice * (1 + comp.PriceCoefficient/100)
	}

	now := time.Now()
	// Only update price fields, preserving all user settings.
	h.db.Model(&models.LotMetadata{}).
		Where("lot_name = ?", lotName).
		Updates(map[string]interface{}{
			"current_price":    finalPrice,
			"price_updated_at": now,
		})
}
|
||||||
|
|
||||||
|
// RecalculateAll recalculates current_price for every component, streaming
// progress to the client as Server-Sent Events. Components with a manual
// price are skipped, as are components whose source lots have no quotes
// newer than their last price update.
func (h *PricingHandler) RecalculateAll(c *gin.Context) {
	// Check if we're in offline mode — the central DB is required.
	if h.db == nil {
		c.JSON(http.StatusServiceUnavailable, gin.H{
			"error":   "Пересчёт цен доступен только в онлайн режиме",
			"offline": true,
		})
		return
	}

	// Set headers for SSE.
	c.Header("Content-Type", "text/event-stream")
	c.Header("Cache-Control", "no-cache")
	c.Header("Connection", "keep-alive")

	// Get all components with their settings.
	var components []models.LotMetadata
	h.db.Find(&components)
	total := int64(len(components))

	// Pre-load all lot names for efficient wildcard matching.
	var allLotNames []string
	h.db.Model(&models.LotMetadata{}).Pluck("lot_name", &allLotNames)
	// NOTE(review): lotNameSet is built but never read below — candidate
	// for removal.
	lotNameSet := make(map[string]bool, len(allLotNames))
	for _, ln := range allLotNames {
		lotNameSet[ln] = true
	}

	// Pre-load latest quote dates for all lots (for the "has new data"
	// check below) so the loop avoids per-component queries.
	type LotDate struct {
		Lot  string
		Date time.Time
	}
	var latestDates []LotDate
	h.db.Raw(`SELECT lot, MAX(date) as date FROM lot_log GROUP BY lot`).Scan(&latestDates)
	lotLatestDate := make(map[string]time.Time, len(latestDates))
	for _, ld := range latestDates {
		lotLatestDate[ld.Lot] = ld.Date
	}

	// Send initial progress event.
	c.SSEvent("progress", gin.H{"current": 0, "total": total, "status": "starting"})
	c.Writer.Flush()

	// Process components individually to respect their settings.
	var updated, skipped, manual, unchanged, errors int
	now := time.Now()
	progressCounter := 0

	for _, comp := range components {
		progressCounter++

		// If manual price is set, skip recalculation.
		if comp.ManualPrice != nil && *comp.ManualPrice > 0 {
			manual++
			goto sendProgress
		}

		// Calculate price based on the component's individual settings.
		// The inner block scopes the per-component declarations so the
		// goto statements may legally jump past them to sendProgress.
		{
			periodDays := comp.PricePeriodDays
			method := comp.PriceMethod
			if method == "" {
				method = models.PriceMethodMedian
			}

			// Determine source lots for price calculation (using cached lot names).
			var sourceLots []string
			if comp.MetaPrices != "" {
				sourceLots = expandMetaPricesWithCache(comp.MetaPrices, comp.LotName, allLotNames)
			} else {
				sourceLots = []string{comp.LotName}
			}

			if len(sourceLots) == 0 {
				skipped++
				goto sendProgress
			}

			// Check if there are new quotes since the last update (using
			// the cached per-lot latest dates); skip unchanged components.
			if comp.PriceUpdatedAt != nil {
				hasNewData := false
				for _, lot := range sourceLots {
					if latestDate, ok := lotLatestDate[lot]; ok {
						if latestDate.After(*comp.PriceUpdatedAt) {
							hasNewData = true
							break
						}
					}
				}
				if !hasNewData {
					unchanged++
					goto sendProgress
				}
			}

			// Get prices from source lots for the configured period.
			var prices []float64
			if periodDays > 0 {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot IN ? AND date >= DATE_SUB(NOW(), INTERVAL ? DAY) ORDER BY price`,
					sourceLots, periodDays).Pluck("price", &prices)
			} else {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot IN ? ORDER BY price`,
					sourceLots).Pluck("price", &prices)
			}

			// If no prices in period, fall back to all-time data.
			if len(prices) == 0 && periodDays > 0 {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot IN ? ORDER BY price`, sourceLots).Pluck("price", &prices)
			}

			if len(prices) == 0 {
				skipped++
				goto sendProgress
			}

			// Calculate price based on method.
			var basePrice float64
			switch method {
			case models.PriceMethodMedian:
				basePrice = calculateMedian(prices)
			case models.PriceMethodAverage:
				basePrice = calculateAverage(prices)
			default:
				basePrice = calculateMedian(prices)
			}

			if basePrice <= 0 {
				skipped++
				goto sendProgress
			}

			finalPrice := basePrice

			// Apply coefficient as a percentage adjustment.
			if comp.PriceCoefficient != 0 {
				finalPrice = finalPrice * (1 + comp.PriceCoefficient/100)
			}

			// Update only price fields, preserving user settings.
			err := h.db.Model(&models.LotMetadata{}).
				Where("lot_name = ?", comp.LotName).
				Updates(map[string]interface{}{
					"current_price":    finalPrice,
					"price_updated_at": now,
				}).Error
			if err != nil {
				errors++
			} else {
				updated++
			}
		}

	sendProgress:
		// Send a progress update every 10 components to reduce overhead.
		if progressCounter%10 == 0 || progressCounter == int(total) {
			c.SSEvent("progress", gin.H{
				"current":   updated + skipped + manual + unchanged + errors,
				"total":     total,
				"updated":   updated,
				"skipped":   skipped,
				"manual":    manual,
				"unchanged": unchanged,
				"errors":    errors,
				"status":    "processing",
				"lot_name":  comp.LotName,
			})
			c.Writer.Flush()
		}
	}

	// Update popularity scores.
	// NOTE(review): assumes statsRepo is non-nil whenever db is non-nil —
	// confirm, otherwise this panics in partially-offline setups.
	h.statsRepo.UpdatePopularityScores()

	// Send completion event with final counters.
	c.SSEvent("progress", gin.H{
		"current":   updated + skipped + manual + unchanged + errors,
		"total":     total,
		"updated":   updated,
		"skipped":   skipped,
		"manual":    manual,
		"unchanged": unchanged,
		"errors":    errors,
		"status":    "completed",
	})
	c.Writer.Flush()
}
|
||||||
|
|
||||||
|
func (h *PricingHandler) ListAlerts(c *gin.Context) {
|
||||||
|
// Check if we're in offline mode
|
||||||
|
if h.db == nil {
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"alerts": []interface{}{},
|
||||||
|
"total": 0,
|
||||||
|
"page": 1,
|
||||||
|
"per_page": 20,
|
||||||
|
"offline": true,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
||||||
|
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "20"))
|
||||||
|
|
||||||
|
filter := repository.AlertFilter{
|
||||||
|
Status: models.AlertStatus(c.Query("status")),
|
||||||
|
Severity: models.AlertSeverity(c.Query("severity")),
|
||||||
|
Type: models.AlertType(c.Query("type")),
|
||||||
|
LotName: c.Query("lot_name"),
|
||||||
|
}
|
||||||
|
|
||||||
|
alertsList, total, err := h.alertService.List(filter, page, perPage)
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"alerts": alertsList,
|
||||||
|
"total": total,
|
||||||
|
"page": page,
|
||||||
|
"per_page": perPage,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *PricingHandler) AcknowledgeAlert(c *gin.Context) {
|
||||||
|
// Check if we're in offline mode
|
||||||
|
if h.db == nil {
|
||||||
|
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||||
|
"error": "Управление алертами доступно только в онлайн режиме",
|
||||||
|
"offline": true,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
id, err := strconv.ParseUint(c.Param("id"), 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid alert id"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := h.alertService.Acknowledge(uint(id)); err != nil {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{"message": "acknowledged"})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *PricingHandler) ResolveAlert(c *gin.Context) {
|
||||||
|
// Check if we're in offline mode
|
||||||
|
if h.db == nil {
|
||||||
|
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||||
|
"error": "Управление алертами доступно только в онлайн режиме",
|
||||||
|
"offline": true,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
id, err := strconv.ParseUint(c.Param("id"), 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid alert id"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := h.alertService.Resolve(uint(id)); err != nil {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{"message": "resolved"})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *PricingHandler) IgnoreAlert(c *gin.Context) {
|
||||||
|
// Check if we're in offline mode
|
||||||
|
if h.db == nil {
|
||||||
|
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||||
|
"error": "Управление алертами доступно только в онлайн режиме",
|
||||||
|
"offline": true,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
id, err := strconv.ParseUint(c.Param("id"), 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid alert id"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := h.alertService.Ignore(uint(id)); err != nil {
|
||||||
|
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{"message": "ignored"})
|
||||||
|
}
|
||||||
|
|
||||||
|
// PreviewPriceRequest is the JSON payload for PreviewPrice: hypothetical
// settings to evaluate without persisting them.
type PreviewPriceRequest struct {
	LotName     string  `json:"lot_name" binding:"required"` // component identifier
	Method      string  `json:"method"`                      // median/average — presumably matches models.PriceMethod values; confirm
	PeriodDays  int     `json:"period_days"`                 // look-back window in days; 0 = all time
	Coefficient float64 `json:"coefficient"`                 // percentage adjustment — presumably applied as in UpdatePrice; confirm
	MetaEnabled bool    `json:"meta_enabled"`                // whether MetaPrices should be expanded into source lots
	MetaPrices  string  `json:"meta_prices"`                 // comma-separated source lots (trailing * = prefix wildcard)
}
|
||||||
|
|
||||||
|
func (h *PricingHandler) PreviewPrice(c *gin.Context) {
|
||||||
|
// Check if we're in offline mode
|
||||||
|
if h.db == nil {
|
||||||
|
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||||
|
"error": "Предпросмотр цены доступен только в онлайн режиме",
|
||||||
|
"offline": true,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var req PreviewPriceRequest
|
||||||
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get component
|
||||||
|
var comp models.LotMetadata
|
||||||
|
if err := h.db.Where("lot_name = ?", req.LotName).First(&comp).Error; err != nil {
|
||||||
|
c.JSON(http.StatusNotFound, gin.H{"error": "component not found"})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine which lot names to use for price calculation
|
||||||
|
lotNames := []string{req.LotName}
|
||||||
|
if req.MetaEnabled && req.MetaPrices != "" {
|
||||||
|
lotNames = h.expandMetaPrices(req.MetaPrices, req.LotName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get all prices for calculations (from all relevant lots)
|
||||||
|
var allPrices []float64
|
||||||
|
for _, lotName := range lotNames {
|
||||||
|
var lotPrices []float64
|
||||||
|
if strings.HasSuffix(lotName, "*") {
|
||||||
|
// Wildcard pattern
|
||||||
|
pattern := strings.TrimSuffix(lotName, "*") + "%"
|
||||||
|
h.db.Raw(`SELECT price FROM lot_log WHERE lot LIKE ? ORDER BY price`, pattern).Pluck("price", &lotPrices)
|
||||||
|
} else {
|
||||||
|
h.db.Raw(`SELECT price FROM lot_log WHERE lot = ? ORDER BY price`, lotName).Pluck("price", &lotPrices)
|
||||||
|
}
|
||||||
|
allPrices = append(allPrices, lotPrices...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate median for all time
|
||||||
|
var medianAllTime *float64
|
||||||
|
if len(allPrices) > 0 {
|
||||||
|
sortFloat64s(allPrices)
|
||||||
|
median := calculateMedian(allPrices)
|
||||||
|
medianAllTime = &median
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get quote count (from all relevant lots) - total count
|
||||||
|
var quoteCountTotal int64
|
||||||
|
for _, lotName := range lotNames {
|
||||||
|
var count int64
|
||||||
|
if strings.HasSuffix(lotName, "*") {
|
||||||
|
pattern := strings.TrimSuffix(lotName, "*") + "%"
|
||||||
|
h.db.Model(&models.LotLog{}).Where("lot LIKE ?", pattern).Count(&count)
|
||||||
|
} else {
|
||||||
|
h.db.Model(&models.LotLog{}).Where("lot = ?", lotName).Count(&count)
|
||||||
|
}
|
||||||
|
quoteCountTotal += count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get quote count for specified period (if period is > 0)
|
||||||
|
var quoteCountPeriod int64
|
||||||
|
if req.PeriodDays > 0 {
|
||||||
|
for _, lotName := range lotNames {
|
||||||
|
var count int64
|
||||||
|
if strings.HasSuffix(lotName, "*") {
|
||||||
|
pattern := strings.TrimSuffix(lotName, "*") + "%"
|
||||||
|
h.db.Raw(`SELECT COUNT(*) FROM lot_log WHERE lot LIKE ? AND date >= DATE_SUB(NOW(), INTERVAL ? DAY)`, pattern, req.PeriodDays).Scan(&count)
|
||||||
|
} else {
|
||||||
|
h.db.Raw(`SELECT COUNT(*) FROM lot_log WHERE lot = ? AND date >= DATE_SUB(NOW(), INTERVAL ? DAY)`, lotName, req.PeriodDays).Scan(&count)
|
||||||
|
}
|
||||||
|
quoteCountPeriod += count
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// If no period specified, period count equals total count
|
||||||
|
quoteCountPeriod = quoteCountTotal
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get last received price (from the main lot only)
|
||||||
|
var lastPrice struct {
|
||||||
|
Price *float64
|
||||||
|
Date *time.Time
|
||||||
|
}
|
||||||
|
h.db.Raw(`SELECT price, date FROM lot_log WHERE lot = ? ORDER BY date DESC, lot_log_id DESC LIMIT 1`, req.LotName).Scan(&lastPrice)
|
||||||
|
|
||||||
|
// Calculate new price based on parameters (method, period, coefficient)
|
||||||
|
method := req.Method
|
||||||
|
if method == "" {
|
||||||
|
method = "median"
|
||||||
|
}
|
||||||
|
|
||||||
|
var prices []float64
|
||||||
|
if req.PeriodDays > 0 {
|
||||||
|
for _, lotName := range lotNames {
|
||||||
|
var lotPrices []float64
|
||||||
|
if strings.HasSuffix(lotName, "*") {
|
||||||
|
pattern := strings.TrimSuffix(lotName, "*") + "%"
|
||||||
|
h.db.Raw(`SELECT price FROM lot_log WHERE lot LIKE ? AND date >= DATE_SUB(NOW(), INTERVAL ? DAY) ORDER BY price`,
|
||||||
|
pattern, req.PeriodDays).Pluck("price", &lotPrices)
|
||||||
|
} else {
|
||||||
|
h.db.Raw(`SELECT price FROM lot_log WHERE lot = ? AND date >= DATE_SUB(NOW(), INTERVAL ? DAY) ORDER BY price`,
|
||||||
|
lotName, req.PeriodDays).Pluck("price", &lotPrices)
|
||||||
|
}
|
||||||
|
prices = append(prices, lotPrices...)
|
||||||
|
}
|
||||||
|
// Fall back to all time if no prices in period
|
||||||
|
if len(prices) == 0 {
|
||||||
|
prices = allPrices
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
prices = allPrices
|
||||||
|
}
|
||||||
|
|
||||||
|
var newPrice *float64
|
||||||
|
if len(prices) > 0 {
|
||||||
|
sortFloat64s(prices)
|
||||||
|
var basePrice float64
|
||||||
|
if method == "average" {
|
||||||
|
basePrice = calculateAverage(prices)
|
||||||
|
} else {
|
||||||
|
basePrice = calculateMedian(prices)
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.Coefficient != 0 {
|
||||||
|
basePrice = basePrice * (1 + req.Coefficient/100)
|
||||||
|
}
|
||||||
|
newPrice = &basePrice
|
||||||
|
}
|
||||||
|
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"lot_name": req.LotName,
|
||||||
|
"current_price": comp.CurrentPrice,
|
||||||
|
"median_all_time": medianAllTime,
|
||||||
|
"new_price": newPrice,
|
||||||
|
"quote_count_total": quoteCountTotal,
|
||||||
|
"quote_count_period": quoteCountPeriod,
|
||||||
|
"manual_price": comp.ManualPrice,
|
||||||
|
"last_price": lastPrice.Price,
|
||||||
|
"last_price_date": lastPrice.Date,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// sortFloat64s sorts a slice of float64 in ascending order
|
||||||
|
func sortFloat64s(data []float64) {
|
||||||
|
sort.Float64s(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// expandMetaPricesWithCache expands meta_prices using pre-loaded lot names (no DB queries)
|
||||||
|
func expandMetaPricesWithCache(metaPrices, excludeLot string, allLotNames []string) []string {
|
||||||
|
sources := strings.Split(metaPrices, ",")
|
||||||
|
var result []string
|
||||||
|
seen := make(map[string]bool)
|
||||||
|
|
||||||
|
for _, source := range sources {
|
||||||
|
source = strings.TrimSpace(source)
|
||||||
|
if source == "" || source == excludeLot {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.HasSuffix(source, "*") {
|
||||||
|
// Wildcard pattern - find matching lots from cache
|
||||||
|
prefix := strings.TrimSuffix(source, "*")
|
||||||
|
for _, lot := range allLotNames {
|
||||||
|
if strings.HasPrefix(lot, prefix) && lot != excludeLot && !seen[lot] {
|
||||||
|
result = append(result, lot)
|
||||||
|
seen[lot] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if !seen[source] {
|
||||||
|
result = append(result, source)
|
||||||
|
seen[source] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
@@ -18,13 +18,13 @@ func NewQuoteHandler(quoteService *services.QuoteService) *QuoteHandler {
|
|||||||
func (h *QuoteHandler) Validate(c *gin.Context) {
|
func (h *QuoteHandler) Validate(c *gin.Context) {
|
||||||
var req services.QuoteRequest
|
var req services.QuoteRequest
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
result, err := h.quoteService.ValidateAndCalculate(&req)
|
result, err := h.quoteService.ValidateAndCalculate(&req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -34,13 +34,13 @@ func (h *QuoteHandler) Validate(c *gin.Context) {
|
|||||||
func (h *QuoteHandler) Calculate(c *gin.Context) {
|
func (h *QuoteHandler) Calculate(c *gin.Context) {
|
||||||
var req services.QuoteRequest
|
var req services.QuoteRequest
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
result, err := h.quoteService.ValidateAndCalculate(&req)
|
result, err := h.quoteService.ValidateAndCalculate(&req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -53,13 +53,13 @@ func (h *QuoteHandler) Calculate(c *gin.Context) {
|
|||||||
func (h *QuoteHandler) PriceLevels(c *gin.Context) {
|
func (h *QuoteHandler) PriceLevels(c *gin.Context) {
|
||||||
var req services.PriceLevelsRequest
|
var req services.PriceLevelsRequest
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
result, err := h.quoteService.CalculatePriceLevels(&req)
|
result, err := h.quoteService.CalculatePriceLevels(&req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,73 +0,0 @@
|
|||||||
package handlers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
)
|
|
||||||
|
|
||||||
func RespondError(c *gin.Context, status int, fallback string, err error) {
|
|
||||||
if err != nil {
|
|
||||||
_ = c.Error(err)
|
|
||||||
}
|
|
||||||
c.JSON(status, gin.H{"error": clientFacingErrorMessage(status, fallback, err)})
|
|
||||||
}
|
|
||||||
|
|
||||||
func clientFacingErrorMessage(status int, fallback string, err error) string {
|
|
||||||
if err == nil {
|
|
||||||
return fallback
|
|
||||||
}
|
|
||||||
if status >= 500 {
|
|
||||||
return fallback
|
|
||||||
}
|
|
||||||
if isRequestDecodeError(err) {
|
|
||||||
return fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
message := strings.TrimSpace(err.Error())
|
|
||||||
if message == "" {
|
|
||||||
return fallback
|
|
||||||
}
|
|
||||||
if looksTechnicalError(message) {
|
|
||||||
return fallback
|
|
||||||
}
|
|
||||||
return message
|
|
||||||
}
|
|
||||||
|
|
||||||
func isRequestDecodeError(err error) bool {
|
|
||||||
var syntaxErr *json.SyntaxError
|
|
||||||
if errors.As(err, &syntaxErr) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
var unmarshalTypeErr *json.UnmarshalTypeError
|
|
||||||
if errors.As(err, &unmarshalTypeErr) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF)
|
|
||||||
}
|
|
||||||
|
|
||||||
func looksTechnicalError(message string) bool {
|
|
||||||
lower := strings.ToLower(strings.TrimSpace(message))
|
|
||||||
needles := []string{
|
|
||||||
"sql",
|
|
||||||
"gorm",
|
|
||||||
"driver",
|
|
||||||
"constraint",
|
|
||||||
"syntax error",
|
|
||||||
"unexpected eof",
|
|
||||||
"record not found",
|
|
||||||
"no such table",
|
|
||||||
"stack trace",
|
|
||||||
}
|
|
||||||
for _, needle := range needles {
|
|
||||||
if strings.Contains(lower, needle) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
package handlers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestClientFacingErrorMessageKeepsDomain4xx(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
got := clientFacingErrorMessage(400, "invalid request", &json.SyntaxError{Offset: 1})
|
|
||||||
if got != "invalid request" {
|
|
||||||
t.Fatalf("expected fallback for decode error, got %q", got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClientFacingErrorMessagePreservesBusinessMessage(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
err := errString("main project variant cannot be deleted")
|
|
||||||
got := clientFacingErrorMessage(400, "invalid request", err)
|
|
||||||
if got != err.Error() {
|
|
||||||
t.Fatalf("expected business message, got %q", got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestClientFacingErrorMessageHidesTechnical4xx(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
err := errString("sql: no rows in result set")
|
|
||||||
got := clientFacingErrorMessage(404, "resource not found", err)
|
|
||||||
if got != "resource not found" {
|
|
||||||
t.Fatalf("expected fallback for technical error, got %q", got)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type errString string
|
|
||||||
|
|
||||||
func (e errString) Error() string {
|
|
||||||
return string(e)
|
|
||||||
}
|
|
||||||
@@ -1,20 +1,21 @@
|
|||||||
package handlers
|
package handlers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"html/template"
|
"html/template"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
qfassets "git.mchus.pro/mchus/quoteforge"
|
qfassets "git.mchus.pro/mchus/quoteforge"
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/db"
|
"git.mchus.pro/mchus/quoteforge/internal/db"
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
mysqlDriver "github.com/go-sql-driver/mysql"
|
mysqlDriver "github.com/go-sql-driver/mysql"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
gormmysql "gorm.io/driver/mysql"
|
gormmysql "gorm.io/driver/mysql"
|
||||||
"gorm.io/gorm"
|
"gorm.io/gorm"
|
||||||
"gorm.io/gorm/logger"
|
"gorm.io/gorm/logger"
|
||||||
@@ -27,9 +28,7 @@ type SetupHandler struct {
|
|||||||
restartSig chan struct{}
|
restartSig chan struct{}
|
||||||
}
|
}
|
||||||
|
|
||||||
var errPermissionProbeRollback = errors.New("permission probe rollback")
|
func NewSetupHandler(localDB *localdb.LocalDB, connMgr *db.ConnectionManager, templatesPath string, restartSig chan struct{}) (*SetupHandler, error) {
|
||||||
|
|
||||||
func NewSetupHandler(localDB *localdb.LocalDB, connMgr *db.ConnectionManager, _ string, restartSig chan struct{}) (*SetupHandler, error) {
|
|
||||||
funcMap := template.FuncMap{
|
funcMap := template.FuncMap{
|
||||||
"sub": func(a, b int) int { return a - b },
|
"sub": func(a, b int) int { return a - b },
|
||||||
"add": func(a, b int) int { return a + b },
|
"add": func(a, b int) int { return a + b },
|
||||||
@@ -38,9 +37,14 @@ func NewSetupHandler(localDB *localdb.LocalDB, connMgr *db.ConnectionManager, _
|
|||||||
templates := make(map[string]*template.Template)
|
templates := make(map[string]*template.Template)
|
||||||
|
|
||||||
// Load setup template (standalone, no base needed)
|
// Load setup template (standalone, no base needed)
|
||||||
|
setupPath := filepath.Join(templatesPath, "setup.html")
|
||||||
var tmpl *template.Template
|
var tmpl *template.Template
|
||||||
var err error
|
var err error
|
||||||
tmpl, err = template.New("").Funcs(funcMap).ParseFS(qfassets.TemplatesFS, "web/templates/setup.html")
|
if stat, statErr := os.Stat(templatesPath); statErr == nil && stat.IsDir() {
|
||||||
|
tmpl, err = template.New("").Funcs(funcMap).ParseFiles(setupPath)
|
||||||
|
} else {
|
||||||
|
tmpl, err = template.New("").Funcs(funcMap).ParseFS(qfassets.TemplatesFS, "web/templates/setup.html")
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("parsing setup template: %w", err)
|
return nil, fmt.Errorf("parsing setup template: %w", err)
|
||||||
}
|
}
|
||||||
@@ -67,8 +71,7 @@ func (h *SetupHandler) ShowSetup(c *gin.Context) {
|
|||||||
|
|
||||||
tmpl := h.templates["setup.html"]
|
tmpl := h.templates["setup.html"]
|
||||||
if err := tmpl.ExecuteTemplate(c.Writer, "setup.html", data); err != nil {
|
if err := tmpl.ExecuteTemplate(c.Writer, "setup.html", data); err != nil {
|
||||||
_ = c.Error(err)
|
c.String(http.StatusInternalServerError, "Template error: %v", err)
|
||||||
c.String(http.StatusInternalServerError, "Template error")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -93,16 +96,49 @@ func (h *SetupHandler) TestConnection(c *gin.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
dsn := buildMySQLDSN(host, port, database, user, password, 5*time.Second)
|
dsn := buildMySQLDSN(host, port, database, user, password, 5*time.Second)
|
||||||
lotCount, canWrite, err := validateMariaDBConnection(dsn)
|
|
||||||
|
db, err := gorm.Open(gormmysql.Open(dsn), &gorm.Config{
|
||||||
|
Logger: logger.Default.LogMode(logger.Silent),
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = c.Error(err)
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
c.JSON(http.StatusOK, gin.H{
|
||||||
"success": false,
|
"success": false,
|
||||||
"error": "Connection check failed",
|
"error": fmt.Sprintf("Connection failed: %v", err),
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
sqlDB, err := db.DB()
|
||||||
|
if err != nil {
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"success": false,
|
||||||
|
"error": fmt.Sprintf("Failed to get database handle: %v", err),
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer sqlDB.Close()
|
||||||
|
|
||||||
|
if err := sqlDB.Ping(); err != nil {
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"success": false,
|
||||||
|
"error": fmt.Sprintf("Ping failed: %v", err),
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for required tables
|
||||||
|
var lotCount int64
|
||||||
|
if err := db.Table("lot").Count(&lotCount).Error; err != nil {
|
||||||
|
c.JSON(http.StatusOK, gin.H{
|
||||||
|
"success": false,
|
||||||
|
"error": fmt.Sprintf("Table 'lot' not found or inaccessible: %v", err),
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check write permission
|
||||||
|
canWrite := testWritePermission(db)
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
c.JSON(http.StatusOK, gin.H{
|
||||||
"success": true,
|
"success": true,
|
||||||
"lot_count": lotCount,
|
"lot_count": lotCount,
|
||||||
@@ -135,21 +171,26 @@ func (h *SetupHandler) SaveConnection(c *gin.Context) {
|
|||||||
|
|
||||||
// Test connection first
|
// Test connection first
|
||||||
dsn := buildMySQLDSN(host, port, database, user, password, 5*time.Second)
|
dsn := buildMySQLDSN(host, port, database, user, password, 5*time.Second)
|
||||||
if _, _, err := validateMariaDBConnection(dsn); err != nil {
|
|
||||||
_ = c.Error(err)
|
db, err := gorm.Open(gormmysql.Open(dsn), &gorm.Config{
|
||||||
|
Logger: logger.Default.LogMode(logger.Silent),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
c.JSON(http.StatusBadRequest, gin.H{
|
c.JSON(http.StatusBadRequest, gin.H{
|
||||||
"success": false,
|
"success": false,
|
||||||
"error": "Connection check failed",
|
"error": fmt.Sprintf("Connection failed: %v", err),
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
sqlDB, _ := db.DB()
|
||||||
|
sqlDB.Close()
|
||||||
|
|
||||||
// Save settings
|
// Save settings
|
||||||
if err := h.localDB.SaveSettings(host, port, database, user, password); err != nil {
|
if err := h.localDB.SaveSettings(host, port, database, user, password); err != nil {
|
||||||
_ = c.Error(err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{
|
c.JSON(http.StatusInternalServerError, gin.H{
|
||||||
"success": false,
|
"success": false,
|
||||||
"error": "Failed to save settings",
|
"error": fmt.Sprintf("Failed to save settings: %v", err),
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -198,6 +239,22 @@ func (h *SetupHandler) GetStatus(c *gin.Context) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testWritePermission(db *gorm.DB) bool {
|
||||||
|
// Simple check: try to create a temporary table and drop it
|
||||||
|
testTable := fmt.Sprintf("qt_write_test_%d", time.Now().UnixNano())
|
||||||
|
|
||||||
|
// Try to create a test table
|
||||||
|
err := db.Exec(fmt.Sprintf("CREATE TABLE %s (id INT)", testTable)).Error
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Drop it immediately
|
||||||
|
db.Exec(fmt.Sprintf("DROP TABLE %s", testTable))
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
func buildMySQLDSN(host string, port int, database, user, password string, timeout time.Duration) string {
|
func buildMySQLDSN(host string, port int, database, user, password string, timeout time.Duration) string {
|
||||||
cfg := mysqlDriver.NewConfig()
|
cfg := mysqlDriver.NewConfig()
|
||||||
cfg.User = user
|
cfg.User = user
|
||||||
@@ -213,47 +270,3 @@ func buildMySQLDSN(host string, port int, database, user, password string, timeo
|
|||||||
}
|
}
|
||||||
return cfg.FormatDSN()
|
return cfg.FormatDSN()
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateMariaDBConnection(dsn string) (int64, bool, error) {
|
|
||||||
db, err := gorm.Open(gormmysql.Open(dsn), &gorm.Config{
|
|
||||||
Logger: logger.Default.LogMode(logger.Silent),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return 0, false, fmt.Errorf("open MariaDB connection: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
sqlDB, err := db.DB()
|
|
||||||
if err != nil {
|
|
||||||
return 0, false, fmt.Errorf("get database handle: %w", err)
|
|
||||||
}
|
|
||||||
defer sqlDB.Close()
|
|
||||||
|
|
||||||
if err := sqlDB.Ping(); err != nil {
|
|
||||||
return 0, false, fmt.Errorf("ping MariaDB: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var lotCount int64
|
|
||||||
if err := db.Table("lot").Count(&lotCount).Error; err != nil {
|
|
||||||
return 0, false, fmt.Errorf("check required table lot: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return lotCount, testSyncWritePermission(db), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func testSyncWritePermission(db *gorm.DB) bool {
|
|
||||||
sentinel := fmt.Sprintf("quoteforge-permission-check-%d", time.Now().UnixNano())
|
|
||||||
err := db.Transaction(func(tx *gorm.DB) error {
|
|
||||||
if err := tx.Exec(`
|
|
||||||
INSERT INTO qt_client_schema_state (username, hostname, last_checked_at, updated_at)
|
|
||||||
VALUES (?, ?, NOW(), NOW())
|
|
||||||
ON DUPLICATE KEY UPDATE
|
|
||||||
last_checked_at = VALUES(last_checked_at),
|
|
||||||
updated_at = VALUES(updated_at)
|
|
||||||
`, sentinel, "setup-check").Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return errPermissionProbeRollback
|
|
||||||
})
|
|
||||||
|
|
||||||
return errors.Is(err, errPermissionProbeRollback)
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,13 +1,11 @@
|
|||||||
package handlers
|
package handlers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"html/template"
|
"html/template"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"os"
|
||||||
stdsync "sync"
|
"path/filepath"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
qfassets "git.mchus.pro/mchus/quoteforge"
|
qfassets "git.mchus.pro/mchus/quoteforge"
|
||||||
@@ -25,15 +23,19 @@ type SyncHandler struct {
|
|||||||
autoSyncInterval time.Duration
|
autoSyncInterval time.Duration
|
||||||
onlineGraceFactor float64
|
onlineGraceFactor float64
|
||||||
tmpl *template.Template
|
tmpl *template.Template
|
||||||
readinessMu stdsync.Mutex
|
|
||||||
readinessCached *sync.SyncReadiness
|
|
||||||
readinessCachedAt time.Time
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSyncHandler creates a new sync handler
|
// NewSyncHandler creates a new sync handler
|
||||||
func NewSyncHandler(localDB *localdb.LocalDB, syncService *sync.Service, connMgr *db.ConnectionManager, _ string, autoSyncInterval time.Duration) (*SyncHandler, error) {
|
func NewSyncHandler(localDB *localdb.LocalDB, syncService *sync.Service, connMgr *db.ConnectionManager, templatesPath string, autoSyncInterval time.Duration) (*SyncHandler, error) {
|
||||||
// Load sync_status partial template
|
// Load sync_status partial template
|
||||||
tmpl, err := template.ParseFS(qfassets.TemplatesFS, "web/templates/partials/sync_status.html")
|
partialPath := filepath.Join(templatesPath, "partials", "sync_status.html")
|
||||||
|
var tmpl *template.Template
|
||||||
|
var err error
|
||||||
|
if stat, statErr := os.Stat(templatesPath); statErr == nil && stat.IsDir() {
|
||||||
|
tmpl, err = template.ParseFiles(partialPath)
|
||||||
|
} else {
|
||||||
|
tmpl, err = template.ParseFS(qfassets.TemplatesFS, "web/templates/partials/sync_status.html")
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -50,117 +52,56 @@ func NewSyncHandler(localDB *localdb.LocalDB, syncService *sync.Service, connMgr
|
|||||||
|
|
||||||
// SyncStatusResponse represents the sync status
|
// SyncStatusResponse represents the sync status
|
||||||
type SyncStatusResponse struct {
|
type SyncStatusResponse struct {
|
||||||
LastComponentSync *time.Time `json:"last_component_sync"`
|
LastComponentSync *time.Time `json:"last_component_sync"`
|
||||||
LastPricelistSync *time.Time `json:"last_pricelist_sync"`
|
LastPricelistSync *time.Time `json:"last_pricelist_sync"`
|
||||||
LastPricelistAttemptAt *time.Time `json:"last_pricelist_attempt_at,omitempty"`
|
IsOnline bool `json:"is_online"`
|
||||||
LastPricelistSyncStatus string `json:"last_pricelist_sync_status,omitempty"`
|
ComponentsCount int64 `json:"components_count"`
|
||||||
LastPricelistSyncError string `json:"last_pricelist_sync_error,omitempty"`
|
PricelistsCount int64 `json:"pricelists_count"`
|
||||||
HasIncompleteServerSync bool `json:"has_incomplete_server_sync"`
|
ServerPricelists int `json:"server_pricelists"`
|
||||||
KnownServerChangesMiss bool `json:"known_server_changes_missing"`
|
NeedComponentSync bool `json:"need_component_sync"`
|
||||||
IsOnline bool `json:"is_online"`
|
NeedPricelistSync bool `json:"need_pricelist_sync"`
|
||||||
ComponentsCount int64 `json:"components_count"`
|
|
||||||
PricelistsCount int64 `json:"pricelists_count"`
|
|
||||||
ServerPricelists int `json:"server_pricelists"`
|
|
||||||
NeedComponentSync bool `json:"need_component_sync"`
|
|
||||||
NeedPricelistSync bool `json:"need_pricelist_sync"`
|
|
||||||
Readiness *sync.SyncReadiness `json:"readiness,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type SyncReadinessResponse struct {
|
|
||||||
Status string `json:"status"`
|
|
||||||
Blocked bool `json:"blocked"`
|
|
||||||
ReasonCode string `json:"reason_code,omitempty"`
|
|
||||||
ReasonText string `json:"reason_text,omitempty"`
|
|
||||||
RequiredMinAppVersion *string `json:"required_min_app_version,omitempty"`
|
|
||||||
LastCheckedAt *time.Time `json:"last_checked_at,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetStatus returns current sync status
|
// GetStatus returns current sync status
|
||||||
// GET /api/sync/status
|
// GET /api/sync/status
|
||||||
func (h *SyncHandler) GetStatus(c *gin.Context) {
|
func (h *SyncHandler) GetStatus(c *gin.Context) {
|
||||||
connStatus := h.connMgr.GetStatus()
|
// Check online status by pinging MariaDB
|
||||||
isOnline := connStatus.IsConnected && strings.TrimSpace(connStatus.LastError) == ""
|
isOnline := h.checkOnline()
|
||||||
|
|
||||||
|
// Get sync times
|
||||||
lastComponentSync := h.localDB.GetComponentSyncTime()
|
lastComponentSync := h.localDB.GetComponentSyncTime()
|
||||||
lastPricelistSync := h.localDB.GetLastSyncTime()
|
lastPricelistSync := h.localDB.GetLastSyncTime()
|
||||||
|
|
||||||
|
// Get counts
|
||||||
componentsCount := h.localDB.CountLocalComponents()
|
componentsCount := h.localDB.CountLocalComponents()
|
||||||
pricelistsCount := h.localDB.CountLocalPricelists()
|
pricelistsCount := h.localDB.CountLocalPricelists()
|
||||||
lastPricelistAttemptAt := h.localDB.GetLastPricelistSyncAttemptAt()
|
|
||||||
lastPricelistSyncStatus := h.localDB.GetLastPricelistSyncStatus()
|
// Get server pricelist count if online
|
||||||
lastPricelistSyncError := h.localDB.GetLastPricelistSyncError()
|
serverPricelists := 0
|
||||||
hasFailedSync := strings.EqualFold(lastPricelistSyncStatus, "failed")
|
needPricelistSync := false
|
||||||
|
if isOnline {
|
||||||
|
status, err := h.syncService.GetStatus()
|
||||||
|
if err == nil {
|
||||||
|
serverPricelists = status.ServerPricelists
|
||||||
|
needPricelistSync = status.NeedsSync
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if component sync is needed (older than 24 hours)
|
||||||
needComponentSync := h.localDB.NeedComponentSync(24)
|
needComponentSync := h.localDB.NeedComponentSync(24)
|
||||||
readiness := h.getReadinessLocal()
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, SyncStatusResponse{
|
c.JSON(http.StatusOK, SyncStatusResponse{
|
||||||
LastComponentSync: lastComponentSync,
|
LastComponentSync: lastComponentSync,
|
||||||
LastPricelistSync: lastPricelistSync,
|
LastPricelistSync: lastPricelistSync,
|
||||||
LastPricelistAttemptAt: lastPricelistAttemptAt,
|
IsOnline: isOnline,
|
||||||
LastPricelistSyncStatus: lastPricelistSyncStatus,
|
ComponentsCount: componentsCount,
|
||||||
LastPricelistSyncError: lastPricelistSyncError,
|
PricelistsCount: pricelistsCount,
|
||||||
HasIncompleteServerSync: hasFailedSync,
|
ServerPricelists: serverPricelists,
|
||||||
KnownServerChangesMiss: hasFailedSync,
|
NeedComponentSync: needComponentSync,
|
||||||
IsOnline: isOnline,
|
NeedPricelistSync: needPricelistSync,
|
||||||
ComponentsCount: componentsCount,
|
|
||||||
PricelistsCount: pricelistsCount,
|
|
||||||
ServerPricelists: 0,
|
|
||||||
NeedComponentSync: needComponentSync,
|
|
||||||
NeedPricelistSync: lastPricelistSync == nil || hasFailedSync,
|
|
||||||
Readiness: readiness,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetReadiness returns sync readiness guard status.
|
|
||||||
// GET /api/sync/readiness
|
|
||||||
func (h *SyncHandler) GetReadiness(c *gin.Context) {
|
|
||||||
readiness, err := h.syncService.GetReadiness()
|
|
||||||
if err != nil && readiness == nil {
|
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if readiness == nil {
|
|
||||||
c.JSON(http.StatusOK, SyncReadinessResponse{Status: sync.ReadinessUnknown, Blocked: false})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c.JSON(http.StatusOK, SyncReadinessResponse{
|
|
||||||
Status: readiness.Status,
|
|
||||||
Blocked: readiness.Blocked,
|
|
||||||
ReasonCode: readiness.ReasonCode,
|
|
||||||
ReasonText: readiness.ReasonText,
|
|
||||||
RequiredMinAppVersion: readiness.RequiredMinAppVersion,
|
|
||||||
LastCheckedAt: readiness.LastCheckedAt,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *SyncHandler) ensureSyncReadiness(c *gin.Context) bool {
|
|
||||||
readiness, err := h.syncService.EnsureReadinessForSync()
|
|
||||||
if err == nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
blocked := &sync.SyncBlockedError{}
|
|
||||||
if errors.As(err, &blocked) {
|
|
||||||
c.JSON(http.StatusLocked, gin.H{
|
|
||||||
"success": false,
|
|
||||||
"error": blocked.Error(),
|
|
||||||
"reason_code": blocked.Readiness.ReasonCode,
|
|
||||||
"reason_text": blocked.Readiness.ReasonText,
|
|
||||||
"required_min_app_version": blocked.Readiness.RequiredMinAppVersion,
|
|
||||||
"status": blocked.Readiness.Status,
|
|
||||||
"blocked": true,
|
|
||||||
"last_checked_at": blocked.Readiness.LastCheckedAt,
|
|
||||||
})
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{
|
|
||||||
"success": false,
|
|
||||||
"error": "internal server error",
|
|
||||||
})
|
|
||||||
_ = c.Error(err)
|
|
||||||
_ = readiness
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// SyncResultResponse represents sync operation result
|
// SyncResultResponse represents sync operation result
|
||||||
type SyncResultResponse struct {
|
type SyncResultResponse struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
@@ -172,7 +113,11 @@ type SyncResultResponse struct {
|
|||||||
// SyncComponents syncs components from MariaDB to local SQLite
|
// SyncComponents syncs components from MariaDB to local SQLite
|
||||||
// POST /api/sync/components
|
// POST /api/sync/components
|
||||||
func (h *SyncHandler) SyncComponents(c *gin.Context) {
|
func (h *SyncHandler) SyncComponents(c *gin.Context) {
|
||||||
if !h.ensureSyncReadiness(c) {
|
if !h.checkOnline() {
|
||||||
|
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||||
|
"success": false,
|
||||||
|
"error": "Database is offline",
|
||||||
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -181,9 +126,8 @@ func (h *SyncHandler) SyncComponents(c *gin.Context) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
c.JSON(http.StatusServiceUnavailable, gin.H{
|
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||||
"success": false,
|
"success": false,
|
||||||
"error": "database connection failed",
|
"error": "Database connection failed: " + err.Error(),
|
||||||
})
|
})
|
||||||
_ = c.Error(err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -192,9 +136,8 @@ func (h *SyncHandler) SyncComponents(c *gin.Context) {
|
|||||||
slog.Error("component sync failed", "error", err)
|
slog.Error("component sync failed", "error", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{
|
c.JSON(http.StatusInternalServerError, gin.H{
|
||||||
"success": false,
|
"success": false,
|
||||||
"error": "component sync failed",
|
"error": err.Error(),
|
||||||
})
|
})
|
||||||
_ = c.Error(err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -209,7 +152,11 @@ func (h *SyncHandler) SyncComponents(c *gin.Context) {
|
|||||||
// SyncPricelists syncs pricelists from MariaDB to local SQLite
|
// SyncPricelists syncs pricelists from MariaDB to local SQLite
|
||||||
// POST /api/sync/pricelists
|
// POST /api/sync/pricelists
|
||||||
func (h *SyncHandler) SyncPricelists(c *gin.Context) {
|
func (h *SyncHandler) SyncPricelists(c *gin.Context) {
|
||||||
if !h.ensureSyncReadiness(c) {
|
if !h.checkOnline() {
|
||||||
|
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||||
|
"success": false,
|
||||||
|
"error": "Database is offline",
|
||||||
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -219,9 +166,8 @@ func (h *SyncHandler) SyncPricelists(c *gin.Context) {
|
|||||||
slog.Error("pricelist sync failed", "error", err)
|
slog.Error("pricelist sync failed", "error", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{
|
c.JSON(http.StatusInternalServerError, gin.H{
|
||||||
"success": false,
|
"success": false,
|
||||||
"error": "pricelist sync failed",
|
"error": err.Error(),
|
||||||
})
|
})
|
||||||
_ = c.Error(err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -234,34 +180,6 @@ func (h *SyncHandler) SyncPricelists(c *gin.Context) {
|
|||||||
h.syncService.RecordSyncHeartbeat()
|
h.syncService.RecordSyncHeartbeat()
|
||||||
}
|
}
|
||||||
|
|
||||||
// SyncPartnumberBooks syncs partnumber book snapshots from MariaDB to local SQLite.
|
|
||||||
// POST /api/sync/partnumber-books
|
|
||||||
func (h *SyncHandler) SyncPartnumberBooks(c *gin.Context) {
|
|
||||||
if !h.ensureSyncReadiness(c) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
startTime := time.Now()
|
|
||||||
pulled, err := h.syncService.PullPartnumberBooks()
|
|
||||||
if err != nil {
|
|
||||||
slog.Error("partnumber books pull failed", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{
|
|
||||||
"success": false,
|
|
||||||
"error": "partnumber books sync failed",
|
|
||||||
})
|
|
||||||
_ = c.Error(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, SyncResultResponse{
|
|
||||||
Success: true,
|
|
||||||
Message: "Partnumber books synced successfully",
|
|
||||||
Synced: pulled,
|
|
||||||
Duration: time.Since(startTime).String(),
|
|
||||||
})
|
|
||||||
h.syncService.RecordSyncHeartbeat()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SyncAllResponse represents result of full sync
|
// SyncAllResponse represents result of full sync
|
||||||
type SyncAllResponse struct {
|
type SyncAllResponse struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
@@ -283,7 +201,11 @@ type SyncAllResponse struct {
|
|||||||
// - pull components, pricelists, projects, and configurations from server
|
// - pull components, pricelists, projects, and configurations from server
|
||||||
// POST /api/sync/all
|
// POST /api/sync/all
|
||||||
func (h *SyncHandler) SyncAll(c *gin.Context) {
|
func (h *SyncHandler) SyncAll(c *gin.Context) {
|
||||||
if !h.ensureSyncReadiness(c) {
|
if !h.checkOnline() {
|
||||||
|
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||||
|
"success": false,
|
||||||
|
"error": "Database is offline",
|
||||||
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -296,9 +218,8 @@ func (h *SyncHandler) SyncAll(c *gin.Context) {
|
|||||||
slog.Error("pending push failed during full sync", "error", err)
|
slog.Error("pending push failed during full sync", "error", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{
|
c.JSON(http.StatusInternalServerError, gin.H{
|
||||||
"success": false,
|
"success": false,
|
||||||
"error": "pending changes push failed",
|
"error": "Pending changes push failed: " + err.Error(),
|
||||||
})
|
})
|
||||||
_ = c.Error(err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -307,9 +228,8 @@ func (h *SyncHandler) SyncAll(c *gin.Context) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
c.JSON(http.StatusServiceUnavailable, gin.H{
|
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||||
"success": false,
|
"success": false,
|
||||||
"error": "database connection failed",
|
"error": "Database connection failed: " + err.Error(),
|
||||||
})
|
})
|
||||||
_ = c.Error(err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -318,9 +238,8 @@ func (h *SyncHandler) SyncAll(c *gin.Context) {
|
|||||||
slog.Error("component sync failed during full sync", "error", err)
|
slog.Error("component sync failed during full sync", "error", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{
|
c.JSON(http.StatusInternalServerError, gin.H{
|
||||||
"success": false,
|
"success": false,
|
||||||
"error": "component sync failed",
|
"error": "Component sync failed: " + err.Error(),
|
||||||
})
|
})
|
||||||
_ = c.Error(err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
componentsSynced = compResult.TotalSynced
|
componentsSynced = compResult.TotalSynced
|
||||||
@@ -331,11 +250,10 @@ func (h *SyncHandler) SyncAll(c *gin.Context) {
|
|||||||
slog.Error("pricelist sync failed during full sync", "error", err)
|
slog.Error("pricelist sync failed during full sync", "error", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{
|
c.JSON(http.StatusInternalServerError, gin.H{
|
||||||
"success": false,
|
"success": false,
|
||||||
"error": "pricelist sync failed",
|
"error": "Pricelist sync failed: " + err.Error(),
|
||||||
"pending_pushed": pendingPushed,
|
"pending_pushed": pendingPushed,
|
||||||
"components_synced": componentsSynced,
|
"components_synced": componentsSynced,
|
||||||
})
|
})
|
||||||
_ = c.Error(err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -344,12 +262,11 @@ func (h *SyncHandler) SyncAll(c *gin.Context) {
|
|||||||
slog.Error("project import failed during full sync", "error", err)
|
slog.Error("project import failed during full sync", "error", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{
|
c.JSON(http.StatusInternalServerError, gin.H{
|
||||||
"success": false,
|
"success": false,
|
||||||
"error": "project import failed",
|
"error": "Project import failed: " + err.Error(),
|
||||||
"pending_pushed": pendingPushed,
|
"pending_pushed": pendingPushed,
|
||||||
"components_synced": componentsSynced,
|
"components_synced": componentsSynced,
|
||||||
"pricelists_synced": pricelistsSynced,
|
"pricelists_synced": pricelistsSynced,
|
||||||
})
|
})
|
||||||
_ = c.Error(err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -358,7 +275,7 @@ func (h *SyncHandler) SyncAll(c *gin.Context) {
|
|||||||
slog.Error("configuration import failed during full sync", "error", err)
|
slog.Error("configuration import failed during full sync", "error", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{
|
c.JSON(http.StatusInternalServerError, gin.H{
|
||||||
"success": false,
|
"success": false,
|
||||||
"error": "configuration import failed",
|
"error": "Configuration import failed: " + err.Error(),
|
||||||
"pending_pushed": pendingPushed,
|
"pending_pushed": pendingPushed,
|
||||||
"components_synced": componentsSynced,
|
"components_synced": componentsSynced,
|
||||||
"pricelists_synced": pricelistsSynced,
|
"pricelists_synced": pricelistsSynced,
|
||||||
@@ -366,7 +283,6 @@ func (h *SyncHandler) SyncAll(c *gin.Context) {
|
|||||||
"projects_updated": projectsResult.Updated,
|
"projects_updated": projectsResult.Updated,
|
||||||
"projects_skipped": projectsResult.Skipped,
|
"projects_skipped": projectsResult.Skipped,
|
||||||
})
|
})
|
||||||
_ = c.Error(err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -395,7 +311,11 @@ func (h *SyncHandler) checkOnline() bool {
|
|||||||
// PushPendingChanges pushes all pending changes to the server
|
// PushPendingChanges pushes all pending changes to the server
|
||||||
// POST /api/sync/push
|
// POST /api/sync/push
|
||||||
func (h *SyncHandler) PushPendingChanges(c *gin.Context) {
|
func (h *SyncHandler) PushPendingChanges(c *gin.Context) {
|
||||||
if !h.ensureSyncReadiness(c) {
|
if !h.checkOnline() {
|
||||||
|
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||||
|
"success": false,
|
||||||
|
"error": "Database is offline",
|
||||||
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -405,9 +325,8 @@ func (h *SyncHandler) PushPendingChanges(c *gin.Context) {
|
|||||||
slog.Error("push pending changes failed", "error", err)
|
slog.Error("push pending changes failed", "error", err)
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{
|
c.JSON(http.StatusInternalServerError, gin.H{
|
||||||
"success": false,
|
"success": false,
|
||||||
"error": "pending changes push failed",
|
"error": err.Error(),
|
||||||
})
|
})
|
||||||
_ = c.Error(err)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -434,7 +353,9 @@ func (h *SyncHandler) GetPendingCount(c *gin.Context) {
|
|||||||
func (h *SyncHandler) GetPendingChanges(c *gin.Context) {
|
func (h *SyncHandler) GetPendingChanges(c *gin.Context) {
|
||||||
changes, err := h.localDB.GetPendingChanges()
|
changes, err := h.localDB.GetPendingChanges()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
c.JSON(http.StatusInternalServerError, gin.H{
|
||||||
|
"error": err.Error(),
|
||||||
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -443,58 +364,12 @@ func (h *SyncHandler) GetPendingChanges(c *gin.Context) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// RepairPendingChanges attempts to repair errored pending changes
|
// SyncInfoResponse represents sync information
|
||||||
// POST /api/sync/repair
|
|
||||||
func (h *SyncHandler) RepairPendingChanges(c *gin.Context) {
|
|
||||||
repaired, remainingErrors, err := h.localDB.RepairPendingChanges()
|
|
||||||
if err != nil {
|
|
||||||
slog.Error("repair pending changes failed", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{
|
|
||||||
"success": false,
|
|
||||||
"error": "pending changes repair failed",
|
|
||||||
})
|
|
||||||
_ = c.Error(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
|
||||||
"success": true,
|
|
||||||
"repaired": repaired,
|
|
||||||
"remaining_errors": remainingErrors,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// SyncInfoResponse represents sync information for the modal
|
|
||||||
type SyncInfoResponse struct {
|
type SyncInfoResponse struct {
|
||||||
// Connection
|
LastSyncAt *time.Time `json:"last_sync_at"`
|
||||||
DBHost string `json:"db_host"`
|
IsOnline bool `json:"is_online"`
|
||||||
DBUser string `json:"db_user"`
|
|
||||||
DBName string `json:"db_name"`
|
|
||||||
|
|
||||||
// Status
|
|
||||||
IsOnline bool `json:"is_online"`
|
|
||||||
LastSyncAt *time.Time `json:"last_sync_at"`
|
|
||||||
LastPricelistAttemptAt *time.Time `json:"last_pricelist_attempt_at,omitempty"`
|
|
||||||
LastPricelistSyncStatus string `json:"last_pricelist_sync_status,omitempty"`
|
|
||||||
LastPricelistSyncError string `json:"last_pricelist_sync_error,omitempty"`
|
|
||||||
NeedPricelistSync bool `json:"need_pricelist_sync"`
|
|
||||||
HasIncompleteServerSync bool `json:"has_incomplete_server_sync"`
|
|
||||||
|
|
||||||
// Statistics
|
|
||||||
LotCount int64 `json:"lot_count"`
|
|
||||||
LotLogCount int64 `json:"lot_log_count"`
|
|
||||||
ConfigCount int64 `json:"config_count"`
|
|
||||||
ProjectCount int64 `json:"project_count"`
|
|
||||||
|
|
||||||
// Pending changes
|
|
||||||
PendingChanges []localdb.PendingChange `json:"pending_changes"`
|
|
||||||
|
|
||||||
// Errors
|
|
||||||
ErrorCount int `json:"error_count"`
|
ErrorCount int `json:"error_count"`
|
||||||
Errors []SyncError `json:"errors,omitempty"`
|
Errors []SyncError `json:"errors,omitempty"`
|
||||||
|
|
||||||
// Readiness guard
|
|
||||||
Readiness *sync.SyncReadiness `json:"readiness,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type SyncUsersStatusResponse struct {
|
type SyncUsersStatusResponse struct {
|
||||||
@@ -514,46 +389,34 @@ type SyncError struct {
|
|||||||
// GetInfo returns sync information for modal
|
// GetInfo returns sync information for modal
|
||||||
// GET /api/sync/info
|
// GET /api/sync/info
|
||||||
func (h *SyncHandler) GetInfo(c *gin.Context) {
|
func (h *SyncHandler) GetInfo(c *gin.Context) {
|
||||||
connStatus := h.connMgr.GetStatus()
|
// Check online status by pinging MariaDB
|
||||||
isOnline := connStatus.IsConnected && strings.TrimSpace(connStatus.LastError) == ""
|
isOnline := h.checkOnline()
|
||||||
|
|
||||||
// Get DB connection info
|
|
||||||
var dbHost, dbUser, dbName string
|
|
||||||
if settings, err := h.localDB.GetSettings(); err == nil {
|
|
||||||
dbHost = settings.Host + ":" + fmt.Sprintf("%d", settings.Port)
|
|
||||||
dbUser = settings.User
|
|
||||||
dbName = settings.Database
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get sync times
|
// Get sync times
|
||||||
lastPricelistSync := h.localDB.GetLastSyncTime()
|
lastPricelistSync := h.localDB.GetLastSyncTime()
|
||||||
lastPricelistAttemptAt := h.localDB.GetLastPricelistSyncAttemptAt()
|
|
||||||
lastPricelistSyncStatus := h.localDB.GetLastPricelistSyncStatus()
|
|
||||||
lastPricelistSyncError := h.localDB.GetLastPricelistSyncError()
|
|
||||||
hasFailedSync := strings.EqualFold(lastPricelistSyncStatus, "failed")
|
|
||||||
needPricelistSync := lastPricelistSync == nil || hasFailedSync
|
|
||||||
hasIncompleteServerSync := hasFailedSync
|
|
||||||
|
|
||||||
// Get local counts
|
|
||||||
configCount := h.localDB.CountConfigurations()
|
|
||||||
projectCount := h.localDB.CountProjects()
|
|
||||||
componentCount := h.localDB.CountLocalComponents()
|
|
||||||
pricelistCount := h.localDB.CountLocalPricelists()
|
|
||||||
|
|
||||||
// Get error count (only changes with LastError != "")
|
// Get error count (only changes with LastError != "")
|
||||||
errorCount := int(h.localDB.CountErroredChanges())
|
errorCount := int(h.localDB.CountErroredChanges())
|
||||||
|
|
||||||
// Get pending changes
|
// Get recent errors (last 10)
|
||||||
changes, err := h.localDB.GetPendingChanges()
|
changes, err := h.localDB.GetPendingChanges()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Error("failed to get pending changes for sync info", "error", err)
|
slog.Error("failed to get pending changes for sync info", "error", err)
|
||||||
changes = []localdb.PendingChange{}
|
// Even if we can't get changes, we can still return the error count
|
||||||
|
c.JSON(http.StatusOK, SyncInfoResponse{
|
||||||
|
LastSyncAt: lastPricelistSync,
|
||||||
|
IsOnline: isOnline,
|
||||||
|
ErrorCount: errorCount,
|
||||||
|
Errors: []SyncError{}, // Return empty errors list
|
||||||
|
})
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var syncErrors []SyncError
|
var errors []SyncError
|
||||||
for _, change := range changes {
|
for _, change := range changes {
|
||||||
|
// Check if there's a last error and it's not empty
|
||||||
if change.LastError != "" {
|
if change.LastError != "" {
|
||||||
syncErrors = append(syncErrors, SyncError{
|
errors = append(errors, SyncError{
|
||||||
Timestamp: change.CreatedAt,
|
Timestamp: change.CreatedAt,
|
||||||
Message: change.LastError,
|
Message: change.LastError,
|
||||||
})
|
})
|
||||||
@@ -561,31 +424,15 @@ func (h *SyncHandler) GetInfo(c *gin.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Limit to last 10 errors
|
// Limit to last 10 errors
|
||||||
if len(syncErrors) > 10 {
|
if len(errors) > 10 {
|
||||||
syncErrors = syncErrors[:10]
|
errors = errors[:10]
|
||||||
}
|
}
|
||||||
|
|
||||||
readiness := h.getReadinessLocal()
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, SyncInfoResponse{
|
c.JSON(http.StatusOK, SyncInfoResponse{
|
||||||
DBHost: dbHost,
|
LastSyncAt: lastPricelistSync,
|
||||||
DBUser: dbUser,
|
IsOnline: isOnline,
|
||||||
DBName: dbName,
|
ErrorCount: errorCount,
|
||||||
IsOnline: isOnline,
|
Errors: errors,
|
||||||
LastSyncAt: lastPricelistSync,
|
|
||||||
LastPricelistAttemptAt: lastPricelistAttemptAt,
|
|
||||||
LastPricelistSyncStatus: lastPricelistSyncStatus,
|
|
||||||
LastPricelistSyncError: lastPricelistSyncError,
|
|
||||||
NeedPricelistSync: needPricelistSync,
|
|
||||||
HasIncompleteServerSync: hasIncompleteServerSync,
|
|
||||||
LotCount: componentCount,
|
|
||||||
LotLogCount: pricelistCount,
|
|
||||||
ConfigCount: configCount,
|
|
||||||
ProjectCount: projectCount,
|
|
||||||
PendingChanges: changes,
|
|
||||||
ErrorCount: errorCount,
|
|
||||||
Errors: syncErrors,
|
|
||||||
Readiness: readiness,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -611,7 +458,9 @@ func (h *SyncHandler) GetUsersStatus(c *gin.Context) {
|
|||||||
|
|
||||||
users, err := h.syncService.ListUserSyncStatuses(threshold)
|
users, err := h.syncService.ListUserSyncStatuses(threshold)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
c.JSON(http.StatusInternalServerError, gin.H{
|
||||||
|
"error": err.Error(),
|
||||||
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -640,109 +489,17 @@ func (h *SyncHandler) SyncStatusPartial(c *gin.Context) {
|
|||||||
|
|
||||||
// Get pending count
|
// Get pending count
|
||||||
pendingCount := h.localDB.GetPendingCount()
|
pendingCount := h.localDB.GetPendingCount()
|
||||||
readiness := h.getReadinessLocal()
|
|
||||||
isBlocked := readiness != nil && readiness.Blocked
|
|
||||||
lastPricelistSyncStatus := h.localDB.GetLastPricelistSyncStatus()
|
|
||||||
lastPricelistSyncError := h.localDB.GetLastPricelistSyncError()
|
|
||||||
hasFailedSync := strings.EqualFold(lastPricelistSyncStatus, "failed")
|
|
||||||
hasIncompleteServerSync := hasFailedSync
|
|
||||||
|
|
||||||
slog.Debug("rendering sync status", "is_offline", isOffline, "pending_count", pendingCount, "sync_blocked", isBlocked)
|
slog.Debug("rendering sync status", "is_offline", isOffline, "pending_count", pendingCount)
|
||||||
|
|
||||||
data := gin.H{
|
data := gin.H{
|
||||||
"IsOffline": isOffline,
|
"IsOffline": isOffline,
|
||||||
"PendingCount": pendingCount,
|
"PendingCount": pendingCount,
|
||||||
"IsBlocked": isBlocked,
|
|
||||||
"HasFailedSync": hasFailedSync,
|
|
||||||
"HasIncompleteServerSync": hasIncompleteServerSync,
|
|
||||||
"SyncIssueTitle": func() string {
|
|
||||||
if hasIncompleteServerSync {
|
|
||||||
return "Последняя синхронизация прайслистов прервалась. На сервере есть изменения, которые не загружены локально."
|
|
||||||
}
|
|
||||||
if hasFailedSync {
|
|
||||||
if lastPricelistSyncError != "" {
|
|
||||||
return lastPricelistSyncError
|
|
||||||
}
|
|
||||||
return "Последняя синхронизация прайслистов завершилась ошибкой."
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}(),
|
|
||||||
"BlockedReason": func() string {
|
|
||||||
if readiness == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return readiness.ReasonText
|
|
||||||
}(),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
c.Header("Content-Type", "text/html; charset=utf-8")
|
c.Header("Content-Type", "text/html; charset=utf-8")
|
||||||
if err := h.tmpl.ExecuteTemplate(c.Writer, "sync_status", data); err != nil {
|
if err := h.tmpl.ExecuteTemplate(c.Writer, "sync_status", data); err != nil {
|
||||||
slog.Error("failed to render sync_status template", "error", err)
|
slog.Error("failed to render sync_status template", "error", err)
|
||||||
_ = c.Error(err)
|
c.String(http.StatusInternalServerError, "Template error: "+err.Error())
|
||||||
c.String(http.StatusInternalServerError, "Template error")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *SyncHandler) getReadinessLocal() *sync.SyncReadiness {
|
|
||||||
h.readinessMu.Lock()
|
|
||||||
if h.readinessCached != nil && time.Since(h.readinessCachedAt) < 10*time.Second {
|
|
||||||
cached := *h.readinessCached
|
|
||||||
h.readinessMu.Unlock()
|
|
||||||
return &cached
|
|
||||||
}
|
|
||||||
h.readinessMu.Unlock()
|
|
||||||
|
|
||||||
state, err := h.localDB.GetSyncGuardState()
|
|
||||||
if err != nil || state == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
readiness := &sync.SyncReadiness{
|
|
||||||
Status: state.Status,
|
|
||||||
Blocked: state.Status == sync.ReadinessBlocked,
|
|
||||||
ReasonCode: state.ReasonCode,
|
|
||||||
ReasonText: state.ReasonText,
|
|
||||||
RequiredMinAppVersion: state.RequiredMinAppVersion,
|
|
||||||
LastCheckedAt: state.LastCheckedAt,
|
|
||||||
}
|
|
||||||
|
|
||||||
h.readinessMu.Lock()
|
|
||||||
h.readinessCached = readiness
|
|
||||||
h.readinessCachedAt = time.Now()
|
|
||||||
h.readinessMu.Unlock()
|
|
||||||
return readiness
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReportPartnumberSeen pushes unresolved vendor partnumbers to qt_vendor_partnumber_seen on MariaDB.
|
|
||||||
// POST /api/sync/partnumber-seen
|
|
||||||
func (h *SyncHandler) ReportPartnumberSeen(c *gin.Context) {
|
|
||||||
var body struct {
|
|
||||||
Items []struct {
|
|
||||||
Partnumber string `json:"partnumber"`
|
|
||||||
Description string `json:"description"`
|
|
||||||
Ignored bool `json:"ignored"`
|
|
||||||
} `json:"items"`
|
|
||||||
}
|
|
||||||
if err := c.ShouldBindJSON(&body); err != nil {
|
|
||||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
items := make([]sync.SeenPartnumber, 0, len(body.Items))
|
|
||||||
for _, it := range body.Items {
|
|
||||||
if it.Partnumber != "" {
|
|
||||||
items = append(items, sync.SeenPartnumber{
|
|
||||||
Partnumber: it.Partnumber,
|
|
||||||
Description: it.Description,
|
|
||||||
Ignored: it.Ignored,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := h.syncService.PushPartnumberSeen(items); err != nil {
|
|
||||||
RespondError(c, http.StatusServiceUnavailable, "service unavailable", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"reported": len(items)})
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,64 +0,0 @@
|
|||||||
package handlers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
|
||||||
syncsvc "git.mchus.pro/mchus/quoteforge/internal/services/sync"
|
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestSyncReadinessOfflineBlocked(t *testing.T) {
|
|
||||||
gin.SetMode(gin.TestMode)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
|
||||||
local, err := localdb.New(filepath.Join(dir, "qfs.db"))
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("init local db: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
service := syncsvc.NewService(nil, local)
|
|
||||||
h, err := NewSyncHandler(local, service, nil, filepath.Join("web", "templates"), 5*time.Minute)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("new sync handler: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
router := gin.New()
|
|
||||||
router.GET("/api/sync/readiness", h.GetReadiness)
|
|
||||||
router.POST("/api/sync/push", h.PushPendingChanges)
|
|
||||||
|
|
||||||
readinessResp := httptest.NewRecorder()
|
|
||||||
readinessReq, _ := http.NewRequest(http.MethodGet, "/api/sync/readiness", nil)
|
|
||||||
router.ServeHTTP(readinessResp, readinessReq)
|
|
||||||
if readinessResp.Code != http.StatusOK {
|
|
||||||
t.Fatalf("unexpected readiness status: %d", readinessResp.Code)
|
|
||||||
}
|
|
||||||
|
|
||||||
var readinessBody map[string]any
|
|
||||||
if err := json.Unmarshal(readinessResp.Body.Bytes(), &readinessBody); err != nil {
|
|
||||||
t.Fatalf("decode readiness body: %v", err)
|
|
||||||
}
|
|
||||||
if blocked, _ := readinessBody["blocked"].(bool); !blocked {
|
|
||||||
t.Fatalf("expected blocked readiness, got %v", readinessBody["blocked"])
|
|
||||||
}
|
|
||||||
|
|
||||||
pushResp := httptest.NewRecorder()
|
|
||||||
pushReq, _ := http.NewRequest(http.MethodPost, "/api/sync/push", nil)
|
|
||||||
router.ServeHTTP(pushResp, pushReq)
|
|
||||||
if pushResp.Code != http.StatusLocked {
|
|
||||||
t.Fatalf("expected 423 for blocked sync push, got %d body=%s", pushResp.Code, pushResp.Body.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
var pushBody map[string]any
|
|
||||||
if err := json.Unmarshal(pushResp.Body.Bytes(), &pushBody); err != nil {
|
|
||||||
t.Fatalf("decode push body: %v", err)
|
|
||||||
}
|
|
||||||
if pushBody["reason_text"] == nil || pushBody["reason_text"] == "" {
|
|
||||||
t.Fatalf("expected reason_text in blocked response, got %v", pushBody)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,201 +0,0 @@
|
|||||||
package handlers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/services"
|
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
)
|
|
||||||
|
|
||||||
// VendorSpecHandler handles vendor BOM spec operations for a configuration.
|
|
||||||
type VendorSpecHandler struct {
|
|
||||||
localDB *localdb.LocalDB
|
|
||||||
configService *services.LocalConfigurationService
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewVendorSpecHandler(localDB *localdb.LocalDB) *VendorSpecHandler {
|
|
||||||
return &VendorSpecHandler{
|
|
||||||
localDB: localDB,
|
|
||||||
configService: services.NewLocalConfigurationService(localDB, nil, nil, func() bool { return false }),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// lookupConfig finds an active configuration by UUID using the standard localDB method.
|
|
||||||
func (h *VendorSpecHandler) lookupConfig(uuid string) (*localdb.LocalConfiguration, error) {
|
|
||||||
cfg, err := h.localDB.GetConfigurationByUUID(uuid)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if !cfg.IsActive {
|
|
||||||
return nil, errors.New("not active")
|
|
||||||
}
|
|
||||||
return cfg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetVendorSpec returns the vendor spec (BOM) for a configuration.
|
|
||||||
// GET /api/configs/:uuid/vendor-spec
|
|
||||||
func (h *VendorSpecHandler) GetVendorSpec(c *gin.Context) {
|
|
||||||
cfg, err := h.lookupConfig(c.Param("uuid"))
|
|
||||||
if err != nil {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "configuration not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
spec := cfg.VendorSpec
|
|
||||||
if spec == nil {
|
|
||||||
spec = localdb.VendorSpec{}
|
|
||||||
}
|
|
||||||
c.JSON(http.StatusOK, gin.H{"vendor_spec": spec})
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutVendorSpec saves (replaces) the vendor spec for a configuration.
|
|
||||||
// PUT /api/configs/:uuid/vendor-spec
|
|
||||||
func (h *VendorSpecHandler) PutVendorSpec(c *gin.Context) {
|
|
||||||
cfg, err := h.lookupConfig(c.Param("uuid"))
|
|
||||||
if err != nil {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "configuration not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var body struct {
|
|
||||||
VendorSpec []localdb.VendorSpecItem `json:"vendor_spec"`
|
|
||||||
}
|
|
||||||
if err := c.ShouldBindJSON(&body); err != nil {
|
|
||||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range body.VendorSpec {
|
|
||||||
if body.VendorSpec[i].SortOrder == 0 {
|
|
||||||
body.VendorSpec[i].SortOrder = (i + 1) * 10
|
|
||||||
}
|
|
||||||
// Persist canonical LOT mapping only.
|
|
||||||
body.VendorSpec[i].LotMappings = normalizeLotMappings(body.VendorSpec[i].LotMappings)
|
|
||||||
body.VendorSpec[i].ResolvedLotName = ""
|
|
||||||
body.VendorSpec[i].ResolutionSource = ""
|
|
||||||
body.VendorSpec[i].ManualLotSuggestion = ""
|
|
||||||
body.VendorSpec[i].LotQtyPerPN = 0
|
|
||||||
body.VendorSpec[i].LotAllocations = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
spec := localdb.VendorSpec(body.VendorSpec)
|
|
||||||
if _, err := h.configService.UpdateVendorSpecNoAuth(cfg.UUID, spec); err != nil {
|
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"vendor_spec": spec})
|
|
||||||
}
|
|
||||||
|
|
||||||
func normalizeLotMappings(in []localdb.VendorSpecLotMapping) []localdb.VendorSpecLotMapping {
|
|
||||||
if len(in) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
merged := make(map[string]int, len(in))
|
|
||||||
order := make([]string, 0, len(in))
|
|
||||||
for _, m := range in {
|
|
||||||
lot := strings.TrimSpace(m.LotName)
|
|
||||||
if lot == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
qty := m.QuantityPerPN
|
|
||||||
if qty < 1 {
|
|
||||||
qty = 1
|
|
||||||
}
|
|
||||||
if _, exists := merged[lot]; !exists {
|
|
||||||
order = append(order, lot)
|
|
||||||
}
|
|
||||||
merged[lot] += qty
|
|
||||||
}
|
|
||||||
out := make([]localdb.VendorSpecLotMapping, 0, len(order))
|
|
||||||
for _, lot := range order {
|
|
||||||
out = append(out, localdb.VendorSpecLotMapping{
|
|
||||||
LotName: lot,
|
|
||||||
QuantityPerPN: merged[lot],
|
|
||||||
})
|
|
||||||
}
|
|
||||||
if len(out) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResolveVendorSpec resolves vendor PN → LOT without modifying the cart.
|
|
||||||
// POST /api/configs/:uuid/vendor-spec/resolve
|
|
||||||
func (h *VendorSpecHandler) ResolveVendorSpec(c *gin.Context) {
|
|
||||||
if _, err := h.lookupConfig(c.Param("uuid")); err != nil {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "configuration not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var body struct {
|
|
||||||
VendorSpec []localdb.VendorSpecItem `json:"vendor_spec"`
|
|
||||||
}
|
|
||||||
if err := c.ShouldBindJSON(&body); err != nil {
|
|
||||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
bookRepo := repository.NewPartnumberBookRepository(h.localDB.DB())
|
|
||||||
resolver := services.NewVendorSpecResolver(bookRepo)
|
|
||||||
|
|
||||||
resolved, err := resolver.Resolve(body.VendorSpec)
|
|
||||||
if err != nil {
|
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
book, _ := bookRepo.GetActiveBook()
|
|
||||||
aggregated, err := services.AggregateLOTs(resolved, book, bookRepo)
|
|
||||||
if err != nil {
|
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
|
||||||
"resolved": resolved,
|
|
||||||
"aggregated": aggregated,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ApplyVendorSpec applies the resolved BOM to the cart (Estimate items).
|
|
||||||
// POST /api/configs/:uuid/vendor-spec/apply
|
|
||||||
func (h *VendorSpecHandler) ApplyVendorSpec(c *gin.Context) {
|
|
||||||
cfg, err := h.lookupConfig(c.Param("uuid"))
|
|
||||||
if err != nil {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "configuration not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var body struct {
|
|
||||||
Items []struct {
|
|
||||||
LotName string `json:"lot_name"`
|
|
||||||
Quantity int `json:"quantity"`
|
|
||||||
UnitPrice float64 `json:"unit_price"`
|
|
||||||
} `json:"items"`
|
|
||||||
}
|
|
||||||
if err := c.ShouldBindJSON(&body); err != nil {
|
|
||||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
newItems := make(localdb.LocalConfigItems, 0, len(body.Items))
|
|
||||||
for _, it := range body.Items {
|
|
||||||
newItems = append(newItems, localdb.LocalConfigItem{
|
|
||||||
LotName: it.LotName,
|
|
||||||
Quantity: it.Quantity,
|
|
||||||
UnitPrice: it.UnitPrice,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := h.configService.ApplyVendorSpecItemsNoAuth(cfg.UUID, newItems); err != nil {
|
|
||||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"items": newItems})
|
|
||||||
}
|
|
||||||
@@ -1,24 +1,23 @@
|
|||||||
package handlers
|
package handlers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"html/template"
|
"html/template"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
|
||||||
|
|
||||||
qfassets "git.mchus.pro/mchus/quoteforge"
|
qfassets "git.mchus.pro/mchus/quoteforge"
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/appmeta"
|
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
)
|
)
|
||||||
|
|
||||||
type WebHandler struct {
|
type WebHandler struct {
|
||||||
templates map[string]*template.Template
|
templates map[string]*template.Template
|
||||||
localDB *localdb.LocalDB
|
componentService *services.ComponentService
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewWebHandler(_ string, localDB *localdb.LocalDB) (*WebHandler, error) {
|
func NewWebHandler(templatesPath string, componentService *services.ComponentService) (*WebHandler, error) {
|
||||||
funcMap := template.FuncMap{
|
funcMap := template.FuncMap{
|
||||||
"sub": func(a, b int) int { return a - b },
|
"sub": func(a, b int) int { return a - b },
|
||||||
"add": func(a, b int) int { return a + b },
|
"add": func(a, b int) int { return a + b },
|
||||||
@@ -61,16 +60,27 @@ func NewWebHandler(_ string, localDB *localdb.LocalDB) (*WebHandler, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
templates := make(map[string]*template.Template)
|
templates := make(map[string]*template.Template)
|
||||||
|
basePath := filepath.Join(templatesPath, "base.html")
|
||||||
|
useDisk := false
|
||||||
|
if stat, statErr := os.Stat(templatesPath); statErr == nil && stat.IsDir() {
|
||||||
|
useDisk = true
|
||||||
|
}
|
||||||
|
|
||||||
// Load each page template with base
|
// Load each page template with base
|
||||||
simplePages := []string{"configs.html", "projects.html", "project_detail.html", "pricelists.html", "pricelist_detail.html", "config_revisions.html", "partnumber_books.html"}
|
simplePages := []string{"login.html", "configs.html", "projects.html", "project_detail.html", "admin_pricing.html", "pricelists.html", "pricelist_detail.html"}
|
||||||
for _, page := range simplePages {
|
for _, page := range simplePages {
|
||||||
|
pagePath := filepath.Join(templatesPath, page)
|
||||||
var tmpl *template.Template
|
var tmpl *template.Template
|
||||||
var err error
|
var err error
|
||||||
tmpl, err = template.New("").Funcs(funcMap).ParseFS(
|
if useDisk {
|
||||||
qfassets.TemplatesFS,
|
tmpl, err = template.New("").Funcs(funcMap).ParseFiles(basePath, pagePath)
|
||||||
"web/templates/base.html",
|
} else {
|
||||||
"web/templates/"+page,
|
tmpl, err = template.New("").Funcs(funcMap).ParseFS(
|
||||||
)
|
qfassets.TemplatesFS,
|
||||||
|
"web/templates/base.html",
|
||||||
|
"web/templates/"+page,
|
||||||
|
)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -78,14 +88,20 @@ func NewWebHandler(_ string, localDB *localdb.LocalDB) (*WebHandler, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Index page needs components_list.html as well
|
// Index page needs components_list.html as well
|
||||||
|
indexPath := filepath.Join(templatesPath, "index.html")
|
||||||
|
componentsListPath := filepath.Join(templatesPath, "components_list.html")
|
||||||
var indexTmpl *template.Template
|
var indexTmpl *template.Template
|
||||||
var err error
|
var err error
|
||||||
indexTmpl, err = template.New("").Funcs(funcMap).ParseFS(
|
if useDisk {
|
||||||
qfassets.TemplatesFS,
|
indexTmpl, err = template.New("").Funcs(funcMap).ParseFiles(basePath, indexPath, componentsListPath)
|
||||||
"web/templates/base.html",
|
} else {
|
||||||
"web/templates/index.html",
|
indexTmpl, err = template.New("").Funcs(funcMap).ParseFS(
|
||||||
"web/templates/components_list.html",
|
qfassets.TemplatesFS,
|
||||||
)
|
"web/templates/base.html",
|
||||||
|
"web/templates/index.html",
|
||||||
|
"web/templates/components_list.html",
|
||||||
|
)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -94,12 +110,17 @@ func NewWebHandler(_ string, localDB *localdb.LocalDB) (*WebHandler, error) {
|
|||||||
// Load partial templates (no base needed)
|
// Load partial templates (no base needed)
|
||||||
partials := []string{"components_list.html"}
|
partials := []string{"components_list.html"}
|
||||||
for _, partial := range partials {
|
for _, partial := range partials {
|
||||||
|
partialPath := filepath.Join(templatesPath, partial)
|
||||||
var tmpl *template.Template
|
var tmpl *template.Template
|
||||||
var err error
|
var err error
|
||||||
tmpl, err = template.New("").Funcs(funcMap).ParseFS(
|
if useDisk {
|
||||||
qfassets.TemplatesFS,
|
tmpl, err = template.New("").Funcs(funcMap).ParseFiles(partialPath)
|
||||||
"web/templates/"+partial,
|
} else {
|
||||||
)
|
tmpl, err = template.New("").Funcs(funcMap).ParseFS(
|
||||||
|
qfassets.TemplatesFS,
|
||||||
|
"web/templates/"+partial,
|
||||||
|
)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -107,55 +128,60 @@ func NewWebHandler(_ string, localDB *localdb.LocalDB) (*WebHandler, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return &WebHandler{
|
return &WebHandler{
|
||||||
templates: templates,
|
templates: templates,
|
||||||
localDB: localDB,
|
componentService: componentService,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *WebHandler) render(c *gin.Context, name string, data gin.H) {
|
func (h *WebHandler) render(c *gin.Context, name string, data gin.H) {
|
||||||
data["AppVersion"] = appmeta.Version()
|
|
||||||
c.Header("Content-Type", "text/html; charset=utf-8")
|
c.Header("Content-Type", "text/html; charset=utf-8")
|
||||||
tmpl, ok := h.templates[name]
|
tmpl, ok := h.templates[name]
|
||||||
if !ok {
|
if !ok {
|
||||||
_ = c.Error(fmt.Errorf("template %q not found", name))
|
c.String(500, "Template not found: %s", name)
|
||||||
c.String(500, "Template error")
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Execute the page template which will use base
|
// Execute the page template which will use base
|
||||||
if err := tmpl.ExecuteTemplate(c.Writer, name, data); err != nil {
|
if err := tmpl.ExecuteTemplate(c.Writer, name, data); err != nil {
|
||||||
_ = c.Error(err)
|
c.String(500, "Template error: %v", err)
|
||||||
c.String(500, "Template error")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *WebHandler) Index(c *gin.Context) {
|
func (h *WebHandler) Index(c *gin.Context) {
|
||||||
// Redirect to projects page - configurator is accessed via /configurator?uuid=...
|
// Redirect to configs page - configurator is accessed via /configurator?uuid=...
|
||||||
c.Redirect(302, "/projects")
|
c.Redirect(302, "/configs")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *WebHandler) Configurator(c *gin.Context) {
|
func (h *WebHandler) Configurator(c *gin.Context) {
|
||||||
|
categories, _ := h.componentService.GetCategories()
|
||||||
uuid := c.Query("uuid")
|
uuid := c.Query("uuid")
|
||||||
categories, _ := h.localCategories()
|
|
||||||
components, total, err := h.localDB.ListComponents(localdb.ComponentFilter{}, 0, 20)
|
filter := repository.ComponentFilter{}
|
||||||
|
result, err := h.componentService.List(filter, 1, 20)
|
||||||
|
|
||||||
data := gin.H{
|
data := gin.H{
|
||||||
"ActivePage": "configurator",
|
"ActivePage": "configurator",
|
||||||
"Categories": categories,
|
"Categories": categories,
|
||||||
"Components": []localComponentView{},
|
"Components": []interface{}{},
|
||||||
"Total": int64(0),
|
"Total": int64(0),
|
||||||
"Page": 1,
|
"Page": 1,
|
||||||
"PerPage": 20,
|
"PerPage": 20,
|
||||||
"ConfigUUID": uuid,
|
"ConfigUUID": uuid,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err == nil {
|
if err == nil && result != nil {
|
||||||
data["Components"] = toLocalComponentViews(components)
|
data["Components"] = result.Components
|
||||||
data["Total"] = total
|
data["Total"] = result.Total
|
||||||
|
data["Page"] = result.Page
|
||||||
|
data["PerPage"] = result.PerPage
|
||||||
}
|
}
|
||||||
|
|
||||||
h.render(c, "index.html", data)
|
h.render(c, "index.html", data)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (h *WebHandler) Login(c *gin.Context) {
|
||||||
|
h.render(c, "login.html", nil)
|
||||||
|
}
|
||||||
|
|
||||||
func (h *WebHandler) Configs(c *gin.Context) {
|
func (h *WebHandler) Configs(c *gin.Context) {
|
||||||
h.render(c, "configs.html", gin.H{"ActivePage": "configs"})
|
h.render(c, "configs.html", gin.H{"ActivePage": "configs"})
|
||||||
}
|
}
|
||||||
@@ -171,11 +197,8 @@ func (h *WebHandler) ProjectDetail(c *gin.Context) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *WebHandler) ConfigRevisions(c *gin.Context) {
|
func (h *WebHandler) AdminPricing(c *gin.Context) {
|
||||||
h.render(c, "config_revisions.html", gin.H{
|
h.render(c, "admin_pricing.html", gin.H{"ActivePage": "admin"})
|
||||||
"ActivePage": "configs",
|
|
||||||
"ConfigUUID": c.Param("uuid"),
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *WebHandler) Pricelists(c *gin.Context) {
|
func (h *WebHandler) Pricelists(c *gin.Context) {
|
||||||
@@ -186,38 +209,29 @@ func (h *WebHandler) PricelistDetail(c *gin.Context) {
|
|||||||
h.render(c, "pricelist_detail.html", gin.H{"ActivePage": "pricelists"})
|
h.render(c, "pricelist_detail.html", gin.H{"ActivePage": "pricelists"})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *WebHandler) PartnumberBooks(c *gin.Context) {
|
|
||||||
h.render(c, "partnumber_books.html", gin.H{"ActivePage": "partnumber-books"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Partials for htmx
|
// Partials for htmx
|
||||||
|
|
||||||
func (h *WebHandler) ComponentsPartial(c *gin.Context) {
|
func (h *WebHandler) ComponentsPartial(c *gin.Context) {
|
||||||
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
||||||
if page < 1 {
|
|
||||||
page = 1
|
|
||||||
}
|
|
||||||
|
|
||||||
filter := localdb.ComponentFilter{
|
filter := repository.ComponentFilter{
|
||||||
Category: c.Query("category"),
|
Category: c.Query("category"),
|
||||||
Search: c.Query("search"),
|
Search: c.Query("search"),
|
||||||
}
|
}
|
||||||
if c.Query("has_price") == "true" {
|
|
||||||
filter.HasPrice = true
|
|
||||||
}
|
|
||||||
offset := (page - 1) * 20
|
|
||||||
|
|
||||||
data := gin.H{
|
data := gin.H{
|
||||||
"Components": []localComponentView{},
|
"Components": []interface{}{},
|
||||||
"Total": int64(0),
|
"Total": int64(0),
|
||||||
"Page": page,
|
"Page": page,
|
||||||
"PerPage": 20,
|
"PerPage": 20,
|
||||||
}
|
}
|
||||||
|
|
||||||
components, total, err := h.localDB.ListComponents(filter, offset, 20)
|
result, err := h.componentService.List(filter, page, 20)
|
||||||
if err == nil {
|
if err == nil && result != nil {
|
||||||
data["Components"] = toLocalComponentViews(components)
|
data["Components"] = result.Components
|
||||||
data["Total"] = total
|
data["Total"] = result.Total
|
||||||
|
data["Page"] = result.Page
|
||||||
|
data["PerPage"] = result.PerPage
|
||||||
}
|
}
|
||||||
|
|
||||||
c.Header("Content-Type", "text/html; charset=utf-8")
|
c.Header("Content-Type", "text/html; charset=utf-8")
|
||||||
@@ -225,46 +239,3 @@ func (h *WebHandler) ComponentsPartial(c *gin.Context) {
|
|||||||
tmpl.ExecuteTemplate(c.Writer, "components_list.html", data)
|
tmpl.ExecuteTemplate(c.Writer, "components_list.html", data)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type localComponentView struct {
|
|
||||||
LotName string
|
|
||||||
Description string
|
|
||||||
Category string
|
|
||||||
CategoryName string
|
|
||||||
Model string
|
|
||||||
CurrentPrice *float64
|
|
||||||
}
|
|
||||||
|
|
||||||
func toLocalComponentViews(items []localdb.LocalComponent) []localComponentView {
|
|
||||||
result := make([]localComponentView, 0, len(items))
|
|
||||||
for _, item := range items {
|
|
||||||
result = append(result, localComponentView{
|
|
||||||
LotName: item.LotName,
|
|
||||||
Description: item.LotDescription,
|
|
||||||
Category: item.Category,
|
|
||||||
CategoryName: item.Category,
|
|
||||||
Model: item.Model,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *WebHandler) localCategories() ([]models.Category, error) {
|
|
||||||
codes, err := h.localDB.GetLocalComponentCategories()
|
|
||||||
if err != nil || len(codes) == 0 {
|
|
||||||
return []models.Category{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
categories := make([]models.Category, 0, len(codes))
|
|
||||||
for _, code := range codes {
|
|
||||||
trimmed := strings.TrimSpace(code)
|
|
||||||
if trimmed == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
categories = append(categories, models.Category{
|
|
||||||
Code: trimmed,
|
|
||||||
Name: trimmed,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return categories, nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,47 +0,0 @@
|
|||||||
package handlers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"html/template"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httptest"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestWebHandlerRenderHidesTemplateExecutionError(t *testing.T) {
|
|
||||||
gin.SetMode(gin.TestMode)
|
|
||||||
|
|
||||||
tmpl := template.Must(template.New("broken.html").Funcs(template.FuncMap{
|
|
||||||
"boom": func() (string, error) {
|
|
||||||
return "", errors.New("secret template failure")
|
|
||||||
},
|
|
||||||
}).Parse(`{{define "broken.html"}}{{boom}}{{end}}`))
|
|
||||||
|
|
||||||
handler := &WebHandler{
|
|
||||||
templates: map[string]*template.Template{
|
|
||||||
"broken.html": tmpl,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
rec := httptest.NewRecorder()
|
|
||||||
ctx, _ := gin.CreateTestContext(rec)
|
|
||||||
ctx.Request = httptest.NewRequest(http.MethodGet, "/broken", nil)
|
|
||||||
|
|
||||||
handler.render(ctx, "broken.html", gin.H{})
|
|
||||||
|
|
||||||
if rec.Code != http.StatusInternalServerError {
|
|
||||||
t.Fatalf("expected 500, got %d", rec.Code)
|
|
||||||
}
|
|
||||||
if body := strings.TrimSpace(rec.Body.String()); body != "Template error" {
|
|
||||||
t.Fatalf("expected generic template error, got %q", body)
|
|
||||||
}
|
|
||||||
if len(ctx.Errors) != 1 {
|
|
||||||
t.Fatalf("expected logged template error, got %d", len(ctx.Errors))
|
|
||||||
}
|
|
||||||
if !strings.Contains(ctx.Errors.String(), "secret template failure") {
|
|
||||||
t.Fatalf("expected original error in gin context, got %q", ctx.Errors.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -28,42 +28,29 @@ type ComponentSyncResult struct {
|
|||||||
func (l *LocalDB) SyncComponents(mariaDB *gorm.DB) (*ComponentSyncResult, error) {
|
func (l *LocalDB) SyncComponents(mariaDB *gorm.DB) (*ComponentSyncResult, error) {
|
||||||
startTime := time.Now()
|
startTime := time.Now()
|
||||||
|
|
||||||
// Build the component catalog from every runtime source of LOT names.
|
// Query to join lot with qt_lot_metadata
|
||||||
// Storage lots may exist in qt_lot_metadata / qt_pricelist_items before they appear in lot,
|
// Use LEFT JOIN to include lots without metadata
|
||||||
// so the sync cannot start from lot alone.
|
|
||||||
type componentRow struct {
|
type componentRow struct {
|
||||||
LotName string
|
LotName string
|
||||||
LotDescription string
|
LotDescription string
|
||||||
Category *string
|
Category *string
|
||||||
Model *string
|
Model *string
|
||||||
|
CurrentPrice *float64
|
||||||
}
|
}
|
||||||
|
|
||||||
var rows []componentRow
|
var rows []componentRow
|
||||||
err := mariaDB.Raw(`
|
err := mariaDB.Raw(`
|
||||||
SELECT
|
SELECT
|
||||||
src.lot_name,
|
l.lot_name,
|
||||||
COALESCE(MAX(NULLIF(TRIM(l.lot_description), '')), '') AS lot_description,
|
l.lot_description,
|
||||||
COALESCE(
|
COALESCE(c.code, SUBSTRING_INDEX(l.lot_name, '_', 1)) as category,
|
||||||
MAX(NULLIF(TRIM(c.code), '')),
|
m.model,
|
||||||
MAX(NULLIF(TRIM(l.lot_category), '')),
|
m.current_price
|
||||||
SUBSTRING_INDEX(src.lot_name, '_', 1)
|
FROM lot l
|
||||||
) AS category,
|
LEFT JOIN qt_lot_metadata m ON l.lot_name = m.lot_name
|
||||||
MAX(NULLIF(TRIM(m.model), '')) AS model
|
|
||||||
FROM (
|
|
||||||
SELECT lot_name FROM lot
|
|
||||||
UNION
|
|
||||||
SELECT lot_name FROM qt_lot_metadata
|
|
||||||
WHERE is_hidden = FALSE OR is_hidden IS NULL
|
|
||||||
UNION
|
|
||||||
SELECT lot_name FROM qt_pricelist_items
|
|
||||||
) src
|
|
||||||
LEFT JOIN lot l ON l.lot_name = src.lot_name
|
|
||||||
LEFT JOIN qt_lot_metadata m
|
|
||||||
ON m.lot_name = src.lot_name
|
|
||||||
AND (m.is_hidden = FALSE OR m.is_hidden IS NULL)
|
|
||||||
LEFT JOIN qt_categories c ON m.category_id = c.id
|
LEFT JOIN qt_categories c ON m.category_id = c.id
|
||||||
GROUP BY src.lot_name
|
WHERE m.is_hidden = FALSE OR m.is_hidden IS NULL
|
||||||
ORDER BY src.lot_name
|
ORDER BY l.lot_name
|
||||||
`).Scan(&rows).Error
|
`).Scan(&rows).Error
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("querying components from MariaDB: %w", err)
|
return nil, fmt.Errorf("querying components from MariaDB: %w", err)
|
||||||
@@ -86,25 +73,18 @@ func (l *LocalDB) SyncComponents(mariaDB *gorm.DB) (*ComponentSyncResult, error)
|
|||||||
existingMap[c.LotName] = true
|
existingMap[c.LotName] = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// Prepare components for batch insert/update.
|
// Prepare components for batch insert/update
|
||||||
// Source joins may duplicate the same lot_name, so collapse them before insert.
|
|
||||||
syncTime := time.Now()
|
syncTime := time.Now()
|
||||||
components := make([]LocalComponent, 0, len(rows))
|
components := make([]LocalComponent, 0, len(rows))
|
||||||
componentIndex := make(map[string]int, len(rows))
|
|
||||||
newCount := 0
|
newCount := 0
|
||||||
|
|
||||||
for _, row := range rows {
|
for _, row := range rows {
|
||||||
lotName := strings.TrimSpace(row.LotName)
|
|
||||||
if lotName == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
category := ""
|
category := ""
|
||||||
if row.Category != nil {
|
if row.Category != nil {
|
||||||
category = strings.TrimSpace(*row.Category)
|
category = *row.Category
|
||||||
} else {
|
} else {
|
||||||
// Parse category from lot_name (e.g., "CPU_AMD_9654" -> "CPU")
|
// Parse category from lot_name (e.g., "CPU_AMD_9654" -> "CPU")
|
||||||
parts := strings.SplitN(lotName, "_", 2)
|
parts := strings.SplitN(row.LotName, "_", 2)
|
||||||
if len(parts) >= 1 {
|
if len(parts) >= 1 {
|
||||||
category = parts[0]
|
category = parts[0]
|
||||||
}
|
}
|
||||||
@@ -112,34 +92,20 @@ func (l *LocalDB) SyncComponents(mariaDB *gorm.DB) (*ComponentSyncResult, error)
|
|||||||
|
|
||||||
model := ""
|
model := ""
|
||||||
if row.Model != nil {
|
if row.Model != nil {
|
||||||
model = strings.TrimSpace(*row.Model)
|
model = *row.Model
|
||||||
}
|
}
|
||||||
|
|
||||||
comp := LocalComponent{
|
comp := LocalComponent{
|
||||||
LotName: lotName,
|
LotName: row.LotName,
|
||||||
LotDescription: strings.TrimSpace(row.LotDescription),
|
LotDescription: row.LotDescription,
|
||||||
Category: category,
|
Category: category,
|
||||||
Model: model,
|
Model: model,
|
||||||
|
CurrentPrice: row.CurrentPrice,
|
||||||
|
SyncedAt: syncTime,
|
||||||
}
|
}
|
||||||
|
|
||||||
if idx, exists := componentIndex[lotName]; exists {
|
|
||||||
// Keep the first row, but fill any missing metadata from duplicates.
|
|
||||||
if components[idx].LotDescription == "" && comp.LotDescription != "" {
|
|
||||||
components[idx].LotDescription = comp.LotDescription
|
|
||||||
}
|
|
||||||
if components[idx].Category == "" && comp.Category != "" {
|
|
||||||
components[idx].Category = comp.Category
|
|
||||||
}
|
|
||||||
if components[idx].Model == "" && comp.Model != "" {
|
|
||||||
components[idx].Model = comp.Model
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
componentIndex[lotName] = len(components)
|
|
||||||
components = append(components, comp)
|
components = append(components, comp)
|
||||||
|
|
||||||
if !existingMap[lotName] {
|
if !existingMap[row.LotName] {
|
||||||
newCount++
|
newCount++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -255,6 +221,11 @@ func (l *LocalDB) ListComponents(filter ComponentFilter, offset, limit int) ([]L
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Apply price filter
|
||||||
|
if filter.HasPrice {
|
||||||
|
db = db.Where("current_price IS NOT NULL")
|
||||||
|
}
|
||||||
|
|
||||||
// Get total count
|
// Get total count
|
||||||
var total int64
|
var total int64
|
||||||
if err := db.Model(&LocalComponent{}).Count(&total).Error; err != nil {
|
if err := db.Model(&LocalComponent{}).Count(&total).Error; err != nil {
|
||||||
@@ -280,31 +251,6 @@ func (l *LocalDB) GetLocalComponent(lotName string) (*LocalComponent, error) {
|
|||||||
return &component, nil
|
return &component, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetLocalComponentCategoriesByLotNames returns category for each lot_name in the local component cache.
|
|
||||||
// Missing lots are not included in the map; caller is responsible for strict validation.
|
|
||||||
func (l *LocalDB) GetLocalComponentCategoriesByLotNames(lotNames []string) (map[string]string, error) {
|
|
||||||
result := make(map[string]string, len(lotNames))
|
|
||||||
if len(lotNames) == 0 {
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type row struct {
|
|
||||||
LotName string `gorm:"column:lot_name"`
|
|
||||||
Category string `gorm:"column:category"`
|
|
||||||
}
|
|
||||||
var rows []row
|
|
||||||
if err := l.db.Model(&LocalComponent{}).
|
|
||||||
Select("lot_name, category").
|
|
||||||
Where("lot_name IN ?", lotNames).
|
|
||||||
Find(&rows).Error; err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, r := range rows {
|
|
||||||
result[r.LotName] = r.Category
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLocalComponentCategories returns distinct categories from local components
|
// GetLocalComponentCategories returns distinct categories from local components
|
||||||
func (l *LocalDB) GetLocalComponentCategories() ([]string, error) {
|
func (l *LocalDB) GetLocalComponentCategories() ([]string, error) {
|
||||||
var categories []string
|
var categories []string
|
||||||
@@ -365,3 +311,99 @@ func (l *LocalDB) NeedComponentSync(maxAgeHours int) bool {
|
|||||||
}
|
}
|
||||||
return time.Since(*syncTime).Hours() > float64(maxAgeHours)
|
return time.Since(*syncTime).Hours() > float64(maxAgeHours)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UpdateComponentPricesFromPricelist updates current_price in local_components from pricelist items
|
||||||
|
// This allows offline price updates using synced pricelists without MariaDB connection
|
||||||
|
func (l *LocalDB) UpdateComponentPricesFromPricelist(pricelistID uint) (int, error) {
|
||||||
|
// Get all items from the specified pricelist
|
||||||
|
var items []LocalPricelistItem
|
||||||
|
if err := l.db.Where("pricelist_id = ?", pricelistID).Find(&items).Error; err != nil {
|
||||||
|
return 0, fmt.Errorf("fetching pricelist items: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(items) == 0 {
|
||||||
|
slog.Warn("no items found in pricelist", "pricelist_id", pricelistID)
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update current_price for each component
|
||||||
|
updated := 0
|
||||||
|
err := l.db.Transaction(func(tx *gorm.DB) error {
|
||||||
|
for _, item := range items {
|
||||||
|
result := tx.Model(&LocalComponent{}).
|
||||||
|
Where("lot_name = ?", item.LotName).
|
||||||
|
Update("current_price", item.Price)
|
||||||
|
|
||||||
|
if result.Error != nil {
|
||||||
|
return fmt.Errorf("updating price for %s: %w", item.LotName, result.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
if result.RowsAffected > 0 {
|
||||||
|
updated++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("updated component prices from pricelist",
|
||||||
|
"pricelist_id", pricelistID,
|
||||||
|
"total_items", len(items),
|
||||||
|
"updated_components", updated)
|
||||||
|
|
||||||
|
return updated, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnsureComponentPricesFromPricelists loads prices from the latest pricelist into local_components
|
||||||
|
// if no components exist or all current prices are NULL
|
||||||
|
func (l *LocalDB) EnsureComponentPricesFromPricelists() error {
|
||||||
|
// Check if we have any components with prices
|
||||||
|
var count int64
|
||||||
|
if err := l.db.Model(&LocalComponent{}).Where("current_price IS NOT NULL").Count(&count).Error; err != nil {
|
||||||
|
return fmt.Errorf("checking component prices: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have components with prices, don't load from pricelists
|
||||||
|
if count > 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we have any components at all
|
||||||
|
var totalComponents int64
|
||||||
|
if err := l.db.Model(&LocalComponent{}).Count(&totalComponents).Error; err != nil {
|
||||||
|
return fmt.Errorf("counting components: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have no components, we need to load them from pricelists
|
||||||
|
if totalComponents == 0 {
|
||||||
|
slog.Info("no components found in local database, loading from latest pricelist")
|
||||||
|
// This would typically be called from the sync service or setup process
|
||||||
|
// For now, we'll just return nil to indicate no action needed
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have components but no prices, load from latest estimate pricelist.
|
||||||
|
var latestPricelist LocalPricelist
|
||||||
|
if err := l.db.Where("source = ?", "estimate").Order("created_at DESC").First(&latestPricelist).Error; err != nil {
|
||||||
|
if err == gorm.ErrRecordNotFound {
|
||||||
|
slog.Warn("no pricelists found in local database")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("finding latest pricelist: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update prices from the latest pricelist
|
||||||
|
updated, err := l.UpdateComponentPricesFromPricelist(latestPricelist.ID)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("updating component prices from pricelist: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("loaded component prices from latest pricelist",
|
||||||
|
"pricelist_id", latestPricelist.ID,
|
||||||
|
"updated_components", updated)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,154 +0,0 @@
|
|||||||
package localdb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestConfigurationConvertersPreserveBusinessFields(t *testing.T) {
|
|
||||||
estimateID := uint(11)
|
|
||||||
warehouseID := uint(22)
|
|
||||||
competitorID := uint(33)
|
|
||||||
|
|
||||||
cfg := &models.Configuration{
|
|
||||||
UUID: "cfg-1",
|
|
||||||
OwnerUsername: "tester",
|
|
||||||
Name: "Config",
|
|
||||||
PricelistID: &estimateID,
|
|
||||||
WarehousePricelistID: &warehouseID,
|
|
||||||
CompetitorPricelistID: &competitorID,
|
|
||||||
DisablePriceRefresh: true,
|
|
||||||
OnlyInStock: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
local := ConfigurationToLocal(cfg)
|
|
||||||
if local.WarehousePricelistID == nil || *local.WarehousePricelistID != warehouseID {
|
|
||||||
t.Fatalf("warehouse pricelist lost in ConfigurationToLocal: %+v", local.WarehousePricelistID)
|
|
||||||
}
|
|
||||||
if local.CompetitorPricelistID == nil || *local.CompetitorPricelistID != competitorID {
|
|
||||||
t.Fatalf("competitor pricelist lost in ConfigurationToLocal: %+v", local.CompetitorPricelistID)
|
|
||||||
}
|
|
||||||
if !local.DisablePriceRefresh {
|
|
||||||
t.Fatalf("disable_price_refresh lost in ConfigurationToLocal")
|
|
||||||
}
|
|
||||||
|
|
||||||
back := LocalToConfiguration(local)
|
|
||||||
if back.WarehousePricelistID == nil || *back.WarehousePricelistID != warehouseID {
|
|
||||||
t.Fatalf("warehouse pricelist lost in LocalToConfiguration: %+v", back.WarehousePricelistID)
|
|
||||||
}
|
|
||||||
if back.CompetitorPricelistID == nil || *back.CompetitorPricelistID != competitorID {
|
|
||||||
t.Fatalf("competitor pricelist lost in LocalToConfiguration: %+v", back.CompetitorPricelistID)
|
|
||||||
}
|
|
||||||
if !back.DisablePriceRefresh {
|
|
||||||
t.Fatalf("disable_price_refresh lost in LocalToConfiguration")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigurationSnapshotPreservesBusinessFields(t *testing.T) {
|
|
||||||
estimateID := uint(11)
|
|
||||||
warehouseID := uint(22)
|
|
||||||
competitorID := uint(33)
|
|
||||||
|
|
||||||
cfg := &LocalConfiguration{
|
|
||||||
UUID: "cfg-1",
|
|
||||||
Name: "Config",
|
|
||||||
PricelistID: &estimateID,
|
|
||||||
WarehousePricelistID: &warehouseID,
|
|
||||||
CompetitorPricelistID: &competitorID,
|
|
||||||
DisablePriceRefresh: true,
|
|
||||||
OnlyInStock: true,
|
|
||||||
VendorSpec: VendorSpec{
|
|
||||||
{
|
|
||||||
SortOrder: 10,
|
|
||||||
VendorPartnumber: "PN-1",
|
|
||||||
Quantity: 1,
|
|
||||||
LotMappings: []VendorSpecLotMapping{
|
|
||||||
{LotName: "LOT_A", QuantityPerPN: 2},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
raw, err := BuildConfigurationSnapshot(cfg)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("BuildConfigurationSnapshot: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
decoded, err := DecodeConfigurationSnapshot(raw)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("DecodeConfigurationSnapshot: %v", err)
|
|
||||||
}
|
|
||||||
if decoded.WarehousePricelistID == nil || *decoded.WarehousePricelistID != warehouseID {
|
|
||||||
t.Fatalf("warehouse pricelist lost in snapshot: %+v", decoded.WarehousePricelistID)
|
|
||||||
}
|
|
||||||
if decoded.CompetitorPricelistID == nil || *decoded.CompetitorPricelistID != competitorID {
|
|
||||||
t.Fatalf("competitor pricelist lost in snapshot: %+v", decoded.CompetitorPricelistID)
|
|
||||||
}
|
|
||||||
if !decoded.DisablePriceRefresh {
|
|
||||||
t.Fatalf("disable_price_refresh lost in snapshot")
|
|
||||||
}
|
|
||||||
if len(decoded.VendorSpec) != 1 || decoded.VendorSpec[0].VendorPartnumber != "PN-1" {
|
|
||||||
t.Fatalf("vendor_spec lost in snapshot: %+v", decoded.VendorSpec)
|
|
||||||
}
|
|
||||||
if len(decoded.VendorSpec[0].LotMappings) != 1 || decoded.VendorSpec[0].LotMappings[0].LotName != "LOT_A" {
|
|
||||||
t.Fatalf("lot mappings lost in snapshot: %+v", decoded.VendorSpec)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestConfigurationFingerprintIncludesPricingSelectorsAndVendorSpec(t *testing.T) {
|
|
||||||
estimateID := uint(11)
|
|
||||||
warehouseID := uint(22)
|
|
||||||
competitorID := uint(33)
|
|
||||||
|
|
||||||
base := &LocalConfiguration{
|
|
||||||
UUID: "cfg-1",
|
|
||||||
Name: "Config",
|
|
||||||
ServerCount: 1,
|
|
||||||
Items: LocalConfigItems{{LotName: "LOT_A", Quantity: 1, UnitPrice: 100}},
|
|
||||||
PricelistID: &estimateID,
|
|
||||||
WarehousePricelistID: &warehouseID,
|
|
||||||
CompetitorPricelistID: &competitorID,
|
|
||||||
DisablePriceRefresh: true,
|
|
||||||
OnlyInStock: true,
|
|
||||||
VendorSpec: VendorSpec{
|
|
||||||
{
|
|
||||||
SortOrder: 10,
|
|
||||||
VendorPartnumber: "PN-1",
|
|
||||||
Quantity: 1,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
baseFingerprint, err := BuildConfigurationSpecPriceFingerprint(base)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("base fingerprint: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
changedPricelist := *base
|
|
||||||
newEstimateID := uint(44)
|
|
||||||
changedPricelist.PricelistID = &newEstimateID
|
|
||||||
pricelistFingerprint, err := BuildConfigurationSpecPriceFingerprint(&changedPricelist)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("pricelist fingerprint: %v", err)
|
|
||||||
}
|
|
||||||
if pricelistFingerprint == baseFingerprint {
|
|
||||||
t.Fatalf("expected pricelist selector to affect fingerprint")
|
|
||||||
}
|
|
||||||
|
|
||||||
changedVendorSpec := *base
|
|
||||||
changedVendorSpec.VendorSpec = VendorSpec{
|
|
||||||
{
|
|
||||||
SortOrder: 10,
|
|
||||||
VendorPartnumber: "PN-2",
|
|
||||||
Quantity: 1,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
vendorFingerprint, err := BuildConfigurationSpecPriceFingerprint(&changedVendorSpec)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("vendor fingerprint: %v", err)
|
|
||||||
}
|
|
||||||
if vendorFingerprint == baseFingerprint {
|
|
||||||
t.Fatalf("expected vendor spec to affect fingerprint")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -18,33 +18,27 @@ func ConfigurationToLocal(cfg *models.Configuration) *LocalConfiguration {
|
|||||||
}
|
}
|
||||||
|
|
||||||
local := &LocalConfiguration{
|
local := &LocalConfiguration{
|
||||||
UUID: cfg.UUID,
|
UUID: cfg.UUID,
|
||||||
ProjectUUID: cfg.ProjectUUID,
|
ProjectUUID: cfg.ProjectUUID,
|
||||||
IsActive: true,
|
IsActive: true,
|
||||||
Name: cfg.Name,
|
Name: cfg.Name,
|
||||||
Items: items,
|
Items: items,
|
||||||
TotalPrice: cfg.TotalPrice,
|
TotalPrice: cfg.TotalPrice,
|
||||||
CustomPrice: cfg.CustomPrice,
|
CustomPrice: cfg.CustomPrice,
|
||||||
Notes: cfg.Notes,
|
Notes: cfg.Notes,
|
||||||
IsTemplate: cfg.IsTemplate,
|
IsTemplate: cfg.IsTemplate,
|
||||||
ServerCount: cfg.ServerCount,
|
ServerCount: cfg.ServerCount,
|
||||||
ServerModel: cfg.ServerModel,
|
PricelistID: cfg.PricelistID,
|
||||||
SupportCode: cfg.SupportCode,
|
PriceUpdatedAt: cfg.PriceUpdatedAt,
|
||||||
Article: cfg.Article,
|
CreatedAt: cfg.CreatedAt,
|
||||||
PricelistID: cfg.PricelistID,
|
UpdatedAt: time.Now(),
|
||||||
WarehousePricelistID: cfg.WarehousePricelistID,
|
SyncStatus: "pending",
|
||||||
CompetitorPricelistID: cfg.CompetitorPricelistID,
|
OriginalUserID: derefUint(cfg.UserID),
|
||||||
ConfigType: cfg.ConfigType,
|
OriginalUsername: cfg.OwnerUsername,
|
||||||
VendorSpec: modelVendorSpecToLocal(cfg.VendorSpec),
|
}
|
||||||
DisablePriceRefresh: cfg.DisablePriceRefresh,
|
|
||||||
OnlyInStock: cfg.OnlyInStock,
|
if local.OriginalUsername == "" && cfg.User != nil {
|
||||||
Line: cfg.Line,
|
local.OriginalUsername = cfg.User.Username
|
||||||
PriceUpdatedAt: cfg.PriceUpdatedAt,
|
|
||||||
CreatedAt: cfg.CreatedAt,
|
|
||||||
UpdatedAt: time.Now(),
|
|
||||||
SyncStatus: "pending",
|
|
||||||
OriginalUserID: derefUint(cfg.UserID),
|
|
||||||
OriginalUsername: cfg.OwnerUsername,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if cfg.ID > 0 {
|
if cfg.ID > 0 {
|
||||||
@@ -67,29 +61,19 @@ func LocalToConfiguration(local *LocalConfiguration) *models.Configuration {
|
|||||||
}
|
}
|
||||||
|
|
||||||
cfg := &models.Configuration{
|
cfg := &models.Configuration{
|
||||||
UUID: local.UUID,
|
UUID: local.UUID,
|
||||||
OwnerUsername: local.OriginalUsername,
|
OwnerUsername: local.OriginalUsername,
|
||||||
ProjectUUID: local.ProjectUUID,
|
ProjectUUID: local.ProjectUUID,
|
||||||
Name: local.Name,
|
Name: local.Name,
|
||||||
Items: items,
|
Items: items,
|
||||||
TotalPrice: local.TotalPrice,
|
TotalPrice: local.TotalPrice,
|
||||||
CustomPrice: local.CustomPrice,
|
CustomPrice: local.CustomPrice,
|
||||||
Notes: local.Notes,
|
Notes: local.Notes,
|
||||||
IsTemplate: local.IsTemplate,
|
IsTemplate: local.IsTemplate,
|
||||||
ServerCount: local.ServerCount,
|
ServerCount: local.ServerCount,
|
||||||
ServerModel: local.ServerModel,
|
PricelistID: local.PricelistID,
|
||||||
SupportCode: local.SupportCode,
|
PriceUpdatedAt: local.PriceUpdatedAt,
|
||||||
Article: local.Article,
|
CreatedAt: local.CreatedAt,
|
||||||
PricelistID: local.PricelistID,
|
|
||||||
WarehousePricelistID: local.WarehousePricelistID,
|
|
||||||
CompetitorPricelistID: local.CompetitorPricelistID,
|
|
||||||
ConfigType: local.ConfigType,
|
|
||||||
VendorSpec: localVendorSpecToModel(local.VendorSpec),
|
|
||||||
DisablePriceRefresh: local.DisablePriceRefresh,
|
|
||||||
OnlyInStock: local.OnlyInStock,
|
|
||||||
Line: local.Line,
|
|
||||||
PriceUpdatedAt: local.PriceUpdatedAt,
|
|
||||||
CreatedAt: local.CreatedAt,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if local.ServerID != nil {
|
if local.ServerID != nil {
|
||||||
@@ -99,9 +83,6 @@ func LocalToConfiguration(local *LocalConfiguration) *models.Configuration {
|
|||||||
userID := local.OriginalUserID
|
userID := local.OriginalUserID
|
||||||
cfg.UserID = &userID
|
cfg.UserID = &userID
|
||||||
}
|
}
|
||||||
if local.CurrentVersion != nil {
|
|
||||||
cfg.CurrentVersionNo = local.CurrentVersion.VersionNo
|
|
||||||
}
|
|
||||||
|
|
||||||
return cfg
|
return cfg
|
||||||
}
|
}
|
||||||
@@ -113,94 +94,10 @@ func derefUint(v *uint) uint {
|
|||||||
return *v
|
return *v
|
||||||
}
|
}
|
||||||
|
|
||||||
func modelVendorSpecToLocal(spec models.VendorSpec) VendorSpec {
|
|
||||||
if len(spec) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := make(VendorSpec, 0, len(spec))
|
|
||||||
for _, item := range spec {
|
|
||||||
row := VendorSpecItem{
|
|
||||||
SortOrder: item.SortOrder,
|
|
||||||
VendorPartnumber: item.VendorPartnumber,
|
|
||||||
Quantity: item.Quantity,
|
|
||||||
Description: item.Description,
|
|
||||||
UnitPrice: item.UnitPrice,
|
|
||||||
TotalPrice: item.TotalPrice,
|
|
||||||
ResolvedLotName: item.ResolvedLotName,
|
|
||||||
ResolutionSource: item.ResolutionSource,
|
|
||||||
ManualLotSuggestion: item.ManualLotSuggestion,
|
|
||||||
LotQtyPerPN: item.LotQtyPerPN,
|
|
||||||
}
|
|
||||||
if len(item.LotAllocations) > 0 {
|
|
||||||
row.LotAllocations = make([]VendorSpecLotAllocation, 0, len(item.LotAllocations))
|
|
||||||
for _, alloc := range item.LotAllocations {
|
|
||||||
row.LotAllocations = append(row.LotAllocations, VendorSpecLotAllocation{
|
|
||||||
LotName: alloc.LotName,
|
|
||||||
Quantity: alloc.Quantity,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(item.LotMappings) > 0 {
|
|
||||||
row.LotMappings = make([]VendorSpecLotMapping, 0, len(item.LotMappings))
|
|
||||||
for _, mapping := range item.LotMappings {
|
|
||||||
row.LotMappings = append(row.LotMappings, VendorSpecLotMapping{
|
|
||||||
LotName: mapping.LotName,
|
|
||||||
QuantityPerPN: mapping.QuantityPerPN,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
out = append(out, row)
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func localVendorSpecToModel(spec VendorSpec) models.VendorSpec {
|
|
||||||
if len(spec) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := make(models.VendorSpec, 0, len(spec))
|
|
||||||
for _, item := range spec {
|
|
||||||
row := models.VendorSpecItem{
|
|
||||||
SortOrder: item.SortOrder,
|
|
||||||
VendorPartnumber: item.VendorPartnumber,
|
|
||||||
Quantity: item.Quantity,
|
|
||||||
Description: item.Description,
|
|
||||||
UnitPrice: item.UnitPrice,
|
|
||||||
TotalPrice: item.TotalPrice,
|
|
||||||
ResolvedLotName: item.ResolvedLotName,
|
|
||||||
ResolutionSource: item.ResolutionSource,
|
|
||||||
ManualLotSuggestion: item.ManualLotSuggestion,
|
|
||||||
LotQtyPerPN: item.LotQtyPerPN,
|
|
||||||
}
|
|
||||||
if len(item.LotAllocations) > 0 {
|
|
||||||
row.LotAllocations = make([]models.VendorSpecLotAllocation, 0, len(item.LotAllocations))
|
|
||||||
for _, alloc := range item.LotAllocations {
|
|
||||||
row.LotAllocations = append(row.LotAllocations, models.VendorSpecLotAllocation{
|
|
||||||
LotName: alloc.LotName,
|
|
||||||
Quantity: alloc.Quantity,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(item.LotMappings) > 0 {
|
|
||||||
row.LotMappings = make([]models.VendorSpecLotMapping, 0, len(item.LotMappings))
|
|
||||||
for _, mapping := range item.LotMappings {
|
|
||||||
row.LotMappings = append(row.LotMappings, models.VendorSpecLotMapping{
|
|
||||||
LotName: mapping.LotName,
|
|
||||||
QuantityPerPN: mapping.QuantityPerPN,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
out = append(out, row)
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func ProjectToLocal(project *models.Project) *LocalProject {
|
func ProjectToLocal(project *models.Project) *LocalProject {
|
||||||
local := &LocalProject{
|
local := &LocalProject{
|
||||||
UUID: project.UUID,
|
UUID: project.UUID,
|
||||||
OwnerUsername: project.OwnerUsername,
|
OwnerUsername: project.OwnerUsername,
|
||||||
Code: project.Code,
|
|
||||||
Variant: project.Variant,
|
|
||||||
Name: project.Name,
|
Name: project.Name,
|
||||||
TrackerURL: project.TrackerURL,
|
TrackerURL: project.TrackerURL,
|
||||||
IsActive: project.IsActive,
|
IsActive: project.IsActive,
|
||||||
@@ -220,8 +117,6 @@ func LocalToProject(local *LocalProject) *models.Project {
|
|||||||
project := &models.Project{
|
project := &models.Project{
|
||||||
UUID: local.UUID,
|
UUID: local.UUID,
|
||||||
OwnerUsername: local.OwnerUsername,
|
OwnerUsername: local.OwnerUsername,
|
||||||
Code: local.Code,
|
|
||||||
Variant: local.Variant,
|
|
||||||
Name: local.Name,
|
Name: local.Name,
|
||||||
TrackerURL: local.TrackerURL,
|
TrackerURL: local.TrackerURL,
|
||||||
IsActive: local.IsActive,
|
IsActive: local.IsActive,
|
||||||
@@ -267,30 +162,20 @@ func LocalToPricelist(local *LocalPricelist) *models.Pricelist {
|
|||||||
|
|
||||||
// PricelistItemToLocal converts models.PricelistItem to LocalPricelistItem
|
// PricelistItemToLocal converts models.PricelistItem to LocalPricelistItem
|
||||||
func PricelistItemToLocal(item *models.PricelistItem, localPricelistID uint) *LocalPricelistItem {
|
func PricelistItemToLocal(item *models.PricelistItem, localPricelistID uint) *LocalPricelistItem {
|
||||||
partnumbers := make(LocalStringList, 0, len(item.Partnumbers))
|
|
||||||
partnumbers = append(partnumbers, item.Partnumbers...)
|
|
||||||
return &LocalPricelistItem{
|
return &LocalPricelistItem{
|
||||||
PricelistID: localPricelistID,
|
PricelistID: localPricelistID,
|
||||||
LotName: item.LotName,
|
LotName: item.LotName,
|
||||||
LotCategory: item.LotCategory,
|
Price: item.Price,
|
||||||
Price: item.Price,
|
|
||||||
AvailableQty: item.AvailableQty,
|
|
||||||
Partnumbers: partnumbers,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// LocalToPricelistItem converts LocalPricelistItem to models.PricelistItem
|
// LocalToPricelistItem converts LocalPricelistItem to models.PricelistItem
|
||||||
func LocalToPricelistItem(local *LocalPricelistItem, serverPricelistID uint) *models.PricelistItem {
|
func LocalToPricelistItem(local *LocalPricelistItem, serverPricelistID uint) *models.PricelistItem {
|
||||||
partnumbers := make([]string, 0, len(local.Partnumbers))
|
|
||||||
partnumbers = append(partnumbers, local.Partnumbers...)
|
|
||||||
return &models.PricelistItem{
|
return &models.PricelistItem{
|
||||||
ID: local.ID,
|
ID: local.ID,
|
||||||
PricelistID: serverPricelistID,
|
PricelistID: serverPricelistID,
|
||||||
LotName: local.LotName,
|
LotName: local.LotName,
|
||||||
LotCategory: local.LotCategory,
|
Price: local.Price,
|
||||||
Price: local.Price,
|
|
||||||
AvailableQty: local.AvailableQty,
|
|
||||||
Partnumbers: partnumbers,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -318,14 +203,17 @@ func ComponentToLocal(meta *models.LotMetadata) *LocalComponent {
|
|||||||
LotDescription: lotDesc,
|
LotDescription: lotDesc,
|
||||||
Category: category,
|
Category: category,
|
||||||
Model: meta.Model,
|
Model: meta.Model,
|
||||||
|
CurrentPrice: meta.CurrentPrice,
|
||||||
|
SyncedAt: time.Now(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// LocalToComponent converts LocalComponent to models.LotMetadata
|
// LocalToComponent converts LocalComponent to models.LotMetadata
|
||||||
func LocalToComponent(local *LocalComponent) *models.LotMetadata {
|
func LocalToComponent(local *LocalComponent) *models.LotMetadata {
|
||||||
return &models.LotMetadata{
|
return &models.LotMetadata{
|
||||||
LotName: local.LotName,
|
LotName: local.LotName,
|
||||||
Model: local.Model,
|
Model: local.Model,
|
||||||
|
CurrentPrice: local.CurrentPrice,
|
||||||
Lot: &models.Lot{
|
Lot: &models.Lot{
|
||||||
LotName: local.LotName,
|
LotName: local.LotName,
|
||||||
LotDescription: local.LotDescription,
|
LotDescription: local.LotDescription,
|
||||||
|
|||||||
@@ -1,34 +0,0 @@
|
|||||||
package localdb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestPricelistItemToLocal_PreservesLotCategory(t *testing.T) {
|
|
||||||
item := &models.PricelistItem{
|
|
||||||
LotName: "CPU_A",
|
|
||||||
LotCategory: "CPU",
|
|
||||||
Price: 10,
|
|
||||||
}
|
|
||||||
|
|
||||||
local := PricelistItemToLocal(item, 123)
|
|
||||||
if local.LotCategory != "CPU" {
|
|
||||||
t.Fatalf("expected LotCategory=CPU, got %q", local.LotCategory)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLocalToPricelistItem_PreservesLotCategory(t *testing.T) {
|
|
||||||
local := &LocalPricelistItem{
|
|
||||||
LotName: "CPU_A",
|
|
||||||
LotCategory: "CPU",
|
|
||||||
Price: 10,
|
|
||||||
}
|
|
||||||
|
|
||||||
item := LocalToPricelistItem(local, 456)
|
|
||||||
if item.LotCategory != "CPU" {
|
|
||||||
t.Fatalf("expected LotCategory=CPU, got %q", item.LotCategory)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -7,104 +7,19 @@ import (
|
|||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/appstate"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const encryptionKeyFileName = "local_encryption.key"
|
// getEncryptionKey derives a 32-byte key from environment variable or machine ID
|
||||||
|
func getEncryptionKey() []byte {
|
||||||
// getEncryptionKey resolves the active encryption key.
|
|
||||||
// Preference order:
|
|
||||||
// 1. QUOTEFORGE_ENCRYPTION_KEY env var
|
|
||||||
// 2. application-managed random key file in the user state directory
|
|
||||||
func getEncryptionKey() ([]byte, error) {
|
|
||||||
key := os.Getenv("QUOTEFORGE_ENCRYPTION_KEY")
|
key := os.Getenv("QUOTEFORGE_ENCRYPTION_KEY")
|
||||||
if key != "" {
|
if key == "" {
|
||||||
hash := sha256.Sum256([]byte(key))
|
// Fallback to a machine-based key (hostname + fixed salt)
|
||||||
return hash[:], nil
|
hostname, _ := os.Hostname()
|
||||||
|
key = hostname + "quoteforge-salt-2024"
|
||||||
}
|
}
|
||||||
|
// Hash to get exactly 32 bytes for AES-256
|
||||||
stateDir, err := resolveEncryptionStateDir()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("resolve encryption state dir: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return loadOrCreateEncryptionKey(filepath.Join(stateDir, encryptionKeyFileName))
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolveEncryptionStateDir() (string, error) {
|
|
||||||
configPath, err := appstate.ResolveConfigPath("")
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return filepath.Dir(configPath), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadOrCreateEncryptionKey(path string) ([]byte, error) {
|
|
||||||
if data, err := os.ReadFile(path); err == nil {
|
|
||||||
return parseEncryptionKeyFile(data)
|
|
||||||
} else if !errors.Is(err, os.ErrNotExist) {
|
|
||||||
return nil, fmt.Errorf("read encryption key: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
|
|
||||||
return nil, fmt.Errorf("create encryption key dir: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
raw := make([]byte, 32)
|
|
||||||
if _, err := io.ReadFull(rand.Reader, raw); err != nil {
|
|
||||||
return nil, fmt.Errorf("generate encryption key: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
encoded := base64.StdEncoding.EncodeToString(raw)
|
|
||||||
if err := writeKeyFile(path, []byte(encoded+"\n")); err != nil {
|
|
||||||
if errors.Is(err, os.ErrExist) {
|
|
||||||
data, readErr := os.ReadFile(path)
|
|
||||||
if readErr != nil {
|
|
||||||
return nil, fmt.Errorf("read concurrent encryption key: %w", readErr)
|
|
||||||
}
|
|
||||||
return parseEncryptionKeyFile(data)
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return raw, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeKeyFile(path string, data []byte) error {
|
|
||||||
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
if _, err := file.Write(data); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return file.Sync()
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseEncryptionKeyFile(data []byte) ([]byte, error) {
|
|
||||||
trimmed := strings.TrimSpace(string(data))
|
|
||||||
decoded, err := base64.StdEncoding.DecodeString(trimmed)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("decode encryption key file: %w", err)
|
|
||||||
}
|
|
||||||
if len(decoded) != 32 {
|
|
||||||
return nil, fmt.Errorf("invalid encryption key length: %d", len(decoded))
|
|
||||||
}
|
|
||||||
return decoded, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getLegacyEncryptionKey() []byte {
|
|
||||||
hostname, _ := os.Hostname()
|
|
||||||
key := hostname + "quoteforge-salt-2024"
|
|
||||||
hash := sha256.Sum256([]byte(key))
|
hash := sha256.Sum256([]byte(key))
|
||||||
return hash[:]
|
return hash[:]
|
||||||
}
|
}
|
||||||
@@ -115,10 +30,7 @@ func Encrypt(plaintext string) (string, error) {
|
|||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
key, err := getEncryptionKey()
|
key := getEncryptionKey()
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
block, err := aes.NewCipher(key)
|
block, err := aes.NewCipher(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
@@ -144,50 +56,12 @@ func Decrypt(ciphertext string) (string, error) {
|
|||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
key, err := getEncryptionKey()
|
key := getEncryptionKey()
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
plaintext, legacy, err := decryptWithKeys(ciphertext, key, getLegacyEncryptionKey())
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
_ = legacy
|
|
||||||
return plaintext, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func DecryptWithMetadata(ciphertext string) (string, bool, error) {
|
|
||||||
if ciphertext == "" {
|
|
||||||
return "", false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
key, err := getEncryptionKey()
|
|
||||||
if err != nil {
|
|
||||||
return "", false, err
|
|
||||||
}
|
|
||||||
return decryptWithKeys(ciphertext, key, getLegacyEncryptionKey())
|
|
||||||
}
|
|
||||||
|
|
||||||
func decryptWithKeys(ciphertext string, primaryKey, legacyKey []byte) (string, bool, error) {
|
|
||||||
data, err := base64.StdEncoding.DecodeString(ciphertext)
|
data, err := base64.StdEncoding.DecodeString(ciphertext)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", false, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
plaintext, err := decryptWithKey(data, primaryKey)
|
|
||||||
if err == nil {
|
|
||||||
return plaintext, false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
legacyPlaintext, legacyErr := decryptWithKey(data, legacyKey)
|
|
||||||
if legacyErr == nil {
|
|
||||||
return legacyPlaintext, true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func decryptWithKey(data, key []byte) (string, error) {
|
|
||||||
block, err := aes.NewCipher(key)
|
block, err := aes.NewCipher(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
|
|||||||
@@ -1,97 +0,0 @@
|
|||||||
package localdb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/aes"
|
|
||||||
"crypto/cipher"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/base64"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestEncryptCreatesPersistentKeyFile(t *testing.T) {
|
|
||||||
stateDir := t.TempDir()
|
|
||||||
t.Setenv("QFS_STATE_DIR", stateDir)
|
|
||||||
t.Setenv("QUOTEFORGE_ENCRYPTION_KEY", "")
|
|
||||||
|
|
||||||
ciphertext, err := Encrypt("secret-password")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("encrypt: %v", err)
|
|
||||||
}
|
|
||||||
if ciphertext == "" {
|
|
||||||
t.Fatal("expected ciphertext")
|
|
||||||
}
|
|
||||||
|
|
||||||
keyPath := filepath.Join(stateDir, encryptionKeyFileName)
|
|
||||||
info, err := os.Stat(keyPath)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("stat key file: %v", err)
|
|
||||||
}
|
|
||||||
if info.Mode().Perm() != 0600 {
|
|
||||||
t.Fatalf("expected 0600 key file, got %v", info.Mode().Perm())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDecryptMigratesLegacyCiphertext(t *testing.T) {
|
|
||||||
stateDir := t.TempDir()
|
|
||||||
t.Setenv("QFS_STATE_DIR", stateDir)
|
|
||||||
t.Setenv("QUOTEFORGE_ENCRYPTION_KEY", "")
|
|
||||||
|
|
||||||
legacyCiphertext := encryptWithKeyForTest(t, getLegacyEncryptionKey(), "legacy-password")
|
|
||||||
|
|
||||||
plaintext, migrated, err := DecryptWithMetadata(legacyCiphertext)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("decrypt legacy: %v", err)
|
|
||||||
}
|
|
||||||
if plaintext != "legacy-password" {
|
|
||||||
t.Fatalf("unexpected plaintext: %q", plaintext)
|
|
||||||
}
|
|
||||||
if !migrated {
|
|
||||||
t.Fatal("expected legacy ciphertext to require migration")
|
|
||||||
}
|
|
||||||
|
|
||||||
currentCiphertext, err := Encrypt("legacy-password")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("encrypt current: %v", err)
|
|
||||||
}
|
|
||||||
plaintext, migrated, err = DecryptWithMetadata(currentCiphertext)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("decrypt current: %v", err)
|
|
||||||
}
|
|
||||||
if migrated {
|
|
||||||
t.Fatal("did not expect current ciphertext to require migration")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func encryptWithKeyForTest(t *testing.T, key []byte, plaintext string) string {
|
|
||||||
t.Helper()
|
|
||||||
|
|
||||||
block, err := aes.NewCipher(key)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("new cipher: %v", err)
|
|
||||||
}
|
|
||||||
gcm, err := cipher.NewGCM(block)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("new gcm: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
nonce := make([]byte, gcm.NonceSize())
|
|
||||||
if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
|
|
||||||
t.Fatalf("read nonce: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ciphertext := gcm.Seal(nonce, nonce, []byte(plaintext), nil)
|
|
||||||
return base64.StdEncoding.EncodeToString(ciphertext)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLegacyEncryptionKeyRemainsDeterministic(t *testing.T) {
|
|
||||||
hostname, _ := os.Hostname()
|
|
||||||
expected := sha256.Sum256([]byte(hostname + "quoteforge-salt-2024"))
|
|
||||||
actual := getLegacyEncryptionKey()
|
|
||||||
if string(actual) != string(expected[:]) {
|
|
||||||
t.Fatal("legacy key derivation changed")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -4,11 +4,6 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/glebarez/sqlite"
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"gorm.io/gorm"
|
|
||||||
"gorm.io/gorm/logger"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestRunLocalMigrationsBackfillsExistingConfigurations(t *testing.T) {
|
func TestRunLocalMigrationsBackfillsExistingConfigurations(t *testing.T) {
|
||||||
@@ -130,466 +125,3 @@ func TestRunLocalMigrationsFixesPricelistVersionUniqueIndex(t *testing.T) {
|
|||||||
t.Fatalf("expected 2 pricelists, got %d", count)
|
t.Fatalf("expected 2 pricelists, got %d", count)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRunLocalMigrationsDeduplicatesConfigurationVersionsBySpecAndPrice(t *testing.T) {
|
|
||||||
dbPath := filepath.Join(t.TempDir(), "versions_dedup.db")
|
|
||||||
|
|
||||||
local, err := New(dbPath)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("open localdb: %v", err)
|
|
||||||
}
|
|
||||||
t.Cleanup(func() { _ = local.Close() })
|
|
||||||
|
|
||||||
cfg := &LocalConfiguration{
|
|
||||||
UUID: "dedup-cfg",
|
|
||||||
Name: "Dedup",
|
|
||||||
Items: LocalConfigItems{{LotName: "CPU_A", Quantity: 1, UnitPrice: 100}},
|
|
||||||
ServerCount: 1,
|
|
||||||
SyncStatus: "pending",
|
|
||||||
OriginalUsername: "tester",
|
|
||||||
IsActive: true,
|
|
||||||
}
|
|
||||||
if err := local.SaveConfiguration(cfg); err != nil {
|
|
||||||
t.Fatalf("save seed config: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
baseV1Data, err := BuildConfigurationSnapshot(cfg)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("build v1 snapshot: %v", err)
|
|
||||||
}
|
|
||||||
baseV1 := LocalConfigurationVersion{
|
|
||||||
ID: uuid.NewString(),
|
|
||||||
ConfigurationUUID: cfg.UUID,
|
|
||||||
VersionNo: 1,
|
|
||||||
Data: baseV1Data,
|
|
||||||
AppVersion: "test",
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
}
|
|
||||||
if err := local.DB().Create(&baseV1).Error; err != nil {
|
|
||||||
t.Fatalf("insert base v1: %v", err)
|
|
||||||
}
|
|
||||||
if err := local.DB().Model(&LocalConfiguration{}).
|
|
||||||
Where("uuid = ?", cfg.UUID).
|
|
||||||
Update("current_version_id", baseV1.ID).Error; err != nil {
|
|
||||||
t.Fatalf("set current_version_id to v1: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
v2 := LocalConfigurationVersion{
|
|
||||||
ID: uuid.NewString(),
|
|
||||||
ConfigurationUUID: cfg.UUID,
|
|
||||||
VersionNo: 2,
|
|
||||||
Data: baseV1.Data,
|
|
||||||
AppVersion: "test",
|
|
||||||
CreatedAt: time.Now().Add(1 * time.Second),
|
|
||||||
}
|
|
||||||
if err := local.DB().Create(&v2).Error; err != nil {
|
|
||||||
t.Fatalf("insert duplicate v2: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
modified := *cfg
|
|
||||||
modified.Items = LocalConfigItems{{LotName: "CPU_A", Quantity: 2, UnitPrice: 100}}
|
|
||||||
total := modified.Items.Total()
|
|
||||||
modified.TotalPrice = &total
|
|
||||||
modified.UpdatedAt = time.Now()
|
|
||||||
v3Data, err := BuildConfigurationSnapshot(&modified)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("build v3 snapshot: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
v3 := LocalConfigurationVersion{
|
|
||||||
ID: uuid.NewString(),
|
|
||||||
ConfigurationUUID: cfg.UUID,
|
|
||||||
VersionNo: 3,
|
|
||||||
Data: v3Data,
|
|
||||||
AppVersion: "test",
|
|
||||||
CreatedAt: time.Now().Add(2 * time.Second),
|
|
||||||
}
|
|
||||||
if err := local.DB().Create(&v3).Error; err != nil {
|
|
||||||
t.Fatalf("insert v3: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
v4 := LocalConfigurationVersion{
|
|
||||||
ID: uuid.NewString(),
|
|
||||||
ConfigurationUUID: cfg.UUID,
|
|
||||||
VersionNo: 4,
|
|
||||||
Data: v3Data,
|
|
||||||
AppVersion: "test",
|
|
||||||
CreatedAt: time.Now().Add(3 * time.Second),
|
|
||||||
}
|
|
||||||
if err := local.DB().Create(&v4).Error; err != nil {
|
|
||||||
t.Fatalf("insert duplicate v4: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := local.DB().Model(&LocalConfiguration{}).
|
|
||||||
Where("uuid = ?", cfg.UUID).
|
|
||||||
Update("current_version_id", v4.ID).Error; err != nil {
|
|
||||||
t.Fatalf("point current_version_id to duplicate v4: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := local.DB().Where("id = ?", "2026_02_19_configuration_versions_dedup_spec_price").
|
|
||||||
Delete(&LocalSchemaMigration{}).Error; err != nil {
|
|
||||||
t.Fatalf("delete dedup migration record: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := runLocalMigrations(local.DB()); err != nil {
|
|
||||||
t.Fatalf("rerun local migrations: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var versions []LocalConfigurationVersion
|
|
||||||
if err := local.DB().Where("configuration_uuid = ?", cfg.UUID).
|
|
||||||
Order("version_no ASC").
|
|
||||||
Find(&versions).Error; err != nil {
|
|
||||||
t.Fatalf("load versions after dedup: %v", err)
|
|
||||||
}
|
|
||||||
if len(versions) != 2 {
|
|
||||||
t.Fatalf("expected 2 versions after dedup, got %d", len(versions))
|
|
||||||
}
|
|
||||||
if versions[0].VersionNo != 1 || versions[1].VersionNo != 3 {
|
|
||||||
t.Fatalf("expected kept version numbers [1,3], got [%d,%d]", versions[0].VersionNo, versions[1].VersionNo)
|
|
||||||
}
|
|
||||||
|
|
||||||
var after LocalConfiguration
|
|
||||||
if err := local.DB().Where("uuid = ?", cfg.UUID).First(&after).Error; err != nil {
|
|
||||||
t.Fatalf("load config after dedup: %v", err)
|
|
||||||
}
|
|
||||||
if after.CurrentVersionID == nil || *after.CurrentVersionID != v3.ID {
|
|
||||||
t.Fatalf("expected current_version_id to point to kept latest version v3")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRunLocalMigrationsBackfillsConfigurationLineNo(t *testing.T) {
|
|
||||||
dbPath := filepath.Join(t.TempDir(), "line_no_backfill.db")
|
|
||||||
|
|
||||||
local, err := New(dbPath)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("open localdb: %v", err)
|
|
||||||
}
|
|
||||||
t.Cleanup(func() { _ = local.Close() })
|
|
||||||
|
|
||||||
projectUUID := "project-line"
|
|
||||||
cfg1 := &LocalConfiguration{
|
|
||||||
UUID: "line-cfg-1",
|
|
||||||
ProjectUUID: &projectUUID,
|
|
||||||
Name: "Cfg 1",
|
|
||||||
Items: LocalConfigItems{},
|
|
||||||
SyncStatus: "pending",
|
|
||||||
OriginalUsername: "tester",
|
|
||||||
IsActive: true,
|
|
||||||
CreatedAt: time.Now().Add(-2 * time.Hour),
|
|
||||||
}
|
|
||||||
cfg2 := &LocalConfiguration{
|
|
||||||
UUID: "line-cfg-2",
|
|
||||||
ProjectUUID: &projectUUID,
|
|
||||||
Name: "Cfg 2",
|
|
||||||
Items: LocalConfigItems{},
|
|
||||||
SyncStatus: "pending",
|
|
||||||
OriginalUsername: "tester",
|
|
||||||
IsActive: true,
|
|
||||||
CreatedAt: time.Now().Add(-1 * time.Hour),
|
|
||||||
}
|
|
||||||
if err := local.SaveConfiguration(cfg1); err != nil {
|
|
||||||
t.Fatalf("save cfg1: %v", err)
|
|
||||||
}
|
|
||||||
if err := local.SaveConfiguration(cfg2); err != nil {
|
|
||||||
t.Fatalf("save cfg2: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := local.DB().Model(&LocalConfiguration{}).Where("uuid IN ?", []string{cfg1.UUID, cfg2.UUID}).Update("line_no", 0).Error; err != nil {
|
|
||||||
t.Fatalf("reset line_no: %v", err)
|
|
||||||
}
|
|
||||||
if err := local.DB().Where("id = ?", "2026_02_19_local_config_line_no").Delete(&LocalSchemaMigration{}).Error; err != nil {
|
|
||||||
t.Fatalf("delete migration record: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := runLocalMigrations(local.DB()); err != nil {
|
|
||||||
t.Fatalf("rerun local migrations: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var rows []LocalConfiguration
|
|
||||||
if err := local.DB().Where("uuid IN ?", []string{cfg1.UUID, cfg2.UUID}).Order("created_at ASC").Find(&rows).Error; err != nil {
|
|
||||||
t.Fatalf("load configurations: %v", err)
|
|
||||||
}
|
|
||||||
if len(rows) != 2 {
|
|
||||||
t.Fatalf("expected 2 configurations, got %d", len(rows))
|
|
||||||
}
|
|
||||||
if rows[0].Line != 10 || rows[1].Line != 20 {
|
|
||||||
t.Fatalf("expected line_no [10,20], got [%d,%d]", rows[0].Line, rows[1].Line)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRunLocalMigrationsDeduplicatesCanonicalPartnumberCatalog(t *testing.T) {
|
|
||||||
dbPath := filepath.Join(t.TempDir(), "partnumber_catalog_dedup.db")
|
|
||||||
db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{
|
|
||||||
Logger: logger.Default.LogMode(logger.Silent),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("open sqlite: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
firstLots := LocalPartnumberBookLots{
|
|
||||||
{LotName: "LOT-A", Qty: 1},
|
|
||||||
}
|
|
||||||
secondLots := LocalPartnumberBookLots{
|
|
||||||
{LotName: "LOT-B", Qty: 2},
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := db.Exec(`
|
|
||||||
CREATE TABLE local_partnumber_book_items (
|
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
||||||
partnumber TEXT NOT NULL,
|
|
||||||
lots_json TEXT NOT NULL,
|
|
||||||
description TEXT
|
|
||||||
)
|
|
||||||
`).Error; err != nil {
|
|
||||||
t.Fatalf("create dirty local_partnumber_book_items: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := db.Create(&LocalPartnumberBookItem{
|
|
||||||
Partnumber: "PN-001",
|
|
||||||
LotsJSON: firstLots,
|
|
||||||
Description: "",
|
|
||||||
}).Error; err != nil {
|
|
||||||
t.Fatalf("insert first duplicate row: %v", err)
|
|
||||||
}
|
|
||||||
if err := db.Create(&LocalPartnumberBookItem{
|
|
||||||
Partnumber: "PN-001",
|
|
||||||
LotsJSON: secondLots,
|
|
||||||
Description: "Canonical description",
|
|
||||||
}).Error; err != nil {
|
|
||||||
t.Fatalf("insert second duplicate row: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := migrateLocalPartnumberBookCatalog(db); err != nil {
|
|
||||||
t.Fatalf("migrate local partnumber catalog: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var items []LocalPartnumberBookItem
|
|
||||||
if err := db.Order("partnumber ASC").Find(&items).Error; err != nil {
|
|
||||||
t.Fatalf("load migrated partnumber items: %v", err)
|
|
||||||
}
|
|
||||||
if len(items) != 1 {
|
|
||||||
t.Fatalf("expected 1 deduplicated item, got %d", len(items))
|
|
||||||
}
|
|
||||||
if items[0].Partnumber != "PN-001" {
|
|
||||||
t.Fatalf("unexpected partnumber: %s", items[0].Partnumber)
|
|
||||||
}
|
|
||||||
if items[0].Description != "Canonical description" {
|
|
||||||
t.Fatalf("expected merged description, got %q", items[0].Description)
|
|
||||||
}
|
|
||||||
if len(items[0].LotsJSON) != 2 {
|
|
||||||
t.Fatalf("expected merged lots from duplicates, got %d", len(items[0].LotsJSON))
|
|
||||||
}
|
|
||||||
|
|
||||||
var duplicateCount int64
|
|
||||||
if err := db.Model(&LocalPartnumberBookItem{}).
|
|
||||||
Where("partnumber = ?", "PN-001").
|
|
||||||
Count(&duplicateCount).Error; err != nil {
|
|
||||||
t.Fatalf("count deduplicated partnumber: %v", err)
|
|
||||||
}
|
|
||||||
if duplicateCount != 1 {
|
|
||||||
t.Fatalf("expected unique partnumber row after migration, got %d", duplicateCount)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSanitizeLocalPartnumberBookCatalogRemovesRowsWithoutPartnumber(t *testing.T) {
|
|
||||||
dbPath := filepath.Join(t.TempDir(), "sanitize_partnumber_catalog.db")
|
|
||||||
db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{
|
|
||||||
Logger: logger.Default.LogMode(logger.Silent),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("open sqlite: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := db.Exec(`
|
|
||||||
CREATE TABLE local_partnumber_book_items (
|
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
||||||
partnumber TEXT NULL,
|
|
||||||
lots_json TEXT NOT NULL,
|
|
||||||
description TEXT
|
|
||||||
)
|
|
||||||
`).Error; err != nil {
|
|
||||||
t.Fatalf("create local_partnumber_book_items: %v", err)
|
|
||||||
}
|
|
||||||
if err := db.Exec(`
|
|
||||||
INSERT INTO local_partnumber_book_items (partnumber, lots_json, description) VALUES
|
|
||||||
(NULL, '[]', 'null pn'),
|
|
||||||
('', '[]', 'empty pn'),
|
|
||||||
('PN-OK', '[]', 'valid pn')
|
|
||||||
`).Error; err != nil {
|
|
||||||
t.Fatalf("seed local_partnumber_book_items: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := sanitizeLocalPartnumberBookCatalog(db); err != nil {
|
|
||||||
t.Fatalf("sanitize local partnumber catalog: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var items []LocalPartnumberBookItem
|
|
||||||
if err := db.Order("id ASC").Find(&items).Error; err != nil {
|
|
||||||
t.Fatalf("load sanitized items: %v", err)
|
|
||||||
}
|
|
||||||
if len(items) != 1 {
|
|
||||||
t.Fatalf("expected 1 valid item after sanitize, got %d", len(items))
|
|
||||||
}
|
|
||||||
if items[0].Partnumber != "PN-OK" {
|
|
||||||
t.Fatalf("expected remaining partnumber PN-OK, got %q", items[0].Partnumber)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewMigratesLegacyPartnumberBookCatalogBeforeAutoMigrate(t *testing.T) {
|
|
||||||
dbPath := filepath.Join(t.TempDir(), "legacy_partnumber_catalog.db")
|
|
||||||
db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{
|
|
||||||
Logger: logger.Default.LogMode(logger.Silent),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("open sqlite: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := db.Exec(`
|
|
||||||
CREATE TABLE local_partnumber_book_items (
|
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
||||||
partnumber TEXT NOT NULL UNIQUE,
|
|
||||||
lots_json TEXT NOT NULL,
|
|
||||||
is_primary_pn INTEGER NOT NULL DEFAULT 0,
|
|
||||||
description TEXT
|
|
||||||
)
|
|
||||||
`).Error; err != nil {
|
|
||||||
t.Fatalf("create legacy local_partnumber_book_items: %v", err)
|
|
||||||
}
|
|
||||||
if err := db.Exec(`
|
|
||||||
INSERT INTO local_partnumber_book_items (partnumber, lots_json, is_primary_pn, description)
|
|
||||||
VALUES ('PN-001', '[{"lot_name":"CPU_A","qty":1}]', 0, 'Legacy row')
|
|
||||||
`).Error; err != nil {
|
|
||||||
t.Fatalf("seed legacy local_partnumber_book_items: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
local, err := New(dbPath)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("open localdb with legacy catalog: %v", err)
|
|
||||||
}
|
|
||||||
t.Cleanup(func() { _ = local.Close() })
|
|
||||||
|
|
||||||
var columns []struct {
|
|
||||||
Name string `gorm:"column:name"`
|
|
||||||
}
|
|
||||||
if err := local.DB().Raw(`SELECT name FROM pragma_table_info('local_partnumber_book_items')`).Scan(&columns).Error; err != nil {
|
|
||||||
t.Fatalf("load local_partnumber_book_items columns: %v", err)
|
|
||||||
}
|
|
||||||
for _, column := range columns {
|
|
||||||
if column.Name == "is_primary_pn" {
|
|
||||||
t.Fatalf("expected legacy is_primary_pn column to be removed before automigrate")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var items []LocalPartnumberBookItem
|
|
||||||
if err := local.DB().Find(&items).Error; err != nil {
|
|
||||||
t.Fatalf("load migrated local_partnumber_book_items: %v", err)
|
|
||||||
}
|
|
||||||
if len(items) != 1 || items[0].Partnumber != "PN-001" {
|
|
||||||
t.Fatalf("unexpected migrated rows: %#v", items)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNewRecoversBrokenPartnumberBookCatalogCache(t *testing.T) {
|
|
||||||
dbPath := filepath.Join(t.TempDir(), "broken_partnumber_catalog.db")
|
|
||||||
db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{
|
|
||||||
Logger: logger.Default.LogMode(logger.Silent),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("open sqlite: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := db.Exec(`
|
|
||||||
CREATE TABLE local_partnumber_book_items (
|
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
||||||
partnumber TEXT NOT NULL UNIQUE,
|
|
||||||
lots_json TEXT NOT NULL,
|
|
||||||
description TEXT
|
|
||||||
)
|
|
||||||
`).Error; err != nil {
|
|
||||||
t.Fatalf("create broken local_partnumber_book_items: %v", err)
|
|
||||||
}
|
|
||||||
if err := db.Exec(`
|
|
||||||
INSERT INTO local_partnumber_book_items (partnumber, lots_json, description)
|
|
||||||
VALUES ('PN-001', '{not-json}', 'Broken cache row')
|
|
||||||
`).Error; err != nil {
|
|
||||||
t.Fatalf("seed broken local_partnumber_book_items: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
local, err := New(dbPath)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("open localdb with broken catalog cache: %v", err)
|
|
||||||
}
|
|
||||||
t.Cleanup(func() { _ = local.Close() })
|
|
||||||
|
|
||||||
var count int64
|
|
||||||
if err := local.DB().Model(&LocalPartnumberBookItem{}).Count(&count).Error; err != nil {
|
|
||||||
t.Fatalf("count recovered local_partnumber_book_items: %v", err)
|
|
||||||
}
|
|
||||||
if count != 0 {
|
|
||||||
t.Fatalf("expected empty recovered local_partnumber_book_items, got %d rows", count)
|
|
||||||
}
|
|
||||||
|
|
||||||
var quarantineTables []struct {
|
|
||||||
Name string `gorm:"column:name"`
|
|
||||||
}
|
|
||||||
if err := local.DB().Raw(`
|
|
||||||
SELECT name
|
|
||||||
FROM sqlite_master
|
|
||||||
WHERE type = 'table' AND name LIKE 'local_partnumber_book_items_broken_%'
|
|
||||||
`).Scan(&quarantineTables).Error; err != nil {
|
|
||||||
t.Fatalf("load quarantine tables: %v", err)
|
|
||||||
}
|
|
||||||
if len(quarantineTables) != 1 {
|
|
||||||
t.Fatalf("expected one quarantined broken catalog table, got %d", len(quarantineTables))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCleanupStaleReadOnlyCacheTempTablesDropsShadowTempWhenBaseExists(t *testing.T) {
|
|
||||||
dbPath := filepath.Join(t.TempDir(), "stale_cache_temp.db")
|
|
||||||
db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{
|
|
||||||
Logger: logger.Default.LogMode(logger.Silent),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("open sqlite: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := db.Exec(`
|
|
||||||
CREATE TABLE local_pricelist_items (
|
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
||||||
pricelist_id INTEGER NOT NULL,
|
|
||||||
partnumber TEXT,
|
|
||||||
brand TEXT NOT NULL DEFAULT '',
|
|
||||||
lot_name TEXT NOT NULL,
|
|
||||||
description TEXT,
|
|
||||||
price REAL NOT NULL DEFAULT 0,
|
|
||||||
quantity INTEGER NOT NULL DEFAULT 0,
|
|
||||||
reserve INTEGER NOT NULL DEFAULT 0,
|
|
||||||
available_qty REAL,
|
|
||||||
partnumbers TEXT,
|
|
||||||
lot_category TEXT,
|
|
||||||
created_at DATETIME,
|
|
||||||
updated_at DATETIME
|
|
||||||
)
|
|
||||||
`).Error; err != nil {
|
|
||||||
t.Fatalf("create local_pricelist_items: %v", err)
|
|
||||||
}
|
|
||||||
if err := db.Exec(`
|
|
||||||
CREATE TABLE local_pricelist_items__temp (
|
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
||||||
legacy TEXT
|
|
||||||
)
|
|
||||||
`).Error; err != nil {
|
|
||||||
t.Fatalf("create local_pricelist_items__temp: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := cleanupStaleReadOnlyCacheTempTables(db); err != nil {
|
|
||||||
t.Fatalf("cleanup stale read-only cache temp tables: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if db.Migrator().HasTable("local_pricelist_items__temp") {
|
|
||||||
t.Fatalf("expected stale temp table to be dropped")
|
|
||||||
}
|
|
||||||
if !db.Migrator().HasTable("local_pricelist_items") {
|
|
||||||
t.Fatalf("expected base local_pricelist_items table to remain")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -51,8 +51,8 @@ func TestRunLocalMigrationsBackfillsDefaultProject(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("get system project: %v", err)
|
t.Fatalf("get system project: %v", err)
|
||||||
}
|
}
|
||||||
if project.Name == nil || *project.Name != "Без проекта" {
|
if project.Name != "Без проекта" {
|
||||||
t.Fatalf("expected system project name, got %v", project.Name)
|
t.Fatalf("expected system project name, got %q", project.Name)
|
||||||
}
|
}
|
||||||
if !project.IsSystem {
|
if !project.IsSystem {
|
||||||
t.Fatalf("expected system project flag")
|
t.Fatalf("expected system project flag")
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"sort"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -59,79 +58,6 @@ var localMigrations = []localMigration{
|
|||||||
name: "Backfill source for local pricelists and create source indexes",
|
name: "Backfill source for local pricelists and create source indexes",
|
||||||
run: backfillLocalPricelistSource,
|
run: backfillLocalPricelistSource,
|
||||||
},
|
},
|
||||||
{
|
|
||||||
id: "2026_02_09_drop_component_unused_fields",
|
|
||||||
name: "Remove current_price and synced_at from local_components (unused fields)",
|
|
||||||
run: dropComponentUnusedFields,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: "2026_02_09_add_warehouse_competitor_pricelists",
|
|
||||||
name: "Add warehouse_pricelist_id and competitor_pricelist_id to local_configurations",
|
|
||||||
run: addWarehouseCompetitorPriceLists,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: "2026_02_11_local_pricelist_item_category",
|
|
||||||
name: "Add lot_category to local_pricelist_items and create indexes",
|
|
||||||
run: addLocalPricelistItemCategoryAndIndexes,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: "2026_02_11_local_config_article",
|
|
||||||
name: "Add article to local_configurations",
|
|
||||||
run: addLocalConfigurationArticle,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: "2026_02_11_local_config_server_model",
|
|
||||||
name: "Add server_model to local_configurations",
|
|
||||||
run: addLocalConfigurationServerModel,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: "2026_02_11_local_config_support_code",
|
|
||||||
name: "Add support_code to local_configurations",
|
|
||||||
run: addLocalConfigurationSupportCode,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: "2026_02_13_local_project_code",
|
|
||||||
name: "Add project code to local_projects and backfill",
|
|
||||||
run: addLocalProjectCode,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: "2026_02_13_local_project_variant",
|
|
||||||
name: "Add project variant to local_projects and backfill",
|
|
||||||
run: addLocalProjectVariant,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: "2026_02_13_local_project_name_nullable",
|
|
||||||
name: "Allow NULL project names in local_projects",
|
|
||||||
run: allowLocalProjectNameNull,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: "2026_02_19_configuration_versions_dedup_spec_price",
|
|
||||||
name: "Deduplicate configuration revisions by spec+price",
|
|
||||||
run: deduplicateConfigurationVersionsBySpecAndPrice,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: "2026_02_19_local_config_line_no",
|
|
||||||
name: "Add line_no to local_configurations and backfill ordering",
|
|
||||||
run: addLocalConfigurationLineNo,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: "2026_03_07_local_partnumber_book_catalog",
|
|
||||||
name: "Convert local partnumber book cache to book membership + deduplicated PN catalog",
|
|
||||||
run: migrateLocalPartnumberBookCatalog,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
id: "2026_03_13_pricelist_items_dedup_unique",
|
|
||||||
name: "Deduplicate local_pricelist_items and add unique index on (pricelist_id, lot_name)",
|
|
||||||
run: deduplicatePricelistItemsAndAddUniqueIndex,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
type localPartnumberCatalogRow struct {
|
|
||||||
Partnumber string
|
|
||||||
LotsJSON LocalPartnumberBookLots
|
|
||||||
Description string
|
|
||||||
CreatedAt time.Time
|
|
||||||
ServerID int
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func runLocalMigrations(db *gorm.DB) error {
|
func runLocalMigrations(db *gorm.DB) error {
|
||||||
@@ -268,8 +194,7 @@ func ensureDefaultProjectTx(tx *gorm.DB, ownerUsername string) (*LocalProject, e
|
|||||||
project = LocalProject{
|
project = LocalProject{
|
||||||
UUID: uuid.NewString(),
|
UUID: uuid.NewString(),
|
||||||
OwnerUsername: ownerUsername,
|
OwnerUsername: ownerUsername,
|
||||||
Code: "Без проекта",
|
Name: "Без проекта",
|
||||||
Name: ptrString("Без проекта"),
|
|
||||||
IsActive: true,
|
IsActive: true,
|
||||||
IsSystem: true,
|
IsSystem: true,
|
||||||
CreatedAt: now,
|
CreatedAt: now,
|
||||||
@@ -283,139 +208,6 @@ func ensureDefaultProjectTx(tx *gorm.DB, ownerUsername string) (*LocalProject, e
|
|||||||
return &project, nil
|
return &project, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func addLocalProjectCode(tx *gorm.DB) error {
|
|
||||||
if err := tx.Exec(`ALTER TABLE local_projects ADD COLUMN code TEXT`).Error; err != nil {
|
|
||||||
if !strings.Contains(strings.ToLower(err.Error()), "duplicate") &&
|
|
||||||
!strings.Contains(strings.ToLower(err.Error()), "exists") {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Drop unique index if it already exists to allow de-duplication updates.
|
|
||||||
if err := tx.Exec(`DROP INDEX IF EXISTS idx_local_projects_code`).Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy code from current project name.
|
|
||||||
if err := tx.Exec(`
|
|
||||||
UPDATE local_projects
|
|
||||||
SET code = TRIM(COALESCE(name, ''))`).Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure any remaining blanks have a unique fallback.
|
|
||||||
if err := tx.Exec(`
|
|
||||||
UPDATE local_projects
|
|
||||||
SET code = 'P-' || uuid
|
|
||||||
WHERE code IS NULL OR TRIM(code) = ''`).Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// De-duplicate codes: OPS-1948-2, OPS-1948-3...
|
|
||||||
if err := tx.Exec(`
|
|
||||||
WITH ranked AS (
|
|
||||||
SELECT id, code,
|
|
||||||
ROW_NUMBER() OVER (PARTITION BY code ORDER BY id) AS rn
|
|
||||||
FROM local_projects
|
|
||||||
)
|
|
||||||
UPDATE local_projects
|
|
||||||
SET code = code || '-' || (SELECT rn FROM ranked WHERE ranked.id = local_projects.id)
|
|
||||||
WHERE id IN (SELECT id FROM ranked WHERE rn > 1)`).Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create unique index for project codes (ignore if exists).
|
|
||||||
if err := tx.Exec(`CREATE UNIQUE INDEX IF NOT EXISTS idx_local_projects_code ON local_projects(code)`).Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func addLocalProjectVariant(tx *gorm.DB) error {
|
|
||||||
if err := tx.Exec(`ALTER TABLE local_projects ADD COLUMN variant TEXT NOT NULL DEFAULT ''`).Error; err != nil {
|
|
||||||
if !strings.Contains(strings.ToLower(err.Error()), "duplicate") &&
|
|
||||||
!strings.Contains(strings.ToLower(err.Error()), "exists") {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Drop legacy code index if present.
|
|
||||||
if err := tx.Exec(`DROP INDEX IF EXISTS idx_local_projects_code`).Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset code from name and clear variant.
|
|
||||||
if err := tx.Exec(`
|
|
||||||
UPDATE local_projects
|
|
||||||
SET code = TRIM(COALESCE(name, '')),
|
|
||||||
variant = ''`).Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// De-duplicate by assigning variant numbers: 2,3...
|
|
||||||
if err := tx.Exec(`
|
|
||||||
WITH ranked AS (
|
|
||||||
SELECT id, code,
|
|
||||||
ROW_NUMBER() OVER (PARTITION BY code ORDER BY id) AS rn
|
|
||||||
FROM local_projects
|
|
||||||
)
|
|
||||||
UPDATE local_projects
|
|
||||||
SET variant = CASE
|
|
||||||
WHEN (SELECT rn FROM ranked WHERE ranked.id = local_projects.id) = 1 THEN ''
|
|
||||||
ELSE '-' || CAST((SELECT rn FROM ranked WHERE ranked.id = local_projects.id) AS TEXT)
|
|
||||||
END`).Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Exec(`CREATE UNIQUE INDEX IF NOT EXISTS idx_local_projects_code_variant ON local_projects(code, variant)`).Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func allowLocalProjectNameNull(tx *gorm.DB) error {
|
|
||||||
if err := tx.Exec(`ALTER TABLE local_projects RENAME TO local_projects_old`).Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Exec(`
|
|
||||||
CREATE TABLE local_projects (
|
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
||||||
uuid TEXT NOT NULL UNIQUE,
|
|
||||||
server_id INTEGER NULL,
|
|
||||||
owner_username TEXT NOT NULL,
|
|
||||||
code TEXT NOT NULL,
|
|
||||||
variant TEXT NOT NULL DEFAULT '',
|
|
||||||
name TEXT NULL,
|
|
||||||
tracker_url TEXT NULL,
|
|
||||||
is_active INTEGER NOT NULL DEFAULT 1,
|
|
||||||
is_system INTEGER NOT NULL DEFAULT 0,
|
|
||||||
created_at DATETIME,
|
|
||||||
updated_at DATETIME,
|
|
||||||
synced_at DATETIME NULL,
|
|
||||||
sync_status TEXT DEFAULT 'local'
|
|
||||||
)`).Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_ = tx.Exec(`CREATE INDEX IF NOT EXISTS idx_local_projects_owner_username ON local_projects(owner_username)`).Error
|
|
||||||
_ = tx.Exec(`CREATE INDEX IF NOT EXISTS idx_local_projects_is_active ON local_projects(is_active)`).Error
|
|
||||||
_ = tx.Exec(`CREATE INDEX IF NOT EXISTS idx_local_projects_is_system ON local_projects(is_system)`).Error
|
|
||||||
_ = tx.Exec(`CREATE UNIQUE INDEX IF NOT EXISTS idx_local_projects_code_variant ON local_projects(code, variant)`).Error
|
|
||||||
|
|
||||||
if err := tx.Exec(`
|
|
||||||
INSERT INTO local_projects (id, uuid, server_id, owner_username, code, variant, name, tracker_url, is_active, is_system, created_at, updated_at, synced_at, sync_status)
|
|
||||||
SELECT id, uuid, server_id, owner_username, code, variant, name, tracker_url, is_active, is_system, created_at, updated_at, synced_at, sync_status
|
|
||||||
FROM local_projects_old`).Error; err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_ = tx.Exec(`DROP TABLE local_projects_old`).Error
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func backfillConfigurationPricelists(tx *gorm.DB) error {
|
func backfillConfigurationPricelists(tx *gorm.DB) error {
|
||||||
var latest LocalPricelist
|
var latest LocalPricelist
|
||||||
if err := tx.Where("source = ?", "estimate").Order("created_at DESC").First(&latest).Error; err != nil {
|
if err := tx.Where("source = ?", "estimate").Order("created_at DESC").First(&latest).Error; err != nil {
|
||||||
@@ -457,93 +249,6 @@ func chooseNonZeroTime(candidate time.Time, fallback time.Time) time.Time {
|
|||||||
return candidate
|
return candidate
|
||||||
}
|
}
|
||||||
|
|
||||||
func deduplicateConfigurationVersionsBySpecAndPrice(tx *gorm.DB) error {
|
|
||||||
var configs []LocalConfiguration
|
|
||||||
if err := tx.Select("uuid", "current_version_id").Find(&configs).Error; err != nil {
|
|
||||||
return fmt.Errorf("load configurations for revision deduplication: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var removedTotal int
|
|
||||||
for i := range configs {
|
|
||||||
cfg := configs[i]
|
|
||||||
|
|
||||||
var versions []LocalConfigurationVersion
|
|
||||||
if err := tx.Where("configuration_uuid = ?", cfg.UUID).
|
|
||||||
Order("version_no ASC, created_at ASC").
|
|
||||||
Find(&versions).Error; err != nil {
|
|
||||||
return fmt.Errorf("load versions for %s: %w", cfg.UUID, err)
|
|
||||||
}
|
|
||||||
if len(versions) < 2 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
deleteIDs := make([]string, 0)
|
|
||||||
deleteSet := make(map[string]struct{})
|
|
||||||
kept := make([]LocalConfigurationVersion, 0, len(versions))
|
|
||||||
var prevKey string
|
|
||||||
hasPrev := false
|
|
||||||
|
|
||||||
for _, version := range versions {
|
|
||||||
snapshotCfg, err := DecodeConfigurationSnapshot(version.Data)
|
|
||||||
if err != nil {
|
|
||||||
// Keep malformed snapshots untouched and reset chain to avoid accidental removals.
|
|
||||||
kept = append(kept, version)
|
|
||||||
hasPrev = false
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
key, err := BuildConfigurationSpecPriceFingerprint(snapshotCfg)
|
|
||||||
if err != nil {
|
|
||||||
kept = append(kept, version)
|
|
||||||
hasPrev = false
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if !hasPrev || key != prevKey {
|
|
||||||
kept = append(kept, version)
|
|
||||||
prevKey = key
|
|
||||||
hasPrev = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
deleteIDs = append(deleteIDs, version.ID)
|
|
||||||
deleteSet[version.ID] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(deleteIDs) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Where("id IN ?", deleteIDs).Delete(&LocalConfigurationVersion{}).Error; err != nil {
|
|
||||||
return fmt.Errorf("delete duplicate versions for %s: %w", cfg.UUID, err)
|
|
||||||
}
|
|
||||||
removedTotal += len(deleteIDs)
|
|
||||||
|
|
||||||
latestKeptID := kept[len(kept)-1].ID
|
|
||||||
if cfg.CurrentVersionID == nil || *cfg.CurrentVersionID == "" {
|
|
||||||
if err := tx.Model(&LocalConfiguration{}).
|
|
||||||
Where("uuid = ?", cfg.UUID).
|
|
||||||
Update("current_version_id", latestKeptID).Error; err != nil {
|
|
||||||
return fmt.Errorf("set missing current_version_id for %s: %w", cfg.UUID, err)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, deleted := deleteSet[*cfg.CurrentVersionID]; deleted {
|
|
||||||
if err := tx.Model(&LocalConfiguration{}).
|
|
||||||
Where("uuid = ?", cfg.UUID).
|
|
||||||
Update("current_version_id", latestKeptID).Error; err != nil {
|
|
||||||
return fmt.Errorf("repair current_version_id for %s: %w", cfg.UUID, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if removedTotal > 0 {
|
|
||||||
slog.Info("deduplicated configuration revisions", "removed_versions", removedTotal)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func fixLocalPricelistIndexes(tx *gorm.DB) error {
|
func fixLocalPricelistIndexes(tx *gorm.DB) error {
|
||||||
type indexRow struct {
|
type indexRow struct {
|
||||||
Name string `gorm:"column:name"`
|
Name string `gorm:"column:name"`
|
||||||
@@ -611,512 +316,3 @@ func backfillLocalPricelistSource(tx *gorm.DB) error {
|
|||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func dropComponentUnusedFields(tx *gorm.DB) error {
|
|
||||||
// Check if columns exist
|
|
||||||
type columnInfo struct {
|
|
||||||
Name string `gorm:"column:name"`
|
|
||||||
}
|
|
||||||
|
|
||||||
var columns []columnInfo
|
|
||||||
if err := tx.Raw(`
|
|
||||||
SELECT name FROM pragma_table_info('local_components')
|
|
||||||
WHERE name IN ('current_price', 'synced_at')
|
|
||||||
`).Scan(&columns).Error; err != nil {
|
|
||||||
return fmt.Errorf("check columns existence: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(columns) == 0 {
|
|
||||||
slog.Info("unused fields already removed from local_components")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SQLite: recreate table without current_price and synced_at
|
|
||||||
if err := tx.Exec(`
|
|
||||||
CREATE TABLE local_components_new (
|
|
||||||
lot_name TEXT PRIMARY KEY,
|
|
||||||
lot_description TEXT,
|
|
||||||
category TEXT,
|
|
||||||
model TEXT
|
|
||||||
)
|
|
||||||
`).Error; err != nil {
|
|
||||||
return fmt.Errorf("create new local_components table: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Exec(`
|
|
||||||
INSERT INTO local_components_new (lot_name, lot_description, category, model)
|
|
||||||
SELECT lot_name, lot_description, category, model
|
|
||||||
FROM local_components
|
|
||||||
`).Error; err != nil {
|
|
||||||
return fmt.Errorf("copy data to new table: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Exec(`DROP TABLE local_components`).Error; err != nil {
|
|
||||||
return fmt.Errorf("drop old table: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Exec(`ALTER TABLE local_components_new RENAME TO local_components`).Error; err != nil {
|
|
||||||
return fmt.Errorf("rename new table: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
slog.Info("dropped current_price and synced_at columns from local_components")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func addWarehouseCompetitorPriceLists(tx *gorm.DB) error {
|
|
||||||
// Check if columns exist
|
|
||||||
type columnInfo struct {
|
|
||||||
Name string `gorm:"column:name"`
|
|
||||||
}
|
|
||||||
|
|
||||||
var columns []columnInfo
|
|
||||||
if err := tx.Raw(`
|
|
||||||
SELECT name FROM pragma_table_info('local_configurations')
|
|
||||||
WHERE name IN ('warehouse_pricelist_id', 'competitor_pricelist_id')
|
|
||||||
`).Scan(&columns).Error; err != nil {
|
|
||||||
return fmt.Errorf("check columns existence: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(columns) == 2 {
|
|
||||||
slog.Info("warehouse and competitor pricelist columns already exist")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add columns if they don't exist
|
|
||||||
if err := tx.Exec(`
|
|
||||||
ALTER TABLE local_configurations
|
|
||||||
ADD COLUMN warehouse_pricelist_id INTEGER
|
|
||||||
`).Error; err != nil {
|
|
||||||
// Column might already exist, ignore
|
|
||||||
if !strings.Contains(err.Error(), "duplicate column") {
|
|
||||||
return fmt.Errorf("add warehouse_pricelist_id column: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Exec(`
|
|
||||||
ALTER TABLE local_configurations
|
|
||||||
ADD COLUMN competitor_pricelist_id INTEGER
|
|
||||||
`).Error; err != nil {
|
|
||||||
// Column might already exist, ignore
|
|
||||||
if !strings.Contains(err.Error(), "duplicate column") {
|
|
||||||
return fmt.Errorf("add competitor_pricelist_id column: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create indexes
|
|
||||||
if err := tx.Exec(`
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_local_configurations_warehouse_pricelist
|
|
||||||
ON local_configurations(warehouse_pricelist_id)
|
|
||||||
`).Error; err != nil {
|
|
||||||
return fmt.Errorf("create warehouse pricelist index: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Exec(`
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_local_configurations_competitor_pricelist
|
|
||||||
ON local_configurations(competitor_pricelist_id)
|
|
||||||
`).Error; err != nil {
|
|
||||||
return fmt.Errorf("create competitor pricelist index: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
slog.Info("added warehouse and competitor pricelist fields to local_configurations")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func addLocalPricelistItemCategoryAndIndexes(tx *gorm.DB) error {
|
|
||||||
type columnInfo struct {
|
|
||||||
Name string `gorm:"column:name"`
|
|
||||||
}
|
|
||||||
|
|
||||||
var columns []columnInfo
|
|
||||||
if err := tx.Raw(`
|
|
||||||
SELECT name FROM pragma_table_info('local_pricelist_items')
|
|
||||||
WHERE name IN ('lot_category')
|
|
||||||
`).Scan(&columns).Error; err != nil {
|
|
||||||
return fmt.Errorf("check local_pricelist_items(lot_category) existence: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(columns) == 0 {
|
|
||||||
if err := tx.Exec(`
|
|
||||||
ALTER TABLE local_pricelist_items
|
|
||||||
ADD COLUMN lot_category TEXT
|
|
||||||
`).Error; err != nil {
|
|
||||||
return fmt.Errorf("add local_pricelist_items.lot_category: %w", err)
|
|
||||||
}
|
|
||||||
slog.Info("added lot_category to local_pricelist_items")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Exec(`
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_local_pricelist_items_pricelist_lot
|
|
||||||
ON local_pricelist_items(pricelist_id, lot_name)
|
|
||||||
`).Error; err != nil {
|
|
||||||
return fmt.Errorf("ensure idx_local_pricelist_items_pricelist_lot: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Exec(`
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_local_pricelist_items_lot_category
|
|
||||||
ON local_pricelist_items(lot_category)
|
|
||||||
`).Error; err != nil {
|
|
||||||
return fmt.Errorf("ensure idx_local_pricelist_items_lot_category: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func addLocalConfigurationArticle(tx *gorm.DB) error {
|
|
||||||
type columnInfo struct {
|
|
||||||
Name string `gorm:"column:name"`
|
|
||||||
}
|
|
||||||
var columns []columnInfo
|
|
||||||
if err := tx.Raw(`
|
|
||||||
SELECT name FROM pragma_table_info('local_configurations')
|
|
||||||
WHERE name IN ('article')
|
|
||||||
`).Scan(&columns).Error; err != nil {
|
|
||||||
return fmt.Errorf("check local_configurations(article) existence: %w", err)
|
|
||||||
}
|
|
||||||
if len(columns) == 0 {
|
|
||||||
if err := tx.Exec(`
|
|
||||||
ALTER TABLE local_configurations
|
|
||||||
ADD COLUMN article TEXT
|
|
||||||
`).Error; err != nil {
|
|
||||||
return fmt.Errorf("add local_configurations.article: %w", err)
|
|
||||||
}
|
|
||||||
slog.Info("added article to local_configurations")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func addLocalConfigurationServerModel(tx *gorm.DB) error {
|
|
||||||
type columnInfo struct {
|
|
||||||
Name string `gorm:"column:name"`
|
|
||||||
}
|
|
||||||
var columns []columnInfo
|
|
||||||
if err := tx.Raw(`
|
|
||||||
SELECT name FROM pragma_table_info('local_configurations')
|
|
||||||
WHERE name IN ('server_model')
|
|
||||||
`).Scan(&columns).Error; err != nil {
|
|
||||||
return fmt.Errorf("check local_configurations(server_model) existence: %w", err)
|
|
||||||
}
|
|
||||||
if len(columns) == 0 {
|
|
||||||
if err := tx.Exec(`
|
|
||||||
ALTER TABLE local_configurations
|
|
||||||
ADD COLUMN server_model TEXT
|
|
||||||
`).Error; err != nil {
|
|
||||||
return fmt.Errorf("add local_configurations.server_model: %w", err)
|
|
||||||
}
|
|
||||||
slog.Info("added server_model to local_configurations")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func addLocalConfigurationSupportCode(tx *gorm.DB) error {
|
|
||||||
type columnInfo struct {
|
|
||||||
Name string `gorm:"column:name"`
|
|
||||||
}
|
|
||||||
var columns []columnInfo
|
|
||||||
if err := tx.Raw(`
|
|
||||||
SELECT name FROM pragma_table_info('local_configurations')
|
|
||||||
WHERE name IN ('support_code')
|
|
||||||
`).Scan(&columns).Error; err != nil {
|
|
||||||
return fmt.Errorf("check local_configurations(support_code) existence: %w", err)
|
|
||||||
}
|
|
||||||
if len(columns) == 0 {
|
|
||||||
if err := tx.Exec(`
|
|
||||||
ALTER TABLE local_configurations
|
|
||||||
ADD COLUMN support_code TEXT
|
|
||||||
`).Error; err != nil {
|
|
||||||
return fmt.Errorf("add local_configurations.support_code: %w", err)
|
|
||||||
}
|
|
||||||
slog.Info("added support_code to local_configurations")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// addLocalConfigurationLineNo adds the line_no ordering column to
// local_configurations, backfills it per project (10, 20, 30, ... in
// creation order), and ensures the (project_uuid, line_no) index exists.
// Safe to rerun: the ALTER is guarded by a column probe and the backfill
// only touches NULL/non-positive values.
func addLocalConfigurationLineNo(tx *gorm.DB) error {
	// Row shape for SQLite's pragma_table_info; only the column name is read.
	type columnInfo struct {
		Name string `gorm:"column:name"`
	}
	var columns []columnInfo
	if err := tx.Raw(`
		SELECT name FROM pragma_table_info('local_configurations')
		WHERE name IN ('line_no')
	`).Scan(&columns).Error; err != nil {
		return fmt.Errorf("check local_configurations(line_no) existence: %w", err)
	}
	if len(columns) == 0 {
		if err := tx.Exec(`
			ALTER TABLE local_configurations
			ADD COLUMN line_no INTEGER
		`).Error; err != nil {
			return fmt.Errorf("add local_configurations.line_no: %w", err)
		}
		slog.Info("added line_no to local_configurations")
	}

	// Backfill: number rows within each project by created_at (id breaks
	// ties), stepping by 10 so new rows can later be slotted in between.
	// Blank/NULL project UUIDs are grouped under a sentinel partition key.
	if err := tx.Exec(`
		WITH ranked AS (
			SELECT
				id,
				ROW_NUMBER() OVER (
					PARTITION BY COALESCE(NULLIF(TRIM(project_uuid), ''), '__NO_PROJECT__')
					ORDER BY created_at ASC, id ASC
				) AS rn
			FROM local_configurations
			WHERE line_no IS NULL OR line_no <= 0
		)
		UPDATE local_configurations
		SET line_no = (
			SELECT rn * 10
			FROM ranked
			WHERE ranked.id = local_configurations.id
		)
		WHERE id IN (SELECT id FROM ranked)
	`).Error; err != nil {
		return fmt.Errorf("backfill local_configurations.line_no: %w", err)
	}

	// Index supports per-project ordered listing by line number.
	if err := tx.Exec(`
		CREATE INDEX IF NOT EXISTS idx_local_configurations_project_line_no
		ON local_configurations(project_uuid, line_no)
	`).Error; err != nil {
		return fmt.Errorf("ensure idx_local_configurations_project_line_no: %w", err)
	}

	return nil
}
|
|
||||||
|
|
||||||
// migrateLocalPartnumberBookCatalog converts the legacy per-book PN->LOT
// item rows into the canonical one-row-per-partnumber catalog and backfills
// local_partnumber_books.partnumbers_json. It is a no-op when the items
// table is absent or already free of legacy markers. The rebuilt catalog is
// written by rebuildLocalPartnumberBookCatalog at the end.
func migrateLocalPartnumberBookCatalog(tx *gorm.DB) error {
	// Row shape for SQLite's pragma_table_info; only the column name is read.
	type columnInfo struct {
		Name string `gorm:"column:name"`
	}

	hasBooksTable := tx.Migrator().HasTable(&LocalPartnumberBook{})
	hasItemsTable := tx.Migrator().HasTable(&LocalPartnumberBookItem{})
	if !hasItemsTable {
		// Nothing to migrate without the items table.
		return nil
	}

	if hasBooksTable {
		var bookCols []columnInfo
		if err := tx.Raw(`SELECT name FROM pragma_table_info('local_partnumber_books')`).Scan(&bookCols).Error; err != nil {
			return fmt.Errorf("load local_partnumber_books columns: %w", err)
		}
		hasPartnumbersJSON := false
		for _, c := range bookCols {
			if c.Name == "partnumbers_json" {
				hasPartnumbersJSON = true
				break
			}
		}
		if !hasPartnumbersJSON {
			// '[]' default keeps existing rows valid JSON for LocalStringList.
			if err := tx.Exec(`ALTER TABLE local_partnumber_books ADD COLUMN partnumbers_json TEXT NOT NULL DEFAULT '[]'`).Error; err != nil {
				return fmt.Errorf("add local_partnumber_books.partnumbers_json: %w", err)
			}
		}
	}

	var itemCols []columnInfo
	if err := tx.Raw(`SELECT name FROM pragma_table_info('local_partnumber_book_items')`).Scan(&itemCols).Error; err != nil {
		return fmt.Errorf("load local_partnumber_book_items columns: %w", err)
	}
	// Legacy schema markers: book_id/lot_name mean per-book rows; lots_json
	// means the canonical aggregated shape (possibly with duplicates).
	hasBookID := false
	hasLotName := false
	hasLotsJSON := false
	for _, c := range itemCols {
		if c.Name == "book_id" {
			hasBookID = true
		}
		if c.Name == "lot_name" {
			hasLotName = true
		}
		if c.Name == "lots_json" {
			hasLotsJSON = true
		}
	}
	if !hasBookID && !hasLotName && !hasLotsJSON {
		// None of the known markers present; nothing to migrate.
		return nil
	}

	// legacyRow mirrors the legacy item+book join used to rebuild the catalog.
	type legacyRow struct {
		BookID      uint
		Partnumber  string
		LotName     string
		Description string
		CreatedAt   time.Time
		ServerID    int
	}
	// bookPNs collects the distinct partnumbers seen per legacy book.
	bookPNs := make(map[uint]map[string]struct{})
	// catalog accumulates the canonical one-entry-per-partnumber rows.
	catalog := make(map[string]*localPartnumberCatalogRow)

	if hasBookID || hasLotName {
		// Legacy path: one row per (book, partnumber, lot) occurrence.
		var rows []legacyRow
		if err := tx.Raw(`
			SELECT
				i.book_id,
				i.partnumber,
				i.lot_name,
				COALESCE(i.description, '') AS description,
				b.created_at,
				b.server_id
			FROM local_partnumber_book_items i
			INNER JOIN local_partnumber_books b ON b.id = i.book_id
			ORDER BY b.created_at DESC, b.id DESC, i.partnumber ASC, i.id ASC
		`).Scan(&rows).Error; err != nil {
			return fmt.Errorf("load legacy local partnumber book items: %w", err)
		}

		for _, row := range rows {
			if _, ok := bookPNs[row.BookID]; !ok {
				bookPNs[row.BookID] = make(map[string]struct{})
			}
			bookPNs[row.BookID][row.Partnumber] = struct{}{}

			entry, ok := catalog[row.Partnumber]
			if !ok {
				entry = &localPartnumberCatalogRow{
					Partnumber:  row.Partnumber,
					Description: row.Description,
					CreatedAt:   row.CreatedAt,
					ServerID:    row.ServerID,
				}
				catalog[row.Partnumber] = entry
			}
			// Prefer metadata from the newest book; ties broken by server id.
			if row.CreatedAt.After(entry.CreatedAt) || (row.CreatedAt.Equal(entry.CreatedAt) && row.ServerID >= entry.ServerID) {
				entry.Description = row.Description
				entry.CreatedAt = row.CreatedAt
				entry.ServerID = row.ServerID
			}
			// Each legacy row contributes one unit of its LOT; empty lot
			// names are counted only when the LOT already exists.
			found := false
			for i := range entry.LotsJSON {
				if entry.LotsJSON[i].LotName == row.LotName {
					entry.LotsJSON[i].Qty += 1
					found = true
					break
				}
			}
			if !found && row.LotName != "" {
				entry.LotsJSON = append(entry.LotsJSON, LocalPartnumberBookLot{LotName: row.LotName, Qty: 1})
			}
		}

		// Backfill partnumbers_json on every book from the collected sets.
		var books []LocalPartnumberBook
		if err := tx.Find(&books).Error; err != nil {
			return fmt.Errorf("load local partnumber books: %w", err)
		}
		for _, book := range books {
			pnSet := bookPNs[book.ID]
			partnumbers := make([]string, 0, len(pnSet))
			for pn := range pnSet {
				partnumbers = append(partnumbers, pn)
			}
			sort.Strings(partnumbers)
			if err := tx.Model(&LocalPartnumberBook{}).
				Where("id = ?", book.ID).
				Update("partnumbers_json", LocalStringList(partnumbers)).Error; err != nil {
				return fmt.Errorf("update partnumbers_json for local book %d: %w", book.ID, err)
			}
		}
	} else {
		// Items are already in the lots_json shape; merge duplicate
		// partnumbers (newest-first by id) into a single entry each.
		var items []LocalPartnumberBookItem
		if err := tx.Order("id DESC").Find(&items).Error; err != nil {
			return fmt.Errorf("load canonical local partnumber book items: %w", err)
		}
		for _, item := range items {
			entry, ok := catalog[item.Partnumber]
			if !ok {
				// Copy the lots slice so later merges cannot alias item data.
				copiedLots := append(LocalPartnumberBookLots(nil), item.LotsJSON...)
				catalog[item.Partnumber] = &localPartnumberCatalogRow{
					Partnumber:  item.Partnumber,
					LotsJSON:    copiedLots,
					Description: item.Description,
				}
				continue
			}
			if entry.Description == "" && item.Description != "" {
				entry.Description = item.Description
			}
			for _, lot := range item.LotsJSON {
				merged := false
				for i := range entry.LotsJSON {
					if entry.LotsJSON[i].LotName == lot.LotName {
						// Keep the larger quantity when the same LOT repeats.
						if lot.Qty > entry.LotsJSON[i].Qty {
							entry.LotsJSON[i].Qty = lot.Qty
						}
						merged = true
						break
					}
				}
				if !merged {
					entry.LotsJSON = append(entry.LotsJSON, lot)
				}
			}
		}
	}

	return rebuildLocalPartnumberBookCatalog(tx, catalog)
}
|
|
||||||
|
|
||||||
// rebuildLocalPartnumberBookCatalog replaces local_partnumber_book_items
// with a freshly built table containing exactly one row per partnumber from
// the supplied catalog. Rows are inserted in sorted partnumber order (with
// sorted lots) for deterministic output, then the legacy table is dropped
// and the new one renamed into place. Intended to run inside the caller's
// migration transaction.
func rebuildLocalPartnumberBookCatalog(tx *gorm.DB, catalog map[string]*localPartnumberCatalogRow) error {
	// Build the replacement table first; the swap happens only after all
	// rows insert cleanly.
	if err := tx.Exec(`
		CREATE TABLE local_partnumber_book_items_new (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			partnumber TEXT NOT NULL UNIQUE,
			lots_json TEXT NOT NULL,
			description TEXT
		)
	`).Error; err != nil {
		return fmt.Errorf("create new local_partnumber_book_items table: %w", err)
	}

	// Deterministic insert order: sorted partnumbers, sorted lots per row.
	orderedPartnumbers := make([]string, 0, len(catalog))
	for pn := range catalog {
		orderedPartnumbers = append(orderedPartnumbers, pn)
	}
	sort.Strings(orderedPartnumbers)
	for _, pn := range orderedPartnumbers {
		row := catalog[pn]
		sort.Slice(row.LotsJSON, func(i, j int) bool {
			return row.LotsJSON[i].LotName < row.LotsJSON[j].LotName
		})
		if err := tx.Table("local_partnumber_book_items_new").Create(&LocalPartnumberBookItem{
			Partnumber:  row.Partnumber,
			LotsJSON:    row.LotsJSON,
			Description: row.Description,
		}).Error; err != nil {
			return fmt.Errorf("insert new local_partnumber_book_items row for %s: %w", pn, err)
		}
	}

	// Swap: drop the legacy table and rename the rebuilt one into place.
	if err := tx.Exec(`DROP TABLE local_partnumber_book_items`).Error; err != nil {
		return fmt.Errorf("drop legacy local_partnumber_book_items: %w", err)
	}
	if err := tx.Exec(`ALTER TABLE local_partnumber_book_items_new RENAME TO local_partnumber_book_items`).Error; err != nil {
		return fmt.Errorf("rename new local_partnumber_book_items table: %w", err)
	}
	// Recreate the unique lookup index lost with the old table.
	if err := tx.Exec(`CREATE UNIQUE INDEX IF NOT EXISTS idx_local_partnumber_book_items_partnumber ON local_partnumber_book_items(partnumber)`).Error; err != nil {
		return fmt.Errorf("create local_partnumber_book_items partnumber index: %w", err)
	}
	return nil
}
|
|
||||||
|
|
||||||
func deduplicatePricelistItemsAndAddUniqueIndex(tx *gorm.DB) error {
|
|
||||||
// Remove duplicate (pricelist_id, lot_name) rows keeping only the row with the lowest id.
|
|
||||||
if err := tx.Exec(`
|
|
||||||
DELETE FROM local_pricelist_items
|
|
||||||
WHERE id NOT IN (
|
|
||||||
SELECT MIN(id) FROM local_pricelist_items
|
|
||||||
GROUP BY pricelist_id, lot_name
|
|
||||||
)
|
|
||||||
`).Error; err != nil {
|
|
||||||
return fmt.Errorf("deduplicate local_pricelist_items: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add unique index to prevent future duplicates.
|
|
||||||
if err := tx.Exec(`
|
|
||||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_local_pricelist_items_pricelist_lot_unique
|
|
||||||
ON local_pricelist_items(pricelist_id, lot_name)
|
|
||||||
`).Error; err != nil {
|
|
||||||
return fmt.Errorf("create unique index on local_pricelist_items: %w", err)
|
|
||||||
}
|
|
||||||
slog.Info("deduplicated local_pricelist_items and added unique index")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -57,65 +57,31 @@ func (c LocalConfigItems) Total() float64 {
|
|||||||
return total
|
return total
|
||||||
}
|
}
|
||||||
|
|
||||||
// LocalStringList is a JSON-encoded list of strings stored as TEXT in SQLite.
type LocalStringList []string

// Value implements driver.Valuer by serializing the list to JSON.
func (s LocalStringList) Value() (driver.Value, error) {
	return json.Marshal(s)
}

// Scan implements sql.Scanner. A NULL column yields an empty (non-nil)
// list; TEXT and BLOB payloads are decoded as JSON arrays.
func (s *LocalStringList) Scan(value interface{}) error {
	if value == nil {
		*s = make(LocalStringList, 0)
		return nil
	}
	var raw []byte
	switch v := value.(type) {
	case []byte:
		raw = v
	case string:
		raw = []byte(v)
	default:
		return errors.New("type assertion failed for LocalStringList")
	}
	return json.Unmarshal(raw, s)
}
|
|
||||||
|
|
||||||
// LocalConfiguration stores configurations in local SQLite
type LocalConfiguration struct {
	ID               uint             `gorm:"primaryKey;autoIncrement" json:"id"`
	UUID             string           `gorm:"uniqueIndex;not null" json:"uuid"`
	ServerID         *uint            `json:"server_id"` // ID on MariaDB server, NULL if local only
	ProjectUUID      *string          `gorm:"index" json:"project_uuid,omitempty"`
	CurrentVersionID *string          `gorm:"index" json:"current_version_id,omitempty"`
	IsActive         bool             `gorm:"default:true;index" json:"is_active"`
	Name             string           `gorm:"not null" json:"name"`
	Items            LocalConfigItems `gorm:"type:text" json:"items"` // JSON stored as text in SQLite
	TotalPrice       *float64         `json:"total_price"`
	CustomPrice      *float64         `json:"custom_price"`
	Notes            string           `json:"notes"`
	IsTemplate       bool             `gorm:"default:false" json:"is_template"`
	ServerCount      int              `gorm:"default:1" json:"server_count"`
	ServerModel      string           `gorm:"size:100" json:"server_model,omitempty"`
	SupportCode      string           `gorm:"size:20" json:"support_code,omitempty"`
	Article          string           `gorm:"size:80" json:"article,omitempty"`
	// Pricelist references: primary, warehouse, and competitor lists may
	// each be selected independently per configuration.
	PricelistID           *uint `gorm:"index" json:"pricelist_id,omitempty"`
	WarehousePricelistID  *uint `gorm:"index" json:"warehouse_pricelist_id,omitempty"`
	CompetitorPricelistID *uint `gorm:"index" json:"competitor_pricelist_id,omitempty"`
	DisablePriceRefresh   bool  `gorm:"default:false" json:"disable_price_refresh"`
	OnlyInStock           bool  `gorm:"default:false" json:"only_in_stock"`
	// VendorSpec holds the vendor BOM rows as JSON text.
	VendorSpec VendorSpec `gorm:"type:text" json:"vendor_spec,omitempty"`
	// Line is the per-project ordering number (stored in line_no, stepped
	// by 10 by the migration backfill).
	Line           int        `gorm:"column:line_no;index" json:"line"`
	PriceUpdatedAt *time.Time `gorm:"type:timestamp" json:"price_updated_at,omitempty"`
	CreatedAt      time.Time  `json:"created_at"`
	UpdatedAt      time.Time  `json:"updated_at"`
	SyncedAt       *time.Time `json:"synced_at"`
	ConfigType     string     `gorm:"default:server" json:"config_type"`  // "server" | "storage"
	SyncStatus     string     `gorm:"default:'local'" json:"sync_status"` // 'local', 'synced', 'modified'
	OriginalUserID uint       `json:"original_user_id"`                   // UserID from MariaDB for reference
	OriginalUsername string   `gorm:"not null;default:'';index" json:"original_username"`
	// Version bookkeeping: the current snapshot plus full history.
	CurrentVersion *LocalConfigurationVersion  `gorm:"foreignKey:CurrentVersionID;references:ID" json:"current_version,omitempty"`
	Versions       []LocalConfigurationVersion `gorm:"foreignKey:ConfigurationUUID;references:UUID" json:"versions,omitempty"`
}
|
|
||||||
func (LocalConfiguration) TableName() string {
|
func (LocalConfiguration) TableName() string {
|
||||||
@@ -127,9 +93,7 @@ type LocalProject struct {
|
|||||||
UUID string `gorm:"uniqueIndex;not null" json:"uuid"`
|
UUID string `gorm:"uniqueIndex;not null" json:"uuid"`
|
||||||
ServerID *uint `json:"server_id,omitempty"`
|
ServerID *uint `json:"server_id,omitempty"`
|
||||||
OwnerUsername string `gorm:"not null;index" json:"owner_username"`
|
OwnerUsername string `gorm:"not null;index" json:"owner_username"`
|
||||||
Code string `gorm:"not null;index:idx_local_projects_code_variant,priority:1" json:"code"`
|
Name string `gorm:"not null" json:"name"`
|
||||||
Variant string `gorm:"default:'';index:idx_local_projects_code_variant,priority:2" json:"variant"`
|
|
||||||
Name *string `json:"name,omitempty"`
|
|
||||||
TrackerURL string `json:"tracker_url"`
|
TrackerURL string `json:"tracker_url"`
|
||||||
IsActive bool `gorm:"default:true;index" json:"is_active"`
|
IsActive bool `gorm:"default:true;index" json:"is_active"`
|
||||||
IsSystem bool `gorm:"default:false;index" json:"is_system"`
|
IsSystem bool `gorm:"default:false;index" json:"is_system"`
|
||||||
@@ -178,47 +142,30 @@ func (LocalPricelist) TableName() string {
|
|||||||
|
|
||||||
// LocalPricelistItem stores pricelist items
type LocalPricelistItem struct {
	ID          uint   `gorm:"primaryKey;autoIncrement" json:"id"`
	PricelistID uint   `gorm:"not null;index" json:"pricelist_id"`
	LotName     string `gorm:"not null" json:"lot_name"`
	// LotCategory was added by the addLocalPricelistItemCategoryAndIndexes
	// migration; it may be empty for rows imported before that migration.
	LotCategory string  `gorm:"column:lot_category" json:"lot_category,omitempty"`
	Price       float64 `gorm:"not null" json:"price"`
	// AvailableQty is optional stock information; nil when unknown.
	AvailableQty *float64 `json:"available_qty,omitempty"`
	// Partnumbers is a JSON-encoded string list stored as TEXT.
	Partnumbers LocalStringList `gorm:"type:text" json:"partnumbers,omitempty"`
}

// TableName maps LocalPricelistItem to its SQLite table.
func (LocalPricelistItem) TableName() string {
	return "local_pricelist_items"
}
||||||
|
|
||||||
// LocalComponent stores cached components for offline search (metadata only)
// All pricing is now sourced from local_pricelist_items based on configuration pricelist selection
type LocalComponent struct {
	LotName        string `gorm:"primaryKey" json:"lot_name"`
	LotDescription string `json:"lot_description"`
	Category       string `json:"category"`
	Model          string `json:"model"`
}

// TableName maps LocalComponent to its SQLite table.
func (LocalComponent) TableName() string {
	return "local_components"
}
||||||
|
|
||||||
// LocalSyncGuardState stores latest sync readiness decision for UI and preflight checks.
type LocalSyncGuardState struct {
	ID     uint   `gorm:"primaryKey;autoIncrement" json:"id"`
	Status string `gorm:"size:32;not null;index" json:"status"` // ready|blocked|unknown
	// ReasonCode/ReasonText explain a blocked status; empty when ready.
	ReasonCode string `gorm:"size:128" json:"reason_code,omitempty"`
	ReasonText string `gorm:"type:text" json:"reason_text,omitempty"`
	// RequiredMinAppVersion is set when the server demands an upgrade
	// before sync may proceed; nil otherwise.
	RequiredMinAppVersion *string    `gorm:"size:64" json:"required_min_app_version,omitempty"`
	LastCheckedAt         *time.Time `json:"last_checked_at,omitempty"`
	UpdatedAt             time.Time  `json:"updated_at"`
}

// TableName maps LocalSyncGuardState to its SQLite table.
func (LocalSyncGuardState) TableName() string {
	return "local_sync_guard_state"
}
|
|
||||||
|
|
||||||
// PendingChange stores changes that need to be synced to the server
|
// PendingChange stores changes that need to be synced to the server
|
||||||
type PendingChange struct {
|
type PendingChange struct {
|
||||||
ID int64 `gorm:"primaryKey;autoIncrement" json:"id"`
|
ID int64 `gorm:"primaryKey;autoIncrement" json:"id"`
|
||||||
@@ -234,112 +181,3 @@ type PendingChange struct {
|
|||||||
// TableName maps PendingChange to the pending_changes SQLite table.
func (PendingChange) TableName() string {
	return "pending_changes"
}
||||||
|
|
||||||
// LocalPartnumberBook stores a version snapshot of the PN→LOT mapping book (pull-only from PriceForge)
type LocalPartnumberBook struct {
	ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
	// ServerID is the book's id on the PriceForge server; unique locally.
	ServerID  int       `gorm:"uniqueIndex;not null" json:"server_id"`
	Version   string    `gorm:"not null" json:"version"`
	CreatedAt time.Time `gorm:"not null" json:"created_at"`
	IsActive  bool      `gorm:"not null;default:true" json:"is_active"`
	// PartnumbersJSON is the JSON-encoded list of partnumbers contained in
	// this book snapshot (backfilled by migrateLocalPartnumberBookCatalog).
	PartnumbersJSON LocalStringList `gorm:"column:partnumbers_json;type:text" json:"partnumbers_json"`
}

// TableName maps LocalPartnumberBook to its SQLite table.
func (LocalPartnumberBook) TableName() string {
	return "local_partnumber_books"
}
|
|
||||||
|
|
||||||
// LocalPartnumberBookLot is one LOT entry of a partnumber's composition,
// with the quantity of that LOT per one partnumber.
type LocalPartnumberBookLot struct {
	LotName string  `json:"lot_name"`
	Qty     float64 `json:"qty"`
}

// LocalPartnumberBookLots is a JSON-encoded list of LOT entries stored as
// TEXT in SQLite.
type LocalPartnumberBookLots []LocalPartnumberBookLot

// Value implements driver.Valuer by serializing the list to JSON.
func (l LocalPartnumberBookLots) Value() (driver.Value, error) {
	return json.Marshal(l)
}

// Scan implements sql.Scanner. A NULL column yields an empty (non-nil)
// list; TEXT and BLOB payloads are decoded as JSON arrays.
func (l *LocalPartnumberBookLots) Scan(value interface{}) error {
	if value == nil {
		*l = make(LocalPartnumberBookLots, 0)
		return nil
	}
	var raw []byte
	switch v := value.(type) {
	case []byte:
		raw = v
	case string:
		raw = []byte(v)
	default:
		return errors.New("type assertion failed for LocalPartnumberBookLots")
	}
	return json.Unmarshal(raw, l)
}
|
|
||||||
|
|
||||||
// LocalPartnumberBookItem stores the canonical PN composition pulled from PriceForge.
type LocalPartnumberBookItem struct {
	ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
	// Partnumber is the catalog key; the migration enforces uniqueness via
	// idx_local_partnumber_book_items_partnumber.
	Partnumber string `gorm:"not null" json:"partnumber"`
	// LotsJSON is the JSON-encoded LOT composition of this partnumber.
	LotsJSON    LocalPartnumberBookLots `gorm:"column:lots_json;type:text" json:"lots_json"`
	Description string                  `json:"description,omitempty"`
}

// TableName maps LocalPartnumberBookItem to its SQLite table.
func (LocalPartnumberBookItem) TableName() string {
	return "local_partnumber_book_items"
}
|
|
||||||
|
|
||||||
// VendorSpecItem represents a single row in a vendor BOM specification
type VendorSpecItem struct {
	SortOrder        int    `json:"sort_order"`
	VendorPartnumber string `json:"vendor_partnumber"`
	Quantity         int    `json:"quantity"`
	Description      string `json:"description,omitempty"`
	// Prices are optional; nil when not yet priced.
	UnitPrice  *float64 `json:"unit_price,omitempty"`
	TotalPrice *float64 `json:"total_price,omitempty"`
	// LOT resolution results for this vendor PN.
	ResolvedLotName     string `json:"resolved_lot_name,omitempty"`
	ResolutionSource    string `json:"resolution_source,omitempty"` // "book", "manual", "unresolved"
	ManualLotSuggestion string `json:"manual_lot_suggestion,omitempty"`
	LotQtyPerPN         int    `json:"lot_qty_per_pn,omitempty"`
	// LotAllocations and LotMappings carry the per-PN LOT breakdown; see
	// VendorSpecLotMapping for the canonical persisted form.
	LotAllocations []VendorSpecLotAllocation `json:"lot_allocations,omitempty"`
	LotMappings    []VendorSpecLotMapping    `json:"lot_mappings,omitempty"`
}
|
|
||||||
|
|
||||||
// VendorSpecLotAllocation records how many units of a LOT one vendor
// partnumber maps to.
type VendorSpecLotAllocation struct {
	LotName  string `json:"lot_name"`
	Quantity int    `json:"quantity"` // quantity of LOT per 1 vendor PN
}
|
||||||
|
|
||||||
// VendorSpecLotMapping is the canonical persisted LOT mapping for a vendor PN row.
// It stores all mapped LOTs (base + bundle) uniformly.
type VendorSpecLotMapping struct {
	LotName       string `json:"lot_name"`
	QuantityPerPN int    `json:"quantity_per_pn"`
}
}
|
|
||||||
|
|
||||||
// VendorSpec is a JSON-encodable slice of VendorSpecItem
|
|
||||||
type VendorSpec []VendorSpecItem
|
|
||||||
|
|
||||||
func (v VendorSpec) Value() (driver.Value, error) {
|
|
||||||
if v == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return json.Marshal(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *VendorSpec) Scan(value interface{}) error {
|
|
||||||
if value == nil {
|
|
||||||
*v = nil
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var bytes []byte
|
|
||||||
switch val := value.(type) {
|
|
||||||
case []byte:
|
|
||||||
bytes = val
|
|
||||||
case string:
|
|
||||||
bytes = []byte(val)
|
|
||||||
default:
|
|
||||||
return errors.New("type assertion failed for VendorSpec")
|
|
||||||
}
|
|
||||||
return json.Unmarshal(bytes, v)
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,128 +0,0 @@
|
|||||||
package localdb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestGetLatestLocalPricelistBySource_SkipsPricelistWithoutItems verifies
// that the "latest pricelist" lookup skips a newer pricelist that has no
// items and returns the most recent one that does.
func TestGetLatestLocalPricelistBySource_SkipsPricelistWithoutItems(t *testing.T) {
	local, err := New(filepath.Join(t.TempDir(), "latest_without_items.db"))
	if err != nil {
		t.Fatalf("open localdb: %v", err)
	}
	t.Cleanup(func() { _ = local.Close() })

	// Older pricelist that DOES have items — the expected winner.
	base := time.Now().Add(-time.Minute)
	withItems := &LocalPricelist{
		ServerID:  1001,
		Source:    "estimate",
		Version:   "E-1",
		Name:      "with-items",
		CreatedAt: base,
		SyncedAt:  base,
	}
	if err := local.SaveLocalPricelist(withItems); err != nil {
		t.Fatalf("save pricelist with items: %v", err)
	}
	// Reload to obtain the locally assigned primary key for the items FK.
	storedWithItems, err := local.GetLocalPricelistByServerID(withItems.ServerID)
	if err != nil {
		t.Fatalf("load pricelist with items: %v", err)
	}
	if err := local.SaveLocalPricelistItems([]LocalPricelistItem{
		{
			PricelistID: storedWithItems.ID,
			LotName:     "CPU_A",
			Price:       100,
		},
	}); err != nil {
		t.Fatalf("save pricelist items: %v", err)
	}

	// Newer pricelist intentionally left without any items.
	withoutItems := &LocalPricelist{
		ServerID:  1002,
		Source:    "estimate",
		Version:   "E-2",
		Name:      "without-items",
		CreatedAt: base.Add(2 * time.Second),
		SyncedAt:  base.Add(2 * time.Second),
	}
	if err := local.SaveLocalPricelist(withoutItems); err != nil {
		t.Fatalf("save pricelist without items: %v", err)
	}

	got, err := local.GetLatestLocalPricelistBySource("estimate")
	if err != nil {
		t.Fatalf("GetLatestLocalPricelistBySource: %v", err)
	}
	if got.ServerID != withItems.ServerID {
		t.Fatalf("expected server_id=%d, got %d", withItems.ServerID, got.ServerID)
	}
}
|
|
||||||
|
|
||||||
// TestGetLatestLocalPricelistBySource_TieBreaksByID verifies that when two
// pricelists of the same source share the same timestamps, the lookup
// prefers the later-inserted one (higher id).
func TestGetLatestLocalPricelistBySource_TieBreaksByID(t *testing.T) {
	local, err := New(filepath.Join(t.TempDir(), "latest_tie_break.db"))
	if err != nil {
		t.Fatalf("open localdb: %v", err)
	}
	t.Cleanup(func() { _ = local.Close() })

	// Two pricelists with identical CreatedAt/SyncedAt — only insertion
	// order (id) distinguishes them.
	base := time.Now().Add(-time.Minute)
	first := &LocalPricelist{
		ServerID:  2001,
		Source:    "warehouse",
		Version:   "S-1",
		Name:      "first",
		CreatedAt: base,
		SyncedAt:  base,
	}
	if err := local.SaveLocalPricelist(first); err != nil {
		t.Fatalf("save first pricelist: %v", err)
	}
	storedFirst, err := local.GetLocalPricelistByServerID(first.ServerID)
	if err != nil {
		t.Fatalf("load first pricelist: %v", err)
	}
	if err := local.SaveLocalPricelistItems([]LocalPricelistItem{
		{
			PricelistID: storedFirst.ID,
			LotName:     "CPU_A",
			Price:       101,
		},
	}); err != nil {
		t.Fatalf("save first items: %v", err)
	}

	second := &LocalPricelist{
		ServerID:  2002,
		Source:    "warehouse",
		Version:   "S-2",
		Name:      "second",
		CreatedAt: base,
		SyncedAt:  base,
	}
	if err := local.SaveLocalPricelist(second); err != nil {
		t.Fatalf("save second pricelist: %v", err)
	}
	storedSecond, err := local.GetLocalPricelistByServerID(second.ServerID)
	if err != nil {
		t.Fatalf("load second pricelist: %v", err)
	}
	if err := local.SaveLocalPricelistItems([]LocalPricelistItem{
		{
			PricelistID: storedSecond.ID,
			LotName:     "CPU_A",
			Price:       102,
		},
	}); err != nil {
		t.Fatalf("save second items: %v", err)
	}

	// The later insert (higher id) must win the timestamp tie.
	got, err := local.GetLatestLocalPricelistBySource("warehouse")
	if err != nil {
		t.Fatalf("GetLatestLocalPricelistBySource: %v", err)
	}
	if got.ServerID != second.ServerID {
		t.Fatalf("expected server_id=%d, got %d", second.ServerID, got.ServerID)
	}
}
|
|
||||||
@@ -1,53 +0,0 @@
|
|||||||
package localdb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestSaveProjectPreservingUpdatedAtKeepsProvidedTimestamp(t *testing.T) {
|
|
||||||
dbPath := filepath.Join(t.TempDir(), "project_sync_timestamp.db")
|
|
||||||
|
|
||||||
local, err := New(dbPath)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("open localdb: %v", err)
|
|
||||||
}
|
|
||||||
t.Cleanup(func() { _ = local.Close() })
|
|
||||||
|
|
||||||
createdAt := time.Date(2026, 2, 1, 10, 0, 0, 0, time.UTC)
|
|
||||||
updatedAt := time.Date(2026, 2, 3, 12, 30, 0, 0, time.UTC)
|
|
||||||
project := &LocalProject{
|
|
||||||
UUID: "project-1",
|
|
||||||
OwnerUsername: "tester",
|
|
||||||
Code: "OPS-1",
|
|
||||||
Variant: "Lenovo",
|
|
||||||
IsActive: true,
|
|
||||||
CreatedAt: createdAt,
|
|
||||||
UpdatedAt: updatedAt,
|
|
||||||
SyncStatus: "synced",
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := local.SaveProjectPreservingUpdatedAt(project); err != nil {
|
|
||||||
t.Fatalf("save project: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
syncedAt := time.Date(2026, 3, 16, 8, 45, 0, 0, time.UTC)
|
|
||||||
project.SyncedAt = &syncedAt
|
|
||||||
project.SyncStatus = "synced"
|
|
||||||
|
|
||||||
if err := local.SaveProjectPreservingUpdatedAt(project); err != nil {
|
|
||||||
t.Fatalf("save project second time: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
stored, err := local.GetProjectByUUID(project.UUID)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("get project: %v", err)
|
|
||||||
}
|
|
||||||
if !stored.UpdatedAt.Equal(updatedAt) {
|
|
||||||
t.Fatalf("updated_at changed during sync save: got %s want %s", stored.UpdatedAt, updatedAt)
|
|
||||||
}
|
|
||||||
if stored.SyncedAt == nil || !stored.SyncedAt.Equal(syncedAt) {
|
|
||||||
t.Fatalf("synced_at not updated correctly: got %+v want %s", stored.SyncedAt, syncedAt)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -3,43 +3,33 @@ package localdb
|
|||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sort"
|
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// BuildConfigurationSnapshot serializes the full local configuration state.
|
// BuildConfigurationSnapshot serializes the full local configuration state.
|
||||||
func BuildConfigurationSnapshot(localCfg *LocalConfiguration) (string, error) {
|
func BuildConfigurationSnapshot(localCfg *LocalConfiguration) (string, error) {
|
||||||
snapshot := map[string]interface{}{
|
snapshot := map[string]interface{}{
|
||||||
"id": localCfg.ID,
|
"id": localCfg.ID,
|
||||||
"uuid": localCfg.UUID,
|
"uuid": localCfg.UUID,
|
||||||
"server_id": localCfg.ServerID,
|
"server_id": localCfg.ServerID,
|
||||||
"project_uuid": localCfg.ProjectUUID,
|
"project_uuid": localCfg.ProjectUUID,
|
||||||
"current_version_id": localCfg.CurrentVersionID,
|
"current_version_id": localCfg.CurrentVersionID,
|
||||||
"is_active": localCfg.IsActive,
|
"is_active": localCfg.IsActive,
|
||||||
"name": localCfg.Name,
|
"name": localCfg.Name,
|
||||||
"items": localCfg.Items,
|
"items": localCfg.Items,
|
||||||
"total_price": localCfg.TotalPrice,
|
"total_price": localCfg.TotalPrice,
|
||||||
"custom_price": localCfg.CustomPrice,
|
"custom_price": localCfg.CustomPrice,
|
||||||
"notes": localCfg.Notes,
|
"notes": localCfg.Notes,
|
||||||
"is_template": localCfg.IsTemplate,
|
"is_template": localCfg.IsTemplate,
|
||||||
"server_count": localCfg.ServerCount,
|
"server_count": localCfg.ServerCount,
|
||||||
"server_model": localCfg.ServerModel,
|
"pricelist_id": localCfg.PricelistID,
|
||||||
"support_code": localCfg.SupportCode,
|
"price_updated_at": localCfg.PriceUpdatedAt,
|
||||||
"article": localCfg.Article,
|
"created_at": localCfg.CreatedAt,
|
||||||
"pricelist_id": localCfg.PricelistID,
|
"updated_at": localCfg.UpdatedAt,
|
||||||
"warehouse_pricelist_id": localCfg.WarehousePricelistID,
|
"synced_at": localCfg.SyncedAt,
|
||||||
"competitor_pricelist_id": localCfg.CompetitorPricelistID,
|
"sync_status": localCfg.SyncStatus,
|
||||||
"disable_price_refresh": localCfg.DisablePriceRefresh,
|
"original_user_id": localCfg.OriginalUserID,
|
||||||
"only_in_stock": localCfg.OnlyInStock,
|
"original_username": localCfg.OriginalUsername,
|
||||||
"vendor_spec": localCfg.VendorSpec,
|
|
||||||
"line": localCfg.Line,
|
|
||||||
"price_updated_at": localCfg.PriceUpdatedAt,
|
|
||||||
"created_at": localCfg.CreatedAt,
|
|
||||||
"updated_at": localCfg.UpdatedAt,
|
|
||||||
"synced_at": localCfg.SyncedAt,
|
|
||||||
"sync_status": localCfg.SyncStatus,
|
|
||||||
"original_user_id": localCfg.OriginalUserID,
|
|
||||||
"original_username": localCfg.OriginalUsername,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
data, err := json.Marshal(snapshot)
|
data, err := json.Marshal(snapshot)
|
||||||
@@ -52,28 +42,19 @@ func BuildConfigurationSnapshot(localCfg *LocalConfiguration) (string, error) {
|
|||||||
// DecodeConfigurationSnapshot returns editable fields from one saved snapshot.
|
// DecodeConfigurationSnapshot returns editable fields from one saved snapshot.
|
||||||
func DecodeConfigurationSnapshot(data string) (*LocalConfiguration, error) {
|
func DecodeConfigurationSnapshot(data string) (*LocalConfiguration, error) {
|
||||||
var snapshot struct {
|
var snapshot struct {
|
||||||
ProjectUUID *string `json:"project_uuid"`
|
ProjectUUID *string `json:"project_uuid"`
|
||||||
IsActive *bool `json:"is_active"`
|
IsActive *bool `json:"is_active"`
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Items LocalConfigItems `json:"items"`
|
Items LocalConfigItems `json:"items"`
|
||||||
TotalPrice *float64 `json:"total_price"`
|
TotalPrice *float64 `json:"total_price"`
|
||||||
CustomPrice *float64 `json:"custom_price"`
|
CustomPrice *float64 `json:"custom_price"`
|
||||||
Notes string `json:"notes"`
|
Notes string `json:"notes"`
|
||||||
IsTemplate bool `json:"is_template"`
|
IsTemplate bool `json:"is_template"`
|
||||||
ServerCount int `json:"server_count"`
|
ServerCount int `json:"server_count"`
|
||||||
ServerModel string `json:"server_model"`
|
PricelistID *uint `json:"pricelist_id"`
|
||||||
SupportCode string `json:"support_code"`
|
PriceUpdatedAt *time.Time `json:"price_updated_at"`
|
||||||
Article string `json:"article"`
|
OriginalUserID uint `json:"original_user_id"`
|
||||||
PricelistID *uint `json:"pricelist_id"`
|
OriginalUsername string `json:"original_username"`
|
||||||
WarehousePricelistID *uint `json:"warehouse_pricelist_id"`
|
|
||||||
CompetitorPricelistID *uint `json:"competitor_pricelist_id"`
|
|
||||||
DisablePriceRefresh bool `json:"disable_price_refresh"`
|
|
||||||
OnlyInStock bool `json:"only_in_stock"`
|
|
||||||
VendorSpec VendorSpec `json:"vendor_spec"`
|
|
||||||
Line int `json:"line"`
|
|
||||||
PriceUpdatedAt *time.Time `json:"price_updated_at"`
|
|
||||||
OriginalUserID uint `json:"original_user_id"`
|
|
||||||
OriginalUsername string `json:"original_username"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := json.Unmarshal([]byte(data), &snapshot); err != nil {
|
if err := json.Unmarshal([]byte(data), &snapshot); err != nil {
|
||||||
@@ -86,87 +67,18 @@ func DecodeConfigurationSnapshot(data string) (*LocalConfiguration, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return &LocalConfiguration{
|
return &LocalConfiguration{
|
||||||
IsActive: isActive,
|
IsActive: isActive,
|
||||||
ProjectUUID: snapshot.ProjectUUID,
|
ProjectUUID: snapshot.ProjectUUID,
|
||||||
Name: snapshot.Name,
|
Name: snapshot.Name,
|
||||||
Items: snapshot.Items,
|
Items: snapshot.Items,
|
||||||
TotalPrice: snapshot.TotalPrice,
|
TotalPrice: snapshot.TotalPrice,
|
||||||
CustomPrice: snapshot.CustomPrice,
|
CustomPrice: snapshot.CustomPrice,
|
||||||
Notes: snapshot.Notes,
|
Notes: snapshot.Notes,
|
||||||
IsTemplate: snapshot.IsTemplate,
|
IsTemplate: snapshot.IsTemplate,
|
||||||
ServerCount: snapshot.ServerCount,
|
ServerCount: snapshot.ServerCount,
|
||||||
ServerModel: snapshot.ServerModel,
|
PricelistID: snapshot.PricelistID,
|
||||||
SupportCode: snapshot.SupportCode,
|
PriceUpdatedAt: snapshot.PriceUpdatedAt,
|
||||||
Article: snapshot.Article,
|
OriginalUserID: snapshot.OriginalUserID,
|
||||||
PricelistID: snapshot.PricelistID,
|
OriginalUsername: snapshot.OriginalUsername,
|
||||||
WarehousePricelistID: snapshot.WarehousePricelistID,
|
|
||||||
CompetitorPricelistID: snapshot.CompetitorPricelistID,
|
|
||||||
DisablePriceRefresh: snapshot.DisablePriceRefresh,
|
|
||||||
OnlyInStock: snapshot.OnlyInStock,
|
|
||||||
VendorSpec: snapshot.VendorSpec,
|
|
||||||
Line: snapshot.Line,
|
|
||||||
PriceUpdatedAt: snapshot.PriceUpdatedAt,
|
|
||||||
OriginalUserID: snapshot.OriginalUserID,
|
|
||||||
OriginalUsername: snapshot.OriginalUsername,
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type configurationSpecPriceFingerprint struct {
|
|
||||||
Items []configurationSpecPriceFingerprintItem `json:"items"`
|
|
||||||
ServerCount int `json:"server_count"`
|
|
||||||
TotalPrice *float64 `json:"total_price,omitempty"`
|
|
||||||
CustomPrice *float64 `json:"custom_price,omitempty"`
|
|
||||||
PricelistID *uint `json:"pricelist_id,omitempty"`
|
|
||||||
WarehousePricelistID *uint `json:"warehouse_pricelist_id,omitempty"`
|
|
||||||
CompetitorPricelistID *uint `json:"competitor_pricelist_id,omitempty"`
|
|
||||||
DisablePriceRefresh bool `json:"disable_price_refresh"`
|
|
||||||
OnlyInStock bool `json:"only_in_stock"`
|
|
||||||
VendorSpec VendorSpec `json:"vendor_spec,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type configurationSpecPriceFingerprintItem struct {
|
|
||||||
LotName string `json:"lot_name"`
|
|
||||||
Quantity int `json:"quantity"`
|
|
||||||
UnitPrice float64 `json:"unit_price"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// BuildConfigurationSpecPriceFingerprint returns a stable JSON key based on
|
|
||||||
// spec + price fields only, used for revision deduplication.
|
|
||||||
func BuildConfigurationSpecPriceFingerprint(localCfg *LocalConfiguration) (string, error) {
|
|
||||||
items := make([]configurationSpecPriceFingerprintItem, 0, len(localCfg.Items))
|
|
||||||
for _, item := range localCfg.Items {
|
|
||||||
items = append(items, configurationSpecPriceFingerprintItem{
|
|
||||||
LotName: item.LotName,
|
|
||||||
Quantity: item.Quantity,
|
|
||||||
UnitPrice: item.UnitPrice,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
sort.Slice(items, func(i, j int) bool {
|
|
||||||
if items[i].LotName != items[j].LotName {
|
|
||||||
return items[i].LotName < items[j].LotName
|
|
||||||
}
|
|
||||||
if items[i].Quantity != items[j].Quantity {
|
|
||||||
return items[i].Quantity < items[j].Quantity
|
|
||||||
}
|
|
||||||
return items[i].UnitPrice < items[j].UnitPrice
|
|
||||||
})
|
|
||||||
|
|
||||||
payload := configurationSpecPriceFingerprint{
|
|
||||||
Items: items,
|
|
||||||
ServerCount: localCfg.ServerCount,
|
|
||||||
TotalPrice: localCfg.TotalPrice,
|
|
||||||
CustomPrice: localCfg.CustomPrice,
|
|
||||||
PricelistID: localCfg.PricelistID,
|
|
||||||
WarehousePricelistID: localCfg.WarehousePricelistID,
|
|
||||||
CompetitorPricelistID: localCfg.CompetitorPricelistID,
|
|
||||||
DisablePriceRefresh: localCfg.DisablePriceRefresh,
|
|
||||||
OnlyInStock: localCfg.OnlyInStock,
|
|
||||||
VendorSpec: localCfg.VendorSpec,
|
|
||||||
}
|
|
||||||
|
|
||||||
raw, err := json.Marshal(payload)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("marshal spec+price fingerprint: %w", err)
|
|
||||||
}
|
|
||||||
return string(raw), nil
|
|
||||||
}
|
|
||||||
|
|||||||
110
internal/middleware/auth.go
Normal file
110
internal/middleware/auth.go
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
package middleware
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
AuthUserKey = "auth_user"
|
||||||
|
AuthClaimsKey = "auth_claims"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Auth(authService *services.AuthService) gin.HandlerFunc {
|
||||||
|
return func(c *gin.Context) {
|
||||||
|
authHeader := c.GetHeader("Authorization")
|
||||||
|
if authHeader == "" {
|
||||||
|
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
|
||||||
|
"error": "authorization header required",
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
parts := strings.SplitN(authHeader, " ", 2)
|
||||||
|
if len(parts) != 2 || parts[0] != "Bearer" {
|
||||||
|
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
|
||||||
|
"error": "invalid authorization header format",
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
claims, err := authService.ValidateToken(parts[1])
|
||||||
|
if err != nil {
|
||||||
|
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
|
||||||
|
"error": err.Error(),
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Set(AuthClaimsKey, claims)
|
||||||
|
c.Next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func RequireRole(roles ...models.UserRole) gin.HandlerFunc {
|
||||||
|
return func(c *gin.Context) {
|
||||||
|
claims, exists := c.Get(AuthClaimsKey)
|
||||||
|
if !exists {
|
||||||
|
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
|
||||||
|
"error": "authentication required",
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
authClaims := claims.(*services.Claims)
|
||||||
|
|
||||||
|
for _, role := range roles {
|
||||||
|
if authClaims.Role == role {
|
||||||
|
c.Next()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
c.AbortWithStatusJSON(http.StatusForbidden, gin.H{
|
||||||
|
"error": "insufficient permissions",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func RequireEditor() gin.HandlerFunc {
|
||||||
|
return RequireRole(models.RoleEditor, models.RolePricingAdmin, models.RoleAdmin)
|
||||||
|
}
|
||||||
|
|
||||||
|
func RequirePricingAdmin() gin.HandlerFunc {
|
||||||
|
return RequireRole(models.RolePricingAdmin, models.RoleAdmin)
|
||||||
|
}
|
||||||
|
|
||||||
|
func RequireAdmin() gin.HandlerFunc {
|
||||||
|
return RequireRole(models.RoleAdmin)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetClaims extracts auth claims from context
|
||||||
|
func GetClaims(c *gin.Context) *services.Claims {
|
||||||
|
claims, exists := c.Get(AuthClaimsKey)
|
||||||
|
if !exists {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return claims.(*services.Claims)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUserID extracts user ID from context
|
||||||
|
func GetUserID(c *gin.Context) uint {
|
||||||
|
claims := GetClaims(c)
|
||||||
|
if claims == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return claims.UserID
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUsername extracts username from context
|
||||||
|
func GetUsername(c *gin.Context) string {
|
||||||
|
claims := GetClaims(c)
|
||||||
|
if claims == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return claims.Username
|
||||||
|
}
|
||||||
@@ -1,55 +1,22 @@
|
|||||||
package middleware
|
package middleware
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
)
|
)
|
||||||
|
|
||||||
func CORS() gin.HandlerFunc {
|
func CORS() gin.HandlerFunc {
|
||||||
return func(c *gin.Context) {
|
return func(c *gin.Context) {
|
||||||
origin := strings.TrimSpace(c.GetHeader("Origin"))
|
c.Header("Access-Control-Allow-Origin", "*")
|
||||||
if origin != "" {
|
c.Header("Access-Control-Allow-Methods", "GET, POST, PUT, PATCH, DELETE, OPTIONS")
|
||||||
if isLoopbackOrigin(origin) {
|
c.Header("Access-Control-Allow-Headers", "Origin, Content-Type, Accept, Authorization")
|
||||||
c.Header("Access-Control-Allow-Origin", origin)
|
c.Header("Access-Control-Expose-Headers", "Content-Length, Content-Disposition")
|
||||||
c.Header("Vary", "Origin")
|
c.Header("Access-Control-Max-Age", "86400")
|
||||||
c.Header("Access-Control-Allow-Methods", "GET, POST, PUT, PATCH, DELETE, OPTIONS")
|
|
||||||
c.Header("Access-Control-Allow-Headers", "Origin, Content-Type, Accept, Authorization")
|
|
||||||
c.Header("Access-Control-Expose-Headers", "Content-Length, Content-Disposition")
|
|
||||||
c.Header("Access-Control-Max-Age", "86400")
|
|
||||||
} else if c.Request.Method == http.MethodOptions {
|
|
||||||
c.AbortWithStatus(http.StatusForbidden)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.Request.Method == http.MethodOptions {
|
if c.Request.Method == "OPTIONS" {
|
||||||
c.AbortWithStatus(http.StatusNoContent)
|
c.AbortWithStatus(204)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
c.Next()
|
c.Next()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func isLoopbackOrigin(origin string) bool {
|
|
||||||
u, err := url.Parse(origin)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if u.Scheme != "http" && u.Scheme != "https" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
host := strings.TrimSpace(u.Hostname())
|
|
||||||
if host == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if strings.EqualFold(host, "localhost") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
ip := net.ParseIP(host)
|
|
||||||
return ip != nil && ip.IsLoopback()
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -39,85 +39,28 @@ func (c ConfigItems) Total() float64 {
|
|||||||
return total
|
return total
|
||||||
}
|
}
|
||||||
|
|
||||||
type VendorSpecLotAllocation struct {
|
|
||||||
LotName string `json:"lot_name"`
|
|
||||||
Quantity int `json:"quantity"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type VendorSpecLotMapping struct {
|
|
||||||
LotName string `json:"lot_name"`
|
|
||||||
QuantityPerPN int `json:"quantity_per_pn"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type VendorSpecItem struct {
|
|
||||||
SortOrder int `json:"sort_order"`
|
|
||||||
VendorPartnumber string `json:"vendor_partnumber"`
|
|
||||||
Quantity int `json:"quantity"`
|
|
||||||
Description string `json:"description,omitempty"`
|
|
||||||
UnitPrice *float64 `json:"unit_price,omitempty"`
|
|
||||||
TotalPrice *float64 `json:"total_price,omitempty"`
|
|
||||||
ResolvedLotName string `json:"resolved_lot_name,omitempty"`
|
|
||||||
ResolutionSource string `json:"resolution_source,omitempty"`
|
|
||||||
ManualLotSuggestion string `json:"manual_lot_suggestion,omitempty"`
|
|
||||||
LotQtyPerPN int `json:"lot_qty_per_pn,omitempty"`
|
|
||||||
LotAllocations []VendorSpecLotAllocation `json:"lot_allocations,omitempty"`
|
|
||||||
LotMappings []VendorSpecLotMapping `json:"lot_mappings,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type VendorSpec []VendorSpecItem
|
|
||||||
|
|
||||||
func (v VendorSpec) Value() (driver.Value, error) {
|
|
||||||
if v == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return json.Marshal(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *VendorSpec) Scan(value interface{}) error {
|
|
||||||
if value == nil {
|
|
||||||
*v = nil
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var bytes []byte
|
|
||||||
switch val := value.(type) {
|
|
||||||
case []byte:
|
|
||||||
bytes = val
|
|
||||||
case string:
|
|
||||||
bytes = []byte(val)
|
|
||||||
default:
|
|
||||||
return errors.New("type assertion failed for VendorSpec")
|
|
||||||
}
|
|
||||||
return json.Unmarshal(bytes, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
type Configuration struct {
|
type Configuration struct {
|
||||||
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
|
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
|
||||||
UUID string `gorm:"size:36;uniqueIndex;not null" json:"uuid"`
|
UUID string `gorm:"size:36;uniqueIndex;not null" json:"uuid"`
|
||||||
UserID *uint `json:"user_id,omitempty"` // Legacy field, no longer required for ownership
|
UserID *uint `json:"user_id,omitempty"` // Legacy field, no longer required for ownership
|
||||||
OwnerUsername string `gorm:"size:100;not null;default:'';index" json:"owner_username"`
|
OwnerUsername string `gorm:"size:100;not null;default:'';index" json:"owner_username"`
|
||||||
ProjectUUID *string `gorm:"size:36;index" json:"project_uuid,omitempty"`
|
ProjectUUID *string `gorm:"size:36;index" json:"project_uuid,omitempty"`
|
||||||
AppVersion string `gorm:"size:64" json:"app_version,omitempty"`
|
AppVersion string `gorm:"size:64" json:"app_version,omitempty"`
|
||||||
Name string `gorm:"size:200;not null" json:"name"`
|
Name string `gorm:"size:200;not null" json:"name"`
|
||||||
Items ConfigItems `gorm:"type:json;not null" json:"items"`
|
Items ConfigItems `gorm:"type:json;not null" json:"items"`
|
||||||
TotalPrice *float64 `gorm:"type:decimal(12,2)" json:"total_price"`
|
TotalPrice *float64 `gorm:"type:decimal(12,2)" json:"total_price"`
|
||||||
CustomPrice *float64 `gorm:"type:decimal(12,2)" json:"custom_price"`
|
CustomPrice *float64 `gorm:"type:decimal(12,2)" json:"custom_price"`
|
||||||
Notes string `gorm:"type:text" json:"notes"`
|
Notes string `gorm:"type:text" json:"notes"`
|
||||||
IsTemplate bool `gorm:"default:false" json:"is_template"`
|
IsTemplate bool `gorm:"default:false" json:"is_template"`
|
||||||
ServerCount int `gorm:"default:1" json:"server_count"`
|
ServerCount int `gorm:"default:1" json:"server_count"`
|
||||||
ServerModel string `gorm:"size:100" json:"server_model,omitempty"`
|
PricelistID *uint `gorm:"index" json:"pricelist_id,omitempty"`
|
||||||
SupportCode string `gorm:"size:20" json:"support_code,omitempty"`
|
WarehousePricelistID *uint `gorm:"index" json:"warehouse_pricelist_id,omitempty"`
|
||||||
Article string `gorm:"size:80" json:"article,omitempty"`
|
CompetitorPricelistID *uint `gorm:"index" json:"competitor_pricelist_id,omitempty"`
|
||||||
PricelistID *uint `gorm:"index" json:"pricelist_id,omitempty"`
|
DisablePriceRefresh bool `gorm:"default:false" json:"disable_price_refresh"`
|
||||||
WarehousePricelistID *uint `gorm:"index" json:"warehouse_pricelist_id,omitempty"`
|
PriceUpdatedAt *time.Time `gorm:"type:timestamp" json:"price_updated_at,omitempty"`
|
||||||
CompetitorPricelistID *uint `gorm:"index" json:"competitor_pricelist_id,omitempty"`
|
CreatedAt time.Time `gorm:"autoCreateTime" json:"created_at"`
|
||||||
VendorSpec VendorSpec `gorm:"type:json" json:"vendor_spec,omitempty"`
|
|
||||||
ConfigType string `gorm:"size:20;default:server" json:"config_type"` // "server" | "storage"
|
User *User `gorm:"foreignKey:UserID" json:"user,omitempty"`
|
||||||
DisablePriceRefresh bool `gorm:"default:false" json:"disable_price_refresh"`
|
|
||||||
OnlyInStock bool `gorm:"default:false" json:"only_in_stock"`
|
|
||||||
Line int `gorm:"column:line_no;index" json:"line"`
|
|
||||||
PriceUpdatedAt *time.Time `gorm:"type:timestamp" json:"price_updated_at,omitempty"`
|
|
||||||
CreatedAt time.Time `gorm:"autoCreateTime" json:"created_at"`
|
|
||||||
CurrentVersionNo int `gorm:"-" json:"current_version_no,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (Configuration) TableName() string {
|
func (Configuration) TableName() string {
|
||||||
@@ -132,6 +75,8 @@ type PriceOverride struct {
|
|||||||
ValidUntil *time.Time `gorm:"type:date" json:"valid_until"`
|
ValidUntil *time.Time `gorm:"type:date" json:"valid_until"`
|
||||||
Reason string `gorm:"type:text" json:"reason"`
|
Reason string `gorm:"type:text" json:"reason"`
|
||||||
CreatedBy uint `gorm:"not null" json:"created_by"`
|
CreatedBy uint `gorm:"not null" json:"created_by"`
|
||||||
|
|
||||||
|
Creator *User `gorm:"foreignKey:CreatedBy" json:"creator,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PriceOverride) TableName() string {
|
func (PriceOverride) TableName() string {
|
||||||
|
|||||||
@@ -37,33 +37,3 @@ type Supplier struct {
|
|||||||
func (Supplier) TableName() string {
|
func (Supplier) TableName() string {
|
||||||
return "supplier"
|
return "supplier"
|
||||||
}
|
}
|
||||||
|
|
||||||
// StockLog stores warehouse stock snapshots imported from external files.
|
|
||||||
type StockLog struct {
|
|
||||||
StockLogID uint `gorm:"column:stock_log_id;primaryKey;autoIncrement"`
|
|
||||||
Partnumber string `gorm:"column:partnumber;size:255;not null"`
|
|
||||||
Supplier *string `gorm:"column:supplier;size:255"`
|
|
||||||
Date time.Time `gorm:"column:date;type:date;not null"`
|
|
||||||
Price float64 `gorm:"column:price;not null"`
|
|
||||||
Quality *string `gorm:"column:quality;size:255"`
|
|
||||||
Comments *string `gorm:"column:comments;size:15000"`
|
|
||||||
Vendor *string `gorm:"column:vendor;size:255"`
|
|
||||||
Qty *float64 `gorm:"column:qty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (StockLog) TableName() string {
|
|
||||||
return "stock_log"
|
|
||||||
}
|
|
||||||
|
|
||||||
// StockIgnoreRule contains import ignore pattern rules.
|
|
||||||
type StockIgnoreRule struct {
|
|
||||||
ID uint `gorm:"column:id;primaryKey;autoIncrement" json:"id"`
|
|
||||||
Target string `gorm:"column:target;size:20;not null" json:"target"` // partnumber|description
|
|
||||||
MatchType string `gorm:"column:match_type;size:20;not null" json:"match_type"` // exact|prefix|suffix
|
|
||||||
Pattern string `gorm:"column:pattern;size:500;not null" json:"pattern"`
|
|
||||||
CreatedAt time.Time `gorm:"column:created_at;autoCreateTime" json:"created_at"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (StockIgnoreRule) TableName() string {
|
|
||||||
return "stock_ignore_rules"
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
// AllModels returns all models for auto-migration
|
// AllModels returns all models for auto-migration
|
||||||
func AllModels() []interface{} {
|
func AllModels() []interface{} {
|
||||||
return []interface{}{
|
return []interface{}{
|
||||||
|
&User{},
|
||||||
&Category{},
|
&Category{},
|
||||||
&LotMetadata{},
|
&LotMetadata{},
|
||||||
&Project{},
|
&Project{},
|
||||||
@@ -31,9 +32,7 @@ func Migrate(db *gorm.DB) error {
|
|||||||
errStr := err.Error()
|
errStr := err.Error()
|
||||||
if strings.Contains(errStr, "Can't DROP") ||
|
if strings.Contains(errStr, "Can't DROP") ||
|
||||||
strings.Contains(errStr, "Duplicate key name") ||
|
strings.Contains(errStr, "Duplicate key name") ||
|
||||||
strings.Contains(errStr, "check that it exists") ||
|
strings.Contains(errStr, "check that it exists") {
|
||||||
strings.Contains(errStr, "Cannot change column") ||
|
|
||||||
strings.Contains(errStr, "used in a foreign key constraint") {
|
|
||||||
slog.Warn("migration warning (skipped)", "model", model, "error", errStr)
|
slog.Warn("migration warning (skipped)", "model", model, "error", errStr)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -53,3 +52,54 @@ func SeedCategories(db *gorm.DB) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SeedAdminUser creates default admin user if not exists
|
||||||
|
// Default credentials: admin / admin123
|
||||||
|
func SeedAdminUser(db *gorm.DB, passwordHash string) error {
|
||||||
|
var count int64
|
||||||
|
db.Model(&User{}).Where("username = ?", "admin").Count(&count)
|
||||||
|
if count > 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
admin := &User{
|
||||||
|
Username: "admin",
|
||||||
|
Email: "admin@example.com",
|
||||||
|
PasswordHash: passwordHash,
|
||||||
|
Role: RoleAdmin,
|
||||||
|
IsActive: true,
|
||||||
|
}
|
||||||
|
return db.Create(admin).Error
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnsureDBUser creates or returns the user corresponding to the database connection username.
|
||||||
|
// This is used when RBAC is disabled - configurations are owned by the DB user.
|
||||||
|
// Returns the user ID that should be used for all operations.
|
||||||
|
func EnsureDBUser(db *gorm.DB, dbUsername string) (uint, error) {
|
||||||
|
if dbUsername == "" {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var user User
|
||||||
|
err := db.Where("username = ?", dbUsername).First(&user).Error
|
||||||
|
if err == nil {
|
||||||
|
return user.ID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// User doesn't exist, create it
|
||||||
|
user = User{
|
||||||
|
Username: dbUsername,
|
||||||
|
Email: dbUsername + "@db.local",
|
||||||
|
PasswordHash: "-", // No password - this is a DB user, not an app user
|
||||||
|
Role: RoleAdmin,
|
||||||
|
IsActive: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := db.Create(&user).Error; err != nil {
|
||||||
|
slog.Error("failed to create DB user", "username", dbUsername, "error", err)
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("created DB user for configurations", "username", dbUsername, "user_id", user.ID)
|
||||||
|
return user.ID, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -55,7 +55,6 @@ type PricelistItem struct {
|
|||||||
ID uint `gorm:"primaryKey" json:"id"`
|
ID uint `gorm:"primaryKey" json:"id"`
|
||||||
PricelistID uint `gorm:"not null;index:idx_pricelist_lot" json:"pricelist_id"`
|
PricelistID uint `gorm:"not null;index:idx_pricelist_lot" json:"pricelist_id"`
|
||||||
LotName string `gorm:"size:255;not null;index:idx_pricelist_lot" json:"lot_name"`
|
LotName string `gorm:"size:255;not null;index:idx_pricelist_lot" json:"lot_name"`
|
||||||
LotCategory string `gorm:"column:lot_category;size:50" json:"lot_category,omitempty"`
|
|
||||||
Price float64 `gorm:"type:decimal(12,2);not null" json:"price"`
|
Price float64 `gorm:"type:decimal(12,2);not null" json:"price"`
|
||||||
PriceMethod string `gorm:"size:20" json:"price_method"`
|
PriceMethod string `gorm:"size:20" json:"price_method"`
|
||||||
|
|
||||||
@@ -66,10 +65,8 @@ type PricelistItem struct {
|
|||||||
MetaPrices string `gorm:"size:1000" json:"meta_prices,omitempty"`
|
MetaPrices string `gorm:"size:1000" json:"meta_prices,omitempty"`
|
||||||
|
|
||||||
// Virtual fields for display
|
// Virtual fields for display
|
||||||
LotDescription string `gorm:"-" json:"lot_description,omitempty"`
|
LotDescription string `gorm:"-" json:"lot_description,omitempty"`
|
||||||
Category string `gorm:"-" json:"category,omitempty"`
|
Category string `gorm:"-" json:"category,omitempty"`
|
||||||
AvailableQty *float64 `gorm:"-" json:"available_qty,omitempty"`
|
|
||||||
Partnumbers []string `gorm:"-" json:"partnumbers,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (PricelistItem) TableName() string {
|
func (PricelistItem) TableName() string {
|
||||||
|
|||||||
@@ -6,9 +6,7 @@ type Project struct {
|
|||||||
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
|
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
|
||||||
UUID string `gorm:"size:36;uniqueIndex;not null" json:"uuid"`
|
UUID string `gorm:"size:36;uniqueIndex;not null" json:"uuid"`
|
||||||
OwnerUsername string `gorm:"size:100;not null;index" json:"owner_username"`
|
OwnerUsername string `gorm:"size:100;not null;index" json:"owner_username"`
|
||||||
Code string `gorm:"size:100;not null;index:idx_qt_projects_code_variant,priority:1" json:"code"`
|
Name string `gorm:"size:200;not null" json:"name"`
|
||||||
Variant string `gorm:"size:100;not null;default:'';index:idx_qt_projects_code_variant,priority:2" json:"variant"`
|
|
||||||
Name *string `gorm:"size:200" json:"name,omitempty"`
|
|
||||||
TrackerURL string `gorm:"size:500" json:"tracker_url"`
|
TrackerURL string `gorm:"size:500" json:"tracker_url"`
|
||||||
IsActive bool `gorm:"default:true;index" json:"is_active"`
|
IsActive bool `gorm:"default:true;index" json:"is_active"`
|
||||||
IsSystem bool `gorm:"default:false;index" json:"is_system"`
|
IsSystem bool `gorm:"default:false;index" json:"is_system"`
|
||||||
|
|||||||
39
internal/models/user.go
Normal file
39
internal/models/user.go
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
package models
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
type UserRole string
|
||||||
|
|
||||||
|
const (
|
||||||
|
RoleViewer UserRole = "viewer"
|
||||||
|
RoleEditor UserRole = "editor"
|
||||||
|
RolePricingAdmin UserRole = "pricing_admin"
|
||||||
|
RoleAdmin UserRole = "admin"
|
||||||
|
)
|
||||||
|
|
||||||
|
type User struct {
|
||||||
|
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
|
||||||
|
Username string `gorm:"size:100;uniqueIndex;not null" json:"username"`
|
||||||
|
Email string `gorm:"size:255;uniqueIndex;not null" json:"email"`
|
||||||
|
PasswordHash string `gorm:"size:255;not null" json:"-"`
|
||||||
|
Role UserRole `gorm:"type:enum('viewer','editor','pricing_admin','admin');default:'viewer'" json:"role"`
|
||||||
|
IsActive bool `gorm:"default:true" json:"is_active"`
|
||||||
|
CreatedAt time.Time `gorm:"autoCreateTime" json:"created_at"`
|
||||||
|
UpdatedAt time.Time `gorm:"autoUpdateTime" json:"updated_at"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (User) TableName() string {
|
||||||
|
return "qt_users"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *User) CanEdit() bool {
|
||||||
|
return u.Role == RoleEditor || u.Role == RolePricingAdmin || u.Role == RoleAdmin
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *User) CanManagePricing() bool {
|
||||||
|
return u.Role == RolePricingAdmin || u.Role == RoleAdmin
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *User) CanManageUsers() bool {
|
||||||
|
return u.Role == RoleAdmin
|
||||||
|
}
|
||||||
@@ -1,8 +1,6 @@
|
|||||||
package repository
|
package repository
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||||
"gorm.io/gorm"
|
"gorm.io/gorm"
|
||||||
)
|
)
|
||||||
@@ -16,13 +14,7 @@ func NewConfigurationRepository(db *gorm.DB) *ConfigurationRepository {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *ConfigurationRepository) Create(config *models.Configuration) error {
|
func (r *ConfigurationRepository) Create(config *models.Configuration) error {
|
||||||
if err := r.db.Create(config).Error; err != nil {
|
return r.db.Create(config).Error
|
||||||
if isUnknownLineNoColumnError(err) {
|
|
||||||
return r.db.Omit("line_no").Create(config).Error
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *ConfigurationRepository) GetByID(id uint) (*models.Configuration, error) {
|
func (r *ConfigurationRepository) GetByID(id uint) (*models.Configuration, error) {
|
||||||
@@ -44,21 +36,7 @@ func (r *ConfigurationRepository) GetByUUID(uuid string) (*models.Configuration,
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *ConfigurationRepository) Update(config *models.Configuration) error {
|
func (r *ConfigurationRepository) Update(config *models.Configuration) error {
|
||||||
if err := r.db.Save(config).Error; err != nil {
|
return r.db.Save(config).Error
|
||||||
if isUnknownLineNoColumnError(err) {
|
|
||||||
return r.db.Omit("line_no").Save(config).Error
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func isUnknownLineNoColumnError(err error) bool {
|
|
||||||
if err == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
msg := strings.ToLower(err.Error())
|
|
||||||
return strings.Contains(msg, "unknown column 'line_no'") || strings.Contains(msg, "no column named line_no")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *ConfigurationRepository) Delete(id uint) error {
|
func (r *ConfigurationRepository) Delete(id uint) error {
|
||||||
|
|||||||
@@ -1,174 +0,0 @@
|
|||||||
package repository
|
|
||||||
|
|
||||||
import (
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
|
||||||
"gorm.io/gorm"
|
|
||||||
"gorm.io/gorm/clause"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PartnumberBookRepository provides read-only access to local partnumber book snapshots.
|
|
||||||
type PartnumberBookRepository struct {
|
|
||||||
db *gorm.DB
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewPartnumberBookRepository(db *gorm.DB) *PartnumberBookRepository {
|
|
||||||
return &PartnumberBookRepository{db: db}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetActiveBook returns the most recently active local partnumber book.
|
|
||||||
func (r *PartnumberBookRepository) GetActiveBook() (*localdb.LocalPartnumberBook, error) {
|
|
||||||
var book localdb.LocalPartnumberBook
|
|
||||||
err := r.db.Where("is_active = 1").Order("created_at DESC, id DESC").First(&book).Error
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &book, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBookItems returns all items for the given local book ID.
|
|
||||||
func (r *PartnumberBookRepository) GetBookItems(bookID uint) ([]localdb.LocalPartnumberBookItem, error) {
|
|
||||||
book, err := r.getBook(bookID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
items, _, err := r.listCatalogItems(book.PartnumbersJSON, "", 0, 0)
|
|
||||||
return items, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBookItemsPage returns items for the given local book ID with optional search and pagination.
|
|
||||||
func (r *PartnumberBookRepository) GetBookItemsPage(bookID uint, search string, page, perPage int) ([]localdb.LocalPartnumberBookItem, int64, error) {
|
|
||||||
if page < 1 {
|
|
||||||
page = 1
|
|
||||||
}
|
|
||||||
if perPage < 1 {
|
|
||||||
perPage = 100
|
|
||||||
}
|
|
||||||
|
|
||||||
book, err := r.getBook(bookID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
return r.listCatalogItems(book.PartnumbersJSON, search, page, perPage)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FindLotByPartnumber looks up a partnumber in the active book and returns the matching items.
|
|
||||||
func (r *PartnumberBookRepository) FindLotByPartnumber(bookID uint, partnumber string) ([]localdb.LocalPartnumberBookItem, error) {
|
|
||||||
book, err := r.getBook(bookID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
found := false
|
|
||||||
for _, pn := range book.PartnumbersJSON {
|
|
||||||
if pn == partnumber {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
var items []localdb.LocalPartnumberBookItem
|
|
||||||
err = r.db.Where("partnumber = ?", partnumber).Find(&items).Error
|
|
||||||
return items, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListBooks returns all local partnumber books ordered newest first.
|
|
||||||
func (r *PartnumberBookRepository) ListBooks() ([]localdb.LocalPartnumberBook, error) {
|
|
||||||
var books []localdb.LocalPartnumberBook
|
|
||||||
err := r.db.Order("created_at DESC, id DESC").Find(&books).Error
|
|
||||||
return books, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// SaveBook saves a new partnumber book snapshot.
|
|
||||||
func (r *PartnumberBookRepository) SaveBook(book *localdb.LocalPartnumberBook) error {
|
|
||||||
return r.db.Save(book).Error
|
|
||||||
}
|
|
||||||
|
|
||||||
// SaveBookItems upserts canonical PN catalog rows.
|
|
||||||
func (r *PartnumberBookRepository) SaveBookItems(items []localdb.LocalPartnumberBookItem) error {
|
|
||||||
if len(items) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return r.db.Clauses(clause.OnConflict{
|
|
||||||
Columns: []clause.Column{{Name: "partnumber"}},
|
|
||||||
DoUpdates: clause.AssignmentColumns([]string{
|
|
||||||
"lots_json",
|
|
||||||
"description",
|
|
||||||
}),
|
|
||||||
}).CreateInBatches(items, 500).Error
|
|
||||||
}
|
|
||||||
|
|
||||||
// CountBookItems returns the number of items for a given local book ID.
|
|
||||||
func (r *PartnumberBookRepository) CountBookItems(bookID uint) int64 {
|
|
||||||
book, err := r.getBook(bookID)
|
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return int64(len(book.PartnumbersJSON))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *PartnumberBookRepository) CountDistinctLots(bookID uint) int64 {
|
|
||||||
items, err := r.GetBookItems(bookID)
|
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
seen := make(map[string]struct{})
|
|
||||||
for _, item := range items {
|
|
||||||
for _, lot := range item.LotsJSON {
|
|
||||||
if lot.LotName == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
seen[lot.LotName] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return int64(len(seen))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *PartnumberBookRepository) HasAllBookItems(bookID uint) bool {
|
|
||||||
book, err := r.getBook(bookID)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if len(book.PartnumbersJSON) == 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
var count int64
|
|
||||||
if err := r.db.Model(&localdb.LocalPartnumberBookItem{}).
|
|
||||||
Where("partnumber IN ?", []string(book.PartnumbersJSON)).
|
|
||||||
Count(&count).Error; err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return count == int64(len(book.PartnumbersJSON))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *PartnumberBookRepository) getBook(bookID uint) (*localdb.LocalPartnumberBook, error) {
|
|
||||||
var book localdb.LocalPartnumberBook
|
|
||||||
if err := r.db.First(&book, bookID).Error; err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &book, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *PartnumberBookRepository) listCatalogItems(partnumbers localdb.LocalStringList, search string, page, perPage int) ([]localdb.LocalPartnumberBookItem, int64, error) {
|
|
||||||
if len(partnumbers) == 0 {
|
|
||||||
return []localdb.LocalPartnumberBookItem{}, 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
query := r.db.Model(&localdb.LocalPartnumberBookItem{}).Where("partnumber IN ?", []string(partnumbers))
|
|
||||||
if search != "" {
|
|
||||||
trimmedSearch := "%" + search + "%"
|
|
||||||
query = query.Where("partnumber LIKE ? OR lots_json LIKE ? OR description LIKE ?", trimmedSearch, trimmedSearch, trimmedSearch)
|
|
||||||
}
|
|
||||||
|
|
||||||
var total int64
|
|
||||||
if err := query.Count(&total).Error; err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var items []localdb.LocalPartnumberBookItem
|
|
||||||
if page > 0 && perPage > 0 {
|
|
||||||
query = query.Offset((page - 1) * perPage).Limit(perPage)
|
|
||||||
}
|
|
||||||
err := query.Order("partnumber ASC, id ASC").Find(&items).Error
|
|
||||||
return items, total, err
|
|
||||||
}
|
|
||||||
@@ -26,8 +26,7 @@ func (r *PricelistRepository) List(offset, limit int) ([]models.PricelistSummary
|
|||||||
|
|
||||||
// ListBySource returns pricelists filtered by source when provided.
|
// ListBySource returns pricelists filtered by source when provided.
|
||||||
func (r *PricelistRepository) ListBySource(source string, offset, limit int) ([]models.PricelistSummary, int64, error) {
|
func (r *PricelistRepository) ListBySource(source string, offset, limit int) ([]models.PricelistSummary, int64, error) {
|
||||||
query := r.db.Model(&models.Pricelist{}).
|
query := r.db.Model(&models.Pricelist{})
|
||||||
Where("EXISTS (SELECT 1 FROM qt_pricelist_items WHERE qt_pricelist_items.pricelist_id = qt_pricelists.id)")
|
|
||||||
if source != "" {
|
if source != "" {
|
||||||
query = query.Where("source = ?", source)
|
query = query.Where("source = ?", source)
|
||||||
}
|
}
|
||||||
@@ -38,7 +37,7 @@ func (r *PricelistRepository) ListBySource(source string, offset, limit int) ([]
|
|||||||
}
|
}
|
||||||
|
|
||||||
var pricelists []models.Pricelist
|
var pricelists []models.Pricelist
|
||||||
if err := query.Order("created_at DESC, id DESC").Offset(offset).Limit(limit).Find(&pricelists).Error; err != nil {
|
if err := query.Order("created_at DESC").Offset(offset).Limit(limit).Find(&pricelists).Error; err != nil {
|
||||||
return nil, 0, fmt.Errorf("listing pricelists: %w", err)
|
return nil, 0, fmt.Errorf("listing pricelists: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -52,9 +51,7 @@ func (r *PricelistRepository) ListActive(offset, limit int) ([]models.PricelistS
|
|||||||
|
|
||||||
// ListActiveBySource returns active pricelists filtered by source when provided.
|
// ListActiveBySource returns active pricelists filtered by source when provided.
|
||||||
func (r *PricelistRepository) ListActiveBySource(source string, offset, limit int) ([]models.PricelistSummary, int64, error) {
|
func (r *PricelistRepository) ListActiveBySource(source string, offset, limit int) ([]models.PricelistSummary, int64, error) {
|
||||||
query := r.db.Model(&models.Pricelist{}).
|
query := r.db.Model(&models.Pricelist{}).Where("is_active = ?", true)
|
||||||
Where("is_active = ?", true).
|
|
||||||
Where("EXISTS (SELECT 1 FROM qt_pricelist_items WHERE qt_pricelist_items.pricelist_id = qt_pricelists.id)")
|
|
||||||
if source != "" {
|
if source != "" {
|
||||||
query = query.Where("source = ?", source)
|
query = query.Where("source = ?", source)
|
||||||
}
|
}
|
||||||
@@ -65,7 +62,7 @@ func (r *PricelistRepository) ListActiveBySource(source string, offset, limit in
|
|||||||
}
|
}
|
||||||
|
|
||||||
var pricelists []models.Pricelist
|
var pricelists []models.Pricelist
|
||||||
if err := query.Order("created_at DESC, id DESC").Offset(offset).Limit(limit).Find(&pricelists).Error; err != nil {
|
if err := query.Order("created_at DESC").Offset(offset).Limit(limit).Find(&pricelists).Error; err != nil {
|
||||||
return nil, 0, fmt.Errorf("listing active pricelists: %w", err)
|
return nil, 0, fmt.Errorf("listing active pricelists: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -146,11 +143,7 @@ func (r *PricelistRepository) GetLatestActive() (*models.Pricelist, error) {
|
|||||||
// GetLatestActiveBySource returns the most recent active pricelist by source.
|
// GetLatestActiveBySource returns the most recent active pricelist by source.
|
||||||
func (r *PricelistRepository) GetLatestActiveBySource(source string) (*models.Pricelist, error) {
|
func (r *PricelistRepository) GetLatestActiveBySource(source string) (*models.Pricelist, error) {
|
||||||
var pricelist models.Pricelist
|
var pricelist models.Pricelist
|
||||||
if err := r.db.
|
if err := r.db.Where("is_active = ? AND source = ?", true, source).Order("created_at DESC").First(&pricelist).Error; err != nil {
|
||||||
Where("is_active = ? AND source = ?", true, source).
|
|
||||||
Where("EXISTS (SELECT 1 FROM qt_pricelist_items WHERE qt_pricelist_items.pricelist_id = qt_pricelists.id)").
|
|
||||||
Order("created_at DESC, id DESC").
|
|
||||||
First(&pricelist).Error; err != nil {
|
|
||||||
return nil, fmt.Errorf("getting latest pricelist: %w", err)
|
return nil, fmt.Errorf("getting latest pricelist: %w", err)
|
||||||
}
|
}
|
||||||
return &pricelist, nil
|
return &pricelist, nil
|
||||||
@@ -240,25 +233,16 @@ func (r *PricelistRepository) GetItems(pricelistID uint, offset, limit int, sear
|
|||||||
if err := r.db.Where("lot_name = ?", items[i].LotName).First(&lot).Error; err == nil {
|
if err := r.db.Where("lot_name = ?", items[i].LotName).First(&lot).Error; err == nil {
|
||||||
items[i].LotDescription = lot.LotDescription
|
items[i].LotDescription = lot.LotDescription
|
||||||
}
|
}
|
||||||
items[i].Category = strings.TrimSpace(items[i].LotCategory)
|
// Parse category from lot_name (e.g., "CPU_AMD_9654" -> "CPU")
|
||||||
|
parts := strings.SplitN(items[i].LotName, "_", 2)
|
||||||
|
if len(parts) >= 1 {
|
||||||
|
items[i].Category = parts[0]
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return items, total, nil
|
return items, total, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetLotNames returns distinct lot names from pricelist items.
|
|
||||||
func (r *PricelistRepository) GetLotNames(pricelistID uint) ([]string, error) {
|
|
||||||
var lotNames []string
|
|
||||||
if err := r.db.Model(&models.PricelistItem{}).
|
|
||||||
Where("pricelist_id = ?", pricelistID).
|
|
||||||
Distinct("lot_name").
|
|
||||||
Order("lot_name ASC").
|
|
||||||
Pluck("lot_name", &lotNames).Error; err != nil {
|
|
||||||
return nil, fmt.Errorf("listing pricelist lot names: %w", err)
|
|
||||||
}
|
|
||||||
return lotNames, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPriceForLot returns item price for a lot within a pricelist.
|
// GetPriceForLot returns item price for a lot within a pricelist.
|
||||||
func (r *PricelistRepository) GetPriceForLot(pricelistID uint, lotName string) (float64, error) {
|
func (r *PricelistRepository) GetPriceForLot(pricelistID uint, lotName string) (float64, error) {
|
||||||
var item models.PricelistItem
|
var item models.PricelistItem
|
||||||
@@ -268,28 +252,6 @@ func (r *PricelistRepository) GetPriceForLot(pricelistID uint, lotName string) (
|
|||||||
return item.Price, nil
|
return item.Price, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetPricesForLots returns price map for given lots within a pricelist.
|
|
||||||
func (r *PricelistRepository) GetPricesForLots(pricelistID uint, lotNames []string) (map[string]float64, error) {
|
|
||||||
result := make(map[string]float64, len(lotNames))
|
|
||||||
if pricelistID == 0 || len(lotNames) == 0 {
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var rows []models.PricelistItem
|
|
||||||
if err := r.db.Select("lot_name, price").
|
|
||||||
Where("pricelist_id = ? AND lot_name IN ?", pricelistID, lotNames).
|
|
||||||
Find(&rows).Error; err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, row := range rows {
|
|
||||||
if row.Price > 0 {
|
|
||||||
result[row.LotName] = row.Price
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetActive toggles active flag on a pricelist.
|
// SetActive toggles active flag on a pricelist.
|
||||||
func (r *PricelistRepository) SetActive(id uint, isActive bool) error {
|
func (r *PricelistRepository) SetActive(id uint, isActive bool) error {
|
||||||
return r.db.Model(&models.Pricelist{}).Where("id = ?", id).Update("is_active", isActive).Error
|
return r.db.Model(&models.Pricelist{}).Where("id = ?", id).Update("is_active", isActive).Error
|
||||||
@@ -303,18 +265,17 @@ func (r *PricelistRepository) GenerateVersion() (string, error) {
|
|||||||
// GenerateVersionBySource generates a new version string in format YYYY-MM-DD-NNN scoped by source.
|
// GenerateVersionBySource generates a new version string in format YYYY-MM-DD-NNN scoped by source.
|
||||||
func (r *PricelistRepository) GenerateVersionBySource(source string) (string, error) {
|
func (r *PricelistRepository) GenerateVersionBySource(source string) (string, error) {
|
||||||
today := time.Now().Format("2006-01-02")
|
today := time.Now().Format("2006-01-02")
|
||||||
prefix := versionPrefixBySource(source)
|
|
||||||
|
|
||||||
var last models.Pricelist
|
var last models.Pricelist
|
||||||
err := r.db.Model(&models.Pricelist{}).
|
err := r.db.Model(&models.Pricelist{}).
|
||||||
Select("version").
|
Select("version").
|
||||||
Where("source = ? AND version LIKE ?", source, prefix+"-"+today+"-%").
|
Where("source = ? AND version LIKE ?", source, today+"-%").
|
||||||
Order("version DESC").
|
Order("version DESC").
|
||||||
Limit(1).
|
Limit(1).
|
||||||
Take(&last).Error
|
Take(&last).Error
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||||
return fmt.Sprintf("%s-%s-001", prefix, today), nil
|
return fmt.Sprintf("%s-001", today), nil
|
||||||
}
|
}
|
||||||
return "", fmt.Errorf("loading latest today's pricelist version: %w", err)
|
return "", fmt.Errorf("loading latest today's pricelist version: %w", err)
|
||||||
}
|
}
|
||||||
@@ -329,18 +290,7 @@ func (r *PricelistRepository) GenerateVersionBySource(source string) (string, er
|
|||||||
return "", fmt.Errorf("parsing pricelist sequence %q: %w", parts[len(parts)-1], err)
|
return "", fmt.Errorf("parsing pricelist sequence %q: %w", parts[len(parts)-1], err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Sprintf("%s-%s-%03d", prefix, today, n+1), nil
|
return fmt.Sprintf("%s-%03d", today, n+1), nil
|
||||||
}
|
|
||||||
|
|
||||||
func versionPrefixBySource(source string) string {
|
|
||||||
switch models.NormalizePricelistSource(source) {
|
|
||||||
case models.PricelistSourceWarehouse:
|
|
||||||
return "S"
|
|
||||||
case models.PricelistSourceCompetitor:
|
|
||||||
return "B"
|
|
||||||
default:
|
|
||||||
return "E"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetPriceForLotBySource returns item price for a lot from latest active pricelist of source.
|
// GetPriceForLotBySource returns item price for a lot from latest active pricelist of source.
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ func TestGenerateVersion_FirstOfDay(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
today := time.Now().Format("2006-01-02")
|
today := time.Now().Format("2006-01-02")
|
||||||
want := fmt.Sprintf("E-%s-001", today)
|
want := fmt.Sprintf("%s-001", today)
|
||||||
if version != want {
|
if version != want {
|
||||||
t.Fatalf("expected %s, got %s", want, version)
|
t.Fatalf("expected %s, got %s", want, version)
|
||||||
}
|
}
|
||||||
@@ -30,8 +30,8 @@ func TestGenerateVersion_UsesMaxSuffixNotCount(t *testing.T) {
|
|||||||
today := time.Now().Format("2006-01-02")
|
today := time.Now().Format("2006-01-02")
|
||||||
|
|
||||||
seed := []models.Pricelist{
|
seed := []models.Pricelist{
|
||||||
{Source: string(models.PricelistSourceEstimate), Version: fmt.Sprintf("E-%s-001", today), CreatedBy: "test", IsActive: true},
|
{Source: string(models.PricelistSourceEstimate), Version: fmt.Sprintf("%s-001", today), CreatedBy: "test", IsActive: true},
|
||||||
{Source: string(models.PricelistSourceEstimate), Version: fmt.Sprintf("E-%s-003", today), CreatedBy: "test", IsActive: true},
|
{Source: string(models.PricelistSourceEstimate), Version: fmt.Sprintf("%s-003", today), CreatedBy: "test", IsActive: true},
|
||||||
}
|
}
|
||||||
for _, pl := range seed {
|
for _, pl := range seed {
|
||||||
if err := repo.Create(&pl); err != nil {
|
if err := repo.Create(&pl); err != nil {
|
||||||
@@ -44,7 +44,7 @@ func TestGenerateVersion_UsesMaxSuffixNotCount(t *testing.T) {
|
|||||||
t.Fatalf("GenerateVersionBySource returned error: %v", err)
|
t.Fatalf("GenerateVersionBySource returned error: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
want := fmt.Sprintf("E-%s-004", today)
|
want := fmt.Sprintf("%s-004", today)
|
||||||
if version != want {
|
if version != want {
|
||||||
t.Fatalf("expected %s, got %s", want, version)
|
t.Fatalf("expected %s, got %s", want, version)
|
||||||
}
|
}
|
||||||
@@ -55,8 +55,8 @@ func TestGenerateVersion_IsolatedBySource(t *testing.T) {
|
|||||||
today := time.Now().Format("2006-01-02")
|
today := time.Now().Format("2006-01-02")
|
||||||
|
|
||||||
seed := []models.Pricelist{
|
seed := []models.Pricelist{
|
||||||
{Source: string(models.PricelistSourceEstimate), Version: fmt.Sprintf("E-%s-009", today), CreatedBy: "test", IsActive: true},
|
{Source: string(models.PricelistSourceEstimate), Version: fmt.Sprintf("%s-009", today), CreatedBy: "test", IsActive: true},
|
||||||
{Source: string(models.PricelistSourceWarehouse), Version: fmt.Sprintf("S-%s-002", today), CreatedBy: "test", IsActive: true},
|
{Source: string(models.PricelistSourceWarehouse), Version: fmt.Sprintf("%s-002", today), CreatedBy: "test", IsActive: true},
|
||||||
}
|
}
|
||||||
for _, pl := range seed {
|
for _, pl := range seed {
|
||||||
if err := repo.Create(&pl); err != nil {
|
if err := repo.Create(&pl); err != nil {
|
||||||
@@ -69,107 +69,12 @@ func TestGenerateVersion_IsolatedBySource(t *testing.T) {
|
|||||||
t.Fatalf("GenerateVersionBySource returned error: %v", err)
|
t.Fatalf("GenerateVersionBySource returned error: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
want := fmt.Sprintf("S-%s-003", today)
|
want := fmt.Sprintf("%s-003", today)
|
||||||
if version != want {
|
if version != want {
|
||||||
t.Fatalf("expected %s, got %s", want, version)
|
t.Fatalf("expected %s, got %s", want, version)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetLatestActiveBySource_SkipsPricelistsWithoutItems(t *testing.T) {
|
|
||||||
repo := newTestPricelistRepository(t)
|
|
||||||
db := repo.db
|
|
||||||
ts := time.Now().Add(-time.Minute)
|
|
||||||
source := "test-estimate-skip-empty"
|
|
||||||
|
|
||||||
emptyLatest := models.Pricelist{
|
|
||||||
Source: source,
|
|
||||||
Version: "E-empty",
|
|
||||||
CreatedBy: "test",
|
|
||||||
IsActive: true,
|
|
||||||
CreatedAt: ts.Add(2 * time.Second),
|
|
||||||
}
|
|
||||||
if err := db.Create(&emptyLatest).Error; err != nil {
|
|
||||||
t.Fatalf("create empty pricelist: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
withItems := models.Pricelist{
|
|
||||||
Source: source,
|
|
||||||
Version: "E-with-items",
|
|
||||||
CreatedBy: "test",
|
|
||||||
IsActive: true,
|
|
||||||
CreatedAt: ts,
|
|
||||||
}
|
|
||||||
if err := db.Create(&withItems).Error; err != nil {
|
|
||||||
t.Fatalf("create pricelist with items: %v", err)
|
|
||||||
}
|
|
||||||
if err := db.Create(&models.PricelistItem{
|
|
||||||
PricelistID: withItems.ID,
|
|
||||||
LotName: "CPU_A",
|
|
||||||
Price: 100,
|
|
||||||
}).Error; err != nil {
|
|
||||||
t.Fatalf("create pricelist item: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
got, err := repo.GetLatestActiveBySource(source)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("GetLatestActiveBySource: %v", err)
|
|
||||||
}
|
|
||||||
if got.ID != withItems.ID {
|
|
||||||
t.Fatalf("expected pricelist with items id=%d, got id=%d", withItems.ID, got.ID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetLatestActiveBySource_TieBreaksByID(t *testing.T) {
|
|
||||||
repo := newTestPricelistRepository(t)
|
|
||||||
db := repo.db
|
|
||||||
ts := time.Now().Add(-time.Minute)
|
|
||||||
source := "test-warehouse-tie-break"
|
|
||||||
|
|
||||||
first := models.Pricelist{
|
|
||||||
Source: source,
|
|
||||||
Version: "S-1",
|
|
||||||
CreatedBy: "test",
|
|
||||||
IsActive: true,
|
|
||||||
CreatedAt: ts,
|
|
||||||
}
|
|
||||||
if err := db.Create(&first).Error; err != nil {
|
|
||||||
t.Fatalf("create first pricelist: %v", err)
|
|
||||||
}
|
|
||||||
if err := db.Create(&models.PricelistItem{
|
|
||||||
PricelistID: first.ID,
|
|
||||||
LotName: "CPU_A",
|
|
||||||
Price: 101,
|
|
||||||
}).Error; err != nil {
|
|
||||||
t.Fatalf("create first item: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
second := models.Pricelist{
|
|
||||||
Source: source,
|
|
||||||
Version: "S-2",
|
|
||||||
CreatedBy: "test",
|
|
||||||
IsActive: true,
|
|
||||||
CreatedAt: ts,
|
|
||||||
}
|
|
||||||
if err := db.Create(&second).Error; err != nil {
|
|
||||||
t.Fatalf("create second pricelist: %v", err)
|
|
||||||
}
|
|
||||||
if err := db.Create(&models.PricelistItem{
|
|
||||||
PricelistID: second.ID,
|
|
||||||
LotName: "CPU_A",
|
|
||||||
Price: 102,
|
|
||||||
}).Error; err != nil {
|
|
||||||
t.Fatalf("create second item: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
got, err := repo.GetLatestActiveBySource(source)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("GetLatestActiveBySource: %v", err)
|
|
||||||
}
|
|
||||||
if got.ID != second.ID {
|
|
||||||
t.Fatalf("expected later inserted pricelist id=%d, got id=%d", second.ID, got.ID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newTestPricelistRepository(t *testing.T) *PricelistRepository {
|
func newTestPricelistRepository(t *testing.T) *PricelistRepository {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
@@ -177,7 +82,7 @@ func newTestPricelistRepository(t *testing.T) *PricelistRepository {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("open sqlite: %v", err)
|
t.Fatalf("open sqlite: %v", err)
|
||||||
}
|
}
|
||||||
if err := db.AutoMigrate(&models.Pricelist{}, &models.PricelistItem{}, &models.Lot{}, &models.StockLog{}); err != nil {
|
if err := db.AutoMigrate(&models.Pricelist{}); err != nil {
|
||||||
t.Fatalf("migrate: %v", err)
|
t.Fatalf("migrate: %v", err)
|
||||||
}
|
}
|
||||||
return NewPricelistRepository(db)
|
return NewPricelistRepository(db)
|
||||||
|
|||||||
@@ -27,8 +27,6 @@ func (r *ProjectRepository) UpsertByUUID(project *models.Project) error {
|
|||||||
Columns: []clause.Column{{Name: "uuid"}},
|
Columns: []clause.Column{{Name: "uuid"}},
|
||||||
DoUpdates: clause.AssignmentColumns([]string{
|
DoUpdates: clause.AssignmentColumns([]string{
|
||||||
"owner_username",
|
"owner_username",
|
||||||
"code",
|
|
||||||
"variant",
|
|
||||||
"name",
|
"name",
|
||||||
"tracker_url",
|
"tracker_url",
|
||||||
"is_active",
|
"is_active",
|
||||||
|
|||||||
@@ -83,6 +83,10 @@ func (r *UnifiedRepo) getComponentsOffline(filter ComponentFilter, offset, limit
|
|||||||
search := "%" + filter.Search + "%"
|
search := "%" + filter.Search + "%"
|
||||||
query = query.Where("lot_name LIKE ? OR lot_description LIKE ? OR model LIKE ?", search, search, search)
|
query = query.Where("lot_name LIKE ? OR lot_description LIKE ? OR model LIKE ?", search, search, search)
|
||||||
}
|
}
|
||||||
|
if filter.HasPrice {
|
||||||
|
query = query.Where("current_price IS NOT NULL AND current_price > 0")
|
||||||
|
}
|
||||||
|
|
||||||
var total int64
|
var total int64
|
||||||
query.Count(&total)
|
query.Count(&total)
|
||||||
|
|
||||||
@@ -92,6 +96,8 @@ func (r *UnifiedRepo) getComponentsOffline(filter ComponentFilter, offset, limit
|
|||||||
sortDir = "DESC"
|
sortDir = "DESC"
|
||||||
}
|
}
|
||||||
switch filter.SortField {
|
switch filter.SortField {
|
||||||
|
case "current_price":
|
||||||
|
query = query.Order("current_price " + sortDir)
|
||||||
case "lot_name":
|
case "lot_name":
|
||||||
query = query.Order("lot_name " + sortDir)
|
query = query.Order("lot_name " + sortDir)
|
||||||
default:
|
default:
|
||||||
@@ -106,8 +112,9 @@ func (r *UnifiedRepo) getComponentsOffline(filter ComponentFilter, offset, limit
|
|||||||
result := make([]models.LotMetadata, len(components))
|
result := make([]models.LotMetadata, len(components))
|
||||||
for i, comp := range components {
|
for i, comp := range components {
|
||||||
result[i] = models.LotMetadata{
|
result[i] = models.LotMetadata{
|
||||||
LotName: comp.LotName,
|
LotName: comp.LotName,
|
||||||
Model: comp.Model,
|
Model: comp.Model,
|
||||||
|
CurrentPrice: comp.CurrentPrice,
|
||||||
Lot: &models.Lot{
|
Lot: &models.Lot{
|
||||||
LotName: comp.LotName,
|
LotName: comp.LotName,
|
||||||
LotDescription: comp.LotDescription,
|
LotDescription: comp.LotDescription,
|
||||||
@@ -131,8 +138,9 @@ func (r *UnifiedRepo) GetComponent(lotName string) (*models.LotMetadata, error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
return &models.LotMetadata{
|
return &models.LotMetadata{
|
||||||
LotName: comp.LotName,
|
LotName: comp.LotName,
|
||||||
Model: comp.Model,
|
Model: comp.Model,
|
||||||
|
CurrentPrice: comp.CurrentPrice,
|
||||||
Lot: &models.Lot{
|
Lot: &models.Lot{
|
||||||
LotName: comp.LotName,
|
LotName: comp.LotName,
|
||||||
LotDescription: comp.LotDescription,
|
LotDescription: comp.LotDescription,
|
||||||
|
|||||||
62
internal/repository/user.go
Normal file
62
internal/repository/user.go
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
package repository
|
||||||
|
|
||||||
|
import (
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
)
|
||||||
|
|
||||||
|
type UserRepository struct {
|
||||||
|
db *gorm.DB
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewUserRepository(db *gorm.DB) *UserRepository {
|
||||||
|
return &UserRepository{db: db}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *UserRepository) Create(user *models.User) error {
|
||||||
|
return r.db.Create(user).Error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *UserRepository) GetByID(id uint) (*models.User, error) {
|
||||||
|
var user models.User
|
||||||
|
err := r.db.First(&user, id).Error
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &user, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *UserRepository) GetByUsername(username string) (*models.User, error) {
|
||||||
|
var user models.User
|
||||||
|
err := r.db.Where("username = ?", username).First(&user).Error
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &user, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *UserRepository) GetByEmail(email string) (*models.User, error) {
|
||||||
|
var user models.User
|
||||||
|
err := r.db.Where("email = ?", email).First(&user).Error
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &user, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *UserRepository) Update(user *models.User) error {
|
||||||
|
return r.db.Save(user).Error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *UserRepository) Delete(id uint) error {
|
||||||
|
return r.db.Delete(&models.User{}, id).Error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *UserRepository) List(offset, limit int) ([]models.User, int64, error) {
|
||||||
|
var users []models.User
|
||||||
|
var total int64
|
||||||
|
|
||||||
|
r.db.Model(&models.User{}).Count(&total)
|
||||||
|
err := r.db.Offset(offset).Limit(limit).Find(&users).Error
|
||||||
|
return users, total, err
|
||||||
|
}
|
||||||
199
internal/services/alerts/service.go
Normal file
199
internal/services/alerts/service.go
Normal file
@@ -0,0 +1,199 @@
|
|||||||
|
package alerts
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Service struct {
|
||||||
|
alertRepo *repository.AlertRepository
|
||||||
|
componentRepo *repository.ComponentRepository
|
||||||
|
priceRepo *repository.PriceRepository
|
||||||
|
statsRepo *repository.StatsRepository
|
||||||
|
config config.AlertsConfig
|
||||||
|
pricingConfig config.PricingConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewService(
|
||||||
|
alertRepo *repository.AlertRepository,
|
||||||
|
componentRepo *repository.ComponentRepository,
|
||||||
|
priceRepo *repository.PriceRepository,
|
||||||
|
statsRepo *repository.StatsRepository,
|
||||||
|
alertCfg config.AlertsConfig,
|
||||||
|
pricingCfg config.PricingConfig,
|
||||||
|
) *Service {
|
||||||
|
return &Service{
|
||||||
|
alertRepo: alertRepo,
|
||||||
|
componentRepo: componentRepo,
|
||||||
|
priceRepo: priceRepo,
|
||||||
|
statsRepo: statsRepo,
|
||||||
|
config: alertCfg,
|
||||||
|
pricingConfig: pricingCfg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) List(filter repository.AlertFilter, page, perPage int) ([]models.PricingAlert, int64, error) {
|
||||||
|
if page < 1 {
|
||||||
|
page = 1
|
||||||
|
}
|
||||||
|
if perPage < 1 || perPage > 100 {
|
||||||
|
perPage = 20
|
||||||
|
}
|
||||||
|
offset := (page - 1) * perPage
|
||||||
|
|
||||||
|
return s.alertRepo.List(filter, offset, perPage)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) Acknowledge(id uint) error {
|
||||||
|
return s.alertRepo.UpdateStatus(id, models.AlertStatusAcknowledged)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) Resolve(id uint) error {
|
||||||
|
return s.alertRepo.UpdateStatus(id, models.AlertStatusResolved)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) Ignore(id uint) error {
|
||||||
|
return s.alertRepo.UpdateStatus(id, models.AlertStatusIgnored)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) GetNewAlertsCount() (int64, error) {
|
||||||
|
return s.alertRepo.CountByStatus(models.AlertStatusNew)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckAndGenerateAlerts scans components and creates alerts
|
||||||
|
func (s *Service) CheckAndGenerateAlerts() error {
|
||||||
|
if !s.config.Enabled {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get top components by usage
|
||||||
|
topComponents, err := s.statsRepo.GetTopComponents(100)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, stats := range topComponents {
|
||||||
|
component, err := s.componentRepo.GetByLotName(stats.LotName)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check high demand + stale price
|
||||||
|
if err := s.checkHighDemandStalePrice(component, &stats); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check trending without price
|
||||||
|
if err := s.checkTrendingNoPrice(component, &stats); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check no recent quotes
|
||||||
|
if err := s.checkNoRecentQuotes(component, &stats); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) checkHighDemandStalePrice(comp *models.LotMetadata, stats *models.ComponentUsageStats) error {
|
||||||
|
// high_demand_stale_price: >= 5 quotes/month AND price > 60 days old
|
||||||
|
if stats.QuotesLast30d < s.config.HighDemandThreshold {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if comp.PriceUpdatedAt == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
daysSinceUpdate := int(time.Since(*comp.PriceUpdatedAt).Hours() / 24)
|
||||||
|
if daysSinceUpdate <= s.pricingConfig.FreshnessYellowDays {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if alert already exists
|
||||||
|
exists, _ := s.alertRepo.ExistsByLotAndType(comp.LotName, models.AlertHighDemandStalePrice)
|
||||||
|
if exists {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
alert := &models.PricingAlert{
|
||||||
|
LotName: comp.LotName,
|
||||||
|
AlertType: models.AlertHighDemandStalePrice,
|
||||||
|
Severity: models.SeverityCritical,
|
||||||
|
Message: fmt.Sprintf("Компонент %s: высокий спрос (%d КП/мес), но цена устарела (%d дней)", comp.LotName, stats.QuotesLast30d, daysSinceUpdate),
|
||||||
|
Details: models.AlertDetails{
|
||||||
|
"quotes_30d": stats.QuotesLast30d,
|
||||||
|
"days_since_update": daysSinceUpdate,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.alertRepo.Create(alert)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) checkTrendingNoPrice(comp *models.LotMetadata, stats *models.ComponentUsageStats) error {
|
||||||
|
// trending_no_price: trend > 50% AND no price
|
||||||
|
if stats.TrendDirection != models.TrendUp || stats.TrendPercent < float64(s.config.TrendingThresholdPercent) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if comp.CurrentPrice != nil && *comp.CurrentPrice > 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
exists, _ := s.alertRepo.ExistsByLotAndType(comp.LotName, models.AlertTrendingNoPrice)
|
||||||
|
if exists {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
alert := &models.PricingAlert{
|
||||||
|
LotName: comp.LotName,
|
||||||
|
AlertType: models.AlertTrendingNoPrice,
|
||||||
|
Severity: models.SeverityHigh,
|
||||||
|
Message: fmt.Sprintf("Компонент %s: рост спроса +%.0f%%, но цена не установлена", comp.LotName, stats.TrendPercent),
|
||||||
|
Details: models.AlertDetails{
|
||||||
|
"trend_percent": stats.TrendPercent,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.alertRepo.Create(alert)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) checkNoRecentQuotes(comp *models.LotMetadata, stats *models.ComponentUsageStats) error {
|
||||||
|
// no_recent_quotes: popular component, no supplier quotes > 90 days
|
||||||
|
if stats.QuotesLast30d < 3 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
quoteCount, err := s.priceRepo.GetQuoteCount(comp.LotName, s.pricingConfig.FreshnessRedDays)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if quoteCount > 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
exists, _ := s.alertRepo.ExistsByLotAndType(comp.LotName, models.AlertNoRecentQuotes)
|
||||||
|
if exists {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
alert := &models.PricingAlert{
|
||||||
|
LotName: comp.LotName,
|
||||||
|
AlertType: models.AlertNoRecentQuotes,
|
||||||
|
Severity: models.SeverityMedium,
|
||||||
|
Message: fmt.Sprintf("Компонент %s: популярный (%d КП), но нет новых котировок >%d дней", comp.LotName, stats.QuotesLast30d, s.pricingConfig.FreshnessRedDays),
|
||||||
|
Details: models.AlertDetails{
|
||||||
|
"quotes_30d": stats.QuotesLast30d,
|
||||||
|
"no_quotes_days": s.pricingConfig.FreshnessRedDays,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.alertRepo.Create(alert)
|
||||||
|
}
|
||||||
180
internal/services/auth.go
Normal file
180
internal/services/auth.go
Normal file
@@ -0,0 +1,180 @@
|
|||||||
|
package services
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang-jwt/jwt/v5"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||||
|
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||||
|
"golang.org/x/crypto/bcrypt"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidCredentials = errors.New("invalid username or password")
|
||||||
|
ErrUserNotFound = errors.New("user not found")
|
||||||
|
ErrUserInactive = errors.New("user account is inactive")
|
||||||
|
ErrInvalidToken = errors.New("invalid token")
|
||||||
|
ErrTokenExpired = errors.New("token expired")
|
||||||
|
)
|
||||||
|
|
||||||
|
type AuthService struct {
|
||||||
|
userRepo *repository.UserRepository
|
||||||
|
config config.AuthConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAuthService(userRepo *repository.UserRepository, cfg config.AuthConfig) *AuthService {
|
||||||
|
return &AuthService{
|
||||||
|
userRepo: userRepo,
|
||||||
|
config: cfg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type TokenPair struct {
|
||||||
|
AccessToken string `json:"access_token"`
|
||||||
|
RefreshToken string `json:"refresh_token"`
|
||||||
|
ExpiresAt int64 `json:"expires_at"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Claims struct {
|
||||||
|
UserID uint `json:"user_id"`
|
||||||
|
Username string `json:"username"`
|
||||||
|
Role models.UserRole `json:"role"`
|
||||||
|
jwt.RegisteredClaims
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *AuthService) Login(username, password string) (*TokenPair, *models.User, error) {
|
||||||
|
user, err := s.userRepo.GetByUsername(username)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, ErrInvalidCredentials
|
||||||
|
}
|
||||||
|
|
||||||
|
if !user.IsActive {
|
||||||
|
return nil, nil, ErrUserInactive
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := bcrypt.CompareHashAndPassword([]byte(user.PasswordHash), []byte(password)); err != nil {
|
||||||
|
return nil, nil, ErrInvalidCredentials
|
||||||
|
}
|
||||||
|
|
||||||
|
tokens, err := s.generateTokenPair(user)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return tokens, user, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *AuthService) RefreshTokens(refreshToken string) (*TokenPair, error) {
|
||||||
|
claims, err := s.ValidateToken(refreshToken)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
user, err := s.userRepo.GetByID(claims.UserID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, ErrUserNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
if !user.IsActive {
|
||||||
|
return nil, ErrUserInactive
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.generateTokenPair(user)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *AuthService) ValidateToken(tokenString string) (*Claims, error) {
|
||||||
|
token, err := jwt.ParseWithClaims(tokenString, &Claims{}, func(token *jwt.Token) (interface{}, error) {
|
||||||
|
return []byte(s.config.JWTSecret), nil
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, jwt.ErrTokenExpired) {
|
||||||
|
return nil, ErrTokenExpired
|
||||||
|
}
|
||||||
|
return nil, ErrInvalidToken
|
||||||
|
}
|
||||||
|
|
||||||
|
claims, ok := token.Claims.(*Claims)
|
||||||
|
if !ok || !token.Valid {
|
||||||
|
return nil, ErrInvalidToken
|
||||||
|
}
|
||||||
|
|
||||||
|
return claims, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *AuthService) generateTokenPair(user *models.User) (*TokenPair, error) {
|
||||||
|
now := time.Now()
|
||||||
|
accessExpiry := now.Add(s.config.TokenExpiry)
|
||||||
|
refreshExpiry := now.Add(s.config.RefreshExpiry)
|
||||||
|
|
||||||
|
accessClaims := &Claims{
|
||||||
|
UserID: user.ID,
|
||||||
|
Username: user.Username,
|
||||||
|
Role: user.Role,
|
||||||
|
RegisteredClaims: jwt.RegisteredClaims{
|
||||||
|
ExpiresAt: jwt.NewNumericDate(accessExpiry),
|
||||||
|
IssuedAt: jwt.NewNumericDate(now),
|
||||||
|
Subject: user.Username,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
accessToken := jwt.NewWithClaims(jwt.SigningMethodHS256, accessClaims)
|
||||||
|
accessTokenString, err := accessToken.SignedString([]byte(s.config.JWTSecret))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
refreshClaims := &Claims{
|
||||||
|
UserID: user.ID,
|
||||||
|
Username: user.Username,
|
||||||
|
Role: user.Role,
|
||||||
|
RegisteredClaims: jwt.RegisteredClaims{
|
||||||
|
ExpiresAt: jwt.NewNumericDate(refreshExpiry),
|
||||||
|
IssuedAt: jwt.NewNumericDate(now),
|
||||||
|
Subject: user.Username,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
refreshToken := jwt.NewWithClaims(jwt.SigningMethodHS256, refreshClaims)
|
||||||
|
refreshTokenString, err := refreshToken.SignedString([]byte(s.config.JWTSecret))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &TokenPair{
|
||||||
|
AccessToken: accessTokenString,
|
||||||
|
RefreshToken: refreshTokenString,
|
||||||
|
ExpiresAt: accessExpiry.Unix(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *AuthService) HashPassword(password string) (string, error) {
|
||||||
|
hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return string(hash), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *AuthService) CreateUser(username, email, password string, role models.UserRole) (*models.User, error) {
|
||||||
|
hash, err := s.HashPassword(password)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
user := &models.User{
|
||||||
|
Username: username,
|
||||||
|
Email: email,
|
||||||
|
PasswordHash: hash,
|
||||||
|
Role: role,
|
||||||
|
IsActive: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.userRepo.Create(user); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return user, nil
|
||||||
|
}
|
||||||
@@ -53,6 +53,7 @@ type ComponentView struct {
|
|||||||
Category string `json:"category"`
|
Category string `json:"category"`
|
||||||
CategoryName string `json:"category_name"`
|
CategoryName string `json:"category_name"`
|
||||||
Model string `json:"model"`
|
Model string `json:"model"`
|
||||||
|
CurrentPrice *float64 `json:"current_price"`
|
||||||
PriceFreshness models.PriceFreshness `json:"price_freshness"`
|
PriceFreshness models.PriceFreshness `json:"price_freshness"`
|
||||||
PopularityScore float64 `json:"popularity_score"`
|
PopularityScore float64 `json:"popularity_score"`
|
||||||
Specs models.Specs `json:"specs,omitempty"`
|
Specs models.Specs `json:"specs,omitempty"`
|
||||||
@@ -91,6 +92,7 @@ func (s *ComponentService) List(filter repository.ComponentFilter, page, perPage
|
|||||||
view := ComponentView{
|
view := ComponentView{
|
||||||
LotName: c.LotName,
|
LotName: c.LotName,
|
||||||
Model: c.Model,
|
Model: c.Model,
|
||||||
|
CurrentPrice: c.CurrentPrice,
|
||||||
PriceFreshness: c.GetPriceFreshness(30, 60, 90, 3),
|
PriceFreshness: c.GetPriceFreshness(30, 60, 90, 3),
|
||||||
PopularityScore: c.PopularityScore,
|
PopularityScore: c.PopularityScore,
|
||||||
Specs: c.Specs,
|
Specs: c.Specs,
|
||||||
@@ -132,6 +134,7 @@ func (s *ComponentService) GetByLotName(lotName string) (*ComponentView, error)
|
|||||||
view := &ComponentView{
|
view := &ComponentView{
|
||||||
LotName: c.LotName,
|
LotName: c.LotName,
|
||||||
Model: c.Model,
|
Model: c.Model,
|
||||||
|
CurrentPrice: c.CurrentPrice,
|
||||||
PriceFreshness: c.GetPriceFreshness(30, 60, 90, 3),
|
PriceFreshness: c.GetPriceFreshness(30, 60, 90, 3),
|
||||||
PopularityScore: c.PopularityScore,
|
PopularityScore: c.PopularityScore,
|
||||||
Specs: c.Specs,
|
Specs: c.Specs,
|
||||||
|
|||||||
@@ -45,28 +45,13 @@ func NewConfigurationService(
|
|||||||
}
|
}
|
||||||
|
|
||||||
type CreateConfigRequest struct {
|
type CreateConfigRequest struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Items models.ConfigItems `json:"items"`
|
|
||||||
ProjectUUID *string `json:"project_uuid,omitempty"`
|
|
||||||
CustomPrice *float64 `json:"custom_price"`
|
|
||||||
Notes string `json:"notes"`
|
|
||||||
IsTemplate bool `json:"is_template"`
|
|
||||||
ServerCount int `json:"server_count"`
|
|
||||||
ServerModel string `json:"server_model,omitempty"`
|
|
||||||
SupportCode string `json:"support_code,omitempty"`
|
|
||||||
Article string `json:"article,omitempty"`
|
|
||||||
PricelistID *uint `json:"pricelist_id,omitempty"`
|
|
||||||
WarehousePricelistID *uint `json:"warehouse_pricelist_id,omitempty"`
|
|
||||||
CompetitorPricelistID *uint `json:"competitor_pricelist_id,omitempty"`
|
|
||||||
ConfigType string `json:"config_type,omitempty"` // "server" | "storage"
|
|
||||||
DisablePriceRefresh bool `json:"disable_price_refresh"`
|
|
||||||
OnlyInStock bool `json:"only_in_stock"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ArticlePreviewRequest struct {
|
|
||||||
Items models.ConfigItems `json:"items"`
|
Items models.ConfigItems `json:"items"`
|
||||||
ServerModel string `json:"server_model"`
|
ProjectUUID *string `json:"project_uuid,omitempty"`
|
||||||
SupportCode string `json:"support_code,omitempty"`
|
CustomPrice *float64 `json:"custom_price"`
|
||||||
|
Notes string `json:"notes"`
|
||||||
|
IsTemplate bool `json:"is_template"`
|
||||||
|
ServerCount int `json:"server_count"`
|
||||||
PricelistID *uint `json:"pricelist_id,omitempty"`
|
PricelistID *uint `json:"pricelist_id,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -88,28 +73,17 @@ func (s *ConfigurationService) Create(ownerUsername string, req *CreateConfigReq
|
|||||||
}
|
}
|
||||||
|
|
||||||
config := &models.Configuration{
|
config := &models.Configuration{
|
||||||
UUID: uuid.New().String(),
|
UUID: uuid.New().String(),
|
||||||
OwnerUsername: ownerUsername,
|
OwnerUsername: ownerUsername,
|
||||||
ProjectUUID: projectUUID,
|
ProjectUUID: projectUUID,
|
||||||
Name: req.Name,
|
Name: req.Name,
|
||||||
Items: req.Items,
|
Items: req.Items,
|
||||||
TotalPrice: &total,
|
TotalPrice: &total,
|
||||||
CustomPrice: req.CustomPrice,
|
CustomPrice: req.CustomPrice,
|
||||||
Notes: req.Notes,
|
Notes: req.Notes,
|
||||||
IsTemplate: req.IsTemplate,
|
IsTemplate: req.IsTemplate,
|
||||||
ServerCount: req.ServerCount,
|
ServerCount: req.ServerCount,
|
||||||
ServerModel: req.ServerModel,
|
PricelistID: pricelistID,
|
||||||
SupportCode: req.SupportCode,
|
|
||||||
Article: req.Article,
|
|
||||||
PricelistID: pricelistID,
|
|
||||||
WarehousePricelistID: req.WarehousePricelistID,
|
|
||||||
CompetitorPricelistID: req.CompetitorPricelistID,
|
|
||||||
ConfigType: req.ConfigType,
|
|
||||||
DisablePriceRefresh: req.DisablePriceRefresh,
|
|
||||||
OnlyInStock: req.OnlyInStock,
|
|
||||||
}
|
|
||||||
if config.ConfigType == "" {
|
|
||||||
config.ConfigType = "server"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.configRepo.Create(config); err != nil {
|
if err := s.configRepo.Create(config); err != nil {
|
||||||
@@ -170,14 +144,7 @@ func (s *ConfigurationService) Update(uuid string, ownerUsername string, req *Cr
|
|||||||
config.Notes = req.Notes
|
config.Notes = req.Notes
|
||||||
config.IsTemplate = req.IsTemplate
|
config.IsTemplate = req.IsTemplate
|
||||||
config.ServerCount = req.ServerCount
|
config.ServerCount = req.ServerCount
|
||||||
config.ServerModel = req.ServerModel
|
|
||||||
config.SupportCode = req.SupportCode
|
|
||||||
config.Article = req.Article
|
|
||||||
config.PricelistID = pricelistID
|
config.PricelistID = pricelistID
|
||||||
config.WarehousePricelistID = req.WarehousePricelistID
|
|
||||||
config.CompetitorPricelistID = req.CompetitorPricelistID
|
|
||||||
config.DisablePriceRefresh = req.DisablePriceRefresh
|
|
||||||
config.OnlyInStock = req.OnlyInStock
|
|
||||||
|
|
||||||
if err := s.configRepo.Update(config); err != nil {
|
if err := s.configRepo.Update(config); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -244,24 +211,17 @@ func (s *ConfigurationService) CloneToProject(configUUID string, ownerUsername s
|
|||||||
}
|
}
|
||||||
|
|
||||||
clone := &models.Configuration{
|
clone := &models.Configuration{
|
||||||
UUID: uuid.New().String(),
|
UUID: uuid.New().String(),
|
||||||
OwnerUsername: ownerUsername,
|
OwnerUsername: ownerUsername,
|
||||||
ProjectUUID: resolvedProjectUUID,
|
ProjectUUID: resolvedProjectUUID,
|
||||||
Name: newName,
|
Name: newName,
|
||||||
Items: original.Items,
|
Items: original.Items,
|
||||||
TotalPrice: &total,
|
TotalPrice: &total,
|
||||||
CustomPrice: original.CustomPrice,
|
CustomPrice: original.CustomPrice,
|
||||||
Notes: original.Notes,
|
Notes: original.Notes,
|
||||||
IsTemplate: false, // Clone is never a template
|
IsTemplate: false, // Clone is never a template
|
||||||
ServerCount: original.ServerCount,
|
ServerCount: original.ServerCount,
|
||||||
ServerModel: original.ServerModel,
|
PricelistID: original.PricelistID,
|
||||||
SupportCode: original.SupportCode,
|
|
||||||
Article: original.Article,
|
|
||||||
PricelistID: original.PricelistID,
|
|
||||||
WarehousePricelistID: original.WarehousePricelistID,
|
|
||||||
CompetitorPricelistID: original.CompetitorPricelistID,
|
|
||||||
DisablePriceRefresh: original.DisablePriceRefresh,
|
|
||||||
OnlyInStock: original.OnlyInStock,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.configRepo.Create(clone); err != nil {
|
if err := s.configRepo.Create(clone); err != nil {
|
||||||
@@ -334,14 +294,7 @@ func (s *ConfigurationService) UpdateNoAuth(uuid string, req *CreateConfigReques
|
|||||||
config.Notes = req.Notes
|
config.Notes = req.Notes
|
||||||
config.IsTemplate = req.IsTemplate
|
config.IsTemplate = req.IsTemplate
|
||||||
config.ServerCount = req.ServerCount
|
config.ServerCount = req.ServerCount
|
||||||
config.ServerModel = req.ServerModel
|
|
||||||
config.SupportCode = req.SupportCode
|
|
||||||
config.Article = req.Article
|
|
||||||
config.PricelistID = pricelistID
|
config.PricelistID = pricelistID
|
||||||
config.WarehousePricelistID = req.WarehousePricelistID
|
|
||||||
config.CompetitorPricelistID = req.CompetitorPricelistID
|
|
||||||
config.DisablePriceRefresh = req.DisablePriceRefresh
|
|
||||||
config.OnlyInStock = req.OnlyInStock
|
|
||||||
|
|
||||||
if err := s.configRepo.Update(config); err != nil {
|
if err := s.configRepo.Update(config); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -409,7 +362,6 @@ func (s *ConfigurationService) CloneNoAuthToProject(configUUID string, newName s
|
|||||||
IsTemplate: false,
|
IsTemplate: false,
|
||||||
ServerCount: original.ServerCount,
|
ServerCount: original.ServerCount,
|
||||||
PricelistID: original.PricelistID,
|
PricelistID: original.PricelistID,
|
||||||
OnlyInStock: original.OnlyInStock,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.configRepo.Create(clone); err != nil {
|
if err := s.configRepo.Create(clone); err != nil {
|
||||||
@@ -613,7 +565,13 @@ func (s *ConfigurationService) isOwner(config *models.Configuration, ownerUserna
|
|||||||
if config == nil || ownerUsername == "" {
|
if config == nil || ownerUsername == "" {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return config.OwnerUsername == ownerUsername
|
if config.OwnerUsername != "" {
|
||||||
|
return config.OwnerUsername == ownerUsername
|
||||||
|
}
|
||||||
|
if config.User != nil {
|
||||||
|
return config.User.Username == ownerUsername
|
||||||
|
}
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// // Export configuration as JSON
|
// // Export configuration as JSON
|
||||||
|
|||||||
@@ -4,33 +4,33 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"encoding/csv"
|
"encoding/csv"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||||
)
|
)
|
||||||
|
|
||||||
type ExportService struct {
|
type ExportService struct {
|
||||||
config config.ExportConfig
|
config config.ExportConfig
|
||||||
categoryRepo *repository.CategoryRepository
|
categoryRepo *repository.CategoryRepository
|
||||||
localDB *localdb.LocalDB
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewExportService(cfg config.ExportConfig, categoryRepo *repository.CategoryRepository, local *localdb.LocalDB) *ExportService {
|
func NewExportService(cfg config.ExportConfig, categoryRepo *repository.CategoryRepository) *ExportService {
|
||||||
return &ExportService{
|
return &ExportService{
|
||||||
config: cfg,
|
config: cfg,
|
||||||
categoryRepo: categoryRepo,
|
categoryRepo: categoryRepo,
|
||||||
localDB: local,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ExportItem represents a single component in an export block.
|
type ExportData struct {
|
||||||
|
Name string
|
||||||
|
Items []ExportItem
|
||||||
|
Total float64
|
||||||
|
Notes string
|
||||||
|
CreatedAt time.Time
|
||||||
|
}
|
||||||
|
|
||||||
type ExportItem struct {
|
type ExportItem struct {
|
||||||
LotName string
|
LotName string
|
||||||
Description string
|
Description string
|
||||||
@@ -40,91 +40,14 @@ type ExportItem struct {
|
|||||||
TotalPrice float64
|
TotalPrice float64
|
||||||
}
|
}
|
||||||
|
|
||||||
// ConfigExportBlock represents one configuration (server) in the export.
|
func (s *ExportService) ToCSV(data *ExportData) ([]byte, error) {
|
||||||
type ConfigExportBlock struct {
|
var buf bytes.Buffer
|
||||||
Article string
|
w := csv.NewWriter(&buf)
|
||||||
Line int
|
|
||||||
ServerCount int
|
|
||||||
UnitPrice float64 // sum of component prices for one server
|
|
||||||
Items []ExportItem
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectExportData holds all configuration blocks for a project-level export.
|
|
||||||
type ProjectExportData struct {
|
|
||||||
Configs []ConfigExportBlock
|
|
||||||
CreatedAt time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type ProjectPricingExportOptions struct {
|
|
||||||
IncludeLOT bool `json:"include_lot"`
|
|
||||||
IncludeBOM bool `json:"include_bom"`
|
|
||||||
IncludeEstimate bool `json:"include_estimate"`
|
|
||||||
IncludeStock bool `json:"include_stock"`
|
|
||||||
IncludeCompetitor bool `json:"include_competitor"`
|
|
||||||
Basis string `json:"basis"` // "fob" or "ddp"; empty defaults to "fob"
|
|
||||||
SaleMarkup float64 `json:"sale_markup"` // DDP multiplier; 0 defaults to 1.3
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o ProjectPricingExportOptions) saleMarkupFactor() float64 {
|
|
||||||
if o.SaleMarkup > 0 {
|
|
||||||
return o.SaleMarkup
|
|
||||||
}
|
|
||||||
return 1.3
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o ProjectPricingExportOptions) isDDP() bool {
|
|
||||||
return strings.EqualFold(strings.TrimSpace(o.Basis), "ddp")
|
|
||||||
}
|
|
||||||
|
|
||||||
type ProjectPricingExportData struct {
|
|
||||||
Configs []ProjectPricingExportConfig
|
|
||||||
CreatedAt time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type ProjectPricingExportConfig struct {
|
|
||||||
Name string
|
|
||||||
Article string
|
|
||||||
Line int
|
|
||||||
ServerCount int
|
|
||||||
Rows []ProjectPricingExportRow
|
|
||||||
}
|
|
||||||
|
|
||||||
type ProjectPricingExportRow struct {
|
|
||||||
LotDisplay string
|
|
||||||
VendorPN string
|
|
||||||
Description string
|
|
||||||
Quantity int
|
|
||||||
BOMTotal *float64
|
|
||||||
Estimate *float64
|
|
||||||
Stock *float64
|
|
||||||
Competitor *float64
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToCSV writes project export data in the new structured CSV format.
|
|
||||||
//
|
|
||||||
// Format:
|
|
||||||
//
|
|
||||||
// Line;Type;p/n;Description;Qty (1 pcs.);Qty (total);Price (1 pcs.);Price (total)
|
|
||||||
// 10;;DL380-ARTICLE;;;10;10470;104 700
|
|
||||||
// ;;MB_INTEL_...;;1;;2074,5;
|
|
||||||
// ...
|
|
||||||
// (empty row)
|
|
||||||
// 20;;DL380-ARTICLE-2;;;2;10470;20 940
|
|
||||||
// ...
|
|
||||||
func (s *ExportService) ToCSV(w io.Writer, data *ProjectExportData) error {
|
|
||||||
// Write UTF-8 BOM for Excel compatibility
|
|
||||||
if _, err := w.Write([]byte{0xEF, 0xBB, 0xBF}); err != nil {
|
|
||||||
return fmt.Errorf("failed to write BOM: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
csvWriter := csv.NewWriter(w)
|
|
||||||
csvWriter.Comma = ';'
|
|
||||||
defer csvWriter.Flush()
|
|
||||||
|
|
||||||
// Header
|
// Header
|
||||||
headers := []string{"Line", "Type", "p/n", "Description", "Qty (1 pcs.)", "Qty (total)", "Price (1 pcs.)", "Price (total)"}
|
headers := []string{"Артикул", "Описание", "Категория", "Количество", "Цена за единицу", "Сумма"}
|
||||||
if err := csvWriter.Write(headers); err != nil {
|
if err := w.Write(headers); err != nil {
|
||||||
return fmt.Errorf("failed to write header: %w", err)
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get category hierarchy for sorting
|
// Get category hierarchy for sorting
|
||||||
@@ -138,766 +61,87 @@ func (s *ExportService) ToCSV(w io.Writer, data *ProjectExportData) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, block := range data.Configs {
|
// Sort items by category display order
|
||||||
lineNo := block.Line
|
sortedItems := make([]ExportItem, len(data.Items))
|
||||||
if lineNo <= 0 {
|
copy(sortedItems, data.Items)
|
||||||
lineNo = (i + 1) * 10
|
|
||||||
}
|
|
||||||
|
|
||||||
serverCount := block.ServerCount
|
// Sort using category display order (items without category go to the end)
|
||||||
if serverCount < 1 {
|
for i := 0; i < len(sortedItems)-1; i++ {
|
||||||
serverCount = 1
|
for j := i + 1; j < len(sortedItems); j++ {
|
||||||
}
|
orderI, hasI := categoryOrder[sortedItems[i].Category]
|
||||||
|
orderJ, hasJ := categoryOrder[sortedItems[j].Category]
|
||||||
|
|
||||||
totalPrice := block.UnitPrice * float64(serverCount)
|
// Items without category go to the end
|
||||||
|
if !hasI && hasJ {
|
||||||
// Server summary row
|
sortedItems[i], sortedItems[j] = sortedItems[j], sortedItems[i]
|
||||||
serverRow := []string{
|
} else if hasI && hasJ {
|
||||||
fmt.Sprintf("%d", lineNo), // Line
|
// Both have categories, sort by display order
|
||||||
"", // Type
|
if orderI > orderJ {
|
||||||
block.Article, // p/n
|
sortedItems[i], sortedItems[j] = sortedItems[j], sortedItems[i]
|
||||||
"", // Description
|
}
|
||||||
"", // Qty (1 pcs.)
|
|
||||||
fmt.Sprintf("%d", serverCount), // Qty (total)
|
|
||||||
formatPriceInt(block.UnitPrice), // Price (1 pcs.)
|
|
||||||
formatPriceWithSpace(totalPrice), // Price (total)
|
|
||||||
}
|
|
||||||
if err := csvWriter.Write(serverRow); err != nil {
|
|
||||||
return fmt.Errorf("failed to write server row: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort items by category display order
|
|
||||||
sortedItems := make([]ExportItem, len(block.Items))
|
|
||||||
copy(sortedItems, block.Items)
|
|
||||||
sortItemsByCategory(sortedItems, categoryOrder)
|
|
||||||
|
|
||||||
// Component rows
|
|
||||||
for _, item := range sortedItems {
|
|
||||||
componentRow := []string{
|
|
||||||
"", // Line
|
|
||||||
item.Category, // Type
|
|
||||||
item.LotName, // p/n
|
|
||||||
"", // Description
|
|
||||||
fmt.Sprintf("%d", item.Quantity), // Qty (1 pcs.)
|
|
||||||
"", // Qty (total)
|
|
||||||
formatPriceComma(item.UnitPrice), // Price (1 pcs.)
|
|
||||||
"", // Price (total)
|
|
||||||
}
|
|
||||||
if err := csvWriter.Write(componentRow); err != nil {
|
|
||||||
return fmt.Errorf("failed to write component row: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Empty separator row between blocks (skip after last)
|
|
||||||
if i < len(data.Configs)-1 {
|
|
||||||
if err := csvWriter.Write([]string{"", "", "", "", "", "", "", ""}); err != nil {
|
|
||||||
return fmt.Errorf("failed to write separator row: %w", err)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
csvWriter.Flush()
|
// Items
|
||||||
if err := csvWriter.Error(); err != nil {
|
for _, item := range sortedItems {
|
||||||
return fmt.Errorf("csv writer error: %w", err)
|
row := []string{
|
||||||
}
|
item.LotName,
|
||||||
|
item.Description,
|
||||||
return nil
|
item.Category,
|
||||||
}
|
fmt.Sprintf("%d", item.Quantity),
|
||||||
|
fmt.Sprintf("%.2f", item.UnitPrice),
|
||||||
// ToCSVBytes is a backward-compatible wrapper that returns CSV data as bytes.
|
fmt.Sprintf("%.2f", item.TotalPrice),
|
||||||
func (s *ExportService) ToCSVBytes(data *ProjectExportData) ([]byte, error) {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := s.ToCSV(&buf, data); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return buf.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ExportService) ProjectToPricingExportData(configs []models.Configuration, opts ProjectPricingExportOptions) (*ProjectPricingExportData, error) {
|
|
||||||
sortedConfigs := make([]models.Configuration, len(configs))
|
|
||||||
copy(sortedConfigs, configs)
|
|
||||||
sort.Slice(sortedConfigs, func(i, j int) bool {
|
|
||||||
leftLine := sortedConfigs[i].Line
|
|
||||||
rightLine := sortedConfigs[j].Line
|
|
||||||
|
|
||||||
if leftLine <= 0 {
|
|
||||||
leftLine = int(^uint(0) >> 1)
|
|
||||||
}
|
}
|
||||||
if rightLine <= 0 {
|
if err := w.Write(row); err != nil {
|
||||||
rightLine = int(^uint(0) >> 1)
|
|
||||||
}
|
|
||||||
if leftLine != rightLine {
|
|
||||||
return leftLine < rightLine
|
|
||||||
}
|
|
||||||
if !sortedConfigs[i].CreatedAt.Equal(sortedConfigs[j].CreatedAt) {
|
|
||||||
return sortedConfigs[i].CreatedAt.After(sortedConfigs[j].CreatedAt)
|
|
||||||
}
|
|
||||||
return sortedConfigs[i].UUID > sortedConfigs[j].UUID
|
|
||||||
})
|
|
||||||
|
|
||||||
blocks := make([]ProjectPricingExportConfig, 0, len(sortedConfigs))
|
|
||||||
for i := range sortedConfigs {
|
|
||||||
block, err := s.buildPricingExportBlock(&sortedConfigs[i], opts)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
blocks = append(blocks, block)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &ProjectPricingExportData{
|
// Total row
|
||||||
Configs: blocks,
|
if err := w.Write([]string{"", "", "", "", "ИТОГО:", fmt.Sprintf("%.2f", data.Total)}); err != nil {
|
||||||
CreatedAt: time.Now(),
|
return nil, err
|
||||||
}, nil
|
}
|
||||||
|
|
||||||
|
w.Flush()
|
||||||
|
return buf.Bytes(), w.Error()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ExportService) ToPricingCSV(w io.Writer, data *ProjectPricingExportData, opts ProjectPricingExportOptions) error {
|
func (s *ExportService) ConfigToExportData(config *models.Configuration, componentService *ComponentService) *ExportData {
|
||||||
if _, err := w.Write([]byte{0xEF, 0xBB, 0xBF}); err != nil {
|
items := make([]ExportItem, len(config.Items))
|
||||||
return fmt.Errorf("failed to write BOM: %w", err)
|
var total float64
|
||||||
}
|
|
||||||
|
|
||||||
csvWriter := csv.NewWriter(w)
|
for i, item := range config.Items {
|
||||||
csvWriter.Comma = ';'
|
|
||||||
defer csvWriter.Flush()
|
|
||||||
|
|
||||||
headers := pricingCSVHeaders(opts)
|
|
||||||
if err := csvWriter.Write(headers); err != nil {
|
|
||||||
return fmt.Errorf("failed to write pricing header: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
writeRows := opts.IncludeLOT || opts.IncludeBOM
|
|
||||||
for _, cfg := range data.Configs {
|
|
||||||
if err := csvWriter.Write(pricingConfigSummaryRow(cfg, opts)); err != nil {
|
|
||||||
return fmt.Errorf("failed to write config summary row: %w", err)
|
|
||||||
}
|
|
||||||
if writeRows {
|
|
||||||
for _, row := range cfg.Rows {
|
|
||||||
if err := csvWriter.Write(pricingCSVRow(row, opts)); err != nil {
|
|
||||||
return fmt.Errorf("failed to write pricing row: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
csvWriter.Flush()
|
|
||||||
if err := csvWriter.Error(); err != nil {
|
|
||||||
return fmt.Errorf("csv writer error: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConfigToExportData converts a single configuration into ProjectExportData.
|
|
||||||
func (s *ExportService) ConfigToExportData(cfg *models.Configuration) *ProjectExportData {
|
|
||||||
block := s.buildExportBlock(cfg)
|
|
||||||
return &ProjectExportData{
|
|
||||||
Configs: []ConfigExportBlock{block},
|
|
||||||
CreatedAt: cfg.CreatedAt,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectToExportData converts multiple configurations into ProjectExportData.
|
|
||||||
func (s *ExportService) ProjectToExportData(configs []models.Configuration) *ProjectExportData {
|
|
||||||
sortedConfigs := make([]models.Configuration, len(configs))
|
|
||||||
copy(sortedConfigs, configs)
|
|
||||||
sort.Slice(sortedConfigs, func(i, j int) bool {
|
|
||||||
leftLine := sortedConfigs[i].Line
|
|
||||||
rightLine := sortedConfigs[j].Line
|
|
||||||
|
|
||||||
if leftLine <= 0 {
|
|
||||||
leftLine = int(^uint(0) >> 1)
|
|
||||||
}
|
|
||||||
if rightLine <= 0 {
|
|
||||||
rightLine = int(^uint(0) >> 1)
|
|
||||||
}
|
|
||||||
if leftLine != rightLine {
|
|
||||||
return leftLine < rightLine
|
|
||||||
}
|
|
||||||
if !sortedConfigs[i].CreatedAt.Equal(sortedConfigs[j].CreatedAt) {
|
|
||||||
return sortedConfigs[i].CreatedAt.After(sortedConfigs[j].CreatedAt)
|
|
||||||
}
|
|
||||||
return sortedConfigs[i].UUID > sortedConfigs[j].UUID
|
|
||||||
})
|
|
||||||
|
|
||||||
blocks := make([]ConfigExportBlock, 0, len(configs))
|
|
||||||
for i := range sortedConfigs {
|
|
||||||
blocks = append(blocks, s.buildExportBlock(&sortedConfigs[i]))
|
|
||||||
}
|
|
||||||
return &ProjectExportData{
|
|
||||||
Configs: blocks,
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ExportService) buildExportBlock(cfg *models.Configuration) ConfigExportBlock {
|
|
||||||
// Batch-fetch categories from local data (pricelist items → local_components fallback)
|
|
||||||
lotNames := make([]string, len(cfg.Items))
|
|
||||||
for i, item := range cfg.Items {
|
|
||||||
lotNames[i] = item.LotName
|
|
||||||
}
|
|
||||||
categories := s.resolveCategories(cfg.PricelistID, lotNames)
|
|
||||||
|
|
||||||
items := make([]ExportItem, len(cfg.Items))
|
|
||||||
var unitTotal float64
|
|
||||||
|
|
||||||
for i, item := range cfg.Items {
|
|
||||||
itemTotal := item.UnitPrice * float64(item.Quantity)
|
itemTotal := item.UnitPrice * float64(item.Quantity)
|
||||||
items[i] = ExportItem{
|
|
||||||
LotName: item.LotName,
|
|
||||||
Category: categories[item.LotName],
|
|
||||||
Quantity: item.Quantity,
|
|
||||||
UnitPrice: item.UnitPrice,
|
|
||||||
TotalPrice: itemTotal,
|
|
||||||
}
|
|
||||||
unitTotal += itemTotal
|
|
||||||
}
|
|
||||||
|
|
||||||
serverCount := cfg.ServerCount
|
// Получаем информацию о компоненте для заполнения категории
|
||||||
if serverCount < 1 {
|
componentView, err := componentService.GetByLotName(item.LotName)
|
||||||
serverCount = 1
|
|
||||||
}
|
|
||||||
|
|
||||||
return ConfigExportBlock{
|
|
||||||
Article: cfg.Article,
|
|
||||||
Line: cfg.Line,
|
|
||||||
ServerCount: serverCount,
|
|
||||||
UnitPrice: unitTotal,
|
|
||||||
Items: items,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ExportService) buildPricingExportBlock(cfg *models.Configuration, opts ProjectPricingExportOptions) (ProjectPricingExportConfig, error) {
|
|
||||||
block := ProjectPricingExportConfig{
|
|
||||||
Name: cfg.Name,
|
|
||||||
Article: cfg.Article,
|
|
||||||
Line: cfg.Line,
|
|
||||||
ServerCount: exportPositiveInt(cfg.ServerCount, 1),
|
|
||||||
Rows: make([]ProjectPricingExportRow, 0),
|
|
||||||
}
|
|
||||||
if s.localDB == nil {
|
|
||||||
for _, item := range cfg.Items {
|
|
||||||
block.Rows = append(block.Rows, ProjectPricingExportRow{
|
|
||||||
LotDisplay: item.LotName,
|
|
||||||
VendorPN: "—",
|
|
||||||
Quantity: item.Quantity,
|
|
||||||
Estimate: floatPtr(item.UnitPrice * float64(item.Quantity)),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return block, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
localCfg, err := s.localDB.GetConfigurationByUUID(cfg.UUID)
|
|
||||||
if err != nil {
|
|
||||||
localCfg = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
priceMap := s.resolvePricingTotals(cfg, localCfg, opts)
|
|
||||||
componentDescriptions := s.resolveLotDescriptions(cfg, localCfg)
|
|
||||||
if opts.IncludeBOM && localCfg != nil && len(localCfg.VendorSpec) > 0 {
|
|
||||||
coveredLots := make(map[string]struct{})
|
|
||||||
for _, row := range localCfg.VendorSpec {
|
|
||||||
rowMappings := normalizeLotMappings(row.LotMappings)
|
|
||||||
for _, mapping := range rowMappings {
|
|
||||||
coveredLots[mapping.LotName] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
description := strings.TrimSpace(row.Description)
|
|
||||||
if description == "" && len(rowMappings) > 0 {
|
|
||||||
description = componentDescriptions[rowMappings[0].LotName]
|
|
||||||
}
|
|
||||||
|
|
||||||
pricingRow := ProjectPricingExportRow{
|
|
||||||
LotDisplay: formatLotDisplay(rowMappings),
|
|
||||||
VendorPN: row.VendorPartnumber,
|
|
||||||
Description: description,
|
|
||||||
Quantity: exportPositiveInt(row.Quantity, 1),
|
|
||||||
BOMTotal: vendorRowTotal(row),
|
|
||||||
Estimate: computeMappingTotal(priceMap, rowMappings, row.Quantity, func(p pricingLevels) *float64 { return p.Estimate }),
|
|
||||||
Stock: computeMappingTotal(priceMap, rowMappings, row.Quantity, func(p pricingLevels) *float64 { return p.Stock }),
|
|
||||||
Competitor: computeMappingTotal(priceMap, rowMappings, row.Quantity, func(p pricingLevels) *float64 { return p.Competitor }),
|
|
||||||
}
|
|
||||||
block.Rows = append(block.Rows, pricingRow)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, item := range cfg.Items {
|
|
||||||
if item.LotName == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, ok := coveredLots[item.LotName]; ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
estimate := estimateOnlyTotal(priceMap[item.LotName].Estimate, item.UnitPrice, item.Quantity)
|
|
||||||
block.Rows = append(block.Rows, ProjectPricingExportRow{
|
|
||||||
LotDisplay: item.LotName,
|
|
||||||
VendorPN: "—",
|
|
||||||
Description: componentDescriptions[item.LotName],
|
|
||||||
Quantity: exportPositiveInt(item.Quantity, 1),
|
|
||||||
Estimate: estimate,
|
|
||||||
Stock: totalForUnitPrice(priceMap[item.LotName].Stock, item.Quantity),
|
|
||||||
Competitor: totalForUnitPrice(priceMap[item.LotName].Competitor, item.Quantity),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
if opts.isDDP() {
|
|
||||||
applyDDPMarkup(block.Rows, opts.saleMarkupFactor())
|
|
||||||
}
|
|
||||||
return block, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, item := range cfg.Items {
|
|
||||||
if item.LotName == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
estimate := estimateOnlyTotal(priceMap[item.LotName].Estimate, item.UnitPrice, item.Quantity)
|
|
||||||
block.Rows = append(block.Rows, ProjectPricingExportRow{
|
|
||||||
LotDisplay: item.LotName,
|
|
||||||
VendorPN: "—",
|
|
||||||
Description: componentDescriptions[item.LotName],
|
|
||||||
Quantity: exportPositiveInt(item.Quantity, 1),
|
|
||||||
Estimate: estimate,
|
|
||||||
Stock: totalForUnitPrice(priceMap[item.LotName].Stock, item.Quantity),
|
|
||||||
Competitor: totalForUnitPrice(priceMap[item.LotName].Competitor, item.Quantity),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.isDDP() {
|
|
||||||
applyDDPMarkup(block.Rows, opts.saleMarkupFactor())
|
|
||||||
}
|
|
||||||
|
|
||||||
return block, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func applyDDPMarkup(rows []ProjectPricingExportRow, factor float64) {
|
|
||||||
for i := range rows {
|
|
||||||
rows[i].Estimate = scaleFloatPtr(rows[i].Estimate, factor)
|
|
||||||
rows[i].Stock = scaleFloatPtr(rows[i].Stock, factor)
|
|
||||||
rows[i].Competitor = scaleFloatPtr(rows[i].Competitor, factor)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func scaleFloatPtr(v *float64, factor float64) *float64 {
|
|
||||||
if v == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
result := *v * factor
|
|
||||||
return &result
|
|
||||||
}
|
|
||||||
|
|
||||||
// resolveCategories returns lot_name → category map.
|
|
||||||
// Primary source: pricelist items (lot_category). Fallback: local_components table.
|
|
||||||
func (s *ExportService) resolveCategories(pricelistID *uint, lotNames []string) map[string]string {
|
|
||||||
if len(lotNames) == 0 || s.localDB == nil {
|
|
||||||
return map[string]string{}
|
|
||||||
}
|
|
||||||
|
|
||||||
categories := make(map[string]string, len(lotNames))
|
|
||||||
|
|
||||||
// Primary: pricelist items
|
|
||||||
if pricelistID != nil && *pricelistID > 0 {
|
|
||||||
if cats, err := s.localDB.GetLocalLotCategoriesByServerPricelistID(*pricelistID, lotNames); err == nil {
|
|
||||||
for lot, cat := range cats {
|
|
||||||
if strings.TrimSpace(cat) != "" {
|
|
||||||
categories[lot] = cat
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fallback: local_components for any still missing
|
|
||||||
var missing []string
|
|
||||||
for _, lot := range lotNames {
|
|
||||||
if categories[lot] == "" {
|
|
||||||
missing = append(missing, lot)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(missing) > 0 {
|
|
||||||
if fallback, err := s.localDB.GetLocalComponentCategoriesByLotNames(missing); err == nil {
|
|
||||||
for lot, cat := range fallback {
|
|
||||||
if strings.TrimSpace(cat) != "" {
|
|
||||||
categories[lot] = cat
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return categories
|
|
||||||
}
|
|
||||||
|
|
||||||
// sortItemsByCategory sorts items by category display order (items without category go to the end).
|
|
||||||
func sortItemsByCategory(items []ExportItem, categoryOrder map[string]int) {
|
|
||||||
for i := 0; i < len(items)-1; i++ {
|
|
||||||
for j := i + 1; j < len(items); j++ {
|
|
||||||
orderI, hasI := categoryOrder[items[i].Category]
|
|
||||||
orderJ, hasJ := categoryOrder[items[j].Category]
|
|
||||||
|
|
||||||
if !hasI && hasJ {
|
|
||||||
items[i], items[j] = items[j], items[i]
|
|
||||||
} else if hasI && hasJ && orderI > orderJ {
|
|
||||||
items[i], items[j] = items[j], items[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type pricingLevels struct {
|
|
||||||
Estimate *float64
|
|
||||||
Stock *float64
|
|
||||||
Competitor *float64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ExportService) resolvePricingTotals(cfg *models.Configuration, localCfg *localdb.LocalConfiguration, opts ProjectPricingExportOptions) map[string]pricingLevels {
|
|
||||||
result := map[string]pricingLevels{}
|
|
||||||
lots := collectPricingLots(cfg, localCfg, opts.IncludeBOM)
|
|
||||||
if len(lots) == 0 || s.localDB == nil {
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
estimateID := cfg.PricelistID
|
|
||||||
if estimateID == nil || *estimateID == 0 {
|
|
||||||
if latest, err := s.localDB.GetLatestLocalPricelistBySource("estimate"); err == nil && latest != nil {
|
|
||||||
estimateID = &latest.ServerID
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var warehouseID *uint
|
|
||||||
var competitorID *uint
|
|
||||||
if localCfg != nil {
|
|
||||||
warehouseID = localCfg.WarehousePricelistID
|
|
||||||
competitorID = localCfg.CompetitorPricelistID
|
|
||||||
}
|
|
||||||
if warehouseID == nil || *warehouseID == 0 {
|
|
||||||
if latest, err := s.localDB.GetLatestLocalPricelistBySource("warehouse"); err == nil && latest != nil {
|
|
||||||
warehouseID = &latest.ServerID
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if competitorID == nil || *competitorID == 0 {
|
|
||||||
if latest, err := s.localDB.GetLatestLocalPricelistBySource("competitor"); err == nil && latest != nil {
|
|
||||||
competitorID = &latest.ServerID
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, lot := range lots {
|
|
||||||
level := pricingLevels{}
|
|
||||||
level.Estimate = s.lookupPricePointer(estimateID, lot)
|
|
||||||
level.Stock = s.lookupPricePointer(warehouseID, lot)
|
|
||||||
level.Competitor = s.lookupPricePointer(competitorID, lot)
|
|
||||||
result[lot] = level
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ExportService) lookupPricePointer(serverPricelistID *uint, lotName string) *float64 {
|
|
||||||
if s.localDB == nil || serverPricelistID == nil || *serverPricelistID == 0 || strings.TrimSpace(lotName) == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
localPL, err := s.localDB.GetLocalPricelistByServerID(*serverPricelistID)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
price, err := s.localDB.GetLocalPriceForLot(localPL.ID, lotName)
|
|
||||||
if err != nil || price <= 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return floatPtr(price)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ExportService) resolveLotDescriptions(cfg *models.Configuration, localCfg *localdb.LocalConfiguration) map[string]string {
|
|
||||||
lots := collectPricingLots(cfg, localCfg, true)
|
|
||||||
result := make(map[string]string, len(lots))
|
|
||||||
if s.localDB == nil {
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
for _, lot := range lots {
|
|
||||||
component, err := s.localDB.GetLocalComponent(lot)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
// Если не удалось получить информацию о компоненте, используем только основные данные
|
||||||
}
|
items[i] = ExportItem{
|
||||||
result[lot] = component.LotDescription
|
LotName: item.LotName,
|
||||||
}
|
Quantity: item.Quantity,
|
||||||
return result
|
UnitPrice: item.UnitPrice,
|
||||||
}
|
TotalPrice: itemTotal,
|
||||||
|
}
|
||||||
func collectPricingLots(cfg *models.Configuration, localCfg *localdb.LocalConfiguration, includeBOM bool) []string {
|
} else {
|
||||||
seen := map[string]struct{}{}
|
items[i] = ExportItem{
|
||||||
out := make([]string, 0)
|
LotName: item.LotName,
|
||||||
if includeBOM && localCfg != nil {
|
Description: componentView.Description,
|
||||||
for _, row := range localCfg.VendorSpec {
|
Category: componentView.Category,
|
||||||
for _, mapping := range normalizeLotMappings(row.LotMappings) {
|
Quantity: item.Quantity,
|
||||||
if _, ok := seen[mapping.LotName]; ok {
|
UnitPrice: item.UnitPrice,
|
||||||
continue
|
TotalPrice: itemTotal,
|
||||||
}
|
|
||||||
seen[mapping.LotName] = struct{}{}
|
|
||||||
out = append(out, mapping.LotName)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
total += itemTotal
|
||||||
for _, item := range cfg.Items {
|
|
||||||
lot := strings.TrimSpace(item.LotName)
|
|
||||||
if lot == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, ok := seen[lot]; ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
seen[lot] = struct{}{}
|
|
||||||
out = append(out, lot)
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func normalizeLotMappings(mappings []localdb.VendorSpecLotMapping) []localdb.VendorSpecLotMapping {
|
|
||||||
if len(mappings) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
out := make([]localdb.VendorSpecLotMapping, 0, len(mappings))
|
|
||||||
for _, mapping := range mappings {
|
|
||||||
lot := strings.TrimSpace(mapping.LotName)
|
|
||||||
if lot == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
qty := mapping.QuantityPerPN
|
|
||||||
if qty < 1 {
|
|
||||||
qty = 1
|
|
||||||
}
|
|
||||||
out = append(out, localdb.VendorSpecLotMapping{
|
|
||||||
LotName: lot,
|
|
||||||
QuantityPerPN: qty,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func vendorRowTotal(row localdb.VendorSpecItem) *float64 {
|
|
||||||
if row.TotalPrice != nil {
|
|
||||||
return floatPtr(*row.TotalPrice)
|
|
||||||
}
|
|
||||||
if row.UnitPrice == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return floatPtr(*row.UnitPrice * float64(exportPositiveInt(row.Quantity, 1)))
|
|
||||||
}
|
|
||||||
|
|
||||||
func computeMappingTotal(priceMap map[string]pricingLevels, mappings []localdb.VendorSpecLotMapping, pnQty int, selector func(pricingLevels) *float64) *float64 {
|
|
||||||
if len(mappings) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
total := 0.0
|
|
||||||
hasValue := false
|
|
||||||
qty := exportPositiveInt(pnQty, 1)
|
|
||||||
for _, mapping := range mappings {
|
|
||||||
price := selector(priceMap[mapping.LotName])
|
|
||||||
if price == nil || *price <= 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
total += *price * float64(qty*mapping.QuantityPerPN)
|
|
||||||
hasValue = true
|
|
||||||
}
|
|
||||||
if !hasValue {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return floatPtr(total)
|
|
||||||
}
|
|
||||||
|
|
||||||
func totalForUnitPrice(unitPrice *float64, quantity int) *float64 {
|
|
||||||
if unitPrice == nil || *unitPrice <= 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
total := *unitPrice * float64(exportPositiveInt(quantity, 1))
|
|
||||||
return &total
|
|
||||||
}
|
|
||||||
|
|
||||||
func estimateOnlyTotal(estimatePrice *float64, fallbackUnitPrice float64, quantity int) *float64 {
|
|
||||||
if estimatePrice != nil && *estimatePrice > 0 {
|
|
||||||
return totalForUnitPrice(estimatePrice, quantity)
|
|
||||||
}
|
|
||||||
if fallbackUnitPrice <= 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
total := fallbackUnitPrice * float64(maxInt(quantity, 1))
|
|
||||||
return &total
|
|
||||||
}
|
|
||||||
|
|
||||||
func pricingCSVHeaders(opts ProjectPricingExportOptions) []string {
|
|
||||||
headers := make([]string, 0, 8)
|
|
||||||
headers = append(headers, "Line Item")
|
|
||||||
if opts.IncludeLOT {
|
|
||||||
headers = append(headers, "LOT")
|
|
||||||
}
|
|
||||||
headers = append(headers, "PN вендора", "Описание", "Кол-во")
|
|
||||||
if opts.IncludeBOM {
|
|
||||||
headers = append(headers, "BOM")
|
|
||||||
}
|
|
||||||
if opts.IncludeEstimate {
|
|
||||||
headers = append(headers, "Estimate")
|
|
||||||
}
|
|
||||||
if opts.IncludeStock {
|
|
||||||
headers = append(headers, "Stock")
|
|
||||||
}
|
|
||||||
if opts.IncludeCompetitor {
|
|
||||||
headers = append(headers, "Конкуренты")
|
|
||||||
}
|
|
||||||
return headers
|
|
||||||
}
|
|
||||||
|
|
||||||
func pricingCSVRow(row ProjectPricingExportRow, opts ProjectPricingExportOptions) []string {
|
|
||||||
record := make([]string, 0, 8)
|
|
||||||
record = append(record, "")
|
|
||||||
if opts.IncludeLOT {
|
|
||||||
record = append(record, emptyDash(row.LotDisplay))
|
|
||||||
}
|
|
||||||
record = append(record,
|
|
||||||
emptyDash(row.VendorPN),
|
|
||||||
emptyDash(row.Description),
|
|
||||||
fmt.Sprintf("%d", exportPositiveInt(row.Quantity, 1)),
|
|
||||||
)
|
|
||||||
if opts.IncludeBOM {
|
|
||||||
record = append(record, formatMoneyValue(row.BOMTotal))
|
|
||||||
}
|
|
||||||
if opts.IncludeEstimate {
|
|
||||||
record = append(record, formatMoneyValue(row.Estimate))
|
|
||||||
}
|
|
||||||
if opts.IncludeStock {
|
|
||||||
record = append(record, formatMoneyValue(row.Stock))
|
|
||||||
}
|
|
||||||
if opts.IncludeCompetitor {
|
|
||||||
record = append(record, formatMoneyValue(row.Competitor))
|
|
||||||
}
|
|
||||||
return record
|
|
||||||
}
|
|
||||||
|
|
||||||
func pricingConfigSummaryRow(cfg ProjectPricingExportConfig, opts ProjectPricingExportOptions) []string {
|
|
||||||
record := make([]string, 0, 8)
|
|
||||||
record = append(record, fmt.Sprintf("%d", cfg.Line))
|
|
||||||
if opts.IncludeLOT {
|
|
||||||
record = append(record, "")
|
|
||||||
}
|
|
||||||
record = append(record,
|
|
||||||
emptyDash(cfg.Article),
|
|
||||||
emptyDash(cfg.Name),
|
|
||||||
fmt.Sprintf("%d", exportPositiveInt(cfg.ServerCount, 1)),
|
|
||||||
)
|
|
||||||
if opts.IncludeBOM {
|
|
||||||
record = append(record, formatMoneyValue(sumPricingColumn(cfg.Rows, func(row ProjectPricingExportRow) *float64 { return row.BOMTotal })))
|
|
||||||
}
|
|
||||||
if opts.IncludeEstimate {
|
|
||||||
record = append(record, formatMoneyValue(sumPricingColumn(cfg.Rows, func(row ProjectPricingExportRow) *float64 { return row.Estimate })))
|
|
||||||
}
|
|
||||||
if opts.IncludeStock {
|
|
||||||
record = append(record, formatMoneyValue(sumPricingColumn(cfg.Rows, func(row ProjectPricingExportRow) *float64 { return row.Stock })))
|
|
||||||
}
|
|
||||||
if opts.IncludeCompetitor {
|
|
||||||
record = append(record, formatMoneyValue(sumPricingColumn(cfg.Rows, func(row ProjectPricingExportRow) *float64 { return row.Competitor })))
|
|
||||||
}
|
|
||||||
return record
|
|
||||||
}
|
|
||||||
|
|
||||||
func formatLotDisplay(mappings []localdb.VendorSpecLotMapping) string {
|
|
||||||
switch len(mappings) {
|
|
||||||
case 0:
|
|
||||||
return "н/д"
|
|
||||||
case 1:
|
|
||||||
return mappings[0].LotName
|
|
||||||
default:
|
|
||||||
return fmt.Sprintf("%s +%d", mappings[0].LotName, len(mappings)-1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func formatMoneyValue(value *float64) string {
|
|
||||||
if value == nil {
|
|
||||||
return "—"
|
|
||||||
}
|
|
||||||
n := math.Round(*value*100) / 100
|
|
||||||
sign := ""
|
|
||||||
if n < 0 {
|
|
||||||
sign = "-"
|
|
||||||
n = -n
|
|
||||||
}
|
|
||||||
whole := int64(n)
|
|
||||||
fraction := int(math.Round((n - float64(whole)) * 100))
|
|
||||||
if fraction == 100 {
|
|
||||||
whole++
|
|
||||||
fraction = 0
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s%s,%02d", sign, formatIntWithSpace(whole), fraction)
|
|
||||||
}
|
|
||||||
|
|
||||||
func emptyDash(value string) string {
|
|
||||||
if strings.TrimSpace(value) == "" {
|
|
||||||
return "—"
|
|
||||||
}
|
|
||||||
return value
|
|
||||||
}
|
|
||||||
|
|
||||||
func sumPricingColumn(rows []ProjectPricingExportRow, selector func(ProjectPricingExportRow) *float64) *float64 {
|
|
||||||
total := 0.0
|
|
||||||
hasValue := false
|
|
||||||
for _, row := range rows {
|
|
||||||
value := selector(row)
|
|
||||||
if value == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
total += *value
|
|
||||||
hasValue = true
|
|
||||||
}
|
|
||||||
if !hasValue {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return floatPtr(total)
|
|
||||||
}
|
|
||||||
|
|
||||||
func floatPtr(value float64) *float64 {
|
|
||||||
v := value
|
|
||||||
return &v
|
|
||||||
}
|
|
||||||
|
|
||||||
func exportPositiveInt(value, fallback int) int {
|
|
||||||
if value < 1 {
|
|
||||||
return fallback
|
|
||||||
}
|
|
||||||
return value
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatPriceComma formats a price with comma as decimal separator (e.g., "2074,5").
|
|
||||||
// Trailing zeros after the comma are trimmed, and if the value is an integer, no comma is shown.
|
|
||||||
func formatPriceComma(value float64) string {
|
|
||||||
if value == math.Trunc(value) {
|
|
||||||
return fmt.Sprintf("%.0f", value)
|
|
||||||
}
|
|
||||||
s := fmt.Sprintf("%.2f", value)
|
|
||||||
s = strings.ReplaceAll(s, ".", ",")
|
|
||||||
// Trim trailing zero: "2074,50" -> "2074,5"
|
|
||||||
s = strings.TrimRight(s, "0")
|
|
||||||
s = strings.TrimRight(s, ",")
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatPriceInt formats price as integer (rounded), no decimal.
|
|
||||||
func formatPriceInt(value float64) string {
|
|
||||||
return fmt.Sprintf("%.0f", math.Round(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatPriceWithSpace formats a price as an integer with space as thousands separator (e.g., "104 700").
|
|
||||||
func formatPriceWithSpace(value float64) string {
|
|
||||||
intVal := int64(math.Round(value))
|
|
||||||
if intVal < 0 {
|
|
||||||
return "-" + formatIntWithSpace(-intVal)
|
|
||||||
}
|
|
||||||
return formatIntWithSpace(intVal)
|
|
||||||
}
|
|
||||||
|
|
||||||
func formatIntWithSpace(n int64) string {
|
|
||||||
s := fmt.Sprintf("%d", n)
|
|
||||||
if len(s) <= 3 {
|
|
||||||
return s
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var result strings.Builder
|
return &ExportData{
|
||||||
remainder := len(s) % 3
|
Name: config.Name,
|
||||||
if remainder > 0 {
|
Items: items,
|
||||||
result.WriteString(s[:remainder])
|
Total: total,
|
||||||
|
Notes: config.Notes,
|
||||||
|
CreatedAt: config.CreatedAt,
|
||||||
}
|
}
|
||||||
for i := remainder; i < len(s); i += 3 {
|
|
||||||
if result.Len() > 0 {
|
|
||||||
result.WriteByte(' ')
|
|
||||||
}
|
|
||||||
result.WriteString(s[i : i+3])
|
|
||||||
}
|
|
||||||
return result.String()
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,563 +0,0 @@
|
|||||||
package services
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/csv"
|
|
||||||
"io"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
|
||||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
|
||||||
)
|
|
||||||
|
|
||||||
func newTestProjectData(items []ExportItem, article string, serverCount int) *ProjectExportData {
|
|
||||||
var unitTotal float64
|
|
||||||
for _, item := range items {
|
|
||||||
unitTotal += item.UnitPrice * float64(item.Quantity)
|
|
||||||
}
|
|
||||||
if serverCount < 1 {
|
|
||||||
serverCount = 1
|
|
||||||
}
|
|
||||||
return &ProjectExportData{
|
|
||||||
Configs: []ConfigExportBlock{
|
|
||||||
{
|
|
||||||
Article: article,
|
|
||||||
ServerCount: serverCount,
|
|
||||||
UnitPrice: unitTotal,
|
|
||||||
Items: items,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestToCSV_UTF8BOM(t *testing.T) {
|
|
||||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
|
|
||||||
data := newTestProjectData([]ExportItem{
|
|
||||||
{
|
|
||||||
LotName: "LOT-001",
|
|
||||||
Category: "CAT",
|
|
||||||
Quantity: 1,
|
|
||||||
UnitPrice: 100.0,
|
|
||||||
TotalPrice: 100.0,
|
|
||||||
},
|
|
||||||
}, "TEST-ARTICLE", 1)
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := svc.ToCSV(&buf, data); err != nil {
|
|
||||||
t.Fatalf("ToCSV failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
csvBytes := buf.Bytes()
|
|
||||||
if len(csvBytes) < 3 {
|
|
||||||
t.Fatalf("CSV too short to contain BOM")
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedBOM := []byte{0xEF, 0xBB, 0xBF}
|
|
||||||
actualBOM := csvBytes[:3]
|
|
||||||
if !bytes.Equal(actualBOM, expectedBOM) {
|
|
||||||
t.Errorf("UTF-8 BOM mismatch. Expected %v, got %v", expectedBOM, actualBOM)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestToCSV_SemicolonDelimiter(t *testing.T) {
|
|
||||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
|
|
||||||
data := newTestProjectData([]ExportItem{
|
|
||||||
{
|
|
||||||
LotName: "LOT-001",
|
|
||||||
Category: "CAT",
|
|
||||||
Quantity: 2,
|
|
||||||
UnitPrice: 100.50,
|
|
||||||
TotalPrice: 201.00,
|
|
||||||
},
|
|
||||||
}, "TEST-ARTICLE", 1)
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := svc.ToCSV(&buf, data); err != nil {
|
|
||||||
t.Fatalf("ToCSV failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
csvBytes := buf.Bytes()
|
|
||||||
reader := csv.NewReader(bytes.NewReader(csvBytes[3:]))
|
|
||||||
reader.Comma = ';'
|
|
||||||
|
|
||||||
// Read header
|
|
||||||
header, err := reader.Read()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to read header: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(header) != 8 {
|
|
||||||
t.Errorf("Expected 8 columns, got %d", len(header))
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedHeader := []string{"Line", "Type", "p/n", "Description", "Qty (1 pcs.)", "Qty (total)", "Price (1 pcs.)", "Price (total)"}
|
|
||||||
for i, col := range expectedHeader {
|
|
||||||
if i < len(header) && header[i] != col {
|
|
||||||
t.Errorf("Column %d: expected %q, got %q", i, col, header[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read server row
|
|
||||||
serverRow, err := reader.Read()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to read server row: %v", err)
|
|
||||||
}
|
|
||||||
if serverRow[0] != "10" {
|
|
||||||
t.Errorf("Expected line number 10, got %s", serverRow[0])
|
|
||||||
}
|
|
||||||
if serverRow[2] != "TEST-ARTICLE" {
|
|
||||||
t.Errorf("Expected article TEST-ARTICLE, got %s", serverRow[2])
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read component row
|
|
||||||
itemRow, err := reader.Read()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to read item row: %v", err)
|
|
||||||
}
|
|
||||||
if itemRow[2] != "LOT-001" {
|
|
||||||
t.Errorf("Lot name mismatch: expected LOT-001, got %s", itemRow[2])
|
|
||||||
}
|
|
||||||
if itemRow[4] != "2" {
|
|
||||||
t.Errorf("Quantity mismatch: expected 2, got %s", itemRow[4])
|
|
||||||
}
|
|
||||||
if itemRow[6] != "100,5" {
|
|
||||||
t.Errorf("Unit price mismatch: expected 100,5, got %s", itemRow[6])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestToCSV_ServerRow(t *testing.T) {
|
|
||||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
|
|
||||||
data := newTestProjectData([]ExportItem{
|
|
||||||
{LotName: "LOT-001", Category: "CAT", Quantity: 1, UnitPrice: 100.0, TotalPrice: 100.0},
|
|
||||||
{LotName: "LOT-002", Category: "CAT", Quantity: 2, UnitPrice: 50.0, TotalPrice: 100.0},
|
|
||||||
}, "DL380-ART", 10)
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := svc.ToCSV(&buf, data); err != nil {
|
|
||||||
t.Fatalf("ToCSV failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
csvBytes := buf.Bytes()
|
|
||||||
reader := csv.NewReader(bytes.NewReader(csvBytes[3:]))
|
|
||||||
reader.Comma = ';'
|
|
||||||
|
|
||||||
// Skip header
|
|
||||||
reader.Read()
|
|
||||||
|
|
||||||
// Read server row
|
|
||||||
serverRow, err := reader.Read()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to read server row: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if serverRow[0] != "10" {
|
|
||||||
t.Errorf("Expected line 10, got %s", serverRow[0])
|
|
||||||
}
|
|
||||||
if serverRow[2] != "DL380-ART" {
|
|
||||||
t.Errorf("Expected article DL380-ART, got %s", serverRow[2])
|
|
||||||
}
|
|
||||||
if serverRow[5] != "10" {
|
|
||||||
t.Errorf("Expected server count 10, got %s", serverRow[5])
|
|
||||||
}
|
|
||||||
// UnitPrice = 100 + 100 = 200
|
|
||||||
if serverRow[6] != "200" {
|
|
||||||
t.Errorf("Expected unit price 200, got %s", serverRow[6])
|
|
||||||
}
|
|
||||||
// TotalPrice = 200 * 10 = 2000
|
|
||||||
if serverRow[7] != "2 000" {
|
|
||||||
t.Errorf("Expected total price '2 000', got %q", serverRow[7])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestToCSV_CategorySorting(t *testing.T) {
|
|
||||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
|
|
||||||
data := newTestProjectData([]ExportItem{
|
|
||||||
{LotName: "LOT-001", Category: "CAT-A", Quantity: 1, UnitPrice: 100.0, TotalPrice: 100.0},
|
|
||||||
{LotName: "LOT-002", Category: "CAT-C", Quantity: 1, UnitPrice: 100.0, TotalPrice: 100.0},
|
|
||||||
{LotName: "LOT-003", Category: "CAT-B", Quantity: 1, UnitPrice: 100.0, TotalPrice: 100.0},
|
|
||||||
}, "ART", 1)
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := svc.ToCSV(&buf, data); err != nil {
|
|
||||||
t.Fatalf("ToCSV failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
csvBytes := buf.Bytes()
|
|
||||||
reader := csv.NewReader(bytes.NewReader(csvBytes[3:]))
|
|
||||||
reader.Comma = ';'
|
|
||||||
|
|
||||||
// Skip header and server row
|
|
||||||
reader.Read()
|
|
||||||
reader.Read()
|
|
||||||
|
|
||||||
// Without category repo, items maintain original order
|
|
||||||
row1, _ := reader.Read()
|
|
||||||
if row1[2] != "LOT-001" {
|
|
||||||
t.Errorf("Expected LOT-001 first, got %s", row1[2])
|
|
||||||
}
|
|
||||||
|
|
||||||
row2, _ := reader.Read()
|
|
||||||
if row2[2] != "LOT-002" {
|
|
||||||
t.Errorf("Expected LOT-002 second, got %s", row2[2])
|
|
||||||
}
|
|
||||||
|
|
||||||
row3, _ := reader.Read()
|
|
||||||
if row3[2] != "LOT-003" {
|
|
||||||
t.Errorf("Expected LOT-003 third, got %s", row3[2])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestToCSV_EmptyData(t *testing.T) {
|
|
||||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
|
|
||||||
data := &ProjectExportData{
|
|
||||||
Configs: []ConfigExportBlock{},
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := svc.ToCSV(&buf, data); err != nil {
|
|
||||||
t.Fatalf("ToCSV failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
csvBytes := buf.Bytes()
|
|
||||||
reader := csv.NewReader(bytes.NewReader(csvBytes[3:]))
|
|
||||||
reader.Comma = ';'
|
|
||||||
|
|
||||||
header, err := reader.Read()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Failed to read header: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(header) != 8 {
|
|
||||||
t.Errorf("Expected 8 columns, got %d", len(header))
|
|
||||||
}
|
|
||||||
|
|
||||||
// No more rows expected
|
|
||||||
_, err = reader.Read()
|
|
||||||
if err != io.EOF {
|
|
||||||
t.Errorf("Expected EOF after header, got: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestToCSVBytes_BackwardCompat(t *testing.T) {
|
|
||||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
|
|
||||||
data := newTestProjectData([]ExportItem{
|
|
||||||
{LotName: "LOT-001", Category: "CAT", Quantity: 1, UnitPrice: 100.0, TotalPrice: 100.0},
|
|
||||||
}, "ART", 1)
|
|
||||||
|
|
||||||
csvBytes, err := svc.ToCSVBytes(data)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("ToCSVBytes failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(csvBytes) < 3 {
|
|
||||||
t.Fatalf("CSV bytes too short")
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedBOM := []byte{0xEF, 0xBB, 0xBF}
|
|
||||||
actualBOM := csvBytes[:3]
|
|
||||||
if !bytes.Equal(actualBOM, expectedBOM) {
|
|
||||||
t.Errorf("UTF-8 BOM mismatch in ToCSVBytes")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestToCSV_WriterError(t *testing.T) {
|
|
||||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
|
|
||||||
data := newTestProjectData([]ExportItem{
|
|
||||||
{LotName: "LOT-001", Category: "CAT", Quantity: 1, UnitPrice: 100.0, TotalPrice: 100.0},
|
|
||||||
}, "ART", 1)
|
|
||||||
|
|
||||||
failingWriter := &failingWriter{}
|
|
||||||
|
|
||||||
if err := svc.ToCSV(failingWriter, data); err == nil {
|
|
||||||
t.Errorf("Expected error from failing writer, got nil")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestToCSV_MultipleBlocks(t *testing.T) {
|
|
||||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
|
|
||||||
data := &ProjectExportData{
|
|
||||||
Configs: []ConfigExportBlock{
|
|
||||||
{
|
|
||||||
Article: "ART-1",
|
|
||||||
ServerCount: 2,
|
|
||||||
UnitPrice: 500.0,
|
|
||||||
Items: []ExportItem{
|
|
||||||
{LotName: "LOT-A", Category: "CPU", Quantity: 1, UnitPrice: 500.0, TotalPrice: 500.0},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Article: "ART-2",
|
|
||||||
ServerCount: 3,
|
|
||||||
UnitPrice: 1000.0,
|
|
||||||
Items: []ExportItem{
|
|
||||||
{LotName: "LOT-B", Category: "MEM", Quantity: 2, UnitPrice: 500.0, TotalPrice: 1000.0},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := svc.ToCSV(&buf, data); err != nil {
|
|
||||||
t.Fatalf("ToCSV failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
csvBytes := buf.Bytes()
|
|
||||||
reader := csv.NewReader(bytes.NewReader(csvBytes[3:]))
|
|
||||||
reader.Comma = ';'
|
|
||||||
reader.FieldsPerRecord = -1 // allow variable fields
|
|
||||||
|
|
||||||
// Header
|
|
||||||
reader.Read()
|
|
||||||
|
|
||||||
// Block 1: server row
|
|
||||||
srv1, _ := reader.Read()
|
|
||||||
if srv1[0] != "10" {
|
|
||||||
t.Errorf("Block 1 line: expected 10, got %s", srv1[0])
|
|
||||||
}
|
|
||||||
if srv1[7] != "1 000" {
|
|
||||||
t.Errorf("Block 1 total: expected '1 000', got %q", srv1[7])
|
|
||||||
}
|
|
||||||
|
|
||||||
// Block 1: component row
|
|
||||||
comp1, _ := reader.Read()
|
|
||||||
if comp1[2] != "LOT-A" {
|
|
||||||
t.Errorf("Block 1 component: expected LOT-A, got %s", comp1[2])
|
|
||||||
}
|
|
||||||
|
|
||||||
// Separator row
|
|
||||||
sep, _ := reader.Read()
|
|
||||||
allEmpty := true
|
|
||||||
for _, v := range sep {
|
|
||||||
if v != "" {
|
|
||||||
allEmpty = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !allEmpty {
|
|
||||||
t.Errorf("Expected empty separator row, got %v", sep)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Block 2: server row
|
|
||||||
srv2, _ := reader.Read()
|
|
||||||
if srv2[0] != "20" {
|
|
||||||
t.Errorf("Block 2 line: expected 20, got %s", srv2[0])
|
|
||||||
}
|
|
||||||
if srv2[7] != "3 000" {
|
|
||||||
t.Errorf("Block 2 total: expected '3 000', got %q", srv2[7])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProjectToExportData_SortsByLine(t *testing.T) {
|
|
||||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
|
|
||||||
configs := []models.Configuration{
|
|
||||||
{
|
|
||||||
UUID: "cfg-1",
|
|
||||||
Line: 30,
|
|
||||||
Article: "ART-30",
|
|
||||||
ServerCount: 1,
|
|
||||||
Items: models.ConfigItems{{LotName: "LOT-30", Quantity: 1, UnitPrice: 300}},
|
|
||||||
CreatedAt: time.Now().Add(-1 * time.Hour),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
UUID: "cfg-2",
|
|
||||||
Line: 10,
|
|
||||||
Article: "ART-10",
|
|
||||||
ServerCount: 1,
|
|
||||||
Items: models.ConfigItems{{LotName: "LOT-10", Quantity: 1, UnitPrice: 100}},
|
|
||||||
CreatedAt: time.Now().Add(-2 * time.Hour),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
UUID: "cfg-3",
|
|
||||||
Line: 20,
|
|
||||||
Article: "ART-20",
|
|
||||||
ServerCount: 1,
|
|
||||||
Items: models.ConfigItems{{LotName: "LOT-20", Quantity: 1, UnitPrice: 200}},
|
|
||||||
CreatedAt: time.Now().Add(-3 * time.Hour),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
data := svc.ProjectToExportData(configs)
|
|
||||||
if len(data.Configs) != 3 {
|
|
||||||
t.Fatalf("expected 3 blocks, got %d", len(data.Configs))
|
|
||||||
}
|
|
||||||
if data.Configs[0].Article != "ART-10" || data.Configs[0].Line != 10 {
|
|
||||||
t.Fatalf("first block must be line 10, got article=%s line=%d", data.Configs[0].Article, data.Configs[0].Line)
|
|
||||||
}
|
|
||||||
if data.Configs[1].Article != "ART-20" || data.Configs[1].Line != 20 {
|
|
||||||
t.Fatalf("second block must be line 20, got article=%s line=%d", data.Configs[1].Article, data.Configs[1].Line)
|
|
||||||
}
|
|
||||||
if data.Configs[2].Article != "ART-30" || data.Configs[2].Line != 30 {
|
|
||||||
t.Fatalf("third block must be line 30, got article=%s line=%d", data.Configs[2].Article, data.Configs[2].Line)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFormatPriceWithSpace(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
input float64
|
|
||||||
expected string
|
|
||||||
}{
|
|
||||||
{0, "0"},
|
|
||||||
{100, "100"},
|
|
||||||
{1000, "1 000"},
|
|
||||||
{10470, "10 470"},
|
|
||||||
{104700, "104 700"},
|
|
||||||
{1000000, "1 000 000"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
result := formatPriceWithSpace(tt.input)
|
|
||||||
if result != tt.expected {
|
|
||||||
t.Errorf("formatPriceWithSpace(%v): expected %q, got %q", tt.input, tt.expected, result)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFormatPriceComma(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
input float64
|
|
||||||
expected string
|
|
||||||
}{
|
|
||||||
{100.0, "100"},
|
|
||||||
{2074.5, "2074,5"},
|
|
||||||
{100.50, "100,5"},
|
|
||||||
{99.99, "99,99"},
|
|
||||||
{0, "0"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
result := formatPriceComma(tt.input)
|
|
||||||
if result != tt.expected {
|
|
||||||
t.Errorf("formatPriceComma(%v): expected %q, got %q", tt.input, tt.expected, result)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestToPricingCSV_UsesSelectedColumns(t *testing.T) {
|
|
||||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
data := &ProjectPricingExportData{
|
|
||||||
Configs: []ProjectPricingExportConfig{
|
|
||||||
{
|
|
||||||
Name: "Config A",
|
|
||||||
Article: "ART-1",
|
|
||||||
Line: 10,
|
|
||||||
ServerCount: 2,
|
|
||||||
Rows: []ProjectPricingExportRow{
|
|
||||||
{
|
|
||||||
LotDisplay: "LOT_A +1",
|
|
||||||
VendorPN: "PN-001",
|
|
||||||
Description: "Bundle row",
|
|
||||||
Quantity: 2,
|
|
||||||
BOMTotal: floatPtr(2400.5),
|
|
||||||
Estimate: floatPtr(2000),
|
|
||||||
Stock: floatPtr(1800.25),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
}
|
|
||||||
opts := ProjectPricingExportOptions{
|
|
||||||
IncludeLOT: true,
|
|
||||||
IncludeBOM: true,
|
|
||||||
IncludeEstimate: true,
|
|
||||||
IncludeStock: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := svc.ToPricingCSV(&buf, data, opts); err != nil {
|
|
||||||
t.Fatalf("ToPricingCSV failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
reader := csv.NewReader(bytes.NewReader(buf.Bytes()[3:]))
|
|
||||||
reader.Comma = ';'
|
|
||||||
reader.FieldsPerRecord = -1
|
|
||||||
|
|
||||||
header, err := reader.Read()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("read header row: %v", err)
|
|
||||||
}
|
|
||||||
expectedHeader := []string{"Line Item", "LOT", "PN вендора", "Описание", "Кол-во", "BOM", "Estimate", "Stock"}
|
|
||||||
for i, want := range expectedHeader {
|
|
||||||
if header[i] != want {
|
|
||||||
t.Fatalf("header[%d]: expected %q, got %q", i, want, header[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
summary, err := reader.Read()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("read summary row: %v", err)
|
|
||||||
}
|
|
||||||
expectedSummary := []string{"10", "", "", "Config A", "2", "2 400,50", "2 000,00", "1 800,25"}
|
|
||||||
for i, want := range expectedSummary {
|
|
||||||
if summary[i] != want {
|
|
||||||
t.Fatalf("summary[%d]: expected %q, got %q", i, want, summary[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
row, err := reader.Read()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("read data row: %v", err)
|
|
||||||
}
|
|
||||||
expectedRow := []string{"", "LOT_A +1", "PN-001", "Bundle row", "2", "2 400,50", "2 000,00", "1 800,25"}
|
|
||||||
for i, want := range expectedRow {
|
|
||||||
if row[i] != want {
|
|
||||||
t.Fatalf("row[%d]: expected %q, got %q", i, want, row[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestProjectToPricingExportData_UsesCartRowsWithoutBOM(t *testing.T) {
|
|
||||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
|
||||||
configs := []models.Configuration{
|
|
||||||
{
|
|
||||||
UUID: "cfg-1",
|
|
||||||
Name: "Config A",
|
|
||||||
Article: "ART-1",
|
|
||||||
ServerCount: 1,
|
|
||||||
Items: models.ConfigItems{
|
|
||||||
{LotName: "LOT_A", Quantity: 2, UnitPrice: 300},
|
|
||||||
},
|
|
||||||
CreatedAt: time.Now(),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := svc.ProjectToPricingExportData(configs, ProjectPricingExportOptions{
|
|
||||||
IncludeLOT: true,
|
|
||||||
IncludeEstimate: true,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("ProjectToPricingExportData failed: %v", err)
|
|
||||||
}
|
|
||||||
if len(data.Configs) != 1 || len(data.Configs[0].Rows) != 1 {
|
|
||||||
t.Fatalf("unexpected rows count: %+v", data.Configs)
|
|
||||||
}
|
|
||||||
row := data.Configs[0].Rows[0]
|
|
||||||
if row.LotDisplay != "LOT_A" {
|
|
||||||
t.Fatalf("expected LOT_A, got %q", row.LotDisplay)
|
|
||||||
}
|
|
||||||
if row.VendorPN != "—" {
|
|
||||||
t.Fatalf("expected vendor dash, got %q", row.VendorPN)
|
|
||||||
}
|
|
||||||
if row.Estimate == nil || *row.Estimate != 600 {
|
|
||||||
t.Fatalf("expected estimate total 600, got %+v", row.Estimate)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// failingWriter is an io.Writer stub whose Write always fails; it is used
// to exercise error propagation in the export code.
type failingWriter struct{}

// Write discards the input and reports io.EOF unconditionally.
func (w *failingWriter) Write(_ []byte) (int, error) {
	return 0, io.EOF
}
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user