Compare commits
10 Commits
v1.5.9
...
e9307c4bad
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e9307c4bad | ||
|
|
1b48401828 | ||
|
|
4a86f7b7ba | ||
|
|
955467fbea | ||
|
|
9ddffe48e9 | ||
|
|
4732605925 | ||
|
|
d318a7f462 | ||
|
|
1bec110d91 | ||
|
|
6392e4b4a9 | ||
|
|
8f7defdb8a |
@@ -1,5 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
repo_root="$(git rev-parse --show-toplevel)"
|
||||
"$repo_root/scripts/check-secrets.sh"
|
||||
41
.gitignore
vendored
41
.gitignore
vendored
@@ -1,16 +1,5 @@
|
||||
# QuoteForge
|
||||
config.yaml
|
||||
.env
|
||||
.env.*
|
||||
*.pem
|
||||
*.key
|
||||
*.p12
|
||||
*.pfx
|
||||
*.crt
|
||||
id_rsa
|
||||
id_rsa.*
|
||||
secrets.yaml
|
||||
secrets.yml
|
||||
|
||||
# Local SQLite database (contains encrypted credentials)
|
||||
/data/*.db
|
||||
@@ -23,30 +12,10 @@ secrets.yml
|
||||
/importer
|
||||
/cron
|
||||
/bin/
|
||||
qfs
|
||||
|
||||
# Local Go build cache used in sandboxed runs
|
||||
.gocache/
|
||||
|
||||
# Local tooling state
|
||||
.claude/
|
||||
|
||||
# Editor settings
|
||||
.idea/
|
||||
.vscode/
|
||||
*.swp
|
||||
*.swo
|
||||
|
||||
# Temp and logs
|
||||
*.tmp
|
||||
*.temp
|
||||
*.log
|
||||
|
||||
# Go test/build artifacts
|
||||
*.out
|
||||
*.test
|
||||
coverage/
|
||||
|
||||
# ---> macOS
|
||||
# General
|
||||
.DS_Store
|
||||
@@ -75,12 +44,4 @@ Network Trash Folder
|
||||
Temporary Items
|
||||
.apdisk
|
||||
|
||||
# Release artifacts (binaries, archives, checksums), but keep markdown notes tracked
|
||||
releases/*
|
||||
!releases/README.md
|
||||
!releases/memory/
|
||||
!releases/memory/**
|
||||
!releases/**/
|
||||
releases/**/*
|
||||
!releases/README.md
|
||||
!releases/*/RELEASE_NOTES.md
|
||||
releases/
|
||||
|
||||
3
.gitmodules
vendored
3
.gitmodules
vendored
@@ -1,3 +0,0 @@
|
||||
[submodule "bible"]
|
||||
path = bible
|
||||
url = https://git.mchus.pro/mchus/bible.git
|
||||
11
AGENTS.md
11
AGENTS.md
@@ -1,11 +0,0 @@
|
||||
# QuoteForge — Instructions for Codex
|
||||
|
||||
## Shared Engineering Rules
|
||||
Read `bible/` — shared rules for all projects (CSV, logging, DB, tables, background tasks, code style).
|
||||
Start with `bible/rules/patterns/` for specific contracts.
|
||||
|
||||
## Project Architecture
|
||||
Read `bible-local/` — QuoteForge specific architecture.
|
||||
Read order: `bible-local/README.md` → relevant files for the task.
|
||||
|
||||
Every architectural decision specific to this project must be recorded in `bible-local/`.
|
||||
168
CLAUDE.md
168
CLAUDE.md
@@ -1,17 +1,163 @@
|
||||
# QuoteForge — Instructions for Claude
|
||||
# QuoteForge - Claude Code Instructions
|
||||
|
||||
## Shared Engineering Rules
|
||||
Read `bible/` — shared rules for all projects (CSV, logging, DB, tables, background tasks, code style).
|
||||
Start with `bible/rules/patterns/` for specific contracts.
|
||||
## Overview
|
||||
Корпоративный конфигуратор серверов и формирование КП. MariaDB (RFQ_LOG) + SQLite для оффлайн.
|
||||
|
||||
## Project Architecture
|
||||
Read `bible-local/` — QuoteForge specific architecture.
|
||||
Read order: `bible-local/README.md` → relevant files for the task.
|
||||
## Development Phases
|
||||
|
||||
Every architectural decision specific to this project must be recorded in `bible-local/`.
|
||||
### Phase 1: Pricelists in MariaDB ✅ DONE
|
||||
### Phase 2: Local SQLite Database ✅ DONE
|
||||
|
||||
### Phase 2.5: Full Offline Mode 🔶 IN PROGRESS
|
||||
**Local-first architecture:** приложение ВСЕГДА работает с SQLite, MariaDB только для синхронизации.
|
||||
|
||||
**Принцип работы:**
|
||||
- ВСЕ операции (CRUD) выполняются в SQLite
|
||||
- При создании конфигурации:
|
||||
1. Если online → проверить новые прайслисты на сервере → скачать если есть
|
||||
2. Далее работаем с local_pricelists (и online, и offline одинаково)
|
||||
- Background sync: push pending_changes → pull updates
|
||||
|
||||
**DONE:**
|
||||
- ✅ Sync queue table (pending_changes) - `internal/localdb/models.go`
|
||||
- ✅ Model converters: MariaDB ↔ SQLite - `internal/localdb/converters.go`
|
||||
- ✅ LocalConfigurationService: все CRUD через SQLite - `internal/services/local_configuration.go`
|
||||
- ✅ Pre-create pricelist check: `SyncPricelistsIfNeeded()` - `internal/services/sync/service.go`
|
||||
- ✅ Push pending changes: `PushPendingChanges()` - sync service + handlers
|
||||
- ✅ Sync API endpoints: `/api/sync/push`, `/pending/count`, `/pending`
|
||||
- ✅ Integrate LocalConfigurationService in main.go (replace ConfigurationService)
|
||||
- ✅ Add routes for new sync endpoints (`/api/sync/push`, `/pending/count`, `/pending`)
|
||||
- ✅ ConfigurationGetter interface for handler compatibility
|
||||
- ✅ Background sync worker: auto-sync every 5min (push + pull) - `internal/services/sync/worker.go`
|
||||
- ✅ UI: sync status indicator (pending badge + sync button + offline/online dot) - `web/templates/partials/sync_status.html`
|
||||
- ✅ RefreshPrices for local mode:
|
||||
- `RefreshPrices()` / `RefreshPricesNoAuth()` в `local_configuration.go`
|
||||
- Берёт цены из `local_components.current_price`
|
||||
- Graceful degradation при отсутствии компонента
|
||||
- Добавлено поле `price_updated_at` в `LocalConfiguration` (models.go:72)
|
||||
- Обновлены converters для PriceUpdatedAt
|
||||
- UI кнопка "Пересчитать цену" работает offline/online
|
||||
- ✅ Fixed sync bugs:
|
||||
- Duplicate entry error при update конфигураций (`sync/service.go:334-365`)
|
||||
- pushConfigurationUpdate теперь проверяет наличие server_id перед update
|
||||
- Если нет ID → получает из LocalConfiguration.ServerID или ищет на сервере
|
||||
- Fixed setup.go: `settings.Password` → `settings.PasswordEncrypted`
|
||||
|
||||
**TODO:**
|
||||
- ❌ Conflict resolution (Phase 4, last-write-wins default)
|
||||
|
||||
### UI Improvements ✅ MOSTLY DONE
|
||||
|
||||
**1. Sync UI + pricelist badge: ✅ DONE**
|
||||
- ✅ `sync_status.html`: SVG иконки Online/Offline (кликабельные → открывают модал)
|
||||
- ✅ Кнопка sync → иконка circular arrows (только full sync)
|
||||
- ✅ Модальное окно "Статус системы" в `base.html` (info о БД, ошибки синхронизации)
|
||||
- ✅ `configs.html`: badge с версией активного прайслиста
|
||||
- ✅ Загрузка через `/api/pricelists/latest` при DOMContentLoaded
|
||||
- ✅ Удалён dropdown с Push changes (упрощение UI)
|
||||
|
||||
**2. Прайслисты → вкладка в "Администратор цен": ✅ DONE**
|
||||
- ✅ `base.html`: убрана ссылка "Прайслисты" из навигации
|
||||
- ✅ `admin_pricing.html`: добавлена вкладка "Прайслисты"
|
||||
- ✅ Логика перенесена из `pricelists.html` (table, create modal, CRUD)
|
||||
- ✅ Route `/pricelists` → редирект на `/admin/pricing?tab=pricelists`
|
||||
- ✅ Поддержка URL param `?tab=pricelists`
|
||||
|
||||
**3. Модал "Настройка цены" - кол-во котировок с учётом периода: ❌ TODO**
|
||||
- Текущее: показывает только общее кол-во котировок
|
||||
- Новое: показывать `N (всего: M)` где N - за выбранный период, M - всего
|
||||
- ❌ `admin_pricing.html`: обновить `#modal-quote-count`
|
||||
- ❌ `admin_pricing_handler.go`: в `/api/admin/pricing/preview` возвращать `quote_count_period` + `quote_count_total`
|
||||
|
||||
**4. Страница настроек: ❌ ОТЛОЖЕНО**
|
||||
- Перенесено в Phase 3 (после основных UI улучшений)
|
||||
|
||||
### Phase 3: Projects and Specifications
|
||||
- qt_projects, qt_specifications tables (MariaDB)
|
||||
- Replace qt_configurations → Project/Specification hierarchy
|
||||
- Fields: opty, customer_requirement, variant, qty, rev
|
||||
- Local projects/specs with server sync
|
||||
|
||||
### Phase 4: Price Versioning
|
||||
- Bind specifications to pricelist versions
|
||||
- Price diff comparison
|
||||
- Auto-cleanup expired pricelists (>1 year, usage_count=0)
|
||||
|
||||
## Tech Stack
|
||||
Go 1.22+ | Gin | GORM | MariaDB 11 | SQLite (glebarez/sqlite) | htmx + Tailwind CDN | excelize
|
||||
|
||||
## Key Tables
|
||||
|
||||
### READ-ONLY (external systems)
|
||||
- `lot` (lot_name PK, lot_description)
|
||||
- `lot_log` (lot, supplier, date, price, quality, comments)
|
||||
- `supplier` (supplier_name PK)
|
||||
|
||||
### MariaDB (qt_* prefix)
|
||||
- `qt_lot_metadata` - component prices, methods, popularity
|
||||
- `qt_categories` - category codes and names
|
||||
- `qt_pricelists` - version snapshots (YYYY-MM-DD-NNN format)
|
||||
- `qt_pricelist_items` - prices per pricelist
|
||||
- `qt_projects` - uuid, opty, customer_requirement, name (Phase 3)
|
||||
- `qt_specifications` - project_id, pricelist_id, variant, rev, qty, items JSON (Phase 3)
|
||||
|
||||
### SQLite (data/quoteforge.db)
|
||||
- `connection_settings` - encrypted DB credentials (PasswordEncrypted field)
|
||||
- `local_pricelists/items` - cached from server
|
||||
- `local_components` - lot cache for offline search (with current_price)
|
||||
- `local_configurations` - UUID, items, price_updated_at, sync_status (pending/synced/conflict), server_id
|
||||
- `local_projects/specifications` - Phase 3
|
||||
- `pending_changes` - sync queue (entity_type, uuid, op, payload, created_at, attempts, last_error)
|
||||
|
||||
## Business Logic
|
||||
|
||||
**Part number parsing:** `CPU_AMD_9654` → category=`CPU`, model=`AMD_9654`
|
||||
|
||||
**Price methods:** manual | median | average | weighted_median
|
||||
|
||||
**Price freshness:** fresh (<30d, ≥3 quotes) | normal (<60d) | stale (<90d) | critical
|
||||
|
||||
**Pricelist version:** `YYYY-MM-DD-NNN` (e.g., `2024-01-31-001`)
|
||||
|
||||
## API Endpoints
|
||||
|
||||
| Group | Endpoints |
|
||||
|-------|-----------|
|
||||
| Setup | GET/POST /setup, POST /setup/test |
|
||||
| Components | GET /api/components, /api/categories |
|
||||
| Pricelists | CRUD /api/pricelists, GET /latest, POST /compare |
|
||||
| Projects | CRUD /api/projects/:uuid (Phase 3) |
|
||||
| Specs | CRUD /api/specs/:uuid, POST /upgrade, GET /diff (Phase 3) |
|
||||
| Configs | POST /:uuid/refresh-prices (обновить цены из local_components) |
|
||||
| Sync | GET /status, POST /components, /pricelists, /push, /pull, /resolve-conflict |
|
||||
| Export | GET /api/specs/:uuid/export, /api/projects/:uuid/export |
|
||||
|
||||
## Commands
|
||||
```bash
|
||||
go build ./cmd/qfs && go vet ./... # verify
|
||||
go run ./cmd/qfs # run
|
||||
make build-release # release build
|
||||
# Development
|
||||
go run ./cmd/qfs # Dev server
|
||||
make run # Dev server (via Makefile)
|
||||
|
||||
# Production build
|
||||
make build-release # Optimized build with version (recommended)
|
||||
VERSION=$(git describe --tags --always --dirty)
|
||||
CGO_ENABLED=0 go build -ldflags="-s -w -X main.Version=$VERSION" -o bin/qfs ./cmd/qfs
|
||||
|
||||
# Cron jobs
|
||||
go run ./cmd/cron -job=cleanup-pricelists # Remove old unused pricelists
|
||||
go run ./cmd/cron -job=update-prices # Recalculate all prices
|
||||
go run ./cmd/cron -job=update-popularity # Update popularity scores
|
||||
|
||||
# Check version
|
||||
./bin/qfs -version
|
||||
```
|
||||
|
||||
## Code Style
|
||||
- gofmt, structured logging (slog), wrap errors with context
|
||||
- snake_case files, PascalCase types
|
||||
- RBAC disabled: DB username = user_id via `models.EnsureDBUser()`
|
||||
|
||||
## UI Guidelines
|
||||
- htmx (hx-get/post/target/swap), Tailwind CDN
|
||||
- Freshness colors: green (fresh) → yellow → orange → red (critical)
|
||||
- Sync status + offline indicator in header
|
||||
|
||||
178
LOCAL_FIRST_INTEGRATION.md
Normal file
178
LOCAL_FIRST_INTEGRATION.md
Normal file
@@ -0,0 +1,178 @@
|
||||
# Local-First Architecture Integration Guide
|
||||
|
||||
## Overview
|
||||
|
||||
QuoteForge теперь поддерживает local-first архитектуру: приложение ВСЕГДА работает с SQLite (localdb), MariaDB используется только для синхронизации.
|
||||
|
||||
## Реализованные компоненты
|
||||
|
||||
### 1. Конвертеры моделей (`internal/localdb/converters.go`)
|
||||
|
||||
Конвертеры между MariaDB и SQLite моделями:
|
||||
- `ConfigurationToLocal()` / `LocalToConfiguration()`
|
||||
- `PricelistToLocal()` / `LocalToPricelist()`
|
||||
- `ComponentToLocal()` / `LocalToComponent()`
|
||||
|
||||
### 2. LocalDB методы (`internal/localdb/localdb.go`)
|
||||
|
||||
Добавлены методы для работы с pending changes:
|
||||
- `MarkChangesSynced(ids []int64)` - помечает изменения как синхронизированные
|
||||
- `GetPendingCount()` - возвращает количество несинхронизированных изменений
|
||||
|
||||
### 3. Sync Service расширения (`internal/services/sync/service.go`)
|
||||
|
||||
Новые методы:
|
||||
- `SyncPricelistsIfNeeded()` - проверяет и скачивает новые прайслисты при необходимости
|
||||
- `PushPendingChanges()` - отправляет все pending changes на сервер
|
||||
- `pushSingleChange()` - обрабатывает один pending change
|
||||
- `pushConfigurationCreate/Update/Delete()` - специфичные методы для конфигураций
|
||||
|
||||
**ВАЖНО**: Конструктор изменен - теперь требует `ConfigurationRepository`:
|
||||
```go
|
||||
syncService := sync.NewService(pricelistRepo, configRepo, local)
|
||||
```
|
||||
|
||||
### 4. LocalConfigurationService (`internal/services/local_configuration.go`)
|
||||
|
||||
Новый сервис для работы с конфигурациями в local-first режиме:
|
||||
- Все операции CRUD работают через SQLite
|
||||
- Автоматически добавляет изменения в pending_changes
|
||||
- При создании конфигурации (если online) проверяет новые прайслисты
|
||||
|
||||
```go
|
||||
localConfigService := services.NewLocalConfigurationService(
|
||||
localDB,
|
||||
syncService,
|
||||
quoteService,
|
||||
isOnlineFunc,
|
||||
)
|
||||
```
|
||||
|
||||
### 5. Sync Handler расширения (`internal/handlers/sync.go`)
|
||||
|
||||
Новые endpoints:
|
||||
- `POST /api/sync/push` - отправить pending changes на сервер
|
||||
- `GET /api/sync/pending/count` - получить количество pending changes
|
||||
- `GET /api/sync/pending` - получить список pending changes
|
||||
|
||||
## Интеграция
|
||||
|
||||
### Шаг 1: Обновить main.go
|
||||
|
||||
```go
|
||||
// В cmd/qfs/main.go
|
||||
syncService := sync.NewService(pricelistRepo, configRepo, local)
|
||||
|
||||
// Создать isOnline функцию
|
||||
isOnlineFunc := func() bool {
|
||||
sqlDB, err := db.DB()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return sqlDB.Ping() == nil
|
||||
}
|
||||
|
||||
// Создать LocalConfigurationService
|
||||
localConfigService := services.NewLocalConfigurationService(
|
||||
local,
|
||||
syncService,
|
||||
quoteService,
|
||||
isOnlineFunc,
|
||||
)
|
||||
```
|
||||
|
||||
### Шаг 2: Обновить ConfigurationHandler
|
||||
|
||||
Заменить `ConfigurationService` на `LocalConfigurationService` в handlers:
|
||||
|
||||
```go
|
||||
// Было:
|
||||
configHandler := handlers.NewConfigurationHandler(configService, exportService)
|
||||
|
||||
// Стало:
|
||||
configHandler := handlers.NewConfigurationHandler(localConfigService, exportService)
|
||||
```
|
||||
|
||||
### Шаг 3: Добавить endpoints для sync
|
||||
|
||||
В роутере добавить:
|
||||
```go
|
||||
syncGroup := router.Group("/api/sync")
|
||||
{
|
||||
syncGroup.POST("/push", syncHandler.PushPendingChanges)
|
||||
syncGroup.GET("/pending/count", syncHandler.GetPendingCount)
|
||||
syncGroup.GET("/pending", syncHandler.GetPendingChanges)
|
||||
}
|
||||
```
|
||||
|
||||
## Как это работает
|
||||
|
||||
### Создание конфигурации
|
||||
|
||||
1. Пользователь создает конфигурацию
|
||||
2. `LocalConfigurationService.Create()`:
|
||||
- Если online → `SyncPricelistsIfNeeded()` проверяет новые прайслисты
|
||||
- Сохраняет конфигурацию в SQLite
|
||||
- Добавляет в `pending_changes` с operation="create"
|
||||
3. Конфигурация доступна локально сразу
|
||||
|
||||
### Синхронизация с сервером
|
||||
|
||||
**Manual sync:**
|
||||
```bash
|
||||
POST /api/sync/push
|
||||
```
|
||||
|
||||
**Background sync (TODO):**
|
||||
- Периодический worker вызывает `syncService.PushPendingChanges()`
|
||||
- Проверяет online статус
|
||||
- Отправляет все pending changes на сервер
|
||||
- Удаляет успешно синхронизированные записи
|
||||
|
||||
### Offline режим
|
||||
|
||||
1. Все операции работают нормально через SQLite
|
||||
2. Изменения копятся в `pending_changes`
|
||||
3. При восстановлении соединения автоматически синхронизируются
|
||||
|
||||
## Pending Changes Queue
|
||||
|
||||
Таблица `pending_changes`:
|
||||
```go
|
||||
type PendingChange struct {
|
||||
ID int64 // Auto-increment
|
||||
EntityType string // "configuration", "project", "specification"
|
||||
EntityUUID string // UUID сущности
|
||||
Operation string // "create", "update", "delete"
|
||||
Payload string // JSON snapshot сущности
|
||||
CreatedAt time.Time
|
||||
Attempts int // Счетчик попыток синхронизации
|
||||
LastError string // Последняя ошибка синхронизации
|
||||
}
|
||||
```
|
||||
|
||||
## TODO для Phase 2.5
|
||||
|
||||
- [ ] Background sync worker (автоматическая синхронизация каждые N минут)
|
||||
- [ ] Conflict resolution (при конфликтах обновления)
|
||||
- [ ] UI: pending counter в header
|
||||
- [ ] UI: manual sync button
|
||||
- [ ] UI: conflict alerts
|
||||
- [ ] Retry logic для failed pending changes
|
||||
- [ ] RefreshPrices для local mode (через local_components)
|
||||
|
||||
## Testing
|
||||
|
||||
```bash
|
||||
# Compile
|
||||
go build ./cmd/qfs
|
||||
|
||||
# Run
|
||||
./qfs
|
||||
|
||||
# Check pending changes
|
||||
curl http://localhost:8080/api/sync/pending/count
|
||||
|
||||
# Manual sync
|
||||
curl -X POST http://localhost:8080/api/sync/push
|
||||
```
|
||||
121
MIGRATION_PRICE_REFRESH.md
Normal file
121
MIGRATION_PRICE_REFRESH.md
Normal file
@@ -0,0 +1,121 @@
|
||||
# Миграция: Функционал пересчета цен в конфигураторе
|
||||
|
||||
## Описание изменений
|
||||
|
||||
Добавлен функционал автоматического обновления цен компонентов в сохраненных конфигурациях.
|
||||
|
||||
### Новые возможности
|
||||
|
||||
1. **Кнопка "Пересчитать цену"** на странице конфигуратора
|
||||
- Обновляет цены всех компонентов в конфигурации до актуальных значений из базы данных
|
||||
- Сохраняет количество компонентов, обновляя только цены
|
||||
- Отображает время последнего обновления цен
|
||||
|
||||
2. **Поле `price_updated_at`** в таблице конфигураций
|
||||
- Хранит дату и время последнего обновления цен
|
||||
- Отображается на странице конфигуратора в удобном формате ("5 мин. назад", "2 ч. назад" и т.д.)
|
||||
|
||||
### Изменения в базе данных
|
||||
|
||||
Добавлено новое поле в таблицу `qt_configurations`:
|
||||
```sql
|
||||
ALTER TABLE qt_configurations
|
||||
ADD COLUMN price_updated_at TIMESTAMP NULL DEFAULT NULL
|
||||
AFTER server_count;
|
||||
```
|
||||
|
||||
### Новый API endpoint
|
||||
|
||||
```
|
||||
POST /api/configs/:uuid/refresh-prices
|
||||
```
|
||||
|
||||
**Требования:**
|
||||
- Авторизация: Bearer Token
|
||||
- Роль: editor или выше
|
||||
|
||||
**Ответ:**
|
||||
```json
|
||||
{
|
||||
"id": 1,
|
||||
"uuid": "...",
|
||||
"name": "Конфигурация 1",
|
||||
"items": [
|
||||
{
|
||||
"lot_name": "CPU_AMD_9654",
|
||||
"quantity": 2,
|
||||
"unit_price": 11500.00
|
||||
}
|
||||
],
|
||||
"total_price": 23000.00,
|
||||
"price_updated_at": "2026-01-31T12:34:56Z",
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
## Применение изменений
|
||||
|
||||
### 1. Обновление базы данных
|
||||
|
||||
Запустите сервер с флагом миграции:
|
||||
```bash
|
||||
./quoteforge -migrate -config config.yaml
|
||||
```
|
||||
|
||||
Или выполните SQL миграцию вручную:
|
||||
```bash
|
||||
mysql -u user -p RFQ_LOG < migrations/004_add_price_updated_at.sql
|
||||
```
|
||||
|
||||
### 2. Перезапуск сервера
|
||||
|
||||
После применения миграции перезапустите сервер:
|
||||
```bash
|
||||
./quoteforge -config config.yaml
|
||||
```
|
||||
|
||||
## Использование
|
||||
|
||||
1. Откройте любую сохраненную конфигурацию в конфигураторе
|
||||
2. Нажмите кнопку **"Пересчитать цену"** рядом с кнопкой "Сохранить"
|
||||
3. Все цены компонентов будут обновлены до актуальных значений
|
||||
4. Конфигурация автоматически сохраняется с обновленными ценами
|
||||
5. Под кнопками отображается время последнего обновления цен
|
||||
|
||||
## Технические детали
|
||||
|
||||
### Измененные файлы
|
||||
|
||||
- `internal/models/configuration.go` - добавлено поле `PriceUpdatedAt`
|
||||
- `internal/services/configuration.go` - добавлен метод `RefreshPrices()`
|
||||
- `internal/handlers/configuration.go` - добавлен обработчик `RefreshPrices()`
|
||||
- `cmd/qfs/main.go` - добавлен маршрут `/api/configs/:uuid/refresh-prices`
|
||||
- `web/templates/index.html` - добавлена кнопка и JavaScript функции
|
||||
- `migrations/004_add_price_updated_at.sql` - SQL миграция
|
||||
- `CLAUDE.md` - обновлена документация
|
||||
|
||||
### Логика обновления цен
|
||||
|
||||
1. Получение конфигурации по UUID
|
||||
2. Проверка прав доступа (пользователь должен быть владельцем)
|
||||
3. Для каждого компонента в конфигурации:
|
||||
- Получение актуальной цены из `qt_lot_metadata.current_price`
|
||||
- Обновление `unit_price` в items
|
||||
4. Пересчет `total_price` с учетом `server_count`
|
||||
5. Установка `price_updated_at` на текущее время
|
||||
6. Сохранение конфигурации
|
||||
|
||||
### Обработка ошибок
|
||||
|
||||
- Если компонент не найден или у него нет цены - сохраняется старая цена
|
||||
- При ошибках доступа возвращается 403 Forbidden
|
||||
- При отсутствии конфигурации возвращается 404 Not Found
|
||||
|
||||
## Отмена изменений (Rollback)
|
||||
|
||||
Для отмены миграции выполните:
|
||||
```sql
|
||||
ALTER TABLE qt_configurations DROP COLUMN price_updated_at;
|
||||
```
|
||||
|
||||
**Внимание:** После отмены миграции функционал пересчета цен перестанет работать корректно.
|
||||
9
Makefile
9
Makefile
@@ -1,4 +1,4 @@
|
||||
.PHONY: build build-release clean test run version install-hooks
|
||||
.PHONY: build build-release clean test run version
|
||||
|
||||
# Get version from git
|
||||
VERSION := $(shell git describe --tags --always --dirty 2>/dev/null || echo "dev")
|
||||
@@ -72,12 +72,6 @@ deps:
|
||||
go mod download
|
||||
go mod tidy
|
||||
|
||||
# Install local git hooks
|
||||
install-hooks:
|
||||
git config core.hooksPath .githooks
|
||||
chmod +x .githooks/pre-commit scripts/check-secrets.sh
|
||||
@echo "Installed git hooks from .githooks/"
|
||||
|
||||
# Help
|
||||
help:
|
||||
@echo "QuoteForge Server (qfs) - Build Commands"
|
||||
@@ -98,7 +92,6 @@ help:
|
||||
@echo " run Run development server"
|
||||
@echo " watch Run with auto-restart (requires entr)"
|
||||
@echo " deps Install/update dependencies"
|
||||
@echo " install-hooks Install local git hooks (secret scan on commit)"
|
||||
@echo " help Show this help"
|
||||
@echo ""
|
||||
@echo "Current version: $(VERSION)"
|
||||
|
||||
403
README.md
403
README.md
@@ -1,53 +1,392 @@
|
||||
# QuoteForge
|
||||
|
||||
Local-first desktop web app for server configuration, quotation, and project work.
|
||||
**Server Configuration & Quotation Tool**
|
||||
|
||||
Runtime model:
|
||||
- user work is stored in local SQLite;
|
||||
- MariaDB is used only for setup checks and background sync;
|
||||
- HTTP server binds to loopback only.
|
||||
QuoteForge — корпоративный инструмент для конфигурирования серверов и формирования коммерческих предложений (КП). Приложение интегрируется с существующей базой данных RFQ_LOG.
|
||||
|
||||
## What the app does
|
||||

|
||||

|
||||

|
||||
|
||||
- configuration editor with price refresh from synced pricelists;
|
||||
- projects with variants and ordered configurations;
|
||||
- vendor BOM import and PN -> LOT resolution;
|
||||
- revision history with rollback;
|
||||
- rotating local backups.
|
||||
## Возможности
|
||||
|
||||
## Run
|
||||
### Для пользователей
|
||||
- 📱 **Mobile-first интерфейс** — удобная работа с телефона и планшета
|
||||
- 🖥️ **Конфигуратор серверов** — пошаговый выбор компонентов с проверкой совместимости
|
||||
- 💰 **Автоматический расчёт цен** — актуальные цены на основе истории закупок
|
||||
- 📊 **Экспорт в CSV/XLSX** — готовые спецификации для клиентов
|
||||
- 💾 **Сохранение конфигураций** — история и шаблоны для повторного использования
|
||||
|
||||
### Для ценовых администраторов
|
||||
- 📈 **Умный расчёт цен** — медиана, взвешенная медиана, среднее
|
||||
- 🎯 **Система алертов** — уведомления о популярных компонентах с устаревшими ценами
|
||||
- 📉 **Аналитика использования** — какие компоненты востребованы в КП
|
||||
- ⚙️ **Гибкие настройки** — периоды расчёта, методы, ручные переопределения
|
||||
|
||||
### Индикация актуальности цен
|
||||
| Цвет | Статус | Условие |
|
||||
|------|--------|---------|
|
||||
| 🟢 Зелёный | Свежая | < 30 дней, ≥ 3 источника |
|
||||
| 🟡 Жёлтый | Нормальная | 30-60 дней |
|
||||
| 🟠 Оранжевый | Устаревающая | 60-90 дней |
|
||||
| 🔴 Красный | Устаревшая | > 90 дней или нет данных |
|
||||
|
||||
## Технологии
|
||||
|
||||
- **Backend:** Go 1.22+, Gin, GORM
|
||||
- **Frontend:** HTML, Tailwind CSS, htmx
|
||||
- **Database:** MariaDB 11+
|
||||
- **Export:** excelize (XLSX), encoding/csv
|
||||
|
||||
## Требования
|
||||
|
||||
- Go 1.22 или выше
|
||||
- MariaDB 11.x (или MySQL 8.x)
|
||||
- ~50 MB дискового пространства
|
||||
|
||||
## Установка
|
||||
|
||||
### 1. Клонирование репозитория
|
||||
|
||||
```bash
|
||||
go run ./cmd/qfs
|
||||
git clone https://github.com/your-company/quoteforge.git
|
||||
cd quoteforge
|
||||
```
|
||||
|
||||
Useful commands:
|
||||
### 2. Настройка конфигурации
|
||||
|
||||
```bash
|
||||
cp config.example.yaml config.yaml
|
||||
```
|
||||
|
||||
Отредактируйте `config.yaml`:
|
||||
|
||||
```yaml
|
||||
server:
|
||||
host: "0.0.0.0"
|
||||
port: 8080
|
||||
mode: "release"
|
||||
|
||||
database:
|
||||
host: "localhost"
|
||||
port: 3306
|
||||
name: "RFQ_LOG"
|
||||
user: "quoteforge"
|
||||
password: "your-secure-password"
|
||||
|
||||
auth:
|
||||
jwt_secret: "your-jwt-secret-min-32-chars"
|
||||
token_expiry: "24h"
|
||||
```
|
||||
|
||||
### 3. Миграции базы данных
|
||||
|
||||
```bash
|
||||
go run ./cmd/qfs -migrate
|
||||
```
|
||||
|
||||
### Мигратор OPS -> проекты (preview/apply)
|
||||
|
||||
Переносит квоты, чьи названия начинаются с `OPS-xxxx` (где `x` — цифра), в проект `OPS-xxxx`.
|
||||
Если проекта нет, он будет создан; если архивный — реактивирован.
|
||||
|
||||
Сначала всегда смотрите preview:
|
||||
|
||||
```bash
|
||||
go run ./cmd/migrate_ops_projects -config config.yaml
|
||||
```
|
||||
|
||||
Применение изменений:
|
||||
|
||||
```bash
|
||||
go run ./cmd/migrate_ops_projects -config config.yaml -apply
|
||||
```
|
||||
|
||||
Без интерактивного подтверждения:
|
||||
|
||||
```bash
|
||||
go run ./cmd/migrate_ops_projects -config config.yaml -apply -yes
|
||||
```
|
||||
|
||||
### Минимальные права БД для пользователя квотаций
|
||||
|
||||
Если нужен пользователь, который может работать с конфигурациями, но не может создавать/удалять прайслисты:
|
||||
|
||||
```sql
|
||||
-- 1) Создать (или оставить существующего) пользователя
|
||||
CREATE USER IF NOT EXISTS 'quote_user'@'%' IDENTIFIED BY 'StrongPassword!';
|
||||
|
||||
-- 2) Сбросить лишние права (без пересоздания пользователя)
|
||||
REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'quote_user'@'%';
|
||||
|
||||
-- 3) Чтение данных для конфигуратора и синка
|
||||
GRANT SELECT ON RFQ_LOG.lot TO 'quote_user'@'%';
|
||||
GRANT SELECT ON RFQ_LOG.qt_lot_metadata TO 'quote_user'@'%';
|
||||
GRANT SELECT ON RFQ_LOG.qt_categories TO 'quote_user'@'%';
|
||||
GRANT SELECT ON RFQ_LOG.qt_pricelists TO 'quote_user'@'%';
|
||||
GRANT SELECT ON RFQ_LOG.qt_pricelist_items TO 'quote_user'@'%';
|
||||
|
||||
-- 4) Работа с конфигурациями
|
||||
GRANT SELECT, INSERT, UPDATE ON RFQ_LOG.qt_configurations TO 'quote_user'@'%';
|
||||
|
||||
FLUSH PRIVILEGES;
|
||||
|
||||
SHOW GRANTS FOR 'quote_user'@'%';
|
||||
```
|
||||
|
||||
Важно:
|
||||
- не выдавайте `INSERT/UPDATE/DELETE` на `qt_pricelists` и `qt_pricelist_items`, если пользователь не должен управлять прайслистами;
|
||||
- если используется host-специфичный аккаунт (`'quote_user'@'192.168.x.x'`), назначьте права и для него;
|
||||
- после смены DB-настроек через `/setup` приложение перезапускается автоматически и подхватывает нового пользователя.
|
||||
|
||||
### 4. Импорт метаданных компонентов
|
||||
|
||||
```bash
|
||||
go run ./cmd/importer
|
||||
```
|
||||
|
||||
### 5. Запуск
|
||||
|
||||
```bash
|
||||
# Development
|
||||
go run ./cmd/qfs
|
||||
|
||||
# Production (with Makefile - recommended)
|
||||
make build-release # Builds with version info
|
||||
./bin/qfs -version # Check version
|
||||
|
||||
# Production (manual)
|
||||
VERSION=$(git describe --tags --always --dirty)
|
||||
CGO_ENABLED=0 go build -ldflags="-s -w -X main.Version=$VERSION" -o bin/qfs ./cmd/qfs
|
||||
./bin/qfs -version
|
||||
```
|
||||
|
||||
**Makefile команды:**
|
||||
```bash
|
||||
make build-release # Оптимизированная сборка с версией
|
||||
make build-all # Сборка для всех платформ (Linux, macOS, Windows)
|
||||
make build-windows # Только для Windows
|
||||
make run # Запуск dev сервера
|
||||
make test # Запуск тестов
|
||||
make clean # Очистка bin/
|
||||
make help # Показать все команды
|
||||
```
|
||||
|
||||
Приложение будет доступно по адресу: http://localhost:8080
|
||||
|
||||
### Локальная SQLite база (state)
|
||||
|
||||
Локальная база приложения хранится в профиле пользователя и не зависит от расположения бинарника.
|
||||
Имя файла: `qfs.db`.
|
||||
|
||||
- macOS: `~/Library/Application Support/QuoteForge/qfs.db`
|
||||
- Linux: `$XDG_STATE_HOME/quoteforge/qfs.db` (или `~/.local/state/quoteforge/qfs.db`)
|
||||
- Windows: `%LOCALAPPDATA%\\QuoteForge\\qfs.db`
|
||||
|
||||
Можно переопределить путь через `-localdb` или переменную окружения `QFS_DB_PATH`.
|
||||
|
||||
### Версионность конфигураций (local-first)
|
||||
|
||||
Для `local_configurations` используется append-only versioning через полные snapshot-версии:
|
||||
|
||||
- таблица: `local_configuration_versions`
|
||||
- для каждого изменения создаётся новая версия (`version_no = max + 1`)
|
||||
- `local_configurations.current_version_id` указывает на активную версию
|
||||
- старые версии не изменяются и не удаляются в обычном потоке
|
||||
- rollback не "перематывает" историю, а создаёт новую версию из выбранного snapshot
|
||||
|
||||
При backfill (миграция `006_add_local_configuration_versions.sql`) для существующих конфигураций создаётся `v1` и проставляется `current_version_id`.
|
||||
|
||||
#### Rollback
|
||||
|
||||
Rollback выполняется API-методом:
|
||||
|
||||
```bash
|
||||
POST /api/configs/:uuid/rollback
|
||||
{
|
||||
"target_version": 3,
|
||||
"note": "optional"
|
||||
}
|
||||
```
|
||||
|
||||
Результат:
|
||||
- создаётся новая версия `vN` с `data` из целевой версии
|
||||
- `change_note = "rollback to v{target_version}"` (+ note, если передан)
|
||||
- `current_version_id` переключается на новую версию
|
||||
- конфигурация уходит в `sync_status = pending`
|
||||
|
||||
### Локальный config.yaml
|
||||
|
||||
По умолчанию `qfs` ищет `config.yaml` в той же user-state папке, где лежит `qfs.db` (а не рядом с бинарником).
|
||||
Можно переопределить путь через `-config` или `QFS_CONFIG_PATH`.
|
||||
|
||||
## Docker
|
||||
|
||||
```bash
|
||||
# Сборка образа
|
||||
docker build -t quoteforge .
|
||||
|
||||
# Запуск с docker-compose
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
## Структура проекта
|
||||
|
||||
```
|
||||
quoteforge/
|
||||
├── cmd/
|
||||
│   ├── qfs/main.go # Main HTTP server
|
||||
│ └── importer/main.go # Import metadata from lot table
|
||||
├── internal/
|
||||
│ ├── config/ # Конфигурация
|
||||
│ ├── models/ # GORM модели
|
||||
│ ├── handlers/ # HTTP handlers
|
||||
│ ├── services/ # Бизнес-логика
|
||||
│ ├── middleware/ # Auth, CORS, etc.
|
||||
│ └── repository/ # Работа с БД
|
||||
├── web/
|
||||
│ ├── templates/ # HTML шаблоны
|
||||
│ └── static/ # CSS, JS, изображения
|
||||
├── migrations/ # SQL миграции
|
||||
├── config.yaml # Конфигурация
|
||||
├── Dockerfile
|
||||
├── docker-compose.yml
|
||||
└── go.mod
|
||||
```
|
||||
|
||||
## Роли пользователей
|
||||
|
||||
| Роль | Описание |
|
||||
|------|----------|
|
||||
| `viewer` | Просмотр, создание квот, экспорт |
|
||||
| `editor` | + сохранение конфигураций |
|
||||
| `pricing_admin` | + управление ценами и алертами |
|
||||
| `admin` | Полный доступ, управление пользователями |
|
||||
|
||||
## API
|
||||
|
||||
Документация API доступна по адресу `/api/docs` (в разработке).
|
||||
|
||||
Основные endpoints:
|
||||
|
||||
```
|
||||
POST /api/auth/login # Авторизация
|
||||
GET /api/components # Список компонентов
|
||||
POST /api/quote/calculate # Расчёт цены
|
||||
POST /api/export/xlsx # Экспорт в Excel
|
||||
GET /api/configs # Сохранённые конфигурации
|
||||
GET /api/configs/:uuid/versions # Список версий конфигурации
|
||||
GET /api/configs/:uuid/versions/:version # Получить конкретную версию
|
||||
POST /api/configs/:uuid/rollback # Rollback на указанную версию
|
||||
POST /api/configs/:uuid/reactivate # Вернуть архивную конфигурацию в активные
|
||||
```
|
||||
|
||||
#### Sync payload для versioning
|
||||
|
||||
События в `pending_changes` для конфигураций содержат:
|
||||
- `configuration_uuid`
|
||||
- `operation` (`create` / `update` / `rollback`)
|
||||
- `current_version_id` и `current_version_no`
|
||||
- `snapshot` (текущее состояние конфигурации)
|
||||
- `idempotency_key` и `conflict_policy` (`last_write_wins`)
|
||||
|
||||
Это позволяет push-слою отправлять на сервер актуальное состояние и готовит основу для будущего conflict resolution.
|
||||
|
||||
## Cron Jobs
|
||||
|
||||
QuoteForge includes automated cron jobs for maintenance tasks. They run via the dedicated cron service in the Docker setup, or can be invoked manually (see below).
|
||||
|
||||
### Docker Compose Setup
|
||||
|
||||
The Docker setup includes a dedicated cron service that runs the following jobs:
|
||||
|
||||
- **Alerts check**: Every hour (0 * * * *)
|
||||
- **Price updates**: Daily at 2 AM (0 2 * * *)
|
||||
- **Usage counter reset**: Weekly on Sunday at 1 AM (0 1 * * 0)
|
||||
- **Popularity score updates**: Daily at 3 AM (0 3 * * *)
|
||||
|
||||
To enable cron jobs in Docker, run:
|
||||
|
||||
```bash
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
### Manual Cron Job Execution
|
||||
|
||||
You can also run cron jobs manually with `go run ./cmd/cron` (or the `quoteforge-cron` binary built from that package):
|
||||
|
||||
```bash
|
||||
# Check and generate alerts
|
||||
go run ./cmd/cron -job=alerts
|
||||
|
||||
# Recalculate all prices
|
||||
go run ./cmd/cron -job=update-prices
|
||||
|
||||
# Reset usage counters
|
||||
go run ./cmd/cron -job=reset-counters
|
||||
|
||||
# Update popularity scores
|
||||
go run ./cmd/cron -job=update-popularity
|
||||
```
|
||||
|
||||
### Cron Job Details
|
||||
|
||||
- **Alerts check**: Generates alerts for components with high demand and stale prices, trending components without prices, and components with no recent quotes
|
||||
- **Price updates**: Recalculates prices for all components using configured methods (median, weighted median, average)
|
||||
- **Usage counter reset**: Resets weekly and monthly usage counters for components
|
||||
- **Popularity score updates**: Recalculates popularity scores based on supplier quote activity
|
||||
|
||||
## Разработка
|
||||
|
||||
```bash
|
||||
# Запуск в режиме разработки (hot reload)
|
||||
go run ./cmd/qfs
|
||||
|
||||
# Запуск тестов
|
||||
go test ./...
|
||||
go vet ./...
|
||||
make build-release
|
||||
|
||||
# Сборка для Linux
|
||||
CGO_ENABLED=0 go build -ldflags="-s -w" -o bin/qfs ./cmd/qfs
|
||||
```
|
||||
|
||||
On first run the app creates a minimal `config.yaml`, starts on `http://127.0.0.1:8080`, and opens `/setup` if DB credentials have not been saved yet.
|
||||
## Переменные окружения
|
||||
|
||||
## Documentation
|
||||
| Переменная | Описание | По умолчанию |
|
||||
|------------|----------|--------------|
|
||||
| `QF_DB_HOST` | Хост базы данных | localhost |
|
||||
| `QF_DB_PORT` | Порт базы данных | 3306 |
|
||||
| `QF_DB_NAME` | Имя базы данных | RFQ_LOG |
|
||||
| `QF_DB_USER` | Пользователь БД | — |
|
||||
| `QF_DB_PASSWORD` | Пароль БД | — |
|
||||
| `QF_JWT_SECRET` | Секрет для JWT | — |
|
||||
| `QF_SERVER_PORT` | Порт сервера | 8080 |
|
||||
| `QFS_DB_PATH` | Полный путь к локальной SQLite БД | OS-specific user state dir |
|
||||
| `QFS_STATE_DIR` | Каталог state (если `QFS_DB_PATH` не задан) | OS-specific user state dir |
|
||||
| `QFS_CONFIG_PATH` | Полный путь к `config.yaml` | OS-specific user state dir |
|
||||
|
||||
- Shared engineering rules: [bible/README.md](bible/README.md)
|
||||
- Project architecture: [bible-local/README.md](bible-local/README.md)
|
||||
- Release notes: `releases/<version>/RELEASE_NOTES.md`
|
||||
## Интеграция с существующей БД
|
||||
|
||||
`bible-local/` is the source of truth for QuoteForge-specific architecture. If code changes behavior, update the matching file there in the same commit.
|
||||
QuoteForge интегрируется с существующей базой RFQ_LOG:
|
||||
|
||||
## Repository map
|
||||
- `lot` — справочник компонентов (только чтение)
|
||||
- `lot_log` — история цен от поставщиков (только чтение)
|
||||
- `supplier` — справочник поставщиков (только чтение)
|
||||
|
||||
```text
|
||||
cmd/ entry points and migration tools
|
||||
internal/ application code
|
||||
web/ templates and static assets
|
||||
bible/ shared engineering rules
|
||||
bible-local/ project architecture and contracts
|
||||
releases/ packaged release artifacts and release notes
|
||||
config.example.yaml runtime config reference
|
||||
```
|
||||
Новые таблицы QuoteForge имеют префикс `qt_`:
|
||||
|
||||
- `qt_users` — пользователи приложения
|
||||
- `qt_lot_metadata` — расширенные данные компонентов
|
||||
- `qt_configurations` — сохранённые конфигурации
|
||||
- `qt_pricing_alerts` — алерты для администраторов
|
||||
|
||||
## Поддержка
|
||||
|
||||
По вопросам работы приложения обращайтесь:
|
||||
- Email: mike@mchus.pro
|
||||
- Internal: @mchus
|
||||
|
||||
## Лицензия
|
||||
|
||||
Данное программное обеспечение является собственностью компании и предназначено исключительно для внутреннего использования. Распространение, копирование или модификация без письменного разрешения запрещены.
|
||||
|
||||
См. файл [LICENSE](LICENSE) для подробностей.
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
-- Generated from /Users/mchusavitin/Downloads/acc.csv
|
||||
-- Unambiguous rows only. Rows from headers without a date were skipped.
|
||||
INSERT INTO lot_log (`lot`, `supplier`, `date`, `price`, `quality`, `comments`) VALUES
|
||||
('ACC_RMK_L_Type', '', '2024-04-01', 19, NULL, 'header supplier missing in source (45383)'),
|
||||
('ACC_RMK_SLIDE', '', '2024-04-01', 31, NULL, 'header supplier missing in source (45383)'),
|
||||
('NVLINK_2S_Bridge', '', '2023-01-01', 431, NULL, 'header supplier missing in source (44927)'),
|
||||
('NVLINK_2S_Bridge', 'Jevy Yang', '2025-01-15', 139, NULL, NULL),
|
||||
('NVLINK_2S_Bridge', 'Wendy', '2025-01-15', 143, NULL, NULL),
|
||||
('NVLINK_2S_Bridge', 'HONCH (Darian)', '2025-05-06', 155, NULL, NULL),
|
||||
('NVLINK_2S_Bridge', 'HONCH (Sunny)', '2025-06-17', 155, NULL, NULL),
|
||||
('NVLINK_2S_Bridge', 'Wendy', '2025-07-02', 145, NULL, NULL),
|
||||
('NVLINK_2S_Bridge', 'Honch (Sunny)', '2025-07-10', 155, NULL, NULL),
|
||||
('NVLINK_2S_Bridge', 'Honch (Yan)', '2025-08-07', 155, NULL, NULL),
|
||||
('NVLINK_2S_Bridge', 'Jevy', '2025-09-09', 155, NULL, NULL),
|
||||
('NVLINK_2S_Bridge', 'Honch (Darian)', '2025-11-17', 102, NULL, NULL),
|
||||
('NVLINK_2W_Bridge(H200)', '', '2023-01-01', 405, NULL, 'header supplier missing in source (44927)'),
|
||||
('NVLINK_2W_Bridge(H200)', 'network logic / Stephen', '2025-02-10', 305, NULL, NULL),
|
||||
('NVLINK_2W_Bridge(H200)', 'JEVY', '2025-02-18', 411, NULL, NULL),
|
||||
('NVLINK_4W_Bridge(H200)', '', '2023-01-01', 820, NULL, 'header supplier missing in source (44927)'),
|
||||
('NVLINK_4W_Bridge(H200)', 'network logic / Stephen', '2025-02-10', 610, NULL, NULL),
|
||||
('NVLINK_4W_Bridge(H200)', 'JEVY', '2025-02-18', 754, NULL, NULL),
|
||||
('25G_SFP28_MMA2P00-AS', 'HONCH (Doris)', '2025-02-19', 65, NULL, NULL),
|
||||
('ACC_SuperCap', '', '2024-04-01', 59, NULL, 'header supplier missing in source (45383)'),
|
||||
('ACC_SuperCap', 'Chiphome', '2025-02-28', 48, NULL, NULL);
|
||||
|
||||
-- Skipped source values due to missing date in header:
|
||||
-- lot=ACC_RMK_L_Type; header=FOB; price=19; reason=header has supplier but no date
|
||||
-- lot=ACC_RMK_SLIDE; header=FOB; price=31; reason=header has supplier but no date
|
||||
-- lot=NVLINK_2S_Bridge; header=FOB; price=155; reason=header has supplier but no date
|
||||
-- lot=NVLINK_2W_Bridge(H200); header=FOB; price=405; reason=header has supplier but no date
|
||||
-- lot=NVLINK_4W_Bridge(H200); header=FOB; price=754; reason=header has supplier but no date
|
||||
-- lot=25G_SFP28_MMA2P00-AS; header=FOB; price=65; reason=header has supplier but no date
|
||||
-- lot=ACC_SuperCap; header=FOB; price=48; reason=header has supplier but no date
|
||||
1
bible
1
bible
Submodule bible deleted from 52444350c1
@@ -1,70 +0,0 @@
|
||||
# 01 - Overview
|
||||
|
||||
## Product
|
||||
|
||||
QuoteForge is a local-first tool for server configuration, quotation, and project tracking.
|
||||
|
||||
Core user flows:
|
||||
- create and edit configurations locally;
|
||||
- calculate prices from synced pricelists;
|
||||
- group configurations into projects and variants;
|
||||
- import vendor workspaces and map vendor PNs to internal LOTs;
|
||||
- review revision history and roll back safely.
|
||||
|
||||
## Runtime model
|
||||
|
||||
QuoteForge is a single-user thick client.
|
||||
|
||||
Rules:
|
||||
- runtime HTTP binds to loopback only;
|
||||
- browser requests are treated as part of the same local user session;
|
||||
- MariaDB is not a live dependency for normal CRUD;
|
||||
- if non-loopback deployment is ever introduced, auth/RBAC must be added first.
|
||||
|
||||
## Product scope
|
||||
|
||||
In scope:
|
||||
- configurator and quote calculation;
|
||||
- projects, variants, and configuration ordering;
|
||||
- local revision history;
|
||||
- read-only pricelist browsing from SQLite cache;
|
||||
- background sync with MariaDB;
|
||||
- rotating local backups.
|
||||
|
||||
Out of scope and intentionally removed:
|
||||
- admin pricing UI/API;
|
||||
- alerts and notification workflows;
|
||||
- stock import tooling;
|
||||
- cron jobs and importer utilities.
|
||||
|
||||
## Tech stack
|
||||
|
||||
| Layer | Stack |
|
||||
| --- | --- |
|
||||
| Backend | Go, Gin, GORM |
|
||||
| Frontend | HTML templates, htmx, Tailwind CSS |
|
||||
| Local storage | SQLite |
|
||||
| Sync transport | MariaDB |
|
||||
| Export | CSV and XLSX generation |
|
||||
|
||||
## Repository map
|
||||
|
||||
```text
|
||||
cmd/
|
||||
qfs/ main HTTP runtime
|
||||
migrate/ server migration tool
|
||||
migrate_ops_projects/ OPS project migration helper
|
||||
internal/
|
||||
appstate/ backup and runtime state
|
||||
config/ runtime config parsing
|
||||
handlers/ HTTP handlers
|
||||
localdb/ SQLite models and migrations
|
||||
repository/ repositories
|
||||
services/ business logic and sync
|
||||
web/
|
||||
templates/ HTML templates
|
||||
static/ static assets
|
||||
bible/ shared engineering rules
|
||||
bible-local/ project-specific architecture
|
||||
releases/ release artifacts and notes
|
||||
```
|
||||
@@ -1,116 +0,0 @@
|
||||
# 02 - Architecture
|
||||
|
||||
## Local-first rule
|
||||
|
||||
SQLite is the runtime source of truth.
|
||||
MariaDB is sync transport plus setup and migration tooling.
|
||||
|
||||
```text
|
||||
browser -> Gin handlers -> SQLite
|
||||
-> pending_changes
|
||||
background sync <------> MariaDB
|
||||
```
|
||||
|
||||
Rules:
|
||||
- user CRUD must continue when MariaDB is offline;
|
||||
- runtime handlers and pages must read and write SQLite only;
|
||||
- MariaDB access in runtime code is allowed only inside sync and setup flows;
|
||||
- no live MariaDB fallback for reads that already exist in local cache.
|
||||
|
||||
## Sync contract
|
||||
|
||||
Bidirectional:
|
||||
- projects;
|
||||
- configurations;
|
||||
- `vendor_spec`;
|
||||
- pending change metadata.
|
||||
|
||||
Pull-only:
|
||||
- components;
|
||||
- pricelists and pricelist items;
|
||||
- partnumber books and partnumber book items.
|
||||
|
||||
Readiness guard:
|
||||
- every sync push/pull runs a preflight check;
|
||||
- blocked sync returns `423 Locked` with a machine-readable reason;
|
||||
- local work continues even when sync is blocked.
|
||||
- sync metadata updates must preserve project `updated_at`; sync time belongs in `synced_at`, not in the user-facing last-modified timestamp.
|
||||
- pricelist pull must persist a new local snapshot atomically: header and items appear together, and `last_pricelist_sync` advances only after item download succeeds.
|
||||
- UI sync status must distinguish "last sync failed" from "up to date"; if the app can prove newer server pricelist data exists, the indicator must say local cache is incomplete.
|
||||
|
||||
## Pricing contract
|
||||
|
||||
Prices come only from `local_pricelist_items`.
|
||||
|
||||
Rules:
|
||||
- `local_components` is metadata-only;
|
||||
- quote calculation must not read prices from components;
|
||||
- latest pricelist selection ignores snapshots without items;
|
||||
- auto pricelist mode stays auto and must not be persisted as an explicit resolved ID.
|
||||
|
||||
## Pricing tab layout
|
||||
|
||||
The Pricing tab (Ценообразование) has two tables: Buy (Цена покупки) and Sale (Цена продажи).
|
||||
|
||||
Column order (both tables):
|
||||
|
||||
```
|
||||
PN вендора | Описание | LOT | Кол-во | Estimate | Склад | Конкуренты | Ручная цена
|
||||
```
|
||||
|
||||
Per-LOT row expansion rules:
|
||||
- each `lot_mappings` entry in a BOM row becomes its own table row with its own quantity and prices;
|
||||
- `baseLot` (resolved LOT without an explicit mapping) is treated as the first sub-row with `quantity_per_pn` from `_getRowLotQtyPerPN`;
|
||||
- when one vendor PN expands into N LOT sub-rows, PN вендора and Описание cells use `rowspan="N"` and appear only on the first sub-row;
|
||||
- a visual top border (`border-t border-gray-200`) separates each vendor PN group.
|
||||
|
||||
Vendor price attachment:
|
||||
- `vendorOrig` and `vendorOrigUnit` (BOM unit/total price) are attached to the first LOT sub-row only;
|
||||
- subsequent sub-rows carry empty `data-vendor-orig` so `setPricingCustomPriceFromVendor` counts each vendor PN exactly once.
|
||||
|
||||
Controls terminology:
|
||||
- custom price input is labeled **Ручная цена** (not "Своя цена");
|
||||
- the button that fills custom price from BOM totals is labeled **BOM Цена** (not "Проставить цены BOM").
|
||||
|
||||
CSV export reads PN вендора, Описание, and LOT from `data-vendor-pn`, `data-desc`, `data-lot` row attributes to bypass the rowspan cell offset problem.
|
||||
|
||||
## Configuration versioning
|
||||
|
||||
Configuration revisions are append-only snapshots stored in `local_configuration_versions`.
|
||||
|
||||
Rules:
|
||||
- the editable working configuration is always the implicit head named `main`; UI must not switch the user to a numbered revision after save;
|
||||
- create a new revision when spec, BOM, or pricing content changes;
|
||||
- revision history is retrospective: the revisions page shows past snapshots, not the current `main` state;
|
||||
- rollback creates a new head revision from an old snapshot;
|
||||
- rename, reorder, project move, and similar operational edits do not create a new revision snapshot;
|
||||
- revision deduplication includes `items`, `server_count`, `total_price`, `custom_price`, `vendor_spec`, pricelist selectors, `disable_price_refresh`, and `only_in_stock`;
|
||||
- BOM updates must use version-aware save flow, not a direct SQL field update;
|
||||
- current revision pointer must be recoverable if legacy or damaged rows are found locally.
|
||||
|
||||
## Sync UX
|
||||
|
||||
UI-facing sync status must never block on live MariaDB calls.
|
||||
|
||||
Rules:
|
||||
- navbar sync indicator and sync info modal read only local cached state from SQLite/app settings;
|
||||
- background/manual sync may talk to MariaDB, but polling endpoints must stay fast even on slow or broken connections;
|
||||
- any MariaDB timeout/invalid-connection during sync must invalidate the cached remote handle immediately so UI stops treating the connection as healthy.
|
||||
|
||||
## Naming collisions
|
||||
|
||||
UI-driven rename and copy flows use one suffix convention for conflicts.
|
||||
|
||||
Rules:
|
||||
- configuration and variant names must auto-resolve collisions with `_копия`, then `_копия2`, `_копия3`, and so on;
|
||||
- copy checkboxes and copy modals must prefill `_копия`, not ` (копия)`;
|
||||
- the literal variant name `main` is reserved and must not be allowed for non-main variants.
|
||||
|
||||
## Vendor BOM contract
|
||||
|
||||
Vendor BOM is stored in `vendor_spec` on the configuration row.
|
||||
|
||||
Rules:
|
||||
- PN to LOT resolution uses the active local partnumber book;
|
||||
- canonical persisted mapping is `lot_mappings[]`;
|
||||
- QuoteForge does not use legacy BOM tables such as `qt_bom`, `qt_lot_bundles`, or `qt_lot_bundle_items`.
|
||||
@@ -1,405 +0,0 @@
|
||||
# 03 - Database
|
||||
|
||||
## SQLite
|
||||
|
||||
SQLite is the local runtime database.
|
||||
|
||||
Main tables:
|
||||
|
||||
| Table | Purpose |
|
||||
| --- | --- |
|
||||
| `local_components` | synced component metadata |
|
||||
| `local_pricelists` | local pricelist headers |
|
||||
| `local_pricelist_items` | local pricelist rows, the only runtime price source |
|
||||
| `local_projects` | user projects |
|
||||
| `local_configurations` | user configurations |
|
||||
| `local_configuration_versions` | immutable revision snapshots |
|
||||
| `local_partnumber_books` | partnumber book headers |
|
||||
| `local_partnumber_book_items` | PN -> LOT catalog payload |
|
||||
| `pending_changes` | sync queue |
|
||||
| `connection_settings` | encrypted MariaDB connection settings |
|
||||
| `app_settings` | local app state |
|
||||
| `local_schema_migrations` | applied local migration markers |
|
||||
|
||||
Rules:
|
||||
- cache tables may be rebuilt if local migration recovery requires it;
|
||||
- user-authored tables must not be dropped as a recovery shortcut;
|
||||
- `local_pricelist_items` is the only valid runtime source of prices;
|
||||
- configuration `items` and `vendor_spec` are stored as JSON payloads inside configuration rows.
|
||||
|
||||
## MariaDB
|
||||
|
||||
MariaDB is the central sync database (`RFQ_LOG`). Final schema as of 2026-03-21.
|
||||
|
||||
### QuoteForge tables (qt_* and stock_*)
|
||||
|
||||
Runtime read:
|
||||
- `qt_categories` — pricelist categories
|
||||
- `qt_lot_metadata` — component metadata, price settings
|
||||
- `qt_pricelists` — pricelist headers (source: estimate / warehouse / competitor)
|
||||
- `qt_pricelist_items` — pricelist rows
|
||||
- `stock_log` — raw supplier price log, source for pricelist generation
|
||||
- `stock_ignore_rules` — patterns to skip during stock import
|
||||
- `qt_partnumber_books` — partnumber book headers
|
||||
- `qt_partnumber_book_items` — PN→LOT catalog payload
|
||||
|
||||
Runtime read/write:
|
||||
- `qt_projects` — projects
|
||||
- `qt_configurations` — configurations
|
||||
- `qt_client_schema_state` — per-client sync status and version tracking
|
||||
- `qt_pricelist_sync_status` — pricelist sync timestamps per user
|
||||
|
||||
Insert-only tracking:
|
||||
- `qt_vendor_partnumber_seen` — vendor partnumbers encountered during sync
|
||||
|
||||
Server-side only (not queried by client runtime):
|
||||
- `qt_component_usage_stats` — aggregated component popularity stats (written by server jobs)
|
||||
- `qt_pricing_alerts` — price anomaly alerts (models exist in Go; feature disabled in runtime)
|
||||
- `qt_schema_migrations` — server migration history (applied via `go run ./cmd/qfs -migrate`)
|
||||
- `qt_scheduler_runs` — server background job tracking (no Go code references it in this repo)
|
||||
|
||||
### Competitor subsystem (server-side only, not used by QuoteForge Go code)
|
||||
|
||||
- `qt_competitors` — competitor registry
|
||||
- `partnumber_log_competitors` — competitor price log (FK → qt_competitors)
|
||||
|
||||
These tables exist in the schema and are maintained by another tool or workflow.
|
||||
QuoteForge references competitor pricelists only via `qt_pricelists` (source='competitor').
|
||||
|
||||
### Legacy RFQ tables (pre-QuoteForge, no Go code references)
|
||||
|
||||
- `lot` — original component registry (data preserved; superseded by `qt_lot_metadata`)
|
||||
- `lot_log` — original supplier price log (superseded by `stock_log`)
|
||||
- `supplier` — supplier registry (FK target for lot_log and machine_log)
|
||||
- `machine` — device model registry
|
||||
- `machine_log` — device price/quote log
|
||||
|
||||
These tables are retained for historical data. QuoteForge does not read or write them at runtime.
|
||||
|
||||
Rules:
|
||||
- QuoteForge runtime must not depend on any legacy RFQ tables;
|
||||
- stock enrichment happens during sync and is persisted into SQLite;
|
||||
- normal UI requests must not query MariaDB tables directly;
|
||||
- `qt_client_local_migrations` was removed from the schema on 2026-03-21 (was in earlier drafts).
|
||||
|
||||
## MariaDB Table Structures
|
||||
|
||||
Full column reference as of 2026-03-21 (`RFQ_LOG` final schema).
|
||||
|
||||
### qt_categories
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
||||
| code | varchar(20) UNIQUE NOT NULL | |
|
||||
| name | varchar(100) NOT NULL | |
|
||||
| name_ru | varchar(100) | |
|
||||
| display_order | bigint DEFAULT 0 | |
|
||||
| is_required | tinyint(1) DEFAULT 0 | |
|
||||
|
||||
### qt_client_schema_state
|
||||
PK: (username, hostname)
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| username | varchar(100) | |
|
||||
| hostname | varchar(255) DEFAULT '' | |
|
||||
| last_applied_migration_id | varchar(128) | |
|
||||
| app_version | varchar(64) | |
|
||||
| last_sync_at | datetime | |
|
||||
| last_sync_status | varchar(32) | |
|
||||
| pending_changes_count | int DEFAULT 0 | |
|
||||
| pending_errors_count | int DEFAULT 0 | |
|
||||
| configurations_count | int DEFAULT 0 | |
|
||||
| projects_count | int DEFAULT 0 | |
|
||||
| estimate_pricelist_version | varchar(128) | |
|
||||
| warehouse_pricelist_version | varchar(128) | |
|
||||
| competitor_pricelist_version | varchar(128) | |
|
||||
| last_sync_error_code | varchar(128) | |
|
||||
| last_sync_error_text | text | |
|
||||
| last_checked_at | datetime NOT NULL | |
|
||||
| updated_at | datetime NOT NULL | |
|
||||
|
||||
### qt_component_usage_stats
|
||||
PK: lot_name
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| lot_name | varchar(255) | |
|
||||
| quotes_total | bigint DEFAULT 0 | |
|
||||
| quotes_last30d | bigint DEFAULT 0 | |
|
||||
| quotes_last7d | bigint DEFAULT 0 | |
|
||||
| total_quantity | bigint DEFAULT 0 | |
|
||||
| total_revenue | decimal(14,2) DEFAULT 0 | |
|
||||
| trend_direction | enum('up','stable','down') DEFAULT 'stable' | |
|
||||
| trend_percent | decimal(5,2) DEFAULT 0 | |
|
||||
| last_used_at | datetime(3) | |
|
||||
|
||||
### qt_competitors
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
||||
| name | varchar(255) NOT NULL | |
|
||||
| code | varchar(100) UNIQUE NOT NULL | |
|
||||
| delivery_basis | varchar(50) DEFAULT 'DDP' | |
|
||||
| currency | varchar(10) DEFAULT 'USD' | |
|
||||
| column_mapping | longtext JSON | |
|
||||
| is_active | tinyint(1) DEFAULT 1 | |
|
||||
| created_at | timestamp | |
|
||||
| updated_at | timestamp ON UPDATE | |
|
||||
| price_uplift | decimal(8,4) DEFAULT 1.3 | effective_price = price / price_uplift |
|
||||
|
||||
### qt_configurations
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
||||
| uuid | varchar(36) UNIQUE NOT NULL | |
|
||||
| user_id | bigint UNSIGNED | |
|
||||
| owner_username | varchar(100) NOT NULL | |
|
||||
| app_version | varchar(64) | |
|
||||
| project_uuid | char(36) | FK → qt_projects.uuid ON DELETE SET NULL |
|
||||
| name | varchar(200) NOT NULL | |
|
||||
| items | longtext JSON NOT NULL | component list |
|
||||
| total_price | decimal(12,2) | |
|
||||
| notes | text | |
|
||||
| is_template | tinyint(1) DEFAULT 0 | |
|
||||
| created_at | datetime(3) | |
|
||||
| custom_price | decimal(12,2) | |
|
||||
| server_count | bigint DEFAULT 1 | |
|
||||
| server_model | varchar(100) | |
|
||||
| support_code | varchar(20) | |
|
||||
| article | varchar(80) | |
|
||||
| pricelist_id | bigint UNSIGNED | FK → qt_pricelists.id |
|
||||
| warehouse_pricelist_id | bigint UNSIGNED | FK → qt_pricelists.id |
|
||||
| competitor_pricelist_id | bigint UNSIGNED | FK → qt_pricelists.id |
|
||||
| disable_price_refresh | tinyint(1) DEFAULT 0 | |
|
||||
| only_in_stock | tinyint(1) DEFAULT 0 | |
|
||||
| line_no | int | position within project |
|
||||
| price_updated_at | timestamp | |
|
||||
| vendor_spec | longtext JSON | |
|
||||
|
||||
### qt_lot_metadata
|
||||
PK: lot_name
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| lot_name | varchar(255) | |
|
||||
| category_id | bigint UNSIGNED | FK → qt_categories.id |
|
||||
| vendor | varchar(50) | |
|
||||
| model | varchar(100) | |
|
||||
| specs | longtext JSON | |
|
||||
| current_price | decimal(12,2) | cached computed price |
|
||||
| price_method | enum('manual','median','average','weighted_median') DEFAULT 'median' | |
|
||||
| price_period_days | bigint DEFAULT 90 | |
|
||||
| price_updated_at | datetime(3) | |
|
||||
| request_count | bigint DEFAULT 0 | |
|
||||
| last_request_date | date | |
|
||||
| popularity_score | decimal(10,4) DEFAULT 0 | |
|
||||
| price_coefficient | decimal(5,2) DEFAULT 0 | markup % |
|
||||
| manual_price | decimal(12,2) | |
|
||||
| meta_prices | varchar(1000) | raw price samples JSON |
|
||||
| meta_method | varchar(20) | method used for last compute |
|
||||
| meta_period_days | bigint DEFAULT 90 | |
|
||||
| is_hidden | tinyint(1) DEFAULT 0 | |
|
||||
|
||||
### qt_partnumber_books
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
||||
| version | varchar(30) UNIQUE NOT NULL | |
|
||||
| created_at | timestamp | |
|
||||
| created_by | varchar(100) | |
|
||||
| is_active | tinyint(1) DEFAULT 0 | only one active at a time |
|
||||
| partnumbers_json | longtext DEFAULT '[]' | flat list of partnumbers |
|
||||
|
||||
### qt_partnumber_book_items
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
||||
| partnumber | varchar(255) UNIQUE NOT NULL | |
|
||||
| lots_json | longtext NOT NULL | JSON array of lot_names |
|
||||
| description | varchar(10000) | |
|
||||
|
||||
### qt_pricelists
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
||||
| source | varchar(20) DEFAULT 'estimate' | 'estimate' / 'warehouse' / 'competitor' |
|
||||
| version | varchar(20) NOT NULL | UNIQUE with source |
|
||||
| created_at | datetime(3) | |
|
||||
| created_by | varchar(100) | |
|
||||
| is_active | tinyint(1) DEFAULT 1 | |
|
||||
| usage_count | bigint DEFAULT 0 | |
|
||||
| expires_at | datetime(3) | |
|
||||
| notification | varchar(500) | shown to clients on sync |
|
||||
|
||||
### qt_pricelist_items
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
||||
| pricelist_id | bigint UNSIGNED NOT NULL | FK → qt_pricelists.id |
|
||||
| lot_name | varchar(255) NOT NULL | INDEX with pricelist_id |
|
||||
| lot_category | varchar(50) | |
|
||||
| price | decimal(12,2) NOT NULL | |
|
||||
| price_method | varchar(20) | |
|
||||
| price_period_days | bigint DEFAULT 90 | |
|
||||
| price_coefficient | decimal(5,2) DEFAULT 0 | |
|
||||
| manual_price | decimal(12,2) | |
|
||||
| meta_prices | varchar(1000) | |
|
||||
|
||||
### qt_pricelist_sync_status
|
||||
PK: username
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| username | varchar(100) | |
|
||||
| last_sync_at | datetime NOT NULL | |
|
||||
| updated_at | datetime NOT NULL | |
|
||||
| app_version | varchar(64) | |
|
||||
|
||||
### qt_pricing_alerts
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
||||
| lot_name | varchar(255) NOT NULL | |
|
||||
| alert_type | enum('high_demand_stale_price','price_spike','price_drop','no_recent_quotes','trending_no_price') | |
|
||||
| severity | enum('low','medium','high','critical') DEFAULT 'medium' | |
|
||||
| message | text NOT NULL | |
|
||||
| details | longtext JSON | |
|
||||
| status | enum('new','acknowledged','resolved','ignored') DEFAULT 'new' | |
|
||||
| created_at | datetime(3) | |
|
||||
|
||||
### qt_projects
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
||||
| uuid | char(36) UNIQUE NOT NULL | |
|
||||
| owner_username | varchar(100) NOT NULL | |
|
||||
| code | varchar(100) NOT NULL | UNIQUE with variant |
|
||||
| variant | varchar(100) DEFAULT '' | UNIQUE with code |
|
||||
| name | varchar(200) | |
|
||||
| tracker_url | varchar(500) | |
|
||||
| is_active | tinyint(1) DEFAULT 1 | |
|
||||
| is_system | tinyint(1) DEFAULT 0 | |
|
||||
| created_at | timestamp | |
|
||||
| updated_at | timestamp ON UPDATE | |
|
||||
|
||||
### qt_schema_migrations
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
||||
| filename | varchar(255) UNIQUE NOT NULL | |
|
||||
| applied_at | datetime(3) | |
|
||||
|
||||
### qt_scheduler_runs
|
||||
PK: job_name
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| job_name | varchar(100) | |
|
||||
| last_started_at | datetime | |
|
||||
| last_finished_at | datetime | |
|
||||
| last_status | varchar(20) DEFAULT 'idle' | |
|
||||
| last_error | text | |
|
||||
| updated_at | timestamp ON UPDATE | |
|
||||
|
||||
### qt_vendor_partnumber_seen
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
||||
| source_type | varchar(32) NOT NULL | |
|
||||
| vendor | varchar(255) DEFAULT '' | |
|
||||
| partnumber | varchar(255) UNIQUE NOT NULL | |
|
||||
| description | varchar(10000) | |
|
||||
| last_seen_at | datetime(3) NOT NULL | |
|
||||
| is_ignored | tinyint(1) DEFAULT 0 | |
|
||||
| is_pattern | tinyint(1) DEFAULT 0 | |
|
||||
| ignored_at | datetime(3) | |
|
||||
| ignored_by | varchar(100) | |
|
||||
| created_at | datetime(3) | |
|
||||
| updated_at | datetime(3) | |
|
||||
|
||||
### stock_ignore_rules
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
||||
| target | varchar(20) NOT NULL | UNIQUE with match_type+pattern |
|
||||
| match_type | varchar(20) NOT NULL | |
|
||||
| pattern | varchar(500) NOT NULL | |
|
||||
| created_at | timestamp | |
|
||||
|
||||
### stock_log
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| stock_log_id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
||||
| partnumber | varchar(255) NOT NULL | INDEX with date |
|
||||
| supplier | varchar(255) | |
|
||||
| date | date NOT NULL | |
|
||||
| price | decimal(12,2) NOT NULL | |
|
||||
| quality | varchar(255) | |
|
||||
| comments | text | |
|
||||
| vendor | varchar(255) | INDEX |
|
||||
| qty | decimal(14,3) | |
|
||||
|
||||
### partnumber_log_competitors
|
||||
| Column | Type | Notes |
|
||||
|--------|------|-------|
|
||||
| id | bigint UNSIGNED PK AUTO_INCREMENT | |
|
||||
| competitor_id | bigint UNSIGNED NOT NULL | FK → qt_competitors.id |
|
||||
| partnumber | varchar(255) NOT NULL | |
|
||||
| description | varchar(500) | |
|
||||
| vendor | varchar(255) | |
|
||||
| price | decimal(12,2) NOT NULL | |
|
||||
| price_loccur | decimal(12,2) | local currency price |
|
||||
| currency | varchar(10) | |
|
||||
| qty | decimal(12,4) DEFAULT 1 | |
|
||||
| date | date NOT NULL | |
|
||||
| created_at | timestamp | |
|
||||
|
||||
### Legacy tables (lot / lot_log / machine / machine_log / supplier)
|
||||
|
||||
Retained for historical data only. Not queried by QuoteForge.
|
||||
|
||||
**lot**: lot_name (PK, char 255), lot_category, lot_description
|
||||
**lot_log**: lot_log_id AUTO_INCREMENT, lot (FK→lot), supplier (FK→supplier), date, price double, quality, comments
|
||||
**supplier**: supplier_name (PK, char 255), supplier_comment
|
||||
**machine**: machine_name (PK, char 255), machine_description
|
||||
**machine_log**: machine_log_id AUTO_INCREMENT, date, supplier (FK→supplier), country, opty, type, machine (FK→machine), customer_requirement, variant, price_gpl, price_estimate, qty, quality, carepack, lead_time_weeks, prepayment_percent, price_got, Comment
|
||||
|
||||
## MariaDB User Permissions
|
||||
|
||||
The application user needs read-only access to reference tables and read/write access to runtime tables.
|
||||
|
||||
```sql
|
||||
-- Read-only: reference and pricing data
|
||||
GRANT SELECT ON RFQ_LOG.qt_categories TO 'qfs_user'@'%';
|
||||
GRANT SELECT ON RFQ_LOG.qt_lot_metadata TO 'qfs_user'@'%';
|
||||
GRANT SELECT ON RFQ_LOG.qt_pricelists TO 'qfs_user'@'%';
|
||||
GRANT SELECT ON RFQ_LOG.qt_pricelist_items TO 'qfs_user'@'%';
|
||||
GRANT SELECT ON RFQ_LOG.stock_log TO 'qfs_user'@'%';
|
||||
GRANT SELECT ON RFQ_LOG.stock_ignore_rules TO 'qfs_user'@'%';
|
||||
GRANT SELECT ON RFQ_LOG.qt_partnumber_books TO 'qfs_user'@'%';
|
||||
GRANT SELECT ON RFQ_LOG.qt_partnumber_book_items TO 'qfs_user'@'%';
|
||||
GRANT SELECT ON RFQ_LOG.lot TO 'qfs_user'@'%';
|
||||
|
||||
-- Read/write: runtime sync and user data
|
||||
GRANT SELECT, INSERT, UPDATE, DELETE ON RFQ_LOG.qt_projects TO 'qfs_user'@'%';
|
||||
GRANT SELECT, INSERT, UPDATE, DELETE ON RFQ_LOG.qt_configurations TO 'qfs_user'@'%';
|
||||
GRANT SELECT, INSERT, UPDATE ON RFQ_LOG.qt_client_schema_state TO 'qfs_user'@'%';
|
||||
GRANT SELECT, INSERT, UPDATE ON RFQ_LOG.qt_pricelist_sync_status TO 'qfs_user'@'%';
|
||||
GRANT SELECT, INSERT, UPDATE ON RFQ_LOG.qt_vendor_partnumber_seen TO 'qfs_user'@'%';
|
||||
|
||||
FLUSH PRIVILEGES;
|
||||
```
|
||||
|
||||
Rules:
|
||||
- `qt_client_schema_state` requires INSERT + UPDATE for sync status tracking (uses `ON DUPLICATE KEY UPDATE`);
|
||||
- `qt_vendor_partnumber_seen` requires INSERT + UPDATE (vendor PN discovery during sync);
|
||||
- no DELETE is needed on sync/tracking tables — rows are never removed by the client;
|
||||
- `lot` SELECT is required for the connection validation probe in `/setup`;
|
||||
- the setup page shows `can_write: true` only when `qt_client_schema_state` INSERT succeeds.
|
||||
|
||||
## Migrations
|
||||
|
||||
SQLite:
|
||||
- schema creation and additive changes go through GORM `AutoMigrate`;
|
||||
- data fixes, index repair, and one-off rewrites go through `runLocalMigrations`;
|
||||
- local migration state is tracked in `local_schema_migrations`.
|
||||
|
||||
MariaDB:
|
||||
- SQL files live in `migrations/`;
|
||||
- they are applied by `go run ./cmd/qfs -migrate`.
|
||||
@@ -1,125 +0,0 @@
|
||||
# 04 - API
|
||||
|
||||
## Public web routes
|
||||
|
||||
| Route | Purpose |
|
||||
| --- | --- |
|
||||
| `/` | configurator |
|
||||
| `/configs` | configuration list |
|
||||
| `/configs/:uuid/revisions` | revision history page |
|
||||
| `/projects` | project list |
|
||||
| `/projects/:uuid` | project detail |
|
||||
| `/pricelists` | pricelist list |
|
||||
| `/pricelists/:id` | pricelist detail |
|
||||
| `/partnumber-books` | partnumber book page |
|
||||
| `/setup` | DB setup page |
|
||||
|
||||
## Setup and health
|
||||
|
||||
| Method | Path | Purpose |
|
||||
| --- | --- | --- |
|
||||
| `GET` | `/health` | process health |
|
||||
| `GET` | `/setup` | setup page |
|
||||
| `POST` | `/setup` | save tested DB settings |
|
||||
| `POST` | `/setup/test` | test DB connection |
|
||||
| `GET` | `/setup/status` | setup status |
|
||||
| `GET` | `/api/db-status` | current DB/sync status |
|
||||
| `GET` | `/api/current-user` | local user identity |
|
||||
| `GET` | `/api/ping` | lightweight API ping |
|
||||
|
||||
`POST /api/restart` exists only in `debug` mode.
|
||||
|
||||
## Reference data
|
||||
|
||||
| Method | Path | Purpose |
|
||||
| --- | --- | --- |
|
||||
| `GET` | `/api/components` | list component metadata |
|
||||
| `GET` | `/api/components/:lot_name` | one component |
|
||||
| `GET` | `/api/categories` | list categories |
|
||||
| `GET` | `/api/pricelists` | list local pricelists |
|
||||
| `GET` | `/api/pricelists/latest` | latest pricelist by source |
|
||||
| `GET` | `/api/pricelists/:id` | pricelist header |
|
||||
| `GET` | `/api/pricelists/:id/items` | pricelist rows |
|
||||
| `GET` | `/api/pricelists/:id/lots` | lot names in a pricelist |
|
||||
| `GET` | `/api/partnumber-books` | local partnumber books |
|
||||
| `GET` | `/api/partnumber-books/:id` | book items by `server_id` |
|
||||
|
||||
## Quote and export
|
||||
|
||||
| Method | Path | Purpose |
|
||||
| --- | --- | --- |
|
||||
| `POST` | `/api/quote/validate` | validate config items |
|
||||
| `POST` | `/api/quote/calculate` | calculate quote totals |
|
||||
| `POST` | `/api/quote/price-levels` | resolve estimate/warehouse/competitor prices |
|
||||
| `POST` | `/api/export/csv` | export a single configuration |
|
||||
| `GET` | `/api/configs/:uuid/export` | export a stored configuration |
|
||||
| `GET` | `/api/projects/:uuid/export` | legacy project BOM export |
|
||||
| `POST` | `/api/projects/:uuid/export` | pricing-tab project export |
|
||||
|
||||
## Configurations
|
||||
|
||||
| Method | Path | Purpose |
|
||||
| --- | --- | --- |
|
||||
| `GET` | `/api/configs` | list configurations |
|
||||
| `POST` | `/api/configs/import` | import configurations from server |
|
||||
| `POST` | `/api/configs` | create configuration |
|
||||
| `POST` | `/api/configs/preview-article` | preview article |
|
||||
| `GET` | `/api/configs/:uuid` | get configuration |
|
||||
| `PUT` | `/api/configs/:uuid` | update configuration |
|
||||
| `DELETE` | `/api/configs/:uuid` | archive configuration |
|
||||
| `POST` | `/api/configs/:uuid/reactivate` | reactivate configuration |
|
||||
| `PATCH` | `/api/configs/:uuid/rename` | rename configuration |
|
||||
| `POST` | `/api/configs/:uuid/clone` | clone configuration |
|
||||
| `POST` | `/api/configs/:uuid/refresh-prices` | refresh prices |
|
||||
| `PATCH` | `/api/configs/:uuid/project` | move configuration to project |
|
||||
| `GET` | `/api/configs/:uuid/versions` | list revisions |
|
||||
| `GET` | `/api/configs/:uuid/versions/:version` | get one revision |
|
||||
| `POST` | `/api/configs/:uuid/rollback` | rollback by creating a new head revision |
|
||||
| `PATCH` | `/api/configs/:uuid/server-count` | update server count |
|
||||
| `GET` | `/api/configs/:uuid/vendor-spec` | read vendor BOM |
|
||||
| `PUT` | `/api/configs/:uuid/vendor-spec` | replace vendor BOM |
|
||||
| `POST` | `/api/configs/:uuid/vendor-spec/resolve` | resolve PN -> LOT |
|
||||
| `POST` | `/api/configs/:uuid/vendor-spec/apply` | apply BOM to cart |
|
||||
|
||||
## Projects
|
||||
|
||||
| Method | Path | Purpose |
|
||||
| --- | --- | --- |
|
||||
| `GET` | `/api/projects` | paginated project list |
|
||||
| `GET` | `/api/projects/all` | lightweight list for dropdowns |
|
||||
| `POST` | `/api/projects` | create project |
|
||||
| `GET` | `/api/projects/:uuid` | get project |
|
||||
| `PUT` | `/api/projects/:uuid` | update project |
|
||||
| `POST` | `/api/projects/:uuid/archive` | archive project |
|
||||
| `POST` | `/api/projects/:uuid/reactivate` | reactivate project |
|
||||
| `DELETE` | `/api/projects/:uuid` | delete project variant only |
|
||||
| `GET` | `/api/projects/:uuid/configs` | list project configurations |
|
||||
| `PATCH` | `/api/projects/:uuid/configs/reorder` | persist line order |
|
||||
| `POST` | `/api/projects/:uuid/configs` | create configuration inside project |
|
||||
| `POST` | `/api/projects/:uuid/configs/:config_uuid/clone` | clone config into project |
|
||||
| `POST` | `/api/projects/:uuid/vendor-import` | import CFXML workspace into project |
|
||||
|
||||
Vendor import contract:
|
||||
- multipart field name is `file`;
|
||||
- file limit is `1 GiB`;
|
||||
- oversized payloads are rejected before XML parsing.
|
||||
|
||||
## Sync
|
||||
|
||||
| Method | Path | Purpose |
|
||||
| --- | --- | --- |
|
||||
| `GET` | `/api/sync/status` | sync status |
|
||||
| `GET` | `/api/sync/readiness` | sync readiness |
|
||||
| `GET` | `/api/sync/info` | sync modal data |
|
||||
| `GET` | `/api/sync/users-status` | remote user status |
|
||||
| `GET` | `/api/sync/pending/count` | pending queue count |
|
||||
| `GET` | `/api/sync/pending` | pending queue rows |
|
||||
| `POST` | `/api/sync/components` | pull components |
|
||||
| `POST` | `/api/sync/pricelists` | pull pricelists |
|
||||
| `POST` | `/api/sync/partnumber-books` | pull partnumber books |
|
||||
| `POST` | `/api/sync/partnumber-seen` | report unresolved vendor PN |
|
||||
| `POST` | `/api/sync/all` | push and pull full sync |
|
||||
| `POST` | `/api/sync/push` | push pending changes |
|
||||
| `POST` | `/api/sync/repair` | repair broken pending rows |
|
||||
|
||||
When readiness is blocked, sync write endpoints return `423 Locked`.
|
||||
@@ -1,74 +0,0 @@
|
||||
# 05 - Config
|
||||
|
||||
## Runtime files
|
||||
|
||||
| Artifact | Default location |
|
||||
| --- | --- |
|
||||
| `qfs.db` | OS-specific user state directory |
|
||||
| `config.yaml` | same state directory as `qfs.db` |
|
||||
| `local_encryption.key` | same state directory as `qfs.db` |
|
||||
| `backups/` | next to `qfs.db` unless overridden |
|
||||
|
||||
The runtime state directory can be overridden with `QFS_STATE_DIR`.
|
||||
Direct paths can be overridden with `QFS_DB_PATH` and `QFS_CONFIG_PATH`.
|
||||
|
||||
## Runtime config shape
|
||||
|
||||
Runtime keeps `config.yaml` intentionally small:
|
||||
|
||||
```yaml
|
||||
server:
|
||||
host: "127.0.0.1"
|
||||
port: 8080
|
||||
mode: "release"
|
||||
read_timeout: 30s
|
||||
write_timeout: 30s
|
||||
|
||||
backup:
|
||||
time: "00:00"
|
||||
|
||||
logging:
|
||||
level: "info"
|
||||
format: "json"
|
||||
output: "stdout"
|
||||
```
|
||||
|
||||
Rules:
|
||||
- QuoteForge creates this file automatically if it does not exist;
|
||||
- startup rewrites legacy config files into this minimal runtime shape;
|
||||
- startup normalizes any `server.host` value to `127.0.0.1` before saving the runtime config;
|
||||
- `server.host` must stay on loopback.
|
||||
|
||||
Saved MariaDB credentials do not live in `config.yaml`.
|
||||
They are stored in SQLite and encrypted with `local_encryption.key` unless `QUOTEFORGE_ENCRYPTION_KEY` overrides the key material.
|
||||
|
||||
## Environment variables
|
||||
|
||||
| Variable | Purpose |
|
||||
| --- | --- |
|
||||
| `QFS_STATE_DIR` | override runtime state directory |
|
||||
| `QFS_DB_PATH` | explicit SQLite path |
|
||||
| `QFS_CONFIG_PATH` | explicit config path |
|
||||
| `QFS_BACKUP_DIR` | explicit backup root |
|
||||
| `QFS_BACKUP_DISABLE` | disable rotating backups |
|
||||
| `QUOTEFORGE_ENCRYPTION_KEY` | override encryption key |
|
||||
| `QF_SERVER_PORT` | override HTTP port |
|
||||
|
||||
`QFS_BACKUP_DISABLE` accepts `1`, `true`, or `yes`.
|
||||
|
||||
## CLI flags
|
||||
|
||||
| Flag | Purpose |
|
||||
| --- | --- |
|
||||
| `-config <path>` | config file path |
|
||||
| `-localdb <path>` | SQLite path |
|
||||
| `-reset-localdb` | destructive local DB reset |
|
||||
| `-migrate` | apply server migrations and exit |
|
||||
| `-version` | print app version and exit |
|
||||
|
||||
## First run
|
||||
|
||||
1. runtime ensures `config.yaml` exists;
|
||||
2. runtime opens the local SQLite database;
|
||||
3. if no stored MariaDB credentials exist, `/setup` is served;
|
||||
4. after setup, runtime works locally and sync uses saved DB settings in the background.
|
||||
@@ -1,55 +0,0 @@
|
||||
# 06 - Backup
|
||||
|
||||
## Scope
|
||||
|
||||
QuoteForge creates rotating local ZIP backups of:
|
||||
- a consistent SQLite snapshot saved as `qfs.db`;
|
||||
- `config.yaml` when present.
|
||||
|
||||
The backup intentionally does not include `local_encryption.key`.
|
||||
|
||||
## Location and naming
|
||||
|
||||
Default root:
|
||||
- `<db dir>/backups`
|
||||
|
||||
Subdirectories:
|
||||
- `daily/`
|
||||
- `weekly/`
|
||||
- `monthly/`
|
||||
- `yearly/`
|
||||
|
||||
Archive name:
|
||||
- `qfs-backp-YYYY-MM-DD.zip`
|
||||
|
||||
## Retention
|
||||
|
||||
| Period | Keep |
|
||||
| --- | --- |
|
||||
| Daily | 7 |
|
||||
| Weekly | 4 |
|
||||
| Monthly | 12 |
|
||||
| Yearly | 10 |
|
||||
|
||||
## Behavior
|
||||
|
||||
- on startup, QuoteForge creates a backup if the current period has none yet;
|
||||
- a daily scheduler creates the next backup at `backup.time`;
|
||||
- duplicate snapshots inside the same period are prevented by a period marker file;
|
||||
- old archives are pruned automatically.
|
||||
|
||||
## Safety rules
|
||||
|
||||
- backup root must be outside the git worktree;
|
||||
- backup creation is blocked if the resolved backup root sits inside the repository;
|
||||
- SQLite snapshot must be created from a consistent database copy, not by copying live WAL files directly;
|
||||
- restore to another machine requires re-entering DB credentials unless the encryption key is migrated separately.
|
||||
|
||||
## Restore
|
||||
|
||||
1. stop QuoteForge;
|
||||
2. unpack the chosen archive outside the repository;
|
||||
3. replace `qfs.db`;
|
||||
4. replace `config.yaml` if needed;
|
||||
5. restart the app;
|
||||
6. re-enter MariaDB credentials if the original encryption key is unavailable.
|
||||
@@ -1,35 +0,0 @@
|
||||
# 07 - Development
|
||||
|
||||
## Common commands
|
||||
|
||||
```bash
|
||||
go run ./cmd/qfs
|
||||
go run ./cmd/qfs -migrate
|
||||
go run ./cmd/migrate_project_updated_at
|
||||
go test ./...
|
||||
go vet ./...
|
||||
make build-release
|
||||
make install-hooks
|
||||
```
|
||||
|
||||
## Guardrails
|
||||
|
||||
- run `gofmt` before commit;
|
||||
- use `slog` for server logging;
|
||||
- keep runtime business logic SQLite-only;
|
||||
- limit MariaDB access to sync, setup, and migration tooling;
|
||||
- keep `config.yaml` out of git and use `config.example.yaml` only as a template;
|
||||
- update `bible-local/` in the same commit as architecture changes.
|
||||
|
||||
## Removed features that must not return
|
||||
|
||||
- admin pricing UI/API;
|
||||
- alerts and notification workflows;
|
||||
- stock import tooling;
|
||||
- cron jobs;
|
||||
- standalone importer utility.
|
||||
|
||||
## Release notes
|
||||
|
||||
Release history belongs under `releases/<version>/RELEASE_NOTES.md`.
|
||||
Do not keep temporary change summaries in the repository root.
|
||||
@@ -1,64 +0,0 @@
|
||||
# 09 - Vendor BOM
|
||||
|
||||
## Storage contract
|
||||
|
||||
Vendor BOM is stored in `local_configurations.vendor_spec` and synced with `qt_configurations.vendor_spec`.
|
||||
|
||||
Each row uses this canonical shape:
|
||||
|
||||
```json
|
||||
{
|
||||
"sort_order": 10,
|
||||
"vendor_partnumber": "ABC-123",
|
||||
"quantity": 2,
|
||||
"description": "row description",
|
||||
"unit_price": 4500.0,
|
||||
"total_price": 9000.0,
|
||||
"lot_mappings": [
|
||||
{ "lot_name": "LOT_A", "quantity_per_pn": 1 }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Rules:
|
||||
- `lot_mappings[]` is the only persisted PN -> LOT mapping contract;
|
||||
- QuoteForge does not use legacy BOM tables;
|
||||
- apply flow rebuilds cart rows from `lot_mappings[]`.
|
||||
|
||||
## Partnumber books
|
||||
|
||||
Partnumber books are pull-only snapshots from PriceForge.
|
||||
|
||||
Local tables:
|
||||
- `local_partnumber_books`
|
||||
- `local_partnumber_book_items`
|
||||
|
||||
Server tables:
|
||||
- `qt_partnumber_books`
|
||||
- `qt_partnumber_book_items`
|
||||
|
||||
Resolution flow:
|
||||
1. load the active local book;
|
||||
2. find `vendor_partnumber`;
|
||||
3. copy `lots_json` into `lot_mappings[]`;
|
||||
4. keep unresolved rows editable in the UI.
|
||||
|
||||
## CFXML import
|
||||
|
||||
`POST /api/projects/:uuid/vendor-import` imports one vendor workspace into an existing project.
|
||||
|
||||
Rules:
|
||||
- accepted file field is `file`;
|
||||
- maximum file size is `1 GiB`;
|
||||
- one `ProprietaryGroupIdentifier` becomes one QuoteForge configuration;
|
||||
- software rows stay inside their hardware group and never become standalone configurations;
|
||||
- primary group row is selected structurally, without vendor-specific SKU hardcoding;
|
||||
- imported configuration order follows workspace order.
|
||||
|
||||
Imported configuration fields:
|
||||
- `name` from primary row `ProductName`
|
||||
- `server_count` from primary row `Quantity`
|
||||
- `server_model` from primary row `ProductDescription`
|
||||
- `article` or `support_code` from `ProprietaryProductIdentifier`
|
||||
|
||||
Imported BOM rows become `vendor_spec` rows and are resolved through the active local partnumber book when possible.
|
||||
@@ -1,30 +0,0 @@
|
||||
# QuoteForge Bible
|
||||
|
||||
Project-specific architecture and operational contracts.
|
||||
|
||||
## Files
|
||||
|
||||
| File | Scope |
|
||||
| --- | --- |
|
||||
| [01-overview.md](01-overview.md) | Product scope, runtime model, repository map |
|
||||
| [02-architecture.md](02-architecture.md) | Local-first rules, sync, pricing, versioning |
|
||||
| [03-database.md](03-database.md) | SQLite and MariaDB data model, permissions, migrations |
|
||||
| [04-api.md](04-api.md) | HTTP routes and API contract |
|
||||
| [05-config.md](05-config.md) | Runtime config, paths, env vars, startup behavior |
|
||||
| [06-backup.md](06-backup.md) | Backup contract and restore workflow |
|
||||
| [07-dev.md](07-dev.md) | Development commands and guardrails |
|
||||
| [09-vendor-spec.md](09-vendor-spec.md) | Vendor BOM and CFXML import contract |
|
||||
|
||||
## Rules
|
||||
|
||||
- `bible-local/` is the source of truth for QuoteForge-specific behavior.
|
||||
- Keep these files in English.
|
||||
- Update the matching file in the same commit as any architectural change.
|
||||
- Remove stale documentation instead of preserving history in place.
|
||||
|
||||
## Quick reference
|
||||
|
||||
- Local DB path: see [05-config.md](05-config.md)
|
||||
- Runtime bind: loopback only
|
||||
- Local backups: see [06-backup.md](06-backup.md)
|
||||
- Release notes: `releases/<version>/RELEASE_NOTES.md`
|
||||
84
cmd/cron/main.go
Normal file
84
cmd/cron/main.go
Normal file
@@ -0,0 +1,84 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/services/alerts"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/services/pricing"
|
||||
"gorm.io/driver/mysql"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
)
|
||||
|
||||
func main() {
|
||||
configPath := flag.String("config", "config.yaml", "path to config file")
|
||||
cronJob := flag.String("job", "", "type of cron job to run (alerts, update-prices)")
|
||||
flag.Parse()
|
||||
|
||||
cfg, err := config.Load(*configPath)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load config: %v", err)
|
||||
}
|
||||
|
||||
db, err := gorm.Open(mysql.Open(cfg.Database.DSN()), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to connect to database: %v", err)
|
||||
}
|
||||
|
||||
// Ensure tables exist
|
||||
if err := models.Migrate(db); err != nil {
|
||||
log.Fatalf("Migration failed: %v", err)
|
||||
}
|
||||
|
||||
// Initialize repositories
|
||||
statsRepo := repository.NewStatsRepository(db)
|
||||
alertRepo := repository.NewAlertRepository(db)
|
||||
componentRepo := repository.NewComponentRepository(db)
|
||||
priceRepo := repository.NewPriceRepository(db)
|
||||
|
||||
// Initialize services
|
||||
alertService := alerts.NewService(alertRepo, componentRepo, priceRepo, statsRepo, cfg.Alerts, cfg.Pricing)
|
||||
pricingService := pricing.NewService(componentRepo, priceRepo, cfg.Pricing)
|
||||
|
||||
switch *cronJob {
|
||||
case "alerts":
|
||||
log.Println("Running alerts check...")
|
||||
if err := alertService.CheckAndGenerateAlerts(); err != nil {
|
||||
log.Printf("Error running alerts check: %v", err)
|
||||
} else {
|
||||
log.Println("Alerts check completed successfully")
|
||||
}
|
||||
case "update-prices":
|
||||
log.Println("Recalculating all prices...")
|
||||
updated, errors := pricingService.RecalculateAllPrices()
|
||||
log.Printf("Prices recalculated: %d updated, %d errors", updated, errors)
|
||||
case "reset-counters":
|
||||
log.Println("Resetting usage counters...")
|
||||
if err := statsRepo.ResetWeeklyCounters(); err != nil {
|
||||
log.Printf("Error resetting weekly counters: %v", err)
|
||||
}
|
||||
if err := statsRepo.ResetMonthlyCounters(); err != nil {
|
||||
log.Printf("Error resetting monthly counters: %v", err)
|
||||
}
|
||||
log.Println("Usage counters reset completed")
|
||||
case "update-popularity":
|
||||
log.Println("Updating popularity scores...")
|
||||
if err := statsRepo.UpdatePopularityScores(); err != nil {
|
||||
log.Printf("Error updating popularity scores: %v", err)
|
||||
} else {
|
||||
log.Println("Popularity scores updated successfully")
|
||||
}
|
||||
default:
|
||||
log.Println("No valid cron job specified. Available jobs:")
|
||||
log.Println(" - alerts: Check and generate alerts")
|
||||
log.Println(" - update-prices: Recalculate all prices")
|
||||
log.Println(" - reset-counters: Reset usage counters")
|
||||
log.Println(" - update-popularity: Update popularity scores")
|
||||
}
|
||||
}
|
||||
160
cmd/importer/main.go
Normal file
160
cmd/importer/main.go
Normal file
@@ -0,0 +1,160 @@
|
||||
package main
|
||||
|
||||
import (
	"errors"
	"flag"
	"log"
	"strings"

	"git.mchus.pro/mchus/quoteforge/internal/config"
	"git.mchus.pro/mchus/quoteforge/internal/models"
	"gorm.io/driver/mysql"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
)
|
||||
|
||||
func main() {
|
||||
configPath := flag.String("config", "config.yaml", "path to config file")
|
||||
flag.Parse()
|
||||
|
||||
cfg, err := config.Load(*configPath)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load config: %v", err)
|
||||
}
|
||||
|
||||
db, err := gorm.Open(mysql.Open(cfg.Database.DSN()), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to connect to database: %v", err)
|
||||
}
|
||||
|
||||
log.Println("Connected to database")
|
||||
|
||||
// Ensure tables exist
|
||||
if err := models.Migrate(db); err != nil {
|
||||
log.Fatalf("Migration failed: %v", err)
|
||||
}
|
||||
if err := models.SeedCategories(db); err != nil {
|
||||
log.Fatalf("Seeding categories failed: %v", err)
|
||||
}
|
||||
|
||||
// Load categories for lookup
|
||||
var categories []models.Category
|
||||
db.Find(&categories)
|
||||
categoryMap := make(map[string]uint)
|
||||
for _, c := range categories {
|
||||
categoryMap[c.Code] = c.ID
|
||||
}
|
||||
log.Printf("Loaded %d categories", len(categories))
|
||||
|
||||
// Get all lots
|
||||
var lots []models.Lot
|
||||
if err := db.Find(&lots).Error; err != nil {
|
||||
log.Fatalf("Failed to load lots: %v", err)
|
||||
}
|
||||
log.Printf("Found %d lots to import", len(lots))
|
||||
|
||||
// Import each lot
|
||||
var imported, skipped, updated int
|
||||
for _, lot := range lots {
|
||||
category, model := ParsePartNumber(lot.LotName)
|
||||
|
||||
var categoryID *uint
|
||||
if id, ok := categoryMap[category]; ok && id > 0 {
|
||||
categoryID = &id
|
||||
} else {
|
||||
// Try to find by prefix match
|
||||
for code, id := range categoryMap {
|
||||
if strings.HasPrefix(category, code) {
|
||||
categoryID = &id
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check if already exists
|
||||
var existing models.LotMetadata
|
||||
result := db.Where("lot_name = ?", lot.LotName).First(&existing)
|
||||
|
||||
if result.Error == gorm.ErrRecordNotFound {
|
||||
// Check if there are prices in the last 90 days
|
||||
var recentPriceCount int64
|
||||
db.Model(&models.LotLog{}).
|
||||
Where("lot = ? AND date >= DATE_SUB(NOW(), INTERVAL 90 DAY)", lot.LotName).
|
||||
Count(&recentPriceCount)
|
||||
|
||||
// Default to 90 days, but use "all time" (0) if no recent prices
|
||||
periodDays := 90
|
||||
if recentPriceCount == 0 {
|
||||
periodDays = 0
|
||||
}
|
||||
|
||||
// Create new
|
||||
metadata := models.LotMetadata{
|
||||
LotName: lot.LotName,
|
||||
CategoryID: categoryID,
|
||||
Model: model,
|
||||
PricePeriodDays: periodDays,
|
||||
}
|
||||
if err := db.Create(&metadata).Error; err != nil {
|
||||
log.Printf("Failed to create metadata for %s: %v", lot.LotName, err)
|
||||
continue
|
||||
}
|
||||
imported++
|
||||
} else if result.Error == nil {
|
||||
// Update if needed
|
||||
needsUpdate := false
|
||||
|
||||
if existing.Model == "" {
|
||||
existing.Model = model
|
||||
needsUpdate = true
|
||||
}
|
||||
if existing.CategoryID == nil {
|
||||
existing.CategoryID = categoryID
|
||||
needsUpdate = true
|
||||
}
|
||||
|
||||
// Check if using default period (90 days) but no recent prices
|
||||
if existing.PricePeriodDays == 90 {
|
||||
var recentPriceCount int64
|
||||
db.Model(&models.LotLog{}).
|
||||
Where("lot = ? AND date >= DATE_SUB(NOW(), INTERVAL 90 DAY)", lot.LotName).
|
||||
Count(&recentPriceCount)
|
||||
|
||||
if recentPriceCount == 0 {
|
||||
existing.PricePeriodDays = 0
|
||||
needsUpdate = true
|
||||
}
|
||||
}
|
||||
|
||||
if needsUpdate {
|
||||
db.Save(&existing)
|
||||
updated++
|
||||
} else {
|
||||
skipped++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("Import complete: %d imported, %d updated, %d skipped", imported, updated, skipped)
|
||||
|
||||
// Show final counts
|
||||
var metadataCount int64
|
||||
db.Model(&models.LotMetadata{}).Count(&metadataCount)
|
||||
log.Printf("Total metadata records: %d", metadataCount)
|
||||
}
|
||||
|
||||
// ParsePartNumber splits a lot_name into its category prefix and the
// remaining model identifier at the first underscore.
// Examples:
//
//	"CPU_AMD_9654"           → category="CPU", model="AMD_9654"
//	"MB_INTEL_4.Sapphire_2S" → category="MB", model="INTEL_4.Sapphire_2S"
func ParsePartNumber(lotName string) (category, model string) {
	// strings.Cut returns the whole input as the "before" part when no
	// separator is present, which reproduces the original SplitN behavior:
	// the full lot name becomes the category and the model stays empty.
	category, model, _ = strings.Cut(lotName, "_")
	return category, model
}
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/appstate"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"gorm.io/driver/mysql"
|
||||
@@ -15,6 +16,7 @@ import (
|
||||
)
|
||||
|
||||
func main() {
|
||||
configPath := flag.String("config", "config.yaml", "path to config file")
|
||||
defaultLocalDBPath, err := appstate.ResolveDBPath("")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to resolve default local SQLite path: %v", err)
|
||||
@@ -26,6 +28,22 @@ func main() {
|
||||
log.Println("QuoteForge Configuration Migration Tool")
|
||||
log.Println("========================================")
|
||||
|
||||
// Load config for MariaDB connection
|
||||
cfg, err := config.Load(*configPath)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load config: %v", err)
|
||||
}
|
||||
|
||||
// Connect to MariaDB
|
||||
log.Printf("Connecting to MariaDB at %s:%d...", cfg.Database.Host, cfg.Database.Port)
|
||||
mariaDB, err := gorm.Open(mysql.Open(cfg.Database.DSN()), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to connect to MariaDB: %v", err)
|
||||
}
|
||||
log.Println("Connected to MariaDB")
|
||||
|
||||
// Initialize local SQLite
|
||||
log.Printf("Opening local SQLite at %s...", *localDBPath)
|
||||
local, err := localdb.New(*localDBPath)
|
||||
@@ -33,28 +51,6 @@ func main() {
|
||||
log.Fatalf("Failed to initialize local database: %v", err)
|
||||
}
|
||||
log.Println("Local SQLite initialized")
|
||||
if !local.HasSettings() {
|
||||
log.Fatalf("SQLite connection settings are not configured. Run qfs setup first.")
|
||||
}
|
||||
|
||||
settings, err := local.GetSettings()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load SQLite connection settings: %v", err)
|
||||
}
|
||||
dsn, err := local.GetDSN()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to build DSN from SQLite settings: %v", err)
|
||||
}
|
||||
|
||||
// Connect to MariaDB
|
||||
log.Printf("Connecting to MariaDB at %s:%d...", settings.Host, settings.Port)
|
||||
mariaDB, err := gorm.Open(mysql.Open(dsn), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to connect to MariaDB: %v", err)
|
||||
}
|
||||
log.Println("Connected to MariaDB")
|
||||
|
||||
// Count configurations in MariaDB
|
||||
var serverCount int64
|
||||
@@ -153,7 +149,23 @@ func main() {
|
||||
log.Printf(" Skipped: %d", skipped)
|
||||
log.Printf(" Errors: %d", errors)
|
||||
|
||||
fmt.Println("\nDone! You can now run the server with: go run ./cmd/qfs")
|
||||
// Save connection settings to local SQLite if not exists
|
||||
if !local.HasSettings() {
|
||||
log.Println("\nSaving connection settings to local SQLite...")
|
||||
if err := local.SaveSettings(
|
||||
cfg.Database.Host,
|
||||
cfg.Database.Port,
|
||||
cfg.Database.Name,
|
||||
cfg.Database.User,
|
||||
cfg.Database.Password,
|
||||
); err != nil {
|
||||
log.Printf("Warning: Failed to save settings: %v", err)
|
||||
} else {
|
||||
log.Println("Connection settings saved")
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("\nDone! You can now run the server with: go run ./cmd/server")
|
||||
}
|
||||
|
||||
func derefUint(v *uint) uint {
|
||||
|
||||
@@ -10,8 +10,7 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/appstate"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"github.com/google/uuid"
|
||||
"gorm.io/driver/mysql"
|
||||
@@ -39,29 +38,17 @@ type migrationAction struct {
|
||||
}
|
||||
|
||||
func main() {
|
||||
defaultLocalDBPath, err := appstate.ResolveDBPath("")
|
||||
if err != nil {
|
||||
log.Fatalf("failed to resolve default local SQLite path: %v", err)
|
||||
}
|
||||
localDBPath := flag.String("localdb", defaultLocalDBPath, "path to local SQLite database (default: user state dir or QFS_DB_PATH)")
|
||||
configPath := flag.String("config", "config.yaml", "path to config file")
|
||||
apply := flag.Bool("apply", false, "apply migration (default is preview only)")
|
||||
yes := flag.Bool("yes", false, "skip interactive confirmation (works only with -apply)")
|
||||
flag.Parse()
|
||||
|
||||
local, err := localdb.New(*localDBPath)
|
||||
cfg, err := config.Load(*configPath)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize local database: %v", err)
|
||||
log.Fatalf("failed to load config: %v", err)
|
||||
}
|
||||
if !local.HasSettings() {
|
||||
log.Fatalf("SQLite connection settings are not configured. Run qfs setup first.")
|
||||
}
|
||||
dsn, err := local.GetDSN()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to build DSN from SQLite settings: %v", err)
|
||||
}
|
||||
dbUser := strings.TrimSpace(local.GetDBUser())
|
||||
|
||||
db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{
|
||||
db, err := gorm.Open(mysql.Open(cfg.Database.DSN()), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -72,7 +59,7 @@ func main() {
|
||||
log.Fatalf("precheck failed: %v", err)
|
||||
}
|
||||
|
||||
actions, existingProjects, err := buildPlan(db, dbUser)
|
||||
actions, existingProjects, err := buildPlan(db, cfg.Database.User)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to build migration plan: %v", err)
|
||||
}
|
||||
@@ -163,7 +150,7 @@ func buildPlan(db *gorm.DB, fallbackOwner string) ([]migrationAction, map[string
|
||||
}
|
||||
for i := range projects {
|
||||
p := projects[i]
|
||||
existingProjects[projectKey(p.OwnerUsername, derefString(p.Name))] = &p
|
||||
existingProjects[projectKey(p.OwnerUsername, p.Name)] = &p
|
||||
}
|
||||
}
|
||||
|
||||
@@ -253,13 +240,12 @@ func executePlan(db *gorm.DB, actions []migrationAction, existingProjects map[st
|
||||
|
||||
for _, action := range actions {
|
||||
key := projectKey(action.OwnerUsername, action.TargetProjectName)
|
||||
project := projectCache[key]
|
||||
project := projectCache[key]
|
||||
if project == nil {
|
||||
project = &models.Project{
|
||||
UUID: uuid.NewString(),
|
||||
OwnerUsername: action.OwnerUsername,
|
||||
Code: action.TargetProjectName,
|
||||
Name: ptrString(action.TargetProjectName),
|
||||
Name: action.TargetProjectName,
|
||||
IsActive: true,
|
||||
IsSystem: false,
|
||||
}
|
||||
@@ -269,7 +255,7 @@ func executePlan(db *gorm.DB, actions []migrationAction, existingProjects map[st
|
||||
projectCache[key] = project
|
||||
} else if !project.IsActive {
|
||||
if err := tx.Model(&models.Project{}).Where("uuid = ?", project.UUID).Update("is_active", true).Error; err != nil {
|
||||
return fmt.Errorf("reactivate project %s (%s): %w", derefString(project.Name), project.UUID, err)
|
||||
return fmt.Errorf("reactivate project %s (%s): %w", project.Name, project.UUID, err)
|
||||
}
|
||||
project.IsActive = true
|
||||
}
|
||||
@@ -295,14 +281,3 @@ func setKeys(set map[string]struct{}) []string {
|
||||
func projectKey(owner, name string) string {
|
||||
return owner + "||" + name
|
||||
}
|
||||
|
||||
func derefString(value *string) string {
|
||||
if value == nil {
|
||||
return ""
|
||||
}
|
||||
return *value
|
||||
}
|
||||
|
||||
func ptrString(value string) *string {
|
||||
return &value
|
||||
}
|
||||
|
||||
@@ -1,173 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/appstate"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"gorm.io/driver/mysql"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
)
|
||||
|
||||
type projectTimestampRow struct {
|
||||
UUID string
|
||||
UpdatedAt time.Time
|
||||
}
|
||||
|
||||
type updatePlanRow struct {
|
||||
UUID string
|
||||
Code string
|
||||
Variant string
|
||||
LocalUpdatedAt time.Time
|
||||
ServerUpdatedAt time.Time
|
||||
}
|
||||
|
||||
func main() {
|
||||
defaultLocalDBPath, err := appstate.ResolveDBPath("")
|
||||
if err != nil {
|
||||
log.Fatalf("failed to resolve default local SQLite path: %v", err)
|
||||
}
|
||||
|
||||
localDBPath := flag.String("localdb", defaultLocalDBPath, "path to local SQLite database (default: user state dir or QFS_DB_PATH)")
|
||||
apply := flag.Bool("apply", false, "apply updates to local SQLite (default is preview only)")
|
||||
flag.Parse()
|
||||
|
||||
local, err := localdb.New(*localDBPath)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize local database: %v", err)
|
||||
}
|
||||
defer local.Close()
|
||||
|
||||
if !local.HasSettings() {
|
||||
log.Fatalf("SQLite connection settings are not configured. Run qfs setup first.")
|
||||
}
|
||||
|
||||
dsn, err := local.GetDSN()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to build DSN from SQLite settings: %v", err)
|
||||
}
|
||||
|
||||
db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("failed to connect to MariaDB: %v", err)
|
||||
}
|
||||
|
||||
serverRows, err := loadServerProjects(db)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to load server projects: %v", err)
|
||||
}
|
||||
|
||||
localProjects, err := local.GetAllProjects(true)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to load local projects: %v", err)
|
||||
}
|
||||
|
||||
plan := buildUpdatePlan(localProjects, serverRows)
|
||||
printPlan(plan, *apply)
|
||||
|
||||
if !*apply || len(plan) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
updated := 0
|
||||
for i := range plan {
|
||||
project, err := local.GetProjectByUUID(plan[i].UUID)
|
||||
if err != nil {
|
||||
log.Printf("skip %s: load local project: %v", plan[i].UUID, err)
|
||||
continue
|
||||
}
|
||||
project.UpdatedAt = plan[i].ServerUpdatedAt
|
||||
if err := local.SaveProjectPreservingUpdatedAt(project); err != nil {
|
||||
log.Printf("skip %s: save local project: %v", plan[i].UUID, err)
|
||||
continue
|
||||
}
|
||||
updated++
|
||||
}
|
||||
|
||||
log.Printf("updated %d local project timestamps", updated)
|
||||
}
|
||||
|
||||
func loadServerProjects(db *gorm.DB) (map[string]time.Time, error) {
|
||||
var rows []projectTimestampRow
|
||||
if err := db.Model(&models.Project{}).
|
||||
Select("uuid, updated_at").
|
||||
Find(&rows).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out := make(map[string]time.Time, len(rows))
|
||||
for _, row := range rows {
|
||||
if row.UUID == "" {
|
||||
continue
|
||||
}
|
||||
out[row.UUID] = row.UpdatedAt
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func buildUpdatePlan(localProjects []localdb.LocalProject, serverRows map[string]time.Time) []updatePlanRow {
|
||||
plan := make([]updatePlanRow, 0)
|
||||
for i := range localProjects {
|
||||
project := localProjects[i]
|
||||
serverUpdatedAt, ok := serverRows[project.UUID]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if project.UpdatedAt.Equal(serverUpdatedAt) {
|
||||
continue
|
||||
}
|
||||
plan = append(plan, updatePlanRow{
|
||||
UUID: project.UUID,
|
||||
Code: project.Code,
|
||||
Variant: project.Variant,
|
||||
LocalUpdatedAt: project.UpdatedAt,
|
||||
ServerUpdatedAt: serverUpdatedAt,
|
||||
})
|
||||
}
|
||||
|
||||
sort.Slice(plan, func(i, j int) bool {
|
||||
if plan[i].Code != plan[j].Code {
|
||||
return plan[i].Code < plan[j].Code
|
||||
}
|
||||
return plan[i].Variant < plan[j].Variant
|
||||
})
|
||||
|
||||
return plan
|
||||
}
|
||||
|
||||
func printPlan(plan []updatePlanRow, apply bool) {
|
||||
mode := "preview"
|
||||
if apply {
|
||||
mode = "apply"
|
||||
}
|
||||
log.Printf("project updated_at resync mode=%s changes=%d", mode, len(plan))
|
||||
if len(plan) == 0 {
|
||||
log.Printf("no local project timestamps need resync")
|
||||
return
|
||||
}
|
||||
for _, row := range plan {
|
||||
variant := row.Variant
|
||||
if variant == "" {
|
||||
variant = "main"
|
||||
}
|
||||
log.Printf("%s [%s] local=%s server=%s", row.Code, variant, formatStamp(row.LocalUpdatedAt), formatStamp(row.ServerUpdatedAt))
|
||||
}
|
||||
if !apply {
|
||||
fmt.Println("Re-run with -apply to write server updated_at into local SQLite.")
|
||||
}
|
||||
}
|
||||
|
||||
func formatStamp(value time.Time) string {
|
||||
if value.IsZero() {
|
||||
return "zero"
|
||||
}
|
||||
return value.Format(time.RFC3339)
|
||||
}
|
||||
@@ -1,106 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||
)
|
||||
|
||||
func TestMigrateConfigFileToRuntimeShapeDropsDeprecatedSections(t *testing.T) {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
path := filepath.Join(dir, "config.yaml")
|
||||
|
||||
legacy := `server:
|
||||
host: "0.0.0.0"
|
||||
port: 9191
|
||||
database:
|
||||
host: "legacy-db"
|
||||
port: 3306
|
||||
name: "RFQ_LOG"
|
||||
user: "old"
|
||||
password: "REDACTED_TEST_PASSWORD"
|
||||
pricing:
|
||||
default_method: "median"
|
||||
logging:
|
||||
level: "debug"
|
||||
format: "text"
|
||||
output: "stdout"
|
||||
`
|
||||
if err := os.WriteFile(path, []byte(legacy), 0644); err != nil {
|
||||
t.Fatalf("write legacy config: %v", err)
|
||||
}
|
||||
|
||||
cfg, err := config.Load(path)
|
||||
if err != nil {
|
||||
t.Fatalf("load legacy config: %v", err)
|
||||
}
|
||||
setConfigDefaults(cfg)
|
||||
cfg.Server.Host, _, err = normalizeLoopbackServerHost(cfg.Server.Host)
|
||||
if err != nil {
|
||||
t.Fatalf("normalize server host: %v", err)
|
||||
}
|
||||
if err := migrateConfigFileToRuntimeShape(path, cfg); err != nil {
|
||||
t.Fatalf("migrate config: %v", err)
|
||||
}
|
||||
|
||||
got, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("read migrated config: %v", err)
|
||||
}
|
||||
text := string(got)
|
||||
if strings.Contains(text, "database:") {
|
||||
t.Fatalf("migrated config still contains deprecated database section:\n%s", text)
|
||||
}
|
||||
if strings.Contains(text, "pricing:") {
|
||||
t.Fatalf("migrated config still contains deprecated pricing section:\n%s", text)
|
||||
}
|
||||
if !strings.Contains(text, "server:") || !strings.Contains(text, "logging:") {
|
||||
t.Fatalf("migrated config missing required sections:\n%s", text)
|
||||
}
|
||||
if !strings.Contains(text, "port: 9191") {
|
||||
t.Fatalf("migrated config did not preserve server port:\n%s", text)
|
||||
}
|
||||
if !strings.Contains(text, "host: 127.0.0.1") {
|
||||
t.Fatalf("migrated config did not normalize server host:\n%s", text)
|
||||
}
|
||||
if !strings.Contains(text, "level: debug") {
|
||||
t.Fatalf("migrated config did not preserve logging level:\n%s", text)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeLoopbackServerHost(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cases := []struct {
|
||||
host string
|
||||
want string
|
||||
wantChanged bool
|
||||
wantErr bool
|
||||
}{
|
||||
{host: "127.0.0.1", want: "127.0.0.1", wantChanged: false, wantErr: false},
|
||||
{host: "localhost", want: "127.0.0.1", wantChanged: true, wantErr: false},
|
||||
{host: "::1", want: "127.0.0.1", wantChanged: true, wantErr: false},
|
||||
{host: "0.0.0.0", want: "127.0.0.1", wantChanged: true, wantErr: false},
|
||||
{host: "192.168.1.10", want: "127.0.0.1", wantChanged: true, wantErr: false},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
got, changed, err := normalizeLoopbackServerHost(tc.host)
|
||||
if tc.wantErr && err == nil {
|
||||
t.Fatalf("expected error for host %q", tc.host)
|
||||
}
|
||||
if !tc.wantErr && err != nil {
|
||||
t.Fatalf("unexpected error for host %q: %v", tc.host, err)
|
||||
}
|
||||
if got != tc.want {
|
||||
t.Fatalf("unexpected normalized host for %q: got %q want %q", tc.host, got, tc.want)
|
||||
}
|
||||
if changed != tc.wantChanged {
|
||||
t.Fatalf("unexpected changed flag for %q: got %t want %t", tc.host, changed, tc.wantChanged)
|
||||
}
|
||||
}
|
||||
}
|
||||
1120
cmd/qfs/main.go
1120
cmd/qfs/main.go
File diff suppressed because it is too large
Load Diff
@@ -1,48 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func TestRequestLoggerDoesNotLogResponseBody(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
var logBuffer bytes.Buffer
|
||||
previousLogger := slog.Default()
|
||||
slog.SetDefault(slog.New(slog.NewTextHandler(&logBuffer, &slog.HandlerOptions{})))
|
||||
defer slog.SetDefault(previousLogger)
|
||||
|
||||
router := gin.New()
|
||||
router.Use(requestLogger())
|
||||
router.GET("/fail", func(c *gin.Context) {
|
||||
_ = c.Error(errors.New("root cause"))
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "do not log this body"})
|
||||
})
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest(http.MethodGet, "/fail?debug=1", nil)
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
if rec.Code != http.StatusBadRequest {
|
||||
t.Fatalf("expected 400, got %d", rec.Code)
|
||||
}
|
||||
|
||||
logOutput := logBuffer.String()
|
||||
if !strings.Contains(logOutput, "request failed") {
|
||||
t.Fatalf("expected request failure log, got %q", logOutput)
|
||||
}
|
||||
if strings.Contains(logOutput, "do not log this body") {
|
||||
t.Fatalf("response body leaked into logs: %q", logOutput)
|
||||
}
|
||||
if !strings.Contains(logOutput, "root cause") {
|
||||
t.Fatalf("expected error details in logs, got %q", logOutput)
|
||||
}
|
||||
}
|
||||
@@ -3,12 +3,10 @@ package main
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||
@@ -39,7 +37,7 @@ func TestConfigurationVersioningAPI(t *testing.T) {
|
||||
|
||||
cfg := &config.Config{}
|
||||
setConfigDefaults(cfg)
|
||||
router, _, err := setupRouter(cfg, local, connMgr, "tester", nil)
|
||||
router, _, err := setupRouter(cfg, local, connMgr, nil, "tester", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("setup router: %v", err)
|
||||
}
|
||||
@@ -79,7 +77,7 @@ func TestConfigurationVersioningAPI(t *testing.T) {
|
||||
if err := json.Unmarshal(rbRec.Body.Bytes(), &rbResp); err != nil {
|
||||
t.Fatalf("unmarshal rollback response: %v", err)
|
||||
}
|
||||
if rbResp.Message == "" || rbResp.CurrentVersion.VersionNo != 2 {
|
||||
if rbResp.Message == "" || rbResp.CurrentVersion.VersionNo != 3 {
|
||||
t.Fatalf("unexpected rollback response: %+v", rbResp)
|
||||
}
|
||||
|
||||
@@ -146,12 +144,12 @@ func TestProjectArchiveHidesConfigsAndCloneIntoProject(t *testing.T) {
|
||||
|
||||
cfg := &config.Config{}
|
||||
setConfigDefaults(cfg)
|
||||
router, _, err := setupRouter(cfg, local, connMgr, "tester", nil)
|
||||
router, _, err := setupRouter(cfg, local, connMgr, nil, "tester", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("setup router: %v", err)
|
||||
}
|
||||
|
||||
createProjectReq := httptest.NewRequest(http.MethodPost, "/api/projects", bytes.NewReader([]byte(`{"name":"P1","code":"P1"}`)))
|
||||
createProjectReq := httptest.NewRequest(http.MethodPost, "/api/projects", bytes.NewReader([]byte(`{"name":"P1"}`)))
|
||||
createProjectReq.Header.Set("Content-Type", "application/json")
|
||||
createProjectRec := httptest.NewRecorder()
|
||||
router.ServeHTTP(createProjectRec, createProjectReq)
|
||||
@@ -240,12 +238,12 @@ func TestConfigMoveToProjectEndpoint(t *testing.T) {
|
||||
local, connMgr, _ := newAPITestStack(t)
|
||||
cfg := &config.Config{}
|
||||
setConfigDefaults(cfg)
|
||||
router, _, err := setupRouter(cfg, local, connMgr, "tester", nil)
|
||||
router, _, err := setupRouter(cfg, local, connMgr, nil, "tester", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("setup router: %v", err)
|
||||
}
|
||||
|
||||
createProjectReq := httptest.NewRequest(http.MethodPost, "/api/projects", bytes.NewReader([]byte(`{"name":"Move Project","code":"MOVE"}`)))
|
||||
createProjectReq := httptest.NewRequest(http.MethodPost, "/api/projects", bytes.NewReader([]byte(`{"name":"Move Project"}`)))
|
||||
createProjectReq.Header.Set("Content-Type", "application/json")
|
||||
createProjectRec := httptest.NewRecorder()
|
||||
router.ServeHTTP(createProjectRec, createProjectReq)
|
||||
@@ -292,88 +290,6 @@ func TestConfigMoveToProjectEndpoint(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestVendorImportRejectsOversizedUpload(t *testing.T) {
|
||||
moveToRepoRoot(t)
|
||||
|
||||
prevLimit := vendorImportMaxBytes
|
||||
vendorImportMaxBytes = 128
|
||||
defer func() { vendorImportMaxBytes = prevLimit }()
|
||||
|
||||
local, connMgr, _ := newAPITestStack(t)
|
||||
cfg := &config.Config{}
|
||||
setConfigDefaults(cfg)
|
||||
router, _, err := setupRouter(cfg, local, connMgr, "tester", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("setup router: %v", err)
|
||||
}
|
||||
|
||||
createProjectReq := httptest.NewRequest(http.MethodPost, "/api/projects", bytes.NewReader([]byte(`{"name":"Import Project","code":"IMP"}`)))
|
||||
createProjectReq.Header.Set("Content-Type", "application/json")
|
||||
createProjectRec := httptest.NewRecorder()
|
||||
router.ServeHTTP(createProjectRec, createProjectReq)
|
||||
if createProjectRec.Code != http.StatusCreated {
|
||||
t.Fatalf("create project status=%d body=%s", createProjectRec.Code, createProjectRec.Body.String())
|
||||
}
|
||||
|
||||
var project models.Project
|
||||
if err := json.Unmarshal(createProjectRec.Body.Bytes(), &project); err != nil {
|
||||
t.Fatalf("unmarshal project: %v", err)
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
writer := multipart.NewWriter(&body)
|
||||
part, err := writer.CreateFormFile("file", "huge.xml")
|
||||
if err != nil {
|
||||
t.Fatalf("create form file: %v", err)
|
||||
}
|
||||
payload := "<CFXML>" + strings.Repeat("A", int(vendorImportMaxBytes)+1) + "</CFXML>"
|
||||
if _, err := part.Write([]byte(payload)); err != nil {
|
||||
t.Fatalf("write multipart payload: %v", err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatalf("close multipart writer: %v", err)
|
||||
}
|
||||
|
||||
req := httptest.NewRequest(http.MethodPost, "/api/projects/"+project.UUID+"/vendor-import", &body)
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
rec := httptest.NewRecorder()
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
if rec.Code != http.StatusBadRequest {
|
||||
t.Fatalf("expected 400 for oversized upload, got %d body=%s", rec.Code, rec.Body.String())
|
||||
}
|
||||
if !strings.Contains(rec.Body.String(), "1 GiB") {
|
||||
t.Fatalf("expected size limit message, got %s", rec.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateConfigMalformedJSONReturnsGenericError(t *testing.T) {
|
||||
moveToRepoRoot(t)
|
||||
|
||||
local, connMgr, _ := newAPITestStack(t)
|
||||
cfg := &config.Config{}
|
||||
setConfigDefaults(cfg)
|
||||
router, _, err := setupRouter(cfg, local, connMgr, "tester", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("setup router: %v", err)
|
||||
}
|
||||
|
||||
req := httptest.NewRequest(http.MethodPost, "/api/configs", bytes.NewReader([]byte(`{"name":`)))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
rec := httptest.NewRecorder()
|
||||
router.ServeHTTP(rec, req)
|
||||
|
||||
if rec.Code != http.StatusBadRequest {
|
||||
t.Fatalf("expected 400 for malformed json, got %d body=%s", rec.Code, rec.Body.String())
|
||||
}
|
||||
if strings.Contains(strings.ToLower(rec.Body.String()), "unexpected eof") {
|
||||
t.Fatalf("expected sanitized error body, got %s", rec.Body.String())
|
||||
}
|
||||
if !strings.Contains(rec.Body.String(), "invalid request") {
|
||||
t.Fatalf("expected generic invalid request message, got %s", rec.Body.String())
|
||||
}
|
||||
}
|
||||
|
||||
func newAPITestStack(t *testing.T) (*localdb.LocalDB, *db.ConnectionManager, *services.LocalConfigurationService) {
|
||||
t.Helper()
|
||||
|
||||
|
||||
@@ -1,18 +1,58 @@
|
||||
# QuoteForge runtime config
|
||||
# Runtime creates a minimal config automatically on first start.
|
||||
# This file is only a reference template.
|
||||
# QuoteForge Configuration
|
||||
# Copy this file to config.yaml and update values
|
||||
|
||||
server:
|
||||
host: "127.0.0.1" # Loopback only; remote HTTP binding is unsupported
|
||||
host: "127.0.0.1" # Use 0.0.0.0 to listen on all interfaces
|
||||
port: 8080
|
||||
mode: "release" # debug | release
|
||||
read_timeout: "30s"
|
||||
write_timeout: "30s"
|
||||
|
||||
backup:
|
||||
time: "00:00"
|
||||
database:
|
||||
host: "localhost"
|
||||
port: 3306
|
||||
name: "RFQ_LOG"
|
||||
user: "quoteforge"
|
||||
password: "CHANGE_ME"
|
||||
max_open_conns: 25
|
||||
max_idle_conns: 5
|
||||
conn_max_lifetime: "5m"
|
||||
|
||||
auth:
|
||||
jwt_secret: "CHANGE_ME_MIN_32_CHARACTERS_LONG"
|
||||
token_expiry: "24h"
|
||||
refresh_expiry: "168h" # 7 days
|
||||
|
||||
pricing:
|
||||
default_method: "weighted_median" # median | average | weighted_median
|
||||
default_period_days: 90
|
||||
freshness_green_days: 30
|
||||
freshness_yellow_days: 60
|
||||
freshness_red_days: 90
|
||||
min_quotes_for_median: 3
|
||||
popularity_decay_days: 180
|
||||
|
||||
export:
|
||||
temp_dir: "/tmp/quoteforge-exports"
|
||||
max_file_age: "1h"
|
||||
company_name: "Your Company Name"
|
||||
|
||||
alerts:
|
||||
enabled: true
|
||||
check_interval: "1h"
|
||||
high_demand_threshold: 5 # КП за 30 дней
|
||||
trending_threshold_percent: 50 # % роста для алерта
|
||||
|
||||
notifications:
|
||||
email_enabled: false
|
||||
smtp_host: "smtp.example.com"
|
||||
smtp_port: 587
|
||||
smtp_user: ""
|
||||
smtp_password: ""
|
||||
from_address: "quoteforge@example.com"
|
||||
|
||||
logging:
|
||||
level: "info" # debug | info | warn | error
|
||||
format: "json" # json | text
|
||||
output: "stdout" # stdout | stderr | /path/to/file
|
||||
format: "json" # json | text
|
||||
output: "stdout" # stdout | file
|
||||
file_path: "/var/log/quoteforge/app.log"
|
||||
|
||||
15
crontab
Normal file
15
crontab
Normal file
@@ -0,0 +1,15 @@
|
||||
# Cron jobs for QuoteForge
|
||||
# Run alerts check every hour
|
||||
0 * * * * /app/quoteforge-cron -job=alerts
|
||||
|
||||
# Run price updates daily at 2 AM
|
||||
0 2 * * * /app/quoteforge-cron -job=update-prices
|
||||
|
||||
# Reset weekly counters every Sunday at 1 AM
|
||||
0 1 * * 0 /app/quoteforge-cron -job=reset-counters
|
||||
|
||||
# Update popularity scores daily at 3 AM
|
||||
0 3 * * * /app/quoteforge-cron -job=update-popularity
|
||||
|
||||
# Log rotation (optional)
|
||||
# 0 0 * * * /usr/bin/logrotate /etc/logrotate.conf
|
||||
BIN
dist/qfs-darwin-amd64
vendored
BIN
dist/qfs-darwin-amd64
vendored
Binary file not shown.
BIN
dist/qfs-darwin-arm64
vendored
BIN
dist/qfs-darwin-arm64
vendored
Binary file not shown.
BIN
dist/qfs-linux-amd64
vendored
BIN
dist/qfs-linux-amd64
vendored
Binary file not shown.
BIN
dist/qfs-windows-amd64.exe
vendored
BIN
dist/qfs-windows-amd64.exe
vendored
Binary file not shown.
5
go.mod
5
go.mod
@@ -5,8 +5,9 @@ go 1.24.0
|
||||
require (
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/glebarez/sqlite v1.11.0
|
||||
github.com/go-sql-driver/mysql v1.7.1
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0
|
||||
github.com/google/uuid v1.6.0
|
||||
golang.org/x/crypto v0.43.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gorm.io/driver/mysql v1.5.2
|
||||
gorm.io/gorm v1.25.7
|
||||
@@ -22,6 +23,7 @@ require (
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.14.0 // indirect
|
||||
github.com/go-sql-driver/mysql v1.7.1 // indirect
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
@@ -37,7 +39,6 @@ require (
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
golang.org/x/arch v0.3.0 // indirect
|
||||
golang.org/x/crypto v0.43.0 // indirect
|
||||
golang.org/x/net v0.46.0 // indirect
|
||||
golang.org/x/sys v0.37.0 // indirect
|
||||
golang.org/x/text v0.30.0 // indirect
|
||||
|
||||
2
go.sum
2
go.sum
@@ -32,6 +32,8 @@ github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrt
|
||||
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
|
||||
@@ -1,393 +0,0 @@
|
||||
package appstate
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/glebarez/sqlite"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
)
|
||||
|
||||
type backupPeriod struct {
|
||||
name string
|
||||
retention int
|
||||
key func(time.Time) string
|
||||
date func(time.Time) string
|
||||
}
|
||||
|
||||
var backupPeriods = []backupPeriod{
|
||||
{
|
||||
name: "daily",
|
||||
retention: 7,
|
||||
key: func(t time.Time) string {
|
||||
return t.Format("2006-01-02")
|
||||
},
|
||||
date: func(t time.Time) string {
|
||||
return t.Format("2006-01-02")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "weekly",
|
||||
retention: 4,
|
||||
key: func(t time.Time) string {
|
||||
y, w := t.ISOWeek()
|
||||
return fmt.Sprintf("%04d-W%02d", y, w)
|
||||
},
|
||||
date: func(t time.Time) string {
|
||||
return t.Format("2006-01-02")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "monthly",
|
||||
retention: 12,
|
||||
key: func(t time.Time) string {
|
||||
return t.Format("2006-01")
|
||||
},
|
||||
date: func(t time.Time) string {
|
||||
return t.Format("2006-01-02")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "yearly",
|
||||
retention: 10,
|
||||
key: func(t time.Time) string {
|
||||
return t.Format("2006")
|
||||
},
|
||||
date: func(t time.Time) string {
|
||||
return t.Format("2006-01-02")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
const (
|
||||
envBackupDisable = "QFS_BACKUP_DISABLE"
|
||||
envBackupDir = "QFS_BACKUP_DIR"
|
||||
)
|
||||
|
||||
var backupNow = time.Now
|
||||
|
||||
// EnsureRotatingLocalBackup creates or refreshes daily/weekly/monthly/yearly backups
|
||||
// for the local database and config. It keeps a limited number per period.
|
||||
func EnsureRotatingLocalBackup(dbPath, configPath string) ([]string, error) {
|
||||
if isBackupDisabled() {
|
||||
return nil, nil
|
||||
}
|
||||
if dbPath == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if _, err := os.Stat(dbPath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("stat db: %w", err)
|
||||
}
|
||||
|
||||
root := resolveBackupRoot(dbPath)
|
||||
if err := validateBackupRoot(root); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
now := backupNow()
|
||||
|
||||
created := make([]string, 0)
|
||||
for _, period := range backupPeriods {
|
||||
newFiles, err := ensurePeriodBackup(root, period, now, dbPath, configPath)
|
||||
if err != nil {
|
||||
return created, err
|
||||
}
|
||||
if len(newFiles) > 0 {
|
||||
created = append(created, newFiles...)
|
||||
}
|
||||
}
|
||||
|
||||
return created, nil
|
||||
}
|
||||
|
||||
func resolveBackupRoot(dbPath string) string {
|
||||
if fromEnv := strings.TrimSpace(os.Getenv(envBackupDir)); fromEnv != "" {
|
||||
return filepath.Clean(fromEnv)
|
||||
}
|
||||
return filepath.Join(filepath.Dir(dbPath), "backups")
|
||||
}
|
||||
|
||||
func validateBackupRoot(root string) error {
|
||||
absRoot, err := filepath.Abs(root)
|
||||
if err != nil {
|
||||
return fmt.Errorf("resolve backup root: %w", err)
|
||||
}
|
||||
|
||||
if gitRoot, ok := findGitWorktreeRoot(absRoot); ok {
|
||||
return fmt.Errorf("backup root must stay outside git worktree: %s is inside %s", absRoot, gitRoot)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func findGitWorktreeRoot(path string) (string, bool) {
|
||||
current := filepath.Clean(path)
|
||||
info, err := os.Stat(current)
|
||||
if err == nil && !info.IsDir() {
|
||||
current = filepath.Dir(current)
|
||||
}
|
||||
|
||||
for {
|
||||
gitPath := filepath.Join(current, ".git")
|
||||
if _, err := os.Stat(gitPath); err == nil {
|
||||
return current, true
|
||||
}
|
||||
|
||||
parent := filepath.Dir(current)
|
||||
if parent == current {
|
||||
return "", false
|
||||
}
|
||||
current = parent
|
||||
}
|
||||
}
|
||||
|
||||
func isBackupDisabled() bool {
|
||||
val := strings.ToLower(strings.TrimSpace(os.Getenv(envBackupDisable)))
|
||||
return val == "1" || val == "true" || val == "yes"
|
||||
}
|
||||
|
||||
func ensurePeriodBackup(root string, period backupPeriod, now time.Time, dbPath, configPath string) ([]string, error) {
|
||||
key := period.key(now)
|
||||
periodDir := filepath.Join(root, period.name)
|
||||
if err := os.MkdirAll(periodDir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("create %s backup dir: %w", period.name, err)
|
||||
}
|
||||
|
||||
if hasBackupForKey(periodDir, key) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
archiveName := fmt.Sprintf("qfs-backp-%s.zip", period.date(now))
|
||||
archivePath := filepath.Join(periodDir, archiveName)
|
||||
|
||||
if err := createBackupArchive(archivePath, dbPath, configPath); err != nil {
|
||||
return nil, fmt.Errorf("create %s backup archive: %w", period.name, err)
|
||||
}
|
||||
|
||||
if err := writePeriodMarker(periodDir, key); err != nil {
|
||||
return []string{archivePath}, err
|
||||
}
|
||||
|
||||
if err := pruneOldBackups(periodDir, period.retention); err != nil {
|
||||
return []string{archivePath}, err
|
||||
}
|
||||
|
||||
return []string{archivePath}, nil
|
||||
}
|
||||
|
||||
func hasBackupForKey(periodDir, key string) bool {
|
||||
marker := periodMarker{Key: ""}
|
||||
data, err := os.ReadFile(periodMarkerPath(periodDir))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if err := json.Unmarshal(data, &marker); err != nil {
|
||||
return false
|
||||
}
|
||||
return marker.Key == key
|
||||
}
|
||||
|
||||
type periodMarker struct {
|
||||
Key string `json:"key"`
|
||||
}
|
||||
|
||||
func periodMarkerPath(periodDir string) string {
|
||||
return filepath.Join(periodDir, ".period.json")
|
||||
}
|
||||
|
||||
func writePeriodMarker(periodDir, key string) error {
|
||||
data, err := json.MarshalIndent(periodMarker{Key: key}, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(periodMarkerPath(periodDir), data, 0644)
|
||||
}
|
||||
|
||||
func pruneOldBackups(periodDir string, keep int) error {
|
||||
entries, err := os.ReadDir(periodDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("read backups dir: %w", err)
|
||||
}
|
||||
|
||||
files := make([]os.DirEntry, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
if strings.HasSuffix(entry.Name(), ".zip") {
|
||||
files = append(files, entry)
|
||||
}
|
||||
}
|
||||
|
||||
if len(files) <= keep {
|
||||
return nil
|
||||
}
|
||||
|
||||
sort.Slice(files, func(i, j int) bool {
|
||||
infoI, errI := files[i].Info()
|
||||
infoJ, errJ := files[j].Info()
|
||||
if errI != nil || errJ != nil {
|
||||
return files[i].Name() < files[j].Name()
|
||||
}
|
||||
return infoI.ModTime().Before(infoJ.ModTime())
|
||||
})
|
||||
|
||||
for i := 0; i < len(files)-keep; i++ {
|
||||
path := filepath.Join(periodDir, files[i].Name())
|
||||
if err := os.Remove(path); err != nil {
|
||||
return fmt.Errorf("remove old backup %s: %w", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createBackupArchive(destPath, dbPath, configPath string) error {
|
||||
snapshotPath, cleanup, err := createSQLiteSnapshot(dbPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
file, err := os.Create(destPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
zipWriter := zip.NewWriter(file)
|
||||
if err := addZipFileAs(zipWriter, snapshotPath, filepath.Base(dbPath)); err != nil {
|
||||
_ = zipWriter.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
if strings.TrimSpace(configPath) != "" {
|
||||
_ = addZipOptionalFile(zipWriter, configPath)
|
||||
}
|
||||
|
||||
if err := zipWriter.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
return file.Sync()
|
||||
}
|
||||
|
||||
func createSQLiteSnapshot(dbPath string) (string, func(), error) {
|
||||
tempFile, err := os.CreateTemp("", "qfs-backup-*.db")
|
||||
if err != nil {
|
||||
return "", func() {}, err
|
||||
}
|
||||
tempPath := tempFile.Name()
|
||||
if err := tempFile.Close(); err != nil {
|
||||
_ = os.Remove(tempPath)
|
||||
return "", func() {}, err
|
||||
}
|
||||
if err := os.Remove(tempPath); err != nil && !os.IsNotExist(err) {
|
||||
return "", func() {}, err
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
_ = os.Remove(tempPath)
|
||||
}
|
||||
|
||||
db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
if err != nil {
|
||||
cleanup()
|
||||
return "", func() {}, err
|
||||
}
|
||||
|
||||
sqlDB, err := db.DB()
|
||||
if err != nil {
|
||||
cleanup()
|
||||
return "", func() {}, err
|
||||
}
|
||||
defer sqlDB.Close()
|
||||
|
||||
if err := db.Exec("PRAGMA busy_timeout = 5000").Error; err != nil {
|
||||
cleanup()
|
||||
return "", func() {}, fmt.Errorf("configure sqlite busy_timeout: %w", err)
|
||||
}
|
||||
|
||||
literalPath := strings.ReplaceAll(tempPath, "'", "''")
|
||||
if err := vacuumIntoWithRetry(db, literalPath); err != nil {
|
||||
cleanup()
|
||||
return "", func() {}, err
|
||||
}
|
||||
|
||||
return tempPath, cleanup, nil
|
||||
}
|
||||
|
||||
func vacuumIntoWithRetry(db *gorm.DB, literalPath string) error {
|
||||
var lastErr error
|
||||
for attempt := 0; attempt < 3; attempt++ {
|
||||
if err := db.Exec("VACUUM INTO '" + literalPath + "'").Error; err != nil {
|
||||
lastErr = err
|
||||
if !isSQLiteBusyError(err) {
|
||||
return fmt.Errorf("create sqlite snapshot: %w", err)
|
||||
}
|
||||
time.Sleep(time.Duration(attempt+1) * 250 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("create sqlite snapshot after retries: %w", lastErr)
|
||||
}
|
||||
|
||||
func isSQLiteBusyError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
lower := strings.ToLower(err.Error())
|
||||
return strings.Contains(lower, "database is locked") || strings.Contains(lower, "database is busy")
|
||||
}
|
||||
|
||||
func addZipOptionalFile(writer *zip.Writer, path string) error {
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
return nil
|
||||
}
|
||||
return addZipFile(writer, path)
|
||||
}
|
||||
|
||||
func addZipFile(writer *zip.Writer, path string) error {
|
||||
return addZipFileAs(writer, path, filepath.Base(path))
|
||||
}
|
||||
|
||||
func addZipFileAs(writer *zip.Writer, path string, archiveName string) error {
|
||||
in, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
info, err := in.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
header, err := zip.FileInfoHeader(info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
header.Name = archiveName
|
||||
header.Method = zip.Deflate
|
||||
|
||||
out, err := writer.CreateHeader(header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
return err
|
||||
}
|
||||
@@ -1,157 +0,0 @@
|
||||
package appstate
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/glebarez/sqlite"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// TestEnsureRotatingLocalBackupCreatesAndRotates verifies that a daily
// archive containing the DB and config is written for the current
// (stubbed) date, and that advancing the clock one day produces a new
// archive for the new date.
func TestEnsureRotatingLocalBackupCreatesAndRotates(t *testing.T) {
	temp := t.TempDir()
	dbPath := filepath.Join(temp, "qfs.db")
	cfgPath := filepath.Join(temp, "config.yaml")

	if err := writeTestSQLiteDB(dbPath); err != nil {
		t.Fatalf("write sqlite db: %v", err)
	}
	if err := os.WriteFile(cfgPath, []byte("cfg"), 0644); err != nil {
		t.Fatalf("write config: %v", err)
	}

	// Stub the package clock so archive names are deterministic.
	prevNow := backupNow
	defer func() { backupNow = prevNow }()
	backupNow = func() time.Time { return time.Date(2026, 2, 11, 10, 0, 0, 0, time.UTC) }

	created, err := EnsureRotatingLocalBackup(dbPath, cfgPath)
	if err != nil {
		t.Fatalf("backup: %v", err)
	}
	if len(created) == 0 {
		t.Fatalf("expected backup to be created")
	}

	// NOTE(review): the archive prefix is "qfs-backp-" (sic) — presumably
	// a typo for "backup" baked into the implementation; the test pins it.
	dailyArchive := filepath.Join(temp, "backups", "daily", "qfs-backp-2026-02-11.zip")
	if _, err := os.Stat(dailyArchive); err != nil {
		t.Fatalf("daily archive missing: %v", err)
	}
	assertZipContains(t, dailyArchive, "qfs.db", "config.yaml")

	// Move the clock to the next day: a fresh daily archive must appear.
	backupNow = func() time.Time { return time.Date(2026, 2, 12, 10, 0, 0, 0, time.UTC) }
	created, err = EnsureRotatingLocalBackup(dbPath, cfgPath)
	if err != nil {
		t.Fatalf("backup rotate: %v", err)
	}
	if len(created) == 0 {
		t.Fatalf("expected backup to be created for new day")
	}

	dailyArchive = filepath.Join(temp, "backups", "daily", "qfs-backp-2026-02-12.zip")
	if _, err := os.Stat(dailyArchive); err != nil {
		t.Fatalf("daily archive missing after rotate: %v", err)
	}
}
|
||||
|
||||
// TestEnsureRotatingLocalBackupEnvControls verifies the two environment
// knobs: the backup-dir override redirects output to a custom root, and
// the disable flag turns the call into a no-op that leaves previously
// created backups untouched.
func TestEnsureRotatingLocalBackupEnvControls(t *testing.T) {
	temp := t.TempDir()
	dbPath := filepath.Join(temp, "qfs.db")
	cfgPath := filepath.Join(temp, "config.yaml")

	if err := writeTestSQLiteDB(dbPath); err != nil {
		t.Fatalf("write sqlite db: %v", err)
	}
	if err := os.WriteFile(cfgPath, []byte("cfg"), 0644); err != nil {
		t.Fatalf("write config: %v", err)
	}

	// Redirect backups into a custom root via the env override.
	backupRoot := filepath.Join(temp, "custom_backups")
	t.Setenv(envBackupDir, backupRoot)

	if _, err := EnsureRotatingLocalBackup(dbPath, cfgPath); err != nil {
		t.Fatalf("backup with env: %v", err)
	}
	// .period.json — presumably the rotation marker — must land in the
	// custom root, proving the override took effect.
	if _, err := os.Stat(filepath.Join(backupRoot, "daily", ".period.json")); err != nil {
		t.Fatalf("expected backup in custom dir: %v", err)
	}

	// Disabling must be a silent no-op, not an error...
	t.Setenv(envBackupDisable, "1")
	if _, err := EnsureRotatingLocalBackup(dbPath, cfgPath); err != nil {
		t.Fatalf("backup disabled: %v", err)
	}
	// ...and must not remove what an earlier run produced.
	if _, err := os.Stat(filepath.Join(backupRoot, "daily", ".period.json")); err != nil {
		t.Fatalf("backup should remain from previous run: %v", err)
	}
}
|
||||
|
||||
// TestEnsureRotatingLocalBackupRejectsGitWorktree verifies that backups
// are refused when the resolved backup root would land inside a git
// worktree; a bare .git directory is enough to mark one.
func TestEnsureRotatingLocalBackupRejectsGitWorktree(t *testing.T) {
	temp := t.TempDir()
	repoRoot := filepath.Join(temp, "repo")
	if err := os.MkdirAll(filepath.Join(repoRoot, ".git"), 0755); err != nil {
		t.Fatalf("mkdir git dir: %v", err)
	}

	// Place the DB (and thus the default backup root) inside the repo.
	dbPath := filepath.Join(repoRoot, "data", "qfs.db")
	cfgPath := filepath.Join(repoRoot, "data", "config.yaml")
	if err := os.MkdirAll(filepath.Dir(dbPath), 0755); err != nil {
		t.Fatalf("mkdir data dir: %v", err)
	}
	if err := writeTestSQLiteDB(dbPath); err != nil {
		t.Fatalf("write sqlite db: %v", err)
	}
	if err := os.WriteFile(cfgPath, []byte("cfg"), 0644); err != nil {
		t.Fatalf("write cfg: %v", err)
	}

	_, err := EnsureRotatingLocalBackup(dbPath, cfgPath)
	if err == nil {
		t.Fatal("expected git worktree backup root to be rejected")
	}
	// The error message substring is part of the contract pinned here.
	if !strings.Contains(err.Error(), "outside git worktree") {
		t.Fatalf("unexpected error: %v", err)
	}
}
|
||||
|
||||
// writeTestSQLiteDB creates a minimal SQLite database at path — one
// table with one row — so backup tests have real content to snapshot.
func writeTestSQLiteDB(path string) error {
	db, err := gorm.Open(sqlite.Open(path), &gorm.Config{})
	if err != nil {
		return err
	}
	sqlDB, err := db.DB()
	if err != nil {
		return err
	}
	// Close the underlying handle on return so the file is flushed and
	// unlocked before the caller snapshots it.
	defer sqlDB.Close()

	return db.Exec(`
CREATE TABLE sample_items (
	id INTEGER PRIMARY KEY AUTOINCREMENT,
	name TEXT NOT NULL
);
INSERT INTO sample_items(name) VALUES ('backup');
`).Error
}
|
||||
|
||||
func assertZipContains(t *testing.T, archivePath string, expected ...string) {
|
||||
t.Helper()
|
||||
|
||||
reader, err := zip.OpenReader(archivePath)
|
||||
if err != nil {
|
||||
t.Fatalf("open archive: %v", err)
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
found := make(map[string]bool, len(reader.File))
|
||||
for _, file := range reader.File {
|
||||
found[file.Name] = true
|
||||
}
|
||||
for _, name := range expected {
|
||||
if !found[name] {
|
||||
t.Fatalf("archive %s missing %s", archivePath, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -56,25 +55,6 @@ func ResolveConfigPath(explicitPath string) (string, error) {
|
||||
return filepath.Join(dir, defaultCfg), nil
|
||||
}
|
||||
|
||||
// ResolveConfigPathNearDB returns config path using priority:
|
||||
// explicit CLI path > QFS_CONFIG_PATH > directory of resolved local DB path.
|
||||
// Falls back to ResolveConfigPath when dbPath is empty.
|
||||
func ResolveConfigPathNearDB(explicitPath, dbPath string) (string, error) {
|
||||
if explicitPath != "" {
|
||||
return filepath.Clean(explicitPath), nil
|
||||
}
|
||||
|
||||
if fromEnv := os.Getenv(envCfgPath); fromEnv != "" {
|
||||
return filepath.Clean(fromEnv), nil
|
||||
}
|
||||
|
||||
if strings.TrimSpace(dbPath) != "" {
|
||||
return filepath.Join(filepath.Dir(filepath.Clean(dbPath)), defaultCfg), nil
|
||||
}
|
||||
|
||||
return ResolveConfigPath("")
|
||||
}
|
||||
|
||||
// MigrateLegacyDB copies an existing legacy DB (and optional SQLite sidecars)
|
||||
// to targetPath if targetPath does not already exist.
|
||||
// Returns source path if migration happened.
|
||||
|
||||
@@ -1,124 +0,0 @@
|
||||
package article
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
)
|
||||
|
||||
// ErrMissingCategoryForLot is returned when a lot has no category in local_pricelist_items.lot_category.
var ErrMissingCategoryForLot = errors.New("missing_category_for_lot")

// MissingCategoryForLotError wraps ErrMissingCategoryForLot with the
// offending lot's name so callers can report which lot lacks a category.
type MissingCategoryForLotError struct {
	// LotName is the lot with no resolvable category; may be empty.
	LotName string
}
|
||||
|
||||
func (e *MissingCategoryForLotError) Error() string {
|
||||
if e == nil || strings.TrimSpace(e.LotName) == "" {
|
||||
return ErrMissingCategoryForLot.Error()
|
||||
}
|
||||
return fmt.Sprintf("%s: %s", ErrMissingCategoryForLot.Error(), e.LotName)
|
||||
}
|
||||
|
||||
// Unwrap exposes the sentinel so errors.Is(err, ErrMissingCategoryForLot)
// matches wrapped instances.
func (e *MissingCategoryForLotError) Unwrap() error {
	return ErrMissingCategoryForLot
}
|
||||
|
||||
// Group is the coarse component class used to build article segments.
type Group string

// Article segment groups.
const (
	GroupCPU  Group = "CPU"
	GroupMEM  Group = "MEM"
	GroupGPU  Group = "GPU"
	GroupDISK Group = "DISK"
	GroupNET  Group = "NET"
	GroupPSU  Group = "PSU"
)
|
||||
|
||||
// GroupForLotCategory maps pricelist lot_category codes into article groups.
|
||||
// Unknown/unrelated categories return ok=false.
|
||||
func GroupForLotCategory(cat string) (group Group, ok bool) {
|
||||
c := strings.ToUpper(strings.TrimSpace(cat))
|
||||
switch c {
|
||||
case "CPU":
|
||||
return GroupCPU, true
|
||||
case "MEM":
|
||||
return GroupMEM, true
|
||||
case "GPU":
|
||||
return GroupGPU, true
|
||||
case "M2", "SSD", "HDD", "EDSFF", "HHHL":
|
||||
return GroupDISK, true
|
||||
case "NIC", "HCA", "DPU":
|
||||
return GroupNET, true
|
||||
case "HBA":
|
||||
return GroupNET, true
|
||||
case "PSU", "PS":
|
||||
return GroupPSU, true
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
|
||||
// ResolveLotCategoriesStrict resolves categories for lotNames using local_pricelist_items.lot_category
|
||||
// for a given server pricelist id. If any lot is missing or has empty category, returns an error.
|
||||
func ResolveLotCategoriesStrict(local *localdb.LocalDB, serverPricelistID uint, lotNames []string) (map[string]string, error) {
|
||||
if local == nil {
|
||||
return nil, fmt.Errorf("local db is nil")
|
||||
}
|
||||
cats, err := local.GetLocalLotCategoriesByServerPricelistID(serverPricelistID, lotNames)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
missing := make([]string, 0)
|
||||
for _, lot := range lotNames {
|
||||
cat := strings.TrimSpace(cats[lot])
|
||||
if cat == "" {
|
||||
missing = append(missing, lot)
|
||||
continue
|
||||
}
|
||||
cats[lot] = cat
|
||||
}
|
||||
if len(missing) > 0 {
|
||||
fallback, err := local.GetLocalComponentCategoriesByLotNames(missing)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, lot := range missing {
|
||||
if cat := strings.TrimSpace(fallback[lot]); cat != "" {
|
||||
cats[lot] = cat
|
||||
}
|
||||
}
|
||||
for _, lot := range missing {
|
||||
if strings.TrimSpace(cats[lot]) == "" {
|
||||
return nil, &MissingCategoryForLotError{LotName: lot}
|
||||
}
|
||||
}
|
||||
}
|
||||
return cats, nil
|
||||
}
|
||||
|
||||
// NormalizeServerModel produces a stable article segment for the server
// model: the trimmed, upper-cased input with every rune outside
// [A-Z0-9.] removed. Returns "" for blank input.
func NormalizeServerModel(model string) string {
	upper := strings.ToUpper(strings.TrimSpace(model))
	return strings.Map(func(r rune) rune {
		switch {
		case r >= 'A' && r <= 'Z':
			return r
		case r >= '0' && r <= '9':
			return r
		case r == '.':
			return r
		default:
			// Drop separators, spaces and any non-ASCII characters.
			return -1
		}
	}, upper)
}
|
||||
@@ -1,98 +0,0 @@
|
||||
package article
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
)
|
||||
|
||||
// TestResolveLotCategoriesStrict_MissingCategoryReturnsError verifies
// that an item with an empty lot_category — and no fallback component
// record — surfaces ErrMissingCategoryForLot.
func TestResolveLotCategoriesStrict_MissingCategoryReturnsError(t *testing.T) {
	local, err := localdb.New(filepath.Join(t.TempDir(), "local.db"))
	if err != nil {
		t.Fatalf("init local db: %v", err)
	}
	t.Cleanup(func() { _ = local.Close() })

	// Seed a pricelist whose single item deliberately lacks a category.
	if err := local.SaveLocalPricelist(&localdb.LocalPricelist{
		ServerID:  1,
		Source:    "estimate",
		Version:   "S-2026-02-11-001",
		Name:      "test",
		CreatedAt: time.Now(),
		SyncedAt:  time.Now(),
	}); err != nil {
		t.Fatalf("save local pricelist: %v", err)
	}
	localPL, err := local.GetLocalPricelistByServerID(1)
	if err != nil {
		t.Fatalf("get local pricelist: %v", err)
	}
	if err := local.SaveLocalPricelistItems([]localdb.LocalPricelistItem{
		{PricelistID: localPL.ID, LotName: "CPU_A", LotCategory: "", Price: 10},
	}); err != nil {
		t.Fatalf("save local items: %v", err)
	}

	_, err = ResolveLotCategoriesStrict(local, 1, []string{"CPU_A"})
	if err == nil {
		t.Fatalf("expected error")
	}
	// The typed error must unwrap to the package sentinel.
	if !errors.Is(err, ErrMissingCategoryForLot) {
		t.Fatalf("expected ErrMissingCategoryForLot, got %v", err)
	}
}
|
||||
|
||||
// TestResolveLotCategoriesStrict_FallbackToLocalComponents verifies
// that a lot whose pricelist item has no category is still resolved
// when a matching local component record carries one.
func TestResolveLotCategoriesStrict_FallbackToLocalComponents(t *testing.T) {
	local, err := localdb.New(filepath.Join(t.TempDir(), "local.db"))
	if err != nil {
		t.Fatalf("init local db: %v", err)
	}
	t.Cleanup(func() { _ = local.Close() })

	if err := local.SaveLocalPricelist(&localdb.LocalPricelist{
		ServerID:  2,
		Source:    "estimate",
		Version:   "S-2026-02-11-002",
		Name:      "test",
		CreatedAt: time.Now(),
		SyncedAt:  time.Now(),
	}); err != nil {
		t.Fatalf("save local pricelist: %v", err)
	}
	localPL, err := local.GetLocalPricelistByServerID(2)
	if err != nil {
		t.Fatalf("get local pricelist: %v", err)
	}
	// The pricelist item has an empty category on purpose...
	if err := local.SaveLocalPricelistItems([]localdb.LocalPricelistItem{
		{PricelistID: localPL.ID, LotName: "CPU_B", LotCategory: "", Price: 10},
	}); err != nil {
		t.Fatalf("save local items: %v", err)
	}
	// ...while the component record supplies "CPU" as the fallback.
	if err := local.DB().Create(&localdb.LocalComponent{
		LotName:        "CPU_B",
		Category:       "CPU",
		LotDescription: "cpu",
	}).Error; err != nil {
		t.Fatalf("save local components: %v", err)
	}

	cats, err := ResolveLotCategoriesStrict(local, 2, []string{"CPU_B"})
	if err != nil {
		t.Fatalf("expected fallback, got error: %v", err)
	}
	if cats["CPU_B"] != "CPU" {
		t.Fatalf("expected CPU, got %q", cats["CPU_B"])
	}
}
|
||||
|
||||
// TestGroupForLotCategory spot-checks the category mapping: matching is
// case-insensitive, and transceiver-style categories (SFP) are excluded.
func TestGroupForLotCategory(t *testing.T) {
	if g, ok := GroupForLotCategory("cpu"); !ok || g != GroupCPU {
		t.Fatalf("expected cpu -> GroupCPU")
	}
	if g, ok := GroupForLotCategory("SFP"); ok || g != "" {
		t.Fatalf("expected SFP to be excluded")
	}
}
|
||||
@@ -1,605 +0,0 @@
|
||||
package article
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
)
|
||||
|
||||
// BuildOptions carries the inputs needed to assemble an article string.
type BuildOptions struct {
	ServerModel string // free-form server model; normalized into the first segment
	SupportCode string // optional support code such as "3yP"; validated when non-empty
	// ServerPricelist is the server-side pricelist id used to resolve
	// lot categories; Build rejects nil or 0.
	ServerPricelist *uint
}

// BuildResult is the assembled article plus non-fatal warning tokens
// (e.g. "mem_unknown", "compressed") accumulated while building.
type BuildResult struct {
	Article  string
	Warnings []string
}
|
||||
|
||||
var (
|
||||
reMemGiB = regexp.MustCompile(`(?i)(\d+)\s*(GB|G)`)
|
||||
reMemTiB = regexp.MustCompile(`(?i)(\d+)\s*(TB|T)`)
|
||||
reCapacityT = regexp.MustCompile(`(?i)(\d+(?:[.,]\d+)?)T`)
|
||||
reCapacityG = regexp.MustCompile(`(?i)(\d+(?:[.,]\d+)?)G`)
|
||||
rePortSpeed = regexp.MustCompile(`(?i)(\d+)p(\d+)(GbE|G)`)
|
||||
rePortFC = regexp.MustCompile(`(?i)(\d+)pFC(\d+)`)
|
||||
reWatts = regexp.MustCompile(`(?i)(\d{3,5})\s*W`)
|
||||
)
|
||||
|
||||
// Build assembles the article string for a server configuration:
// "<MODEL>-<CPU>-<MEM>-<GPU>-<DISK>-<NET>-<PSU>-<SUPPORT>", omitting
// segments with no matching lots. Lot categories are resolved strictly
// against the given server pricelist; per-segment parse problems are
// reported as warning tokens, not errors. Articles over 80 runes are
// compressed (adding a "compressed" warning) and rejected with
// "article_overflow" when still too long.
func Build(local *localdb.LocalDB, items []models.ConfigItem, opts BuildOptions) (BuildResult, error) {
	segments := make([]string, 0, 8)
	warnings := make([]string, 0)

	// The normalized server model is the mandatory first segment.
	model := NormalizeServerModel(opts.ServerModel)
	if model == "" {
		return BuildResult{}, fmt.Errorf("server_model required")
	}
	segments = append(segments, model)

	lotNames := make([]string, 0, len(items))
	for _, it := range items {
		lotNames = append(lotNames, it.LotName)
	}

	if opts.ServerPricelist == nil || *opts.ServerPricelist == 0 {
		return BuildResult{}, fmt.Errorf("pricelist_id required for article")
	}

	// Every lot must resolve to a category or the whole build fails.
	cats, err := ResolveLotCategoriesStrict(local, *opts.ServerPricelist, lotNames)
	if err != nil {
		return BuildResult{}, err
	}

	cpuSeg := buildCPUSegment(items, cats)
	if cpuSeg != "" {
		segments = append(segments, cpuSeg)
	}
	memSeg, memWarn := buildMemSegment(items, cats)
	if memWarn != "" {
		warnings = append(warnings, memWarn)
	}
	if memSeg != "" {
		segments = append(segments, memSeg)
	}
	gpuSeg := buildGPUSegment(items, cats)
	if gpuSeg != "" {
		segments = append(segments, gpuSeg)
	}
	diskSeg, diskWarn := buildDiskSegment(items, cats)
	if diskWarn != "" {
		warnings = append(warnings, diskWarn)
	}
	if diskSeg != "" {
		segments = append(segments, diskSeg)
	}
	netSeg, netWarn := buildNetSegment(items, cats)
	if netWarn != "" {
		warnings = append(warnings, netWarn)
	}
	if netSeg != "" {
		segments = append(segments, netSeg)
	}
	psuSeg, psuWarn := buildPSUSegment(items, cats)
	if psuWarn != "" {
		warnings = append(warnings, psuWarn)
	}
	if psuSeg != "" {
		segments = append(segments, psuSeg)
	}

	// An optional support code is validated, never silently dropped.
	if strings.TrimSpace(opts.SupportCode) != "" {
		code := strings.TrimSpace(opts.SupportCode)
		if !isSupportCodeValid(code) {
			return BuildResult{}, fmt.Errorf("invalid_support_code")
		}
		segments = append(segments, code)
	}

	// The length limit is measured in runes, not bytes.
	article := strings.Join(segments, "-")
	if len([]rune(article)) > 80 {
		// NOTE(review): compressArticle addresses segments by fixed
		// position, but optional segments are omitted above, shifting
		// the positions — confirm before relying on compression.
		article = compressArticle(segments)
		warnings = append(warnings, "compressed")
	}
	if len([]rune(article)) > 80 {
		return BuildResult{}, fmt.Errorf("article_overflow")
	}

	return BuildResult{Article: article, Warnings: warnings}, nil
}
|
||||
|
||||
// isSupportCodeValid reports whether code has the form "<digits>y<C>"
// where C is one of the support classes W, B, S or P (e.g. "3yP").
func isSupportCodeValid(code string) bool {
	if len(code) < 3 {
		return false
	}
	if !strings.Contains(code, "y") {
		return false
	}
	// Exactly one "y" with non-empty halves on both sides.
	parts := strings.Split(code, "y")
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return false
	}
	// The duration half must be all digits.
	for _, r := range parts[0] {
		if r < '0' || r > '9' {
			return false
		}
	}
	class := parts[1]
	return class == "W" || class == "B" || class == "S" || class == "P"
}
|
||||
|
||||
func buildCPUSegment(items []models.ConfigItem, cats map[string]string) string {
|
||||
type agg struct {
|
||||
qty int
|
||||
}
|
||||
models := map[string]*agg{}
|
||||
for _, it := range items {
|
||||
group, ok := GroupForLotCategory(cats[it.LotName])
|
||||
if !ok || group != GroupCPU {
|
||||
continue
|
||||
}
|
||||
model := parseCPUModel(it.LotName)
|
||||
if model == "" {
|
||||
model = "UNK"
|
||||
}
|
||||
if _, ok := models[model]; !ok {
|
||||
models[model] = &agg{}
|
||||
}
|
||||
models[model].qty += it.Quantity
|
||||
}
|
||||
if len(models) == 0 {
|
||||
return ""
|
||||
}
|
||||
parts := make([]string, 0, len(models))
|
||||
for model, a := range models {
|
||||
parts = append(parts, fmt.Sprintf("%dx%s", a.qty, model))
|
||||
}
|
||||
sort.Strings(parts)
|
||||
return strings.Join(parts, "+")
|
||||
}
|
||||
|
||||
func buildMemSegment(items []models.ConfigItem, cats map[string]string) (string, string) {
|
||||
totalGiB := 0
|
||||
for _, it := range items {
|
||||
group, ok := GroupForLotCategory(cats[it.LotName])
|
||||
if !ok || group != GroupMEM {
|
||||
continue
|
||||
}
|
||||
per := parseMemGiB(it.LotName)
|
||||
if per <= 0 {
|
||||
return "", "mem_unknown"
|
||||
}
|
||||
totalGiB += per * it.Quantity
|
||||
}
|
||||
if totalGiB == 0 {
|
||||
return "", ""
|
||||
}
|
||||
if totalGiB%1024 == 0 {
|
||||
return fmt.Sprintf("%dT", totalGiB/1024), ""
|
||||
}
|
||||
return fmt.Sprintf("%dG", totalGiB), ""
|
||||
}
|
||||
|
||||
func buildGPUSegment(items []models.ConfigItem, cats map[string]string) string {
|
||||
models := map[string]int{}
|
||||
for _, it := range items {
|
||||
group, ok := GroupForLotCategory(cats[it.LotName])
|
||||
if !ok || group != GroupGPU {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(strings.ToUpper(it.LotName), "MB_") {
|
||||
continue
|
||||
}
|
||||
model := parseGPUModel(it.LotName)
|
||||
if model == "" {
|
||||
model = "UNK"
|
||||
}
|
||||
models[model] += it.Quantity
|
||||
}
|
||||
if len(models) == 0 {
|
||||
return ""
|
||||
}
|
||||
parts := make([]string, 0, len(models))
|
||||
for model, qty := range models {
|
||||
parts = append(parts, fmt.Sprintf("%dx%s", qty, model))
|
||||
}
|
||||
sort.Strings(parts)
|
||||
return strings.Join(parts, "+")
|
||||
}
|
||||
|
||||
func buildDiskSegment(items []models.ConfigItem, cats map[string]string) (string, string) {
|
||||
type key struct {
|
||||
t string
|
||||
c string
|
||||
}
|
||||
groupQty := map[key]int{}
|
||||
warn := ""
|
||||
for _, it := range items {
|
||||
group, ok := GroupForLotCategory(cats[it.LotName])
|
||||
if !ok || group != GroupDISK {
|
||||
continue
|
||||
}
|
||||
capToken := parseCapacity(it.LotName)
|
||||
if capToken == "" {
|
||||
warn = "disk_unknown"
|
||||
}
|
||||
typeCode := diskTypeCode(cats[it.LotName], it.LotName)
|
||||
k := key{t: typeCode, c: capToken}
|
||||
groupQty[k] += it.Quantity
|
||||
}
|
||||
if len(groupQty) == 0 {
|
||||
return "", ""
|
||||
}
|
||||
parts := make([]string, 0, len(groupQty))
|
||||
for k, qty := range groupQty {
|
||||
if k.c == "" {
|
||||
parts = append(parts, fmt.Sprintf("%dx%s", qty, k.t))
|
||||
} else {
|
||||
parts = append(parts, fmt.Sprintf("%dx%s%s", qty, k.c, k.t))
|
||||
}
|
||||
}
|
||||
sort.Strings(parts)
|
||||
return strings.Join(parts, "+"), warn
|
||||
}
|
||||
|
||||
func buildNetSegment(items []models.ConfigItem, cats map[string]string) (string, string) {
|
||||
groupQty := map[string]int{}
|
||||
warn := ""
|
||||
for _, it := range items {
|
||||
group, ok := GroupForLotCategory(cats[it.LotName])
|
||||
if !ok || group != GroupNET {
|
||||
continue
|
||||
}
|
||||
profile := parsePortSpeed(it.LotName)
|
||||
if profile == "" {
|
||||
profile = "UNKNET"
|
||||
warn = "net_unknown"
|
||||
}
|
||||
groupQty[profile] += it.Quantity
|
||||
}
|
||||
if len(groupQty) == 0 {
|
||||
return "", ""
|
||||
}
|
||||
parts := make([]string, 0, len(groupQty))
|
||||
for profile, qty := range groupQty {
|
||||
parts = append(parts, fmt.Sprintf("%dx%s", qty, profile))
|
||||
}
|
||||
sort.Strings(parts)
|
||||
return strings.Join(parts, "+"), warn
|
||||
}
|
||||
|
||||
func buildPSUSegment(items []models.ConfigItem, cats map[string]string) (string, string) {
|
||||
groupQty := map[string]int{}
|
||||
warn := ""
|
||||
for _, it := range items {
|
||||
group, ok := GroupForLotCategory(cats[it.LotName])
|
||||
if !ok || group != GroupPSU {
|
||||
continue
|
||||
}
|
||||
rating := parseWatts(it.LotName)
|
||||
if rating == "" {
|
||||
rating = "UNKPSU"
|
||||
warn = "psu_unknown"
|
||||
}
|
||||
groupQty[rating] += it.Quantity
|
||||
}
|
||||
if len(groupQty) == 0 {
|
||||
return "", ""
|
||||
}
|
||||
parts := make([]string, 0, len(groupQty))
|
||||
for rating, qty := range groupQty {
|
||||
parts = append(parts, fmt.Sprintf("%dx%s", qty, rating))
|
||||
}
|
||||
sort.Strings(parts)
|
||||
return strings.Join(parts, "+"), warn
|
||||
}
|
||||
|
||||
// normalizeModelToken derives a generic model token: strip the category
// prefix (everything through the first "_", when a non-empty remainder
// follows), then take the last "_"-separated part, trimmed and
// upper-cased.
func normalizeModelToken(lotName string) string {
	if first := strings.Index(lotName, "_"); first >= 0 && first+1 < len(lotName) {
		lotName = lotName[first+1:]
	}
	if last := strings.LastIndex(lotName, "_"); last >= 0 {
		lotName = lotName[last+1:]
	}
	return strings.ToUpper(strings.TrimSpace(lotName))
}
|
||||
|
||||
func parseCPUModel(lotName string) string {
|
||||
parts := strings.Split(lotName, "_")
|
||||
if len(parts) >= 2 {
|
||||
last := strings.ToUpper(strings.TrimSpace(parts[len(parts)-1]))
|
||||
if last != "" {
|
||||
return last
|
||||
}
|
||||
}
|
||||
return normalizeModelToken(lotName)
|
||||
}
|
||||
|
||||
// parseGPUModel derives a GPU token "<model>_<mem>" (e.g. "H100_80GB")
// from a lot name: it drops everything through "GPU_", skips known
// vendor/form-factor words, takes the first remaining eligible part as
// the model and any part containing "GB" as the memory size.
func parseGPUModel(lotName string) string {
	upper := strings.ToUpper(lotName)
	if idx := strings.Index(upper, "GPU_"); idx >= 0 {
		upper = upper[idx+4:]
	}
	parts := strings.Split(upper, "_")
	model := ""
	mem := ""
	for i, p := range parts {
		if p == "" {
			continue
		}
		switch p {
		case "NV", "NVIDIA", "INTEL", "AMD", "RADEON", "PCIE", "PCI", "SXM", "SXMX":
			// Vendor / interface words never contribute to the token.
			continue
		default:
			if strings.Contains(p, "GB") {
				mem = p
				continue
			}
			// NOTE(review): the i > 0 guard skips parts[0] even when it
			// is not a known vendor word, so "GPU_H100_80GB" falls all
			// the way through to normalizeModelToken and yields "80GB"
			// instead of "H100_80GB". Presumably lot names always start
			// with a vendor token — confirm before relying on this for
			// vendor-less names.
			if model == "" && (i > 0) {
				model = p
			}
		}
	}
	if model != "" && mem != "" {
		return model + "_" + mem
	}
	if model != "" {
		return model
	}
	// Fallback: last underscore-separated token of the raw name.
	return normalizeModelToken(lotName)
}
|
||||
|
||||
func parseMemGiB(lotName string) int {
|
||||
if m := reMemTiB.FindStringSubmatch(lotName); len(m) == 3 {
|
||||
return atoi(m[1]) * 1024
|
||||
}
|
||||
if m := reMemGiB.FindStringSubmatch(lotName); len(m) == 3 {
|
||||
return atoi(m[1])
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func parseCapacity(lotName string) string {
|
||||
if m := reCapacityT.FindStringSubmatch(lotName); len(m) == 2 {
|
||||
return normalizeTToken(strings.ReplaceAll(m[1], ",", ".")) + "T"
|
||||
}
|
||||
if m := reCapacityG.FindStringSubmatch(lotName); len(m) == 2 {
|
||||
return normalizeNumberToken(strings.ReplaceAll(m[1], ",", ".")) + "G"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// diskTypeCode maps a disk lot to a short type token: the M2 category
// wins outright; otherwise the interface named in the lot decides
// (NVME -> "NV", SAS -> "SAS", SATA -> "SAT"); failing that, the
// upper-cased category is used as-is.
func diskTypeCode(cat string, lotName string) string {
	category := strings.ToUpper(strings.TrimSpace(cat))
	if category == "M2" {
		return "M2"
	}
	name := strings.ToUpper(lotName)
	switch {
	case strings.Contains(name, "NVME"):
		return "NV"
	case strings.Contains(name, "SAS"):
		return "SAS"
	case strings.Contains(name, "SATA"):
		return "SAT"
	}
	return category
}
|
||||
|
||||
func parsePortSpeed(lotName string) string {
|
||||
if m := rePortSpeed.FindStringSubmatch(lotName); len(m) == 4 {
|
||||
return fmt.Sprintf("%sp%sG", m[1], m[2])
|
||||
}
|
||||
if m := rePortFC.FindStringSubmatch(lotName); len(m) == 3 {
|
||||
return fmt.Sprintf("%spFC%s", m[1], m[2])
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func parseWatts(lotName string) string {
|
||||
if m := reWatts.FindStringSubmatch(lotName); len(m) == 2 {
|
||||
w := atoi(m[1])
|
||||
if w >= 1000 {
|
||||
kw := fmt.Sprintf("%.1f", float64(w)/1000.0)
|
||||
kw = strings.TrimSuffix(kw, ".0")
|
||||
return fmt.Sprintf("%skW", kw)
|
||||
}
|
||||
return fmt.Sprintf("%dW", w)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// normalizeNumberToken strips leading zeros from a numeric token while
// keeping a single leading "0" for the plain zero value and before a
// bare decimal point ("0.5").
func normalizeNumberToken(raw string) string {
	trimmed := strings.TrimLeft(strings.TrimSpace(raw), "0")
	if trimmed == "" || strings.HasPrefix(trimmed, ".") {
		return "0" + trimmed
	}
	return trimmed
}
|
||||
|
||||
func normalizeTToken(raw string) string {
|
||||
raw = normalizeNumberToken(raw)
|
||||
parts := strings.SplitN(raw, ".", 2)
|
||||
intPart := parts[0]
|
||||
frac := ""
|
||||
if len(parts) == 2 {
|
||||
frac = parts[1]
|
||||
}
|
||||
if frac == "" {
|
||||
frac = "0"
|
||||
}
|
||||
if len(intPart) >= 2 {
|
||||
return intPart + "." + frac
|
||||
}
|
||||
if len(frac) > 1 {
|
||||
frac = frac[:1]
|
||||
}
|
||||
return intPart + "." + frac
|
||||
}
|
||||
|
||||
// atoi folds the decimal digits of v into an int, silently ignoring
// any non-digit runes (callers pass digit-only regex captures, so in
// practice nothing is skipped). No overflow handling.
func atoi(v string) int {
	n := 0
	for _, r := range v {
		if r >= '0' && r <= '9' {
			n = n*10 + int(r-'0')
		}
	}
	return n
}
|
||||
|
||||
// compressArticle progressively shortens an over-long article (>80
// runes): normalize "GbE" -> "G", then drop the PSU segment, then
// collapse the NET, DISK and GPU segments to generic labels,
// re-checking the rune length after each step.
//
// NOTE(review): the index() lookups assume the fixed layout
// model, cpu, mem, gpu, disk, net, psu, support — but Build only
// appends segments that are present, so when optional segments are
// missing these positions shift and the wrong segment gets removed or
// compressed. Confirm callers always supply the full layout before
// trusting the positional steps below.
func compressArticle(segments []string) string {
	if len(segments) == 0 {
		return ""
	}
	normalized := make([]string, 0, len(segments))
	for _, s := range segments {
		normalized = append(normalized, strings.ReplaceAll(s, "GbE", "G"))
	}
	segments = normalized
	article := strings.Join(segments, "-")
	if len([]rune(article)) <= 80 {
		return article
	}

	// segment order: model, cpu, mem, gpu, disk, net, psu, support
	index := func(i int) (int, bool) {
		if i >= 0 && i < len(segments) {
			return i, true
		}
		return -1, false
	}

	// 1) remove PSU
	if i, ok := index(6); ok {
		segments = append(segments[:i], segments[i+1:]...)
		article = strings.Join(segments, "-")
		if len([]rune(article)) <= 80 {
			return article
		}
	}

	// 2) compress NET/HBA/HCA
	if i, ok := index(5); ok {
		segments[i] = compressNetSegment(segments[i])
		article = strings.Join(segments, "-")
		if len([]rune(article)) <= 80 {
			return article
		}
	}

	// 3) compress DISK
	if i, ok := index(4); ok {
		segments[i] = compressDiskSegment(segments[i])
		article = strings.Join(segments, "-")
		if len([]rune(article)) <= 80 {
			return article
		}
	}

	// 4) compress GPU to vendor only (GPU_NV)
	if i, ok := index(3); ok {
		segments[i] = compressGPUSegment(segments[i])
	}
	return strings.Join(segments, "-")
}
|
||||
|
||||
// compressNetSegment shortens a network segment by replacing each
// per-profile term with a generic adapter label: FC profiles become
// HBA, InfiniBand/HCA profiles become HCA, everything else NIC.
// Quantities are preserved and the result is sorted.
func compressNetSegment(seg string) string {
	if seg == "" {
		return seg
	}
	compressed := make([]string, 0)
	for _, term := range strings.Split(seg, "+") {
		term = strings.TrimSpace(term)
		if term == "" {
			continue
		}
		qty, profile := "1", term
		if pieces := strings.SplitN(term, "x", 2); len(pieces) == 2 {
			qty, profile = pieces[0], pieces[1]
		}
		label := "NIC"
		switch upper := strings.ToUpper(profile); {
		case strings.Contains(upper, "FC"):
			label = "HBA"
		case strings.Contains(upper, "HCA"), strings.Contains(upper, "IB"):
			label = "HCA"
		}
		compressed = append(compressed, fmt.Sprintf("%sx%s", qty, label))
	}
	if len(compressed) == 0 {
		return seg
	}
	sort.Strings(compressed)
	return strings.Join(compressed, "+")
}
|
||||
|
||||
// compressDiskSegment shortens a disk segment by reducing each term to
// its quantity plus a short type label (first of M2/NV/SAS/SAT/SSD/
// HDD/EDS/HHH found in the spec; "DSK" otherwise). The result is sorted.
func compressDiskSegment(seg string) string {
	if seg == "" {
		return seg
	}
	typeLabels := []string{"M2", "NV", "SAS", "SAT", "SSD", "HDD", "EDS", "HHH"}
	compressed := make([]string, 0)
	for _, term := range strings.Split(seg, "+") {
		term = strings.TrimSpace(term)
		if term == "" {
			continue
		}
		qty, spec := "1", term
		if pieces := strings.SplitN(term, "x", 2); len(pieces) == 2 {
			qty, spec = pieces[0], pieces[1]
		}
		upper := strings.ToUpper(spec)
		label := "DSK"
		for _, candidate := range typeLabels {
			if strings.Contains(upper, candidate) {
				label = candidate
				break
			}
		}
		compressed = append(compressed, fmt.Sprintf("%sx%s", qty, label))
	}
	if len(compressed) == 0 {
		return seg
	}
	sort.Strings(compressed)
	return strings.Join(compressed, "+")
}
||||
|
||||
// compressGPUSegment collapses every GPU term to the generic vendor
// label "GPU_NV", keeping only the quantities; the result is sorted.
func compressGPUSegment(seg string) string {
	if seg == "" {
		return seg
	}
	compressed := make([]string, 0)
	for _, term := range strings.Split(seg, "+") {
		term = strings.TrimSpace(term)
		if term == "" {
			continue
		}
		qty := "1"
		if pieces := strings.SplitN(term, "x", 2); len(pieces) == 2 {
			qty = pieces[0]
		}
		compressed = append(compressed, fmt.Sprintf("%sxGPU_NV", qty))
	}
	if len(compressed) == 0 {
		return seg
	}
	sort.Strings(compressed)
	return strings.Join(compressed, "+")
}
|
||||
@@ -1,66 +0,0 @@
|
||||
package article
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
)
|
||||
|
||||
func TestBuild_ParsesNetAndPSU(t *testing.T) {
|
||||
local, err := localdb.New(filepath.Join(t.TempDir(), "local.db"))
|
||||
if err != nil {
|
||||
t.Fatalf("init local db: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = local.Close() })
|
||||
|
||||
if err := local.SaveLocalPricelist(&localdb.LocalPricelist{
|
||||
ServerID: 1,
|
||||
Source: "estimate",
|
||||
Version: "S-2026-02-11-001",
|
||||
Name: "test",
|
||||
CreatedAt: time.Now(),
|
||||
SyncedAt: time.Now(),
|
||||
}); err != nil {
|
||||
t.Fatalf("save local pricelist: %v", err)
|
||||
}
|
||||
localPL, err := local.GetLocalPricelistByServerID(1)
|
||||
if err != nil {
|
||||
t.Fatalf("get local pricelist: %v", err)
|
||||
}
|
||||
|
||||
if err := local.SaveLocalPricelistItems([]localdb.LocalPricelistItem{
|
||||
{PricelistID: localPL.ID, LotName: "NIC_2p25G_MCX512A-AC", LotCategory: "NIC", Price: 1},
|
||||
{PricelistID: localPL.ID, LotName: "HBA_2pFC32_Gen6", LotCategory: "HBA", Price: 1},
|
||||
{PricelistID: localPL.ID, LotName: "PS_1000W_Platinum", LotCategory: "PS", Price: 1},
|
||||
}); err != nil {
|
||||
t.Fatalf("save local items: %v", err)
|
||||
}
|
||||
|
||||
items := models.ConfigItems{
|
||||
{LotName: "NIC_2p25G_MCX512A-AC", Quantity: 1},
|
||||
{LotName: "HBA_2pFC32_Gen6", Quantity: 1},
|
||||
{LotName: "PS_1000W_Platinum", Quantity: 2},
|
||||
}
|
||||
result, err := Build(local, items, BuildOptions{
|
||||
ServerModel: "DL380GEN11",
|
||||
SupportCode: "1yW",
|
||||
ServerPricelist: &localPL.ServerID,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("build article: %v", err)
|
||||
}
|
||||
if result.Article == "" {
|
||||
t.Fatalf("expected article to be non-empty")
|
||||
}
|
||||
if contains(result.Article, "UNKNET") || contains(result.Article, "UNKPSU") {
|
||||
t.Fatalf("unexpected UNK in article: %s", result.Article)
|
||||
}
|
||||
}
|
||||
|
||||
// contains reports whether sub occurs within s; a thin readability
// wrapper over the standard library substring search.
func contains(s, sub string) bool {
	return strings.Index(s, sub) >= 0
}
|
||||
@@ -2,19 +2,21 @@ package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Server ServerConfig `yaml:"server"`
|
||||
Export ExportConfig `yaml:"export"`
|
||||
Logging LoggingConfig `yaml:"logging"`
|
||||
Backup BackupConfig `yaml:"backup"`
|
||||
Server ServerConfig `yaml:"server"`
|
||||
Database DatabaseConfig `yaml:"database"`
|
||||
Auth AuthConfig `yaml:"auth"`
|
||||
Pricing PricingConfig `yaml:"pricing"`
|
||||
Export ExportConfig `yaml:"export"`
|
||||
Alerts AlertsConfig `yaml:"alerts"`
|
||||
Notifications NotificationsConfig `yaml:"notifications"`
|
||||
Logging LoggingConfig `yaml:"logging"`
|
||||
}
|
||||
|
||||
type ServerConfig struct {
|
||||
@@ -25,6 +27,60 @@ type ServerConfig struct {
|
||||
WriteTimeout time.Duration `yaml:"write_timeout"`
|
||||
}
|
||||
|
||||
type DatabaseConfig struct {
|
||||
Host string `yaml:"host"`
|
||||
Port int `yaml:"port"`
|
||||
Name string `yaml:"name"`
|
||||
User string `yaml:"user"`
|
||||
Password string `yaml:"password"`
|
||||
MaxOpenConns int `yaml:"max_open_conns"`
|
||||
MaxIdleConns int `yaml:"max_idle_conns"`
|
||||
ConnMaxLifetime time.Duration `yaml:"conn_max_lifetime"`
|
||||
}
|
||||
|
||||
func (d *DatabaseConfig) DSN() string {
|
||||
return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local",
|
||||
d.User, d.Password, d.Host, d.Port, d.Name)
|
||||
}
|
||||
|
||||
type AuthConfig struct {
|
||||
JWTSecret string `yaml:"jwt_secret"`
|
||||
TokenExpiry time.Duration `yaml:"token_expiry"`
|
||||
RefreshExpiry time.Duration `yaml:"refresh_expiry"`
|
||||
}
|
||||
|
||||
type PricingConfig struct {
|
||||
DefaultMethod string `yaml:"default_method"`
|
||||
DefaultPeriodDays int `yaml:"default_period_days"`
|
||||
FreshnessGreenDays int `yaml:"freshness_green_days"`
|
||||
FreshnessYellowDays int `yaml:"freshness_yellow_days"`
|
||||
FreshnessRedDays int `yaml:"freshness_red_days"`
|
||||
MinQuotesForMedian int `yaml:"min_quotes_for_median"`
|
||||
PopularityDecayDays int `yaml:"popularity_decay_days"`
|
||||
}
|
||||
|
||||
type ExportConfig struct {
|
||||
TempDir string `yaml:"temp_dir"`
|
||||
MaxFileAge time.Duration `yaml:"max_file_age"`
|
||||
CompanyName string `yaml:"company_name"`
|
||||
}
|
||||
|
||||
type AlertsConfig struct {
|
||||
Enabled bool `yaml:"enabled"`
|
||||
CheckInterval time.Duration `yaml:"check_interval"`
|
||||
HighDemandThreshold int `yaml:"high_demand_threshold"`
|
||||
TrendingThresholdPercent int `yaml:"trending_threshold_percent"`
|
||||
}
|
||||
|
||||
type NotificationsConfig struct {
|
||||
EmailEnabled bool `yaml:"email_enabled"`
|
||||
SMTPHost string `yaml:"smtp_host"`
|
||||
SMTPPort int `yaml:"smtp_port"`
|
||||
SMTPUser string `yaml:"smtp_user"`
|
||||
SMTPPassword string `yaml:"smtp_password"`
|
||||
FromAddress string `yaml:"from_address"`
|
||||
}
|
||||
|
||||
type LoggingConfig struct {
|
||||
Level string `yaml:"level"`
|
||||
Format string `yaml:"format"`
|
||||
@@ -32,14 +88,6 @@ type LoggingConfig struct {
|
||||
FilePath string `yaml:"file_path"`
|
||||
}
|
||||
|
||||
// ExportConfig is kept for constructor compatibility in export services.
|
||||
// Runtime no longer persists an export section in config.yaml.
|
||||
type ExportConfig struct{}
|
||||
|
||||
type BackupConfig struct {
|
||||
Time string `yaml:"time"`
|
||||
}
|
||||
|
||||
func Load(path string) (*Config, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
@@ -73,6 +121,45 @@ func (c *Config) setDefaults() {
|
||||
c.Server.WriteTimeout = 30 * time.Second
|
||||
}
|
||||
|
||||
if c.Database.Port == 0 {
|
||||
c.Database.Port = 3306
|
||||
}
|
||||
if c.Database.MaxOpenConns == 0 {
|
||||
c.Database.MaxOpenConns = 25
|
||||
}
|
||||
if c.Database.MaxIdleConns == 0 {
|
||||
c.Database.MaxIdleConns = 5
|
||||
}
|
||||
if c.Database.ConnMaxLifetime == 0 {
|
||||
c.Database.ConnMaxLifetime = 5 * time.Minute
|
||||
}
|
||||
|
||||
if c.Auth.TokenExpiry == 0 {
|
||||
c.Auth.TokenExpiry = 24 * time.Hour
|
||||
}
|
||||
if c.Auth.RefreshExpiry == 0 {
|
||||
c.Auth.RefreshExpiry = 7 * 24 * time.Hour
|
||||
}
|
||||
|
||||
if c.Pricing.DefaultMethod == "" {
|
||||
c.Pricing.DefaultMethod = "weighted_median"
|
||||
}
|
||||
if c.Pricing.DefaultPeriodDays == 0 {
|
||||
c.Pricing.DefaultPeriodDays = 90
|
||||
}
|
||||
if c.Pricing.FreshnessGreenDays == 0 {
|
||||
c.Pricing.FreshnessGreenDays = 30
|
||||
}
|
||||
if c.Pricing.FreshnessYellowDays == 0 {
|
||||
c.Pricing.FreshnessYellowDays = 60
|
||||
}
|
||||
if c.Pricing.FreshnessRedDays == 0 {
|
||||
c.Pricing.FreshnessRedDays = 90
|
||||
}
|
||||
if c.Pricing.MinQuotesForMedian == 0 {
|
||||
c.Pricing.MinQuotesForMedian = 3
|
||||
}
|
||||
|
||||
if c.Logging.Level == "" {
|
||||
c.Logging.Level = "info"
|
||||
}
|
||||
@@ -82,12 +169,8 @@ func (c *Config) setDefaults() {
|
||||
if c.Logging.Output == "" {
|
||||
c.Logging.Output = "stdout"
|
||||
}
|
||||
|
||||
if c.Backup.Time == "" {
|
||||
c.Backup.Time = "00:00"
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Config) Address() string {
|
||||
return net.JoinHostPort(c.Server.Host, strconv.Itoa(c.Server.Port))
|
||||
return fmt.Sprintf("%s:%d", c.Server.Host, c.Server.Port)
|
||||
}
|
||||
|
||||
@@ -238,22 +238,6 @@ func (cm *ConnectionManager) Disconnect() {
|
||||
cm.lastError = nil
|
||||
}
|
||||
|
||||
// MarkOffline closes the current connection and preserves the last observed error.
|
||||
func (cm *ConnectionManager) MarkOffline(err error) {
|
||||
cm.mu.Lock()
|
||||
defer cm.mu.Unlock()
|
||||
|
||||
if cm.db != nil {
|
||||
sqlDB, dbErr := cm.db.DB()
|
||||
if dbErr == nil {
|
||||
sqlDB.Close()
|
||||
}
|
||||
}
|
||||
cm.db = nil
|
||||
cm.lastError = err
|
||||
cm.lastCheck = time.Now()
|
||||
}
|
||||
|
||||
// GetLastError returns the last connection error (thread-safe)
|
||||
func (cm *ConnectionManager) GetLastError() error {
|
||||
cm.mu.RLock()
|
||||
|
||||
113
internal/handlers/auth.go
Normal file
113
internal/handlers/auth.go
Normal file
@@ -0,0 +1,113 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/middleware"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||
)
|
||||
|
||||
// AuthHandler serves the authentication endpoints: login, token refresh,
// current-user lookup and logout.
type AuthHandler struct {
	authService *services.AuthService      // issues and validates token pairs
	userRepo    *repository.UserRepository // loads user profiles for /me
}
|
||||
|
||||
func NewAuthHandler(authService *services.AuthService, userRepo *repository.UserRepository) *AuthHandler {
|
||||
return &AuthHandler{
|
||||
authService: authService,
|
||||
userRepo: userRepo,
|
||||
}
|
||||
}
|
||||
|
||||
// LoginRequest is the JSON body of POST /login; both fields are required.
type LoginRequest struct {
	Username string `json:"username" binding:"required"`
	Password string `json:"password" binding:"required"`
}
|
||||
|
||||
// LoginResponse is the JSON payload returned on a successful login:
// a token pair, its expiry, and a trimmed-down user profile.
type LoginResponse struct {
	AccessToken  string       `json:"access_token"`
	RefreshToken string       `json:"refresh_token"`
	ExpiresAt    int64        `json:"expires_at"` // unix timestamp of access-token expiry (assumed — TODO confirm against AuthService)
	User         UserResponse `json:"user"`
}
|
||||
|
||||
// UserResponse is the public projection of a user record exposed by the
// auth endpoints (no password hash or internal fields).
type UserResponse struct {
	ID       uint   `json:"id"`
	Username string `json:"username"`
	Email    string `json:"email"`
	Role     string `json:"role"`
}
|
||||
|
||||
func (h *AuthHandler) Login(c *gin.Context) {
|
||||
var req LoginRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
tokens, user, err := h.authService.Login(req.Username, req.Password)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusUnauthorized, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, LoginResponse{
|
||||
AccessToken: tokens.AccessToken,
|
||||
RefreshToken: tokens.RefreshToken,
|
||||
ExpiresAt: tokens.ExpiresAt,
|
||||
User: UserResponse{
|
||||
ID: user.ID,
|
||||
Username: user.Username,
|
||||
Email: user.Email,
|
||||
Role: string(user.Role),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// RefreshRequest is the JSON body of the token-refresh endpoint.
type RefreshRequest struct {
	RefreshToken string `json:"refresh_token" binding:"required"`
}
|
||||
|
||||
func (h *AuthHandler) Refresh(c *gin.Context) {
|
||||
var req RefreshRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
tokens, err := h.authService.RefreshTokens(req.RefreshToken)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusUnauthorized, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, tokens)
|
||||
}
|
||||
|
||||
func (h *AuthHandler) Me(c *gin.Context) {
|
||||
claims := middleware.GetClaims(c)
|
||||
if claims == nil {
|
||||
c.JSON(http.StatusUnauthorized, gin.H{"error": "not authenticated"})
|
||||
return
|
||||
}
|
||||
|
||||
user, err := h.userRepo.GetByID(claims.UserID)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "user not found"})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, UserResponse{
|
||||
ID: user.ID,
|
||||
Username: user.Username,
|
||||
Email: user.Email,
|
||||
Role: string(user.Role),
|
||||
})
|
||||
}
|
||||
|
||||
func (h *AuthHandler) Logout(c *gin.Context) {
|
||||
// JWT is stateless, logout is handled on client by discarding tokens
|
||||
c.JSON(http.StatusOK, gin.H{"message": "logged out"})
|
||||
}
|
||||
@@ -3,10 +3,8 @@ package handlers
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||
"github.com/gin-gonic/gin"
|
||||
@@ -27,12 +25,6 @@ func NewComponentHandler(componentService *services.ComponentService, localDB *l
|
||||
func (h *ComponentHandler) List(c *gin.Context) {
|
||||
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
||||
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "20"))
|
||||
if page < 1 {
|
||||
page = 1
|
||||
}
|
||||
if perPage < 1 {
|
||||
perPage = 20
|
||||
}
|
||||
|
||||
filter := repository.ComponentFilter{
|
||||
Category: c.Query("category"),
|
||||
@@ -41,68 +33,73 @@ func (h *ComponentHandler) List(c *gin.Context) {
|
||||
ExcludeHidden: c.Query("include_hidden") != "true", // По умолчанию скрытые не показываются
|
||||
}
|
||||
|
||||
localFilter := localdb.ComponentFilter{
|
||||
Category: filter.Category,
|
||||
Search: filter.Search,
|
||||
HasPrice: filter.HasPrice,
|
||||
}
|
||||
offset := (page - 1) * perPage
|
||||
localComps, total, err := h.localDB.ListComponents(localFilter, offset, perPage)
|
||||
result, err := h.componentService.List(filter, page, perPage)
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
components := make([]services.ComponentView, len(localComps))
|
||||
for i, lc := range localComps {
|
||||
components[i] = services.ComponentView{
|
||||
LotName: lc.LotName,
|
||||
Description: lc.LotDescription,
|
||||
Category: lc.Category,
|
||||
CategoryName: lc.Category,
|
||||
Model: lc.Model,
|
||||
// If offline mode (empty result), fallback to local components
|
||||
isOffline := false
|
||||
if v, ok := c.Get("is_offline"); ok {
|
||||
if b, ok := v.(bool); ok {
|
||||
isOffline = b
|
||||
}
|
||||
}
|
||||
if isOffline && result.Total == 0 && h.localDB != nil {
|
||||
localFilter := localdb.ComponentFilter{
|
||||
Category: filter.Category,
|
||||
Search: filter.Search,
|
||||
HasPrice: filter.HasPrice,
|
||||
}
|
||||
|
||||
offset := (page - 1) * perPage
|
||||
localComps, total, err := h.localDB.ListComponents(localFilter, offset, perPage)
|
||||
if err == nil && len(localComps) > 0 {
|
||||
// Convert local components to ComponentView format
|
||||
components := make([]services.ComponentView, len(localComps))
|
||||
for i, lc := range localComps {
|
||||
components[i] = services.ComponentView{
|
||||
LotName: lc.LotName,
|
||||
Description: lc.LotDescription,
|
||||
Category: lc.Category,
|
||||
CategoryName: lc.Category, // No translation in local mode
|
||||
Model: lc.Model,
|
||||
CurrentPrice: lc.CurrentPrice,
|
||||
}
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, &services.ComponentListResult{
|
||||
Components: components,
|
||||
Total: total,
|
||||
Page: page,
|
||||
PerPage: perPage,
|
||||
})
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, &services.ComponentListResult{
|
||||
Components: components,
|
||||
Total: total,
|
||||
Page: page,
|
||||
PerPage: perPage,
|
||||
})
|
||||
c.JSON(http.StatusOK, result)
|
||||
}
|
||||
|
||||
func (h *ComponentHandler) Get(c *gin.Context) {
|
||||
lotName := c.Param("lot_name")
|
||||
component, err := h.localDB.GetLocalComponent(lotName)
|
||||
|
||||
component, err := h.componentService.GetByLotName(lotName)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "component not found"})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, services.ComponentView{
|
||||
LotName: component.LotName,
|
||||
Description: component.LotDescription,
|
||||
Category: component.Category,
|
||||
CategoryName: component.Category,
|
||||
Model: component.Model,
|
||||
})
|
||||
c.JSON(http.StatusOK, component)
|
||||
}
|
||||
|
||||
func (h *ComponentHandler) GetCategories(c *gin.Context) {
|
||||
codes, err := h.localDB.GetLocalComponentCategories()
|
||||
if err == nil && len(codes) > 0 {
|
||||
categories := make([]models.Category, 0, len(codes))
|
||||
for _, code := range codes {
|
||||
trimmed := strings.TrimSpace(code)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
categories = append(categories, models.Category{Code: trimmed, Name: trimmed})
|
||||
}
|
||||
c.JSON(http.StatusOK, categories)
|
||||
categories, err := h.componentService.GetCategories()
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, models.DefaultCategories)
|
||||
c.JSON(http.StatusOK, categories)
|
||||
}
|
||||
|
||||
239
internal/handlers/configuration.go
Normal file
239
internal/handlers/configuration.go
Normal file
@@ -0,0 +1,239 @@
|
||||
package handlers
|
||||
|
||||
import (
	"errors"
	"net/http"
	"strconv"

	"git.mchus.pro/mchus/quoteforge/internal/middleware"
	"git.mchus.pro/mchus/quoteforge/internal/services"
	"github.com/gin-gonic/gin"
)
|
||||
|
||||
// ConfigurationHandler serves CRUD, clone/rename and price-refresh
// endpoints for user-owned server configurations.
type ConfigurationHandler struct {
	configService *services.ConfigurationService // configuration persistence and business rules
	exportService *services.ExportService        // held for export-related routes (not used in this file's handlers)
}
|
||||
|
||||
func NewConfigurationHandler(
|
||||
configService *services.ConfigurationService,
|
||||
exportService *services.ExportService,
|
||||
) *ConfigurationHandler {
|
||||
return &ConfigurationHandler{
|
||||
configService: configService,
|
||||
exportService: exportService,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *ConfigurationHandler) List(c *gin.Context) {
|
||||
username := middleware.GetUsername(c)
|
||||
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
||||
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "20"))
|
||||
|
||||
configs, total, err := h.configService.ListByUser(username, page, perPage)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"configurations": configs,
|
||||
"total": total,
|
||||
"page": page,
|
||||
"per_page": perPage,
|
||||
})
|
||||
}
|
||||
|
||||
func (h *ConfigurationHandler) Create(c *gin.Context) {
|
||||
username := middleware.GetUsername(c)
|
||||
|
||||
var req services.CreateConfigRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
config, err := h.configService.Create(username, &req)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusCreated, config)
|
||||
}
|
||||
|
||||
func (h *ConfigurationHandler) Get(c *gin.Context) {
|
||||
username := middleware.GetUsername(c)
|
||||
uuid := c.Param("uuid")
|
||||
|
||||
config, err := h.configService.GetByUUID(uuid, username)
|
||||
if err != nil {
|
||||
status := http.StatusNotFound
|
||||
if err == services.ErrConfigForbidden {
|
||||
status = http.StatusForbidden
|
||||
}
|
||||
c.JSON(status, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, config)
|
||||
}
|
||||
|
||||
func (h *ConfigurationHandler) Update(c *gin.Context) {
|
||||
username := middleware.GetUsername(c)
|
||||
uuid := c.Param("uuid")
|
||||
|
||||
var req services.CreateConfigRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
config, err := h.configService.Update(uuid, username, &req)
|
||||
if err != nil {
|
||||
status := http.StatusInternalServerError
|
||||
if err == services.ErrConfigNotFound {
|
||||
status = http.StatusNotFound
|
||||
} else if err == services.ErrConfigForbidden {
|
||||
status = http.StatusForbidden
|
||||
}
|
||||
c.JSON(status, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, config)
|
||||
}
|
||||
|
||||
func (h *ConfigurationHandler) Delete(c *gin.Context) {
|
||||
username := middleware.GetUsername(c)
|
||||
uuid := c.Param("uuid")
|
||||
|
||||
err := h.configService.Delete(uuid, username)
|
||||
if err != nil {
|
||||
status := http.StatusInternalServerError
|
||||
if err == services.ErrConfigNotFound {
|
||||
status = http.StatusNotFound
|
||||
} else if err == services.ErrConfigForbidden {
|
||||
status = http.StatusForbidden
|
||||
}
|
||||
c.JSON(status, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"message": "deleted"})
|
||||
}
|
||||
|
||||
// RenameConfigRequest is the JSON body of the rename endpoint.
type RenameConfigRequest struct {
	Name string `json:"name" binding:"required"`
}
|
||||
|
||||
func (h *ConfigurationHandler) Rename(c *gin.Context) {
|
||||
username := middleware.GetUsername(c)
|
||||
uuid := c.Param("uuid")
|
||||
|
||||
var req RenameConfigRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
config, err := h.configService.Rename(uuid, username, req.Name)
|
||||
if err != nil {
|
||||
status := http.StatusInternalServerError
|
||||
if err == services.ErrConfigNotFound {
|
||||
status = http.StatusNotFound
|
||||
} else if err == services.ErrConfigForbidden {
|
||||
status = http.StatusForbidden
|
||||
}
|
||||
c.JSON(status, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, config)
|
||||
}
|
||||
|
||||
// CloneConfigRequest is the JSON body of the clone endpoint; Name is the
// display name for the copy.
type CloneConfigRequest struct {
	Name string `json:"name" binding:"required"`
}
|
||||
|
||||
func (h *ConfigurationHandler) Clone(c *gin.Context) {
|
||||
username := middleware.GetUsername(c)
|
||||
uuid := c.Param("uuid")
|
||||
|
||||
var req CloneConfigRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
config, err := h.configService.Clone(uuid, username, req.Name)
|
||||
if err != nil {
|
||||
status := http.StatusInternalServerError
|
||||
if err == services.ErrConfigNotFound {
|
||||
status = http.StatusNotFound
|
||||
} else if err == services.ErrConfigForbidden {
|
||||
status = http.StatusForbidden
|
||||
}
|
||||
c.JSON(status, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusCreated, config)
|
||||
}
|
||||
|
||||
func (h *ConfigurationHandler) RefreshPrices(c *gin.Context) {
|
||||
username := middleware.GetUsername(c)
|
||||
uuid := c.Param("uuid")
|
||||
|
||||
config, err := h.configService.RefreshPrices(uuid, username)
|
||||
if err != nil {
|
||||
status := http.StatusInternalServerError
|
||||
if err == services.ErrConfigNotFound {
|
||||
status = http.StatusNotFound
|
||||
} else if err == services.ErrConfigForbidden {
|
||||
status = http.StatusForbidden
|
||||
}
|
||||
c.JSON(status, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, config)
|
||||
}
|
||||
|
||||
// func (h *ConfigurationHandler) ExportJSON(c *gin.Context) {
|
||||
// userID := middleware.GetUserID(c)
|
||||
// uuid := c.Param("uuid")
|
||||
//
|
||||
// config, err := h.configService.GetByUUID(uuid, userID)
|
||||
// if err != nil {
|
||||
// c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// data, err := h.configService.ExportJSON(uuid, userID)
|
||||
// if err != nil {
|
||||
// c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// filename := fmt.Sprintf("%s %s SPEC.json", config.CreatedAt.Format("2006-01-02"), config.Name)
|
||||
// c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
|
||||
// c.Data(http.StatusOK, "application/json", data)
|
||||
// }
|
||||
|
||||
// func (h *ConfigurationHandler) ImportJSON(c *gin.Context) {
|
||||
// userID := middleware.GetUserID(c)
|
||||
//
|
||||
// data, err := io.ReadAll(c.Request.Body)
|
||||
// if err != nil {
|
||||
// c.JSON(http.StatusBadRequest, gin.H{"error": "failed to read body"})
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// config, err := h.configService.ImportJSON(userID, data)
|
||||
// if err != nil {
|
||||
// c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// c.JSON(http.StatusCreated, config)
|
||||
// }
|
||||
@@ -3,43 +3,34 @@ package handlers
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/middleware"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
type ExportHandler struct {
|
||||
exportService *services.ExportService
|
||||
configService services.ConfigurationGetter
|
||||
projectService *services.ProjectService
|
||||
dbUsername string
|
||||
exportService *services.ExportService
|
||||
configService services.ConfigurationGetter
|
||||
componentService *services.ComponentService
|
||||
}
|
||||
|
||||
func NewExportHandler(
|
||||
exportService *services.ExportService,
|
||||
configService services.ConfigurationGetter,
|
||||
projectService *services.ProjectService,
|
||||
dbUsername string,
|
||||
componentService *services.ComponentService,
|
||||
) *ExportHandler {
|
||||
return &ExportHandler{
|
||||
exportService: exportService,
|
||||
configService: configService,
|
||||
projectService: projectService,
|
||||
dbUsername: dbUsername,
|
||||
exportService: exportService,
|
||||
configService: configService,
|
||||
componentService: componentService,
|
||||
}
|
||||
}
|
||||
|
||||
type ExportRequest struct {
|
||||
Name string `json:"name" binding:"required"`
|
||||
ProjectName string `json:"project_name"`
|
||||
ProjectUUID string `json:"project_uuid"`
|
||||
Article string `json:"article"`
|
||||
ServerCount int `json:"server_count"`
|
||||
PricelistID *uint `json:"pricelist_id"`
|
||||
Items []struct {
|
||||
Name string `json:"name" binding:"required"`
|
||||
Items []struct {
|
||||
LotName string `json:"lot_name" binding:"required"`
|
||||
Quantity int `json:"quantity" binding:"required,min=1"`
|
||||
UnitPrice float64 `json:"unit_price"`
|
||||
@@ -47,237 +38,84 @@ type ExportRequest struct {
|
||||
Notes string `json:"notes"`
|
||||
}
|
||||
|
||||
type ProjectExportOptionsRequest struct {
|
||||
IncludeLOT bool `json:"include_lot"`
|
||||
IncludeBOM bool `json:"include_bom"`
|
||||
IncludeEstimate bool `json:"include_estimate"`
|
||||
IncludeStock bool `json:"include_stock"`
|
||||
IncludeCompetitor bool `json:"include_competitor"`
|
||||
Basis string `json:"basis"` // "fob" or "ddp"
|
||||
SaleMarkup float64 `json:"sale_markup"` // DDP multiplier; 0 defaults to 1.3
|
||||
}
|
||||
|
||||
func (h *ExportHandler) ExportCSV(c *gin.Context) {
|
||||
var req ExportRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
data := h.buildExportData(&req)
|
||||
|
||||
// Validate before streaming (can return JSON error)
|
||||
if len(data.Configs) == 0 || len(data.Configs[0].Items) == 0 {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "no items to export"})
|
||||
csvData, err := h.exportService.ToCSV(data)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
// Get project code for filename
|
||||
projectCode := req.ProjectName // legacy field: may contain code from frontend
|
||||
if projectCode == "" && req.ProjectUUID != "" {
|
||||
if project, err := h.projectService.GetByUUID(req.ProjectUUID, h.dbUsername); err == nil && project != nil {
|
||||
projectCode = project.Code
|
||||
}
|
||||
}
|
||||
if projectCode == "" {
|
||||
projectCode = req.Name
|
||||
}
|
||||
|
||||
// Set headers before streaming
|
||||
exportDate := data.CreatedAt
|
||||
articleSegment := sanitizeFilenameSegment(req.Article)
|
||||
if articleSegment == "" {
|
||||
articleSegment = "BOM"
|
||||
}
|
||||
filename := fmt.Sprintf("%s (%s) %s %s.csv", exportDate.Format("2006-01-02"), projectCode, req.Name, articleSegment)
|
||||
c.Header("Content-Type", "text/csv; charset=utf-8")
|
||||
filename := fmt.Sprintf("%s %s SPEC.csv", time.Now().Format("2006-01-02"), req.Name)
|
||||
c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
|
||||
|
||||
// Stream CSV (cannot return JSON after this point)
|
||||
if err := h.exportService.ToCSV(c.Writer, data); err != nil {
|
||||
c.Error(err) // Log only
|
||||
return
|
||||
}
|
||||
c.Data(http.StatusOK, "text/csv; charset=utf-8", csvData)
|
||||
}
|
||||
|
||||
// buildExportData converts an ExportRequest into a ProjectExportData using a temporary Configuration model
|
||||
// so that ExportService.ConfigToExportData can resolve categories via localDB.
|
||||
func (h *ExportHandler) buildExportData(req *ExportRequest) *services.ProjectExportData {
|
||||
configItems := make(models.ConfigItems, len(req.Items))
|
||||
func (h *ExportHandler) buildExportData(req *ExportRequest) *services.ExportData {
|
||||
items := make([]services.ExportItem, len(req.Items))
|
||||
var total float64
|
||||
|
||||
for i, item := range req.Items {
|
||||
configItems[i] = models.ConfigItem{
|
||||
LotName: item.LotName,
|
||||
Quantity: item.Quantity,
|
||||
UnitPrice: item.UnitPrice,
|
||||
itemTotal := item.UnitPrice * float64(item.Quantity)
|
||||
|
||||
// Получаем информацию о компоненте для заполнения категории и описания
|
||||
componentView, err := h.componentService.GetByLotName(item.LotName)
|
||||
if err != nil {
|
||||
// Если не удалось получить информацию о компоненте, используем только основные данные
|
||||
items[i] = services.ExportItem{
|
||||
LotName: item.LotName,
|
||||
Quantity: item.Quantity,
|
||||
UnitPrice: item.UnitPrice,
|
||||
TotalPrice: itemTotal,
|
||||
}
|
||||
} else {
|
||||
items[i] = services.ExportItem{
|
||||
LotName: item.LotName,
|
||||
Description: componentView.Description,
|
||||
Category: componentView.Category,
|
||||
Quantity: item.Quantity,
|
||||
UnitPrice: item.UnitPrice,
|
||||
TotalPrice: itemTotal,
|
||||
}
|
||||
}
|
||||
total += itemTotal
|
||||
}
|
||||
|
||||
serverCount := req.ServerCount
|
||||
if serverCount < 1 {
|
||||
serverCount = 1
|
||||
return &services.ExportData{
|
||||
Name: req.Name,
|
||||
Items: items,
|
||||
Total: total,
|
||||
Notes: req.Notes,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
cfg := &models.Configuration{
|
||||
Article: req.Article,
|
||||
ServerCount: serverCount,
|
||||
PricelistID: req.PricelistID,
|
||||
Items: configItems,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
return h.exportService.ConfigToExportData(cfg)
|
||||
}
|
||||
|
||||
func sanitizeFilenameSegment(value string) string {
|
||||
if strings.TrimSpace(value) == "" {
|
||||
return ""
|
||||
}
|
||||
replacer := strings.NewReplacer(
|
||||
"/", "_",
|
||||
"\\", "_",
|
||||
":", "_",
|
||||
"*", "_",
|
||||
"?", "_",
|
||||
"\"", "_",
|
||||
"<", "_",
|
||||
">", "_",
|
||||
"|", "_",
|
||||
)
|
||||
return strings.TrimSpace(replacer.Replace(value))
|
||||
}
|
||||
|
||||
func (h *ExportHandler) ExportConfigCSV(c *gin.Context) {
|
||||
username := middleware.GetUsername(c)
|
||||
uuid := c.Param("uuid")
|
||||
|
||||
// Get config before streaming (can return JSON error)
|
||||
config, err := h.configService.GetByUUID(uuid, h.dbUsername)
|
||||
config, err := h.configService.GetByUUID(uuid, username)
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusNotFound, "resource not found", err)
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
data := h.exportService.ConfigToExportData(config)
|
||||
data := h.exportService.ConfigToExportData(config, h.componentService)
|
||||
|
||||
// Validate before streaming (can return JSON error)
|
||||
if len(data.Configs) == 0 || len(data.Configs[0].Items) == 0 {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "no items to export"})
|
||||
csvData, err := h.exportService.ToCSV(data)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
// Get project code for filename
|
||||
projectCode := config.Name // fallback: use config name if no project
|
||||
if config.ProjectUUID != nil && *config.ProjectUUID != "" {
|
||||
if project, err := h.projectService.GetByUUID(*config.ProjectUUID, h.dbUsername); err == nil && project != nil {
|
||||
projectCode = project.Code
|
||||
}
|
||||
}
|
||||
|
||||
// Set headers before streaming
|
||||
// Use price update time if available, otherwise creation time
|
||||
exportDate := config.CreatedAt
|
||||
if config.PriceUpdatedAt != nil {
|
||||
exportDate = *config.PriceUpdatedAt
|
||||
}
|
||||
filename := fmt.Sprintf("%s (%s) %s BOM.csv", exportDate.Format("2006-01-02"), projectCode, config.Name)
|
||||
c.Header("Content-Type", "text/csv; charset=utf-8")
|
||||
filename := fmt.Sprintf("%s %s SPEC.csv", config.CreatedAt.Format("2006-01-02"), config.Name)
|
||||
c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
|
||||
|
||||
// Stream CSV (cannot return JSON after this point)
|
||||
if err := h.exportService.ToCSV(c.Writer, data); err != nil {
|
||||
c.Error(err) // Log only
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// ExportProjectCSV exports all active configurations of a project as a single CSV file.
|
||||
func (h *ExportHandler) ExportProjectCSV(c *gin.Context) {
|
||||
projectUUID := c.Param("uuid")
|
||||
|
||||
project, err := h.projectService.GetByUUID(projectUUID, h.dbUsername)
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusNotFound, "resource not found", err)
|
||||
return
|
||||
}
|
||||
|
||||
result, err := h.projectService.ListConfigurations(projectUUID, h.dbUsername, "active")
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(result.Configs) == 0 {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "no configurations to export"})
|
||||
return
|
||||
}
|
||||
|
||||
data := h.exportService.ProjectToExportData(result.Configs)
|
||||
|
||||
// Filename: YYYY-MM-DD (ProjectCode) BOM.csv
|
||||
filename := fmt.Sprintf("%s (%s) BOM.csv", time.Now().Format("2006-01-02"), project.Code)
|
||||
c.Header("Content-Type", "text/csv; charset=utf-8")
|
||||
c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
|
||||
|
||||
if err := h.exportService.ToCSV(c.Writer, data); err != nil {
|
||||
c.Error(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (h *ExportHandler) ExportProjectPricingCSV(c *gin.Context) {
|
||||
projectUUID := c.Param("uuid")
|
||||
|
||||
var req ProjectExportOptionsRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
||||
return
|
||||
}
|
||||
|
||||
project, err := h.projectService.GetByUUID(projectUUID, h.dbUsername)
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusNotFound, "resource not found", err)
|
||||
return
|
||||
}
|
||||
|
||||
result, err := h.projectService.ListConfigurations(projectUUID, h.dbUsername, "active")
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
return
|
||||
}
|
||||
if len(result.Configs) == 0 {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "no configurations to export"})
|
||||
return
|
||||
}
|
||||
|
||||
opts := services.ProjectPricingExportOptions{
|
||||
IncludeLOT: req.IncludeLOT,
|
||||
IncludeBOM: req.IncludeBOM,
|
||||
IncludeEstimate: req.IncludeEstimate,
|
||||
IncludeStock: req.IncludeStock,
|
||||
IncludeCompetitor: req.IncludeCompetitor,
|
||||
Basis: req.Basis,
|
||||
SaleMarkup: req.SaleMarkup,
|
||||
}
|
||||
|
||||
data, err := h.exportService.ProjectToPricingExportData(result.Configs, opts)
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
return
|
||||
}
|
||||
|
||||
basisLabel := "FOB"
|
||||
if strings.EqualFold(strings.TrimSpace(req.Basis), "ddp") {
|
||||
basisLabel = "DDP"
|
||||
}
|
||||
variantLabel := strings.TrimSpace(project.Variant)
|
||||
if variantLabel == "" {
|
||||
variantLabel = "main"
|
||||
}
|
||||
filename := fmt.Sprintf("%s (%s) %s %s.csv", time.Now().Format("2006-01-02"), project.Code, basisLabel, variantLabel)
|
||||
c.Header("Content-Type", "text/csv; charset=utf-8")
|
||||
c.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
|
||||
|
||||
if err := h.exportService.ToPricingCSV(c.Writer, data, opts); err != nil {
|
||||
c.Error(err)
|
||||
return
|
||||
}
|
||||
c.Data(http.StatusOK, "text/csv; charset=utf-8", csvData)
|
||||
}
|
||||
|
||||
@@ -1,303 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/csv"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// Mock services for testing
|
||||
type mockConfigService struct {
|
||||
config *models.Configuration
|
||||
err error
|
||||
}
|
||||
|
||||
func (m *mockConfigService) GetByUUID(uuid string, ownerUsername string) (*models.Configuration, error) {
|
||||
return m.config, m.err
|
||||
}
|
||||
|
||||
func TestExportCSV_Success(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
// Create handler with mocks
|
||||
exportSvc := services.NewExportService(config.ExportConfig{}, nil, nil)
|
||||
handler := NewExportHandler(
|
||||
exportSvc,
|
||||
&mockConfigService{},
|
||||
nil,
|
||||
"testuser",
|
||||
)
|
||||
|
||||
// Create JSON request body
|
||||
jsonBody := `{
|
||||
"name": "Test Export",
|
||||
"items": [
|
||||
{
|
||||
"lot_name": "LOT-001",
|
||||
"quantity": 2,
|
||||
"unit_price": 100.50
|
||||
}
|
||||
],
|
||||
"notes": "Test notes"
|
||||
}`
|
||||
|
||||
// Create HTTP request
|
||||
req, _ := http.NewRequest("POST", "/api/export/csv", bytes.NewBufferString(jsonBody))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
// Create response recorder
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
// Create Gin context
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = req
|
||||
|
||||
// Call handler
|
||||
handler.ExportCSV(c)
|
||||
|
||||
// Check status code
|
||||
if w.Code != http.StatusOK {
|
||||
t.Errorf("Expected status 200, got %d", w.Code)
|
||||
}
|
||||
|
||||
// Check Content-Type header
|
||||
contentType := w.Header().Get("Content-Type")
|
||||
if contentType != "text/csv; charset=utf-8" {
|
||||
t.Errorf("Expected Content-Type 'text/csv; charset=utf-8', got %q", contentType)
|
||||
}
|
||||
|
||||
// Check for BOM
|
||||
responseBody := w.Body.Bytes()
|
||||
if len(responseBody) < 3 {
|
||||
t.Fatalf("Response too short to contain BOM")
|
||||
}
|
||||
|
||||
expectedBOM := []byte{0xEF, 0xBB, 0xBF}
|
||||
actualBOM := responseBody[:3]
|
||||
if !bytes.Equal(actualBOM, expectedBOM) {
|
||||
t.Errorf("UTF-8 BOM mismatch. Expected %v, got %v", expectedBOM, actualBOM)
|
||||
}
|
||||
|
||||
// Check semicolon delimiter in CSV
|
||||
reader := csv.NewReader(bytes.NewReader(responseBody[3:]))
|
||||
reader.Comma = ';'
|
||||
|
||||
header, err := reader.Read()
|
||||
if err != nil {
|
||||
t.Errorf("Failed to parse CSV header: %v", err)
|
||||
}
|
||||
|
||||
if len(header) != 8 {
|
||||
t.Errorf("Expected 8 columns, got %d", len(header))
|
||||
}
|
||||
}
|
||||
|
||||
func TestExportCSV_InvalidRequest(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
exportSvc := services.NewExportService(config.ExportConfig{}, nil, nil)
|
||||
handler := NewExportHandler(
|
||||
exportSvc,
|
||||
&mockConfigService{},
|
||||
nil,
|
||||
"testuser",
|
||||
)
|
||||
|
||||
// Create invalid request (missing required field)
|
||||
req, _ := http.NewRequest("POST", "/api/export/csv", bytes.NewBufferString(`{"name": "Test"}`))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = req
|
||||
|
||||
handler.ExportCSV(c)
|
||||
|
||||
// Should return 400 Bad Request
|
||||
if w.Code != http.StatusBadRequest {
|
||||
t.Errorf("Expected status 400, got %d", w.Code)
|
||||
}
|
||||
|
||||
// Should return JSON error
|
||||
var errResp map[string]interface{}
|
||||
json.Unmarshal(w.Body.Bytes(), &errResp)
|
||||
if _, hasError := errResp["error"]; !hasError {
|
||||
t.Errorf("Expected error in JSON response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExportCSV_EmptyItems(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
exportSvc := services.NewExportService(config.ExportConfig{}, nil, nil)
|
||||
handler := NewExportHandler(
|
||||
exportSvc,
|
||||
&mockConfigService{},
|
||||
nil,
|
||||
"testuser",
|
||||
)
|
||||
|
||||
// Create request with empty items array - should fail binding validation
|
||||
req, _ := http.NewRequest("POST", "/api/export/csv", bytes.NewBufferString(`{"name":"Empty Export","items":[],"notes":""}`))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = req
|
||||
|
||||
handler.ExportCSV(c)
|
||||
|
||||
// Should return 400 Bad Request (validation error from gin binding)
|
||||
if w.Code != http.StatusBadRequest {
|
||||
t.Logf("Status code: %d (expected 400 for empty items)", w.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExportConfigCSV_Success(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
// Mock configuration
|
||||
mockConfig := &models.Configuration{
|
||||
UUID: "test-uuid",
|
||||
Name: "Test Config",
|
||||
OwnerUsername: "testuser",
|
||||
Items: models.ConfigItems{
|
||||
{
|
||||
LotName: "LOT-001",
|
||||
Quantity: 1,
|
||||
UnitPrice: 100.0,
|
||||
},
|
||||
},
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
exportSvc := services.NewExportService(config.ExportConfig{}, nil, nil)
|
||||
handler := NewExportHandler(
|
||||
exportSvc,
|
||||
&mockConfigService{config: mockConfig},
|
||||
nil,
|
||||
"testuser",
|
||||
)
|
||||
|
||||
// Create HTTP request
|
||||
req, _ := http.NewRequest("GET", "/api/configs/test-uuid/export", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = req
|
||||
c.Params = gin.Params{
|
||||
{Key: "uuid", Value: "test-uuid"},
|
||||
}
|
||||
|
||||
handler.ExportConfigCSV(c)
|
||||
|
||||
// Check status code
|
||||
if w.Code != http.StatusOK {
|
||||
t.Errorf("Expected status 200, got %d", w.Code)
|
||||
}
|
||||
|
||||
// Check Content-Type header
|
||||
contentType := w.Header().Get("Content-Type")
|
||||
if contentType != "text/csv; charset=utf-8" {
|
||||
t.Errorf("Expected Content-Type 'text/csv; charset=utf-8', got %q", contentType)
|
||||
}
|
||||
|
||||
// Check for BOM
|
||||
responseBody := w.Body.Bytes()
|
||||
if len(responseBody) < 3 {
|
||||
t.Fatalf("Response too short to contain BOM")
|
||||
}
|
||||
|
||||
expectedBOM := []byte{0xEF, 0xBB, 0xBF}
|
||||
actualBOM := responseBody[:3]
|
||||
if !bytes.Equal(actualBOM, expectedBOM) {
|
||||
t.Errorf("UTF-8 BOM mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExportConfigCSV_NotFound(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
exportSvc := services.NewExportService(config.ExportConfig{}, nil, nil)
|
||||
handler := NewExportHandler(
|
||||
exportSvc,
|
||||
&mockConfigService{err: errors.New("config not found")},
|
||||
nil,
|
||||
"testuser",
|
||||
)
|
||||
|
||||
req, _ := http.NewRequest("GET", "/api/configs/nonexistent-uuid/export", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = req
|
||||
c.Params = gin.Params{
|
||||
{Key: "uuid", Value: "nonexistent-uuid"},
|
||||
}
|
||||
handler.ExportConfigCSV(c)
|
||||
|
||||
// Should return 404 Not Found
|
||||
if w.Code != http.StatusNotFound {
|
||||
t.Errorf("Expected status 404, got %d", w.Code)
|
||||
}
|
||||
|
||||
// Should return JSON error
|
||||
var errResp map[string]interface{}
|
||||
json.Unmarshal(w.Body.Bytes(), &errResp)
|
||||
if _, hasError := errResp["error"]; !hasError {
|
||||
t.Errorf("Expected error in JSON response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExportConfigCSV_EmptyItems(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
// Mock configuration with empty items
|
||||
mockConfig := &models.Configuration{
|
||||
UUID: "test-uuid",
|
||||
Name: "Empty Config",
|
||||
OwnerUsername: "testuser",
|
||||
Items: models.ConfigItems{},
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
exportSvc := services.NewExportService(config.ExportConfig{}, nil, nil)
|
||||
handler := NewExportHandler(
|
||||
exportSvc,
|
||||
&mockConfigService{config: mockConfig},
|
||||
nil,
|
||||
"testuser",
|
||||
)
|
||||
|
||||
req, _ := http.NewRequest("GET", "/api/configs/test-uuid/export", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = req
|
||||
c.Params = gin.Params{
|
||||
{Key: "uuid", Value: "test-uuid"},
|
||||
}
|
||||
handler.ExportConfigCSV(c)
|
||||
|
||||
// Should return 400 Bad Request
|
||||
if w.Code != http.StatusBadRequest {
|
||||
t.Errorf("Expected status 400, got %d", w.Code)
|
||||
}
|
||||
|
||||
// Should return JSON error
|
||||
var errResp map[string]interface{}
|
||||
json.Unmarshal(w.Body.Bytes(), &errResp)
|
||||
if _, hasError := errResp["error"]; !hasError {
|
||||
t.Errorf("Expected error in JSON response")
|
||||
}
|
||||
}
|
||||
@@ -1,106 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// PartnumberBooksHandler provides read-only access to local partnumber book snapshots.
|
||||
type PartnumberBooksHandler struct {
|
||||
localDB *localdb.LocalDB
|
||||
}
|
||||
|
||||
func NewPartnumberBooksHandler(localDB *localdb.LocalDB) *PartnumberBooksHandler {
|
||||
return &PartnumberBooksHandler{localDB: localDB}
|
||||
}
|
||||
|
||||
// List returns all local partnumber book snapshots.
|
||||
// GET /api/partnumber-books
|
||||
func (h *PartnumberBooksHandler) List(c *gin.Context) {
|
||||
bookRepo := repository.NewPartnumberBookRepository(h.localDB.DB())
|
||||
books, err := bookRepo.ListBooks()
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
return
|
||||
}
|
||||
|
||||
type bookSummary struct {
|
||||
ID uint `json:"id"`
|
||||
ServerID int `json:"server_id"`
|
||||
Version string `json:"version"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
IsActive bool `json:"is_active"`
|
||||
ItemCount int64 `json:"item_count"`
|
||||
}
|
||||
|
||||
summaries := make([]bookSummary, 0, len(books))
|
||||
for _, b := range books {
|
||||
summaries = append(summaries, bookSummary{
|
||||
ID: b.ID,
|
||||
ServerID: b.ServerID,
|
||||
Version: b.Version,
|
||||
CreatedAt: b.CreatedAt.Format("2006-01-02"),
|
||||
IsActive: b.IsActive,
|
||||
ItemCount: bookRepo.CountBookItems(b.ID),
|
||||
})
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"books": summaries,
|
||||
"total": len(summaries),
|
||||
})
|
||||
}
|
||||
|
||||
// GetItems returns items for a partnumber book by server ID.
|
||||
// GET /api/partnumber-books/:id
|
||||
func (h *PartnumberBooksHandler) GetItems(c *gin.Context) {
|
||||
idStr := c.Param("id")
|
||||
id, err := strconv.ParseUint(idStr, 10, 64)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid book ID"})
|
||||
return
|
||||
}
|
||||
|
||||
bookRepo := repository.NewPartnumberBookRepository(h.localDB.DB())
|
||||
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
||||
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "100"))
|
||||
search := strings.TrimSpace(c.Query("search"))
|
||||
if page < 1 {
|
||||
page = 1
|
||||
}
|
||||
if perPage < 1 || perPage > 500 {
|
||||
perPage = 100
|
||||
}
|
||||
|
||||
// Find local book by server_id
|
||||
var book localdb.LocalPartnumberBook
|
||||
if err := h.localDB.DB().Where("server_id = ?", id).First(&book).Error; err != nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "partnumber book not found"})
|
||||
return
|
||||
}
|
||||
|
||||
items, total, err := bookRepo.GetBookItemsPage(book.ID, search, page, perPage)
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"book_id": book.ServerID,
|
||||
"version": book.Version,
|
||||
"is_active": book.IsActive,
|
||||
"partnumbers": book.PartnumbersJSON,
|
||||
"items": items,
|
||||
"total": total,
|
||||
"page": page,
|
||||
"per_page": perPage,
|
||||
"search": search,
|
||||
"book_total": bookRepo.CountBookItems(book.ID),
|
||||
"lot_count": bookRepo.CountDistinctLots(book.ID),
|
||||
})
|
||||
}
|
||||
@@ -1,120 +1,85 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/services/pricelist"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
type PricelistHandler struct {
|
||||
service *pricelist.Service
|
||||
localDB *localdb.LocalDB
|
||||
}
|
||||
|
||||
func NewPricelistHandler(localDB *localdb.LocalDB) *PricelistHandler {
|
||||
return &PricelistHandler{localDB: localDB}
|
||||
func NewPricelistHandler(service *pricelist.Service, localDB *localdb.LocalDB) *PricelistHandler {
|
||||
return &PricelistHandler{service: service, localDB: localDB}
|
||||
}
|
||||
|
||||
// List returns all pricelists with pagination.
|
||||
// List returns all pricelists with pagination
|
||||
func (h *PricelistHandler) List(c *gin.Context) {
|
||||
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
||||
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "20"))
|
||||
if page < 1 {
|
||||
page = 1
|
||||
}
|
||||
if perPage < 1 {
|
||||
perPage = 20
|
||||
}
|
||||
source := c.Query("source")
|
||||
activeOnly := c.DefaultQuery("active_only", "false") == "true"
|
||||
|
||||
localPLs, err := h.localDB.GetLocalPricelists()
|
||||
var (
|
||||
pricelists any
|
||||
total int64
|
||||
err error
|
||||
)
|
||||
|
||||
if activeOnly {
|
||||
pricelists, total, err = h.service.ListActive(page, perPage)
|
||||
} else {
|
||||
pricelists, total, err = h.service.List(page, perPage)
|
||||
}
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
if source != "" {
|
||||
filtered := localPLs[:0]
|
||||
for _, lpl := range localPLs {
|
||||
if strings.EqualFold(lpl.Source, source) {
|
||||
filtered = append(filtered, lpl)
|
||||
|
||||
// If offline (empty list), fallback to local pricelists
|
||||
if total == 0 && h.localDB != nil {
|
||||
localPLs, err := h.localDB.GetLocalPricelists()
|
||||
if err == nil && len(localPLs) > 0 {
|
||||
// Convert to PricelistSummary format
|
||||
summaries := make([]map[string]interface{}, len(localPLs))
|
||||
for i, lpl := range localPLs {
|
||||
summaries[i] = map[string]interface{}{
|
||||
"id": lpl.ServerID,
|
||||
"version": lpl.Version,
|
||||
"created_by": "sync",
|
||||
"item_count": 0, // Not tracked
|
||||
"usage_count": 0, // Not tracked in local
|
||||
"is_active": true,
|
||||
"created_at": lpl.CreatedAt,
|
||||
"synced_from": "local",
|
||||
}
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"pricelists": summaries,
|
||||
"total": len(summaries),
|
||||
"page": page,
|
||||
"per_page": perPage,
|
||||
"offline": true,
|
||||
})
|
||||
return
|
||||
}
|
||||
localPLs = filtered
|
||||
}
|
||||
type pricelistWithCount struct {
|
||||
pricelist localdb.LocalPricelist
|
||||
itemCount int64
|
||||
usageCount int
|
||||
}
|
||||
withCounts := make([]pricelistWithCount, 0, len(localPLs))
|
||||
for _, lpl := range localPLs {
|
||||
itemCount := h.localDB.CountLocalPricelistItems(lpl.ID)
|
||||
if activeOnly && itemCount == 0 {
|
||||
continue
|
||||
}
|
||||
usageCount := 0
|
||||
if lpl.IsUsed {
|
||||
usageCount = 1
|
||||
}
|
||||
withCounts = append(withCounts, pricelistWithCount{
|
||||
pricelist: lpl,
|
||||
itemCount: itemCount,
|
||||
usageCount: usageCount,
|
||||
})
|
||||
}
|
||||
localPLs = localPLs[:0]
|
||||
for _, row := range withCounts {
|
||||
localPLs = append(localPLs, row.pricelist)
|
||||
}
|
||||
sort.SliceStable(localPLs, func(i, j int) bool { return localPLs[i].CreatedAt.After(localPLs[j].CreatedAt) })
|
||||
total := len(localPLs)
|
||||
start := (page - 1) * perPage
|
||||
if start > total {
|
||||
start = total
|
||||
}
|
||||
end := start + perPage
|
||||
if end > total {
|
||||
end = total
|
||||
}
|
||||
pageSlice := localPLs[start:end]
|
||||
summaries := make([]map[string]interface{}, 0, len(pageSlice))
|
||||
for _, lpl := range pageSlice {
|
||||
itemCount := int64(0)
|
||||
usageCount := 0
|
||||
for _, row := range withCounts {
|
||||
if row.pricelist.ID == lpl.ID {
|
||||
itemCount = row.itemCount
|
||||
usageCount = row.usageCount
|
||||
break
|
||||
}
|
||||
}
|
||||
summaries = append(summaries, map[string]interface{}{
|
||||
"id": lpl.ServerID,
|
||||
"source": lpl.Source,
|
||||
"version": lpl.Version,
|
||||
"created_by": "sync",
|
||||
"item_count": itemCount,
|
||||
"usage_count": usageCount,
|
||||
"is_active": true,
|
||||
"created_at": lpl.CreatedAt,
|
||||
"synced_from": "local",
|
||||
})
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"pricelists": summaries,
|
||||
"pricelists": pricelists,
|
||||
"total": total,
|
||||
"page": page,
|
||||
"per_page": perPage,
|
||||
})
|
||||
}
|
||||
|
||||
// Get returns a single pricelist by ID.
|
||||
// Get returns a single pricelist by ID
|
||||
func (h *PricelistHandler) Get(c *gin.Context) {
|
||||
idStr := c.Param("id")
|
||||
id, err := strconv.ParseUint(idStr, 10, 32)
|
||||
@@ -123,25 +88,170 @@ func (h *PricelistHandler) Get(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
localPL, err := h.localDB.GetLocalPricelistByServerID(uint(id))
|
||||
pl, err := h.service.GetByID(uint(id))
|
||||
if err != nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "pricelist not found"})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"id": localPL.ServerID,
|
||||
"source": localPL.Source,
|
||||
"version": localPL.Version,
|
||||
"created_by": "sync",
|
||||
"item_count": h.localDB.CountLocalPricelistItems(localPL.ID),
|
||||
"is_active": true,
|
||||
"created_at": localPL.CreatedAt,
|
||||
"synced_from": "local",
|
||||
c.JSON(http.StatusOK, pl)
|
||||
}
|
||||
|
||||
// Create creates a new pricelist from current prices
|
||||
func (h *PricelistHandler) Create(c *gin.Context) {
|
||||
canWrite, debugInfo := h.service.CanWriteDebug()
|
||||
if !canWrite {
|
||||
c.JSON(http.StatusForbidden, gin.H{
|
||||
"error": "pricelist write is not allowed",
|
||||
"debug": debugInfo,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Get the database username as the creator
|
||||
createdBy := h.localDB.GetDBUser()
|
||||
if createdBy == "" {
|
||||
createdBy = "unknown"
|
||||
}
|
||||
|
||||
pl, err := h.service.CreateFromCurrentPrices(createdBy)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusCreated, pl)
|
||||
}
|
||||
|
||||
// CreateWithProgress creates a pricelist and streams progress updates over SSE.
|
||||
func (h *PricelistHandler) CreateWithProgress(c *gin.Context) {
|
||||
canWrite, debugInfo := h.service.CanWriteDebug()
|
||||
if !canWrite {
|
||||
c.JSON(http.StatusForbidden, gin.H{
|
||||
"error": "pricelist write is not allowed",
|
||||
"debug": debugInfo,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
createdBy := h.localDB.GetDBUser()
|
||||
if createdBy == "" {
|
||||
createdBy = "unknown"
|
||||
}
|
||||
|
||||
c.Header("Content-Type", "text/event-stream")
|
||||
c.Header("Cache-Control", "no-cache")
|
||||
c.Header("Connection", "keep-alive")
|
||||
c.Header("X-Accel-Buffering", "no")
|
||||
|
||||
flusher, ok := c.Writer.(http.Flusher)
|
||||
if !ok {
|
||||
pl, err := h.service.CreateFromCurrentPrices(createdBy)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusCreated, pl)
|
||||
return
|
||||
}
|
||||
|
||||
sendProgress := func(payload gin.H) {
|
||||
c.SSEvent("progress", payload)
|
||||
flusher.Flush()
|
||||
}
|
||||
|
||||
sendProgress(gin.H{"current": 0, "total": 4, "status": "starting", "message": "Запуск..."})
|
||||
pl, err := h.service.CreateFromCurrentPricesWithProgress(createdBy, func(p pricelist.CreateProgress) {
|
||||
sendProgress(gin.H{
|
||||
"current": p.Current,
|
||||
"total": p.Total,
|
||||
"status": p.Status,
|
||||
"message": p.Message,
|
||||
"updated": p.Updated,
|
||||
"errors": p.Errors,
|
||||
"lot_name": p.LotName,
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
sendProgress(gin.H{
|
||||
"current": 0,
|
||||
"total": 4,
|
||||
"status": "error",
|
||||
"message": fmt.Sprintf("Ошибка: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
sendProgress(gin.H{
|
||||
"current": 4,
|
||||
"total": 4,
|
||||
"status": "completed",
|
||||
"message": "Готово",
|
||||
"pricelist": pl,
|
||||
})
|
||||
}
|
||||
|
||||
// GetItems returns items for a pricelist with pagination.
|
||||
// Delete deletes a pricelist by ID
|
||||
func (h *PricelistHandler) Delete(c *gin.Context) {
|
||||
canWrite, debugInfo := h.service.CanWriteDebug()
|
||||
if !canWrite {
|
||||
c.JSON(http.StatusForbidden, gin.H{
|
||||
"error": "pricelist write is not allowed",
|
||||
"debug": debugInfo,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
idStr := c.Param("id")
|
||||
id, err := strconv.ParseUint(idStr, 10, 32)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid pricelist ID"})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.service.Delete(uint(id)); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"message": "pricelist deleted"})
|
||||
}
|
||||
|
||||
// SetActive toggles active flag on a pricelist.
|
||||
func (h *PricelistHandler) SetActive(c *gin.Context) {
|
||||
canWrite, debugInfo := h.service.CanWriteDebug()
|
||||
if !canWrite {
|
||||
c.JSON(http.StatusForbidden, gin.H{
|
||||
"error": "pricelist write is not allowed",
|
||||
"debug": debugInfo,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
idStr := c.Param("id")
|
||||
id, err := strconv.ParseUint(idStr, 10, 32)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid pricelist ID"})
|
||||
return
|
||||
}
|
||||
|
||||
var req struct {
|
||||
IsActive bool `json:"is_active"`
|
||||
}
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.service.SetActive(uint(id), req.IsActive); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"message": "updated", "is_active": req.IsActive})
|
||||
}
|
||||
|
||||
// GetItems returns items for a pricelist with pagination
|
||||
func (h *PricelistHandler) GetItems(c *gin.Context) {
|
||||
idStr := c.Param("id")
|
||||
id, err := strconv.ParseUint(idStr, 10, 32)
|
||||
@@ -154,126 +264,57 @@ func (h *PricelistHandler) GetItems(c *gin.Context) {
|
||||
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "50"))
|
||||
search := c.Query("search")
|
||||
|
||||
localPL, err := h.localDB.GetLocalPricelistByServerID(uint(id))
|
||||
items, total, err := h.service.GetItems(uint(id), page, perPage, search)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "pricelist not found"})
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
if page < 1 {
|
||||
page = 1
|
||||
}
|
||||
if perPage < 1 {
|
||||
perPage = 50
|
||||
}
|
||||
var items []localdb.LocalPricelistItem
|
||||
dbq := h.localDB.DB().Model(&localdb.LocalPricelistItem{}).Where("pricelist_id = ?", localPL.ID)
|
||||
if strings.TrimSpace(search) != "" {
|
||||
dbq = dbq.Where("lot_name LIKE ?", "%"+strings.TrimSpace(search)+"%")
|
||||
}
|
||||
var total int64
|
||||
if err := dbq.Count(&total).Error; err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
return
|
||||
}
|
||||
offset := (page - 1) * perPage
|
||||
|
||||
if err := dbq.Order("lot_name").Offset(offset).Limit(perPage).Find(&items).Error; err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
return
|
||||
}
|
||||
lotNames := make([]string, len(items))
|
||||
for i, item := range items {
|
||||
lotNames[i] = item.LotName
|
||||
}
|
||||
type compRow struct {
|
||||
LotName string
|
||||
LotDescription string
|
||||
}
|
||||
var comps []compRow
|
||||
if len(lotNames) > 0 {
|
||||
h.localDB.DB().Table("local_components").
|
||||
Select("lot_name, lot_description").
|
||||
Where("lot_name IN ?", lotNames).
|
||||
Scan(&comps)
|
||||
}
|
||||
descMap := make(map[string]string, len(comps))
|
||||
for _, c := range comps {
|
||||
descMap[c.LotName] = c.LotDescription
|
||||
}
|
||||
|
||||
resultItems := make([]gin.H, 0, len(items))
|
||||
for _, item := range items {
|
||||
resultItems = append(resultItems, gin.H{
|
||||
"id": item.ID,
|
||||
"lot_name": item.LotName,
|
||||
"lot_description": descMap[item.LotName],
|
||||
"price": item.Price,
|
||||
"category": item.LotCategory,
|
||||
"available_qty": item.AvailableQty,
|
||||
"partnumbers": []string(item.Partnumbers),
|
||||
"partnumber_qtys": map[string]interface{}{},
|
||||
"competitor_names": []string{},
|
||||
"price_spread_pct": nil,
|
||||
})
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"source": localPL.Source,
|
||||
"items": resultItems,
|
||||
"items": items,
|
||||
"total": total,
|
||||
"page": page,
|
||||
"per_page": perPage,
|
||||
})
|
||||
}
|
||||
|
||||
func (h *PricelistHandler) GetLotNames(c *gin.Context) {
|
||||
idStr := c.Param("id")
|
||||
id, err := strconv.ParseUint(idStr, 10, 32)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid pricelist ID"})
|
||||
return
|
||||
}
|
||||
|
||||
localPL, err := h.localDB.GetLocalPricelistByServerID(uint(id))
|
||||
if err != nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "pricelist not found"})
|
||||
return
|
||||
}
|
||||
items, err := h.localDB.GetLocalPricelistItems(localPL.ID)
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
return
|
||||
}
|
||||
lotNames := make([]string, 0, len(items))
|
||||
for _, item := range items {
|
||||
lotNames = append(lotNames, item.LotName)
|
||||
}
|
||||
sort.Strings(lotNames)
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"lot_names": lotNames,
|
||||
"total": len(lotNames),
|
||||
})
|
||||
// CanWrite returns whether the current user can create pricelists
|
||||
func (h *PricelistHandler) CanWrite(c *gin.Context) {
|
||||
canWrite, debugInfo := h.service.CanWriteDebug()
|
||||
c.JSON(http.StatusOK, gin.H{"can_write": canWrite, "debug": debugInfo})
|
||||
}
|
||||
|
||||
// GetLatest returns the most recent active pricelist.
|
||||
// GetLatest returns the most recent active pricelist
|
||||
func (h *PricelistHandler) GetLatest(c *gin.Context) {
|
||||
source := c.DefaultQuery("source", string(models.PricelistSourceEstimate))
|
||||
source = string(models.NormalizePricelistSource(source))
|
||||
|
||||
localPL, err := h.localDB.GetLatestLocalPricelistBySource(source)
|
||||
// Try to get from server first
|
||||
pl, err := h.service.GetLatestActive()
|
||||
if err != nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "no pricelists available"})
|
||||
// If offline or no server pricelists, try to get from local cache
|
||||
if h.localDB == nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "no database available"})
|
||||
return
|
||||
}
|
||||
localPL, localErr := h.localDB.GetLatestLocalPricelist()
|
||||
if localErr != nil {
|
||||
// No local pricelists either
|
||||
c.JSON(http.StatusNotFound, gin.H{
|
||||
"error": "no pricelists available",
|
||||
"local_error": localErr.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
// Return local pricelist
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"id": localPL.ServerID,
|
||||
"version": localPL.Version,
|
||||
"created_by": "sync",
|
||||
"item_count": 0, // Not tracked in local pricelists
|
||||
"is_active": true,
|
||||
"created_at": localPL.CreatedAt,
|
||||
"synced_from": "local",
|
||||
})
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"id": localPL.ServerID,
|
||||
"source": localPL.Source,
|
||||
"version": localPL.Version,
|
||||
"created_by": "sync",
|
||||
"item_count": h.localDB.CountLocalPricelistItems(localPL.ID),
|
||||
"is_active": true,
|
||||
"created_at": localPL.CreatedAt,
|
||||
"synced_from": "local",
|
||||
})
|
||||
|
||||
c.JSON(http.StatusOK, pl)
|
||||
}
|
||||
|
||||
@@ -1,161 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func TestPricelistGetItems_ReturnsLotCategoryFromLocalPricelistItems(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
local, err := localdb.New(filepath.Join(t.TempDir(), "local.db"))
|
||||
if err != nil {
|
||||
t.Fatalf("init local db: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = local.Close() })
|
||||
|
||||
if err := local.SaveLocalPricelist(&localdb.LocalPricelist{
|
||||
ServerID: 1,
|
||||
Source: "estimate",
|
||||
Version: "S-2026-02-11-001",
|
||||
Name: "test",
|
||||
CreatedAt: time.Now(),
|
||||
SyncedAt: time.Now(),
|
||||
IsUsed: false,
|
||||
}); err != nil {
|
||||
t.Fatalf("save local pricelist: %v", err)
|
||||
}
|
||||
localPL, err := local.GetLocalPricelistByServerID(1)
|
||||
if err != nil {
|
||||
t.Fatalf("get local pricelist: %v", err)
|
||||
}
|
||||
if err := local.SaveLocalPricelistItems([]localdb.LocalPricelistItem{
|
||||
{
|
||||
PricelistID: localPL.ID,
|
||||
LotName: "NO_UNDERSCORE_NAME",
|
||||
LotCategory: "CPU",
|
||||
Price: 10,
|
||||
},
|
||||
}); err != nil {
|
||||
t.Fatalf("save local pricelist items: %v", err)
|
||||
}
|
||||
|
||||
h := NewPricelistHandler(local)
|
||||
|
||||
req, _ := http.NewRequest("GET", "/api/pricelists/1/items?page=1&per_page=50", nil)
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = req
|
||||
c.Params = gin.Params{{Key: "id", Value: "1"}}
|
||||
|
||||
h.GetItems(c)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp struct {
|
||||
Items []struct {
|
||||
LotName string `json:"lot_name"`
|
||||
Category string `json:"category"`
|
||||
UnitPrice any `json:"price"`
|
||||
} `json:"items"`
|
||||
}
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("unmarshal response: %v", err)
|
||||
}
|
||||
if len(resp.Items) != 1 {
|
||||
t.Fatalf("expected 1 item, got %d", len(resp.Items))
|
||||
}
|
||||
if resp.Items[0].LotName != "NO_UNDERSCORE_NAME" {
|
||||
t.Fatalf("expected lot_name NO_UNDERSCORE_NAME, got %q", resp.Items[0].LotName)
|
||||
}
|
||||
if resp.Items[0].Category != "CPU" {
|
||||
t.Fatalf("expected category CPU, got %q", resp.Items[0].Category)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPricelistList_ActiveOnlyExcludesPricelistsWithoutItems(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
local, err := localdb.New(filepath.Join(t.TempDir(), "local_active_only.db"))
|
||||
if err != nil {
|
||||
t.Fatalf("init local db: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = local.Close() })
|
||||
|
||||
if err := local.SaveLocalPricelist(&localdb.LocalPricelist{
|
||||
ServerID: 10,
|
||||
Source: "estimate",
|
||||
Version: "E-1",
|
||||
Name: "with-items",
|
||||
CreatedAt: time.Now().Add(-time.Minute),
|
||||
SyncedAt: time.Now().Add(-time.Minute),
|
||||
}); err != nil {
|
||||
t.Fatalf("save with-items pricelist: %v", err)
|
||||
}
|
||||
withItems, err := local.GetLocalPricelistByServerID(10)
|
||||
if err != nil {
|
||||
t.Fatalf("load with-items pricelist: %v", err)
|
||||
}
|
||||
if err := local.SaveLocalPricelistItems([]localdb.LocalPricelistItem{
|
||||
{
|
||||
PricelistID: withItems.ID,
|
||||
LotName: "CPU_X",
|
||||
LotCategory: "CPU",
|
||||
Price: 100,
|
||||
},
|
||||
}); err != nil {
|
||||
t.Fatalf("save with-items pricelist items: %v", err)
|
||||
}
|
||||
|
||||
if err := local.SaveLocalPricelist(&localdb.LocalPricelist{
|
||||
ServerID: 11,
|
||||
Source: "estimate",
|
||||
Version: "E-2",
|
||||
Name: "without-items",
|
||||
CreatedAt: time.Now(),
|
||||
SyncedAt: time.Now(),
|
||||
}); err != nil {
|
||||
t.Fatalf("save without-items pricelist: %v", err)
|
||||
}
|
||||
|
||||
h := NewPricelistHandler(local)
|
||||
|
||||
req, _ := http.NewRequest("GET", "/api/pricelists?source=estimate&active_only=true", nil)
|
||||
w := httptest.NewRecorder()
|
||||
c, _ := gin.CreateTestContext(w)
|
||||
c.Request = req
|
||||
|
||||
h.List(c)
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected status 200, got %d: %s", w.Code, w.Body.String())
|
||||
}
|
||||
|
||||
var resp struct {
|
||||
Pricelists []struct {
|
||||
ID uint `json:"id"`
|
||||
} `json:"pricelists"`
|
||||
Total int `json:"total"`
|
||||
}
|
||||
if err := json.Unmarshal(w.Body.Bytes(), &resp); err != nil {
|
||||
t.Fatalf("unmarshal response: %v", err)
|
||||
}
|
||||
if resp.Total != 1 {
|
||||
t.Fatalf("expected total=1, got %d", resp.Total)
|
||||
}
|
||||
if len(resp.Pricelists) != 1 {
|
||||
t.Fatalf("expected 1 pricelist, got %d", len(resp.Pricelists))
|
||||
}
|
||||
if resp.Pricelists[0].ID != 10 {
|
||||
t.Fatalf("expected pricelist id=10, got %d", resp.Pricelists[0].ID)
|
||||
}
|
||||
}
|
||||
938
internal/handlers/pricing.go
Normal file
938
internal/handlers/pricing.go
Normal file
@@ -0,0 +1,938 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/services/alerts"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/services/pricing"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// calculateMedian sorts prices in place and returns their median; an
// empty slice yields 0. Note the caller's slice is reordered.
func calculateMedian(prices []float64) float64 {
	n := len(prices)
	if n == 0 {
		return 0
	}
	sort.Float64s(prices)
	mid := n / 2
	if n%2 == 1 {
		return prices[mid]
	}
	return (prices[mid-1] + prices[mid]) / 2
}
|
||||
|
||||
// calculateAverage returns the arithmetic mean of prices, or 0 for an
// empty slice.
func calculateAverage(prices []float64) float64 {
	if len(prices) == 0 {
		return 0
	}
	total := 0.0
	for _, price := range prices {
		total += price
	}
	return total / float64(len(prices))
}
|
||||
|
||||
// PricingHandler serves pricing-related HTTP endpoints: component price
// management, bulk recalculation, and price alerts. In offline mode some
// or all dependencies may be nil; each handler checks before use.
type PricingHandler struct {
	db             *gorm.DB                        // direct DB handle for ad-hoc queries; nil when offline
	pricingService *pricing.Service                // price statistics service
	alertService   *alerts.Service                 // price alert lifecycle service
	componentRepo  *repository.ComponentRepository // component (lot metadata) storage
	priceRepo      *repository.PriceRepository     // quote/price storage
	statsRepo      *repository.StatsRepository     // aggregate statistics storage
}
|
||||
|
||||
func NewPricingHandler(
|
||||
db *gorm.DB,
|
||||
pricingService *pricing.Service,
|
||||
alertService *alerts.Service,
|
||||
componentRepo *repository.ComponentRepository,
|
||||
priceRepo *repository.PriceRepository,
|
||||
statsRepo *repository.StatsRepository,
|
||||
) *PricingHandler {
|
||||
return &PricingHandler{
|
||||
db: db,
|
||||
pricingService: pricingService,
|
||||
alertService: alertService,
|
||||
componentRepo: componentRepo,
|
||||
priceRepo: priceRepo,
|
||||
statsRepo: statsRepo,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *PricingHandler) GetStats(c *gin.Context) {
|
||||
// Check if we're in offline mode
|
||||
if h.statsRepo == nil || h.alertService == nil {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"new_alerts_count": 0,
|
||||
"top_components": []interface{}{},
|
||||
"trending_components": []interface{}{},
|
||||
"offline": true,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
newAlerts, _ := h.alertService.GetNewAlertsCount()
|
||||
topComponents, _ := h.statsRepo.GetTopComponents(10)
|
||||
trendingComponents, _ := h.statsRepo.GetTrendingComponents(10)
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"new_alerts_count": newAlerts,
|
||||
"top_components": topComponents,
|
||||
"trending_components": trendingComponents,
|
||||
})
|
||||
}
|
||||
|
||||
// ComponentWithCount is a lot metadata record augmented with its quote
// count and the meta-articles that use it as a price source.
type ComponentWithCount struct {
	models.LotMetadata
	QuoteCount int64    `json:"quote_count"`
	UsedInMeta []string `json:"used_in_meta,omitempty"` // List of meta-articles that use this component
}
|
||||
|
||||
// ListComponents returns a paginated, filterable list of pricing
// components, each augmented with its quote count and the meta-articles
// that reference it. Offline mode responds with an empty page and a
// user-facing message.
func (h *PricingHandler) ListComponents(c *gin.Context) {
	// Check if we're in offline mode
	if h.componentRepo == nil {
		c.JSON(http.StatusOK, gin.H{
			"components": []ComponentWithCount{},
			"total":      0,
			"page":       1,
			"per_page":   20,
			"offline":    true,
			"message":    "Управление ценами доступно только в онлайн режиме",
		})
		return
	}

	page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
	perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "20"))

	filter := repository.ComponentFilter{
		Category:  c.Query("category"),
		Search:    c.Query("search"),
		SortField: c.Query("sort"),
		SortDir:   c.Query("dir"),
	}

	// Clamp pagination to sane bounds.
	if page < 1 {
		page = 1
	}
	if perPage < 1 || perPage > 100 {
		perPage = 20
	}
	offset := (page - 1) * perPage

	components, total, err := h.componentRepo.List(filter, offset, perPage)
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	// Get quote counts
	lotNames := make([]string, len(components))
	for i, comp := range components {
		lotNames[i] = comp.LotName
	}

	// Count errors are deliberately ignored; missing counts show as zero.
	counts, _ := h.priceRepo.GetQuoteCounts(lotNames)

	// Get meta usage information
	metaUsage := h.getMetaUsageMap(lotNames)

	// Combine components with counts
	result := make([]ComponentWithCount, len(components))
	for i, comp := range components {
		result[i] = ComponentWithCount{
			LotMetadata: comp,
			QuoteCount:  counts[comp.LotName],
			UsedInMeta:  metaUsage[comp.LotName],
		}
	}

	c.JSON(http.StatusOK, gin.H{
		"components": result,
		"total":      total,
		"page":       page,
		"per_page":   perPage,
	})
}
|
||||
|
||||
// getMetaUsageMap returns a map of lot_name -> list of meta-articles that
// use this component as a price source. Only lots present in lotNames are
// considered; sources ending in "*" match by prefix. Query errors are
// ignored (best effort).
func (h *PricingHandler) getMetaUsageMap(lotNames []string) map[string][]string {
	result := make(map[string][]string)

	// Get all components with meta_prices
	var metaComponents []models.LotMetadata
	h.db.Where("meta_prices IS NOT NULL AND meta_prices != ''").Find(&metaComponents)

	// Build reverse lookup: which components are used in which meta-articles
	for _, meta := range metaComponents {
		sources := strings.Split(meta.MetaPrices, ",")
		for _, source := range sources {
			source = strings.TrimSpace(source)
			if source == "" {
				continue
			}

			// Handle wildcard patterns
			if strings.HasSuffix(source, "*") {
				prefix := strings.TrimSuffix(source, "*")
				for _, lotName := range lotNames {
					// A meta-article is never reported as using itself.
					if strings.HasPrefix(lotName, prefix) && lotName != meta.LotName {
						result[lotName] = append(result[lotName], meta.LotName)
					}
				}
			} else {
				// Direct match
				for _, lotName := range lotNames {
					if lotName == source && lotName != meta.LotName {
						result[lotName] = append(result[lotName], meta.LotName)
					}
				}
			}
		}
	}

	return result
}
|
||||
|
||||
// expandMetaPrices expands a comma-separated meta_prices string into the
// concrete lot names it refers to. Entries ending in "*" are resolved by
// prefix against lot_metadata; excludeLot is never included, and
// duplicates are removed while preserving first-seen order.
func (h *PricingHandler) expandMetaPrices(metaPrices, excludeLot string) []string {
	sources := strings.Split(metaPrices, ",")
	var result []string
	seen := make(map[string]bool)

	for _, source := range sources {
		source = strings.TrimSpace(source)
		if source == "" {
			continue
		}

		if strings.HasSuffix(source, "*") {
			// Wildcard pattern - find matching lots
			prefix := strings.TrimSuffix(source, "*")
			var matchingLots []string
			h.db.Model(&models.LotMetadata{}).
				Where("lot_name LIKE ? AND lot_name != ?", prefix+"%", excludeLot).
				Pluck("lot_name", &matchingLots)
			for _, lot := range matchingLots {
				if !seen[lot] {
					result = append(result, lot)
					seen[lot] = true
				}
			}
		} else if source != excludeLot && !seen[source] {
			// Plain entry: keep as-is (note it is NOT validated against
			// lot_metadata, unlike wildcard matches).
			result = append(result, source)
			seen[source] = true
		}
	}

	return result
}
|
||||
|
||||
func (h *PricingHandler) GetComponentPricing(c *gin.Context) {
|
||||
// Check if we're in offline mode
|
||||
if h.componentRepo == nil || h.pricingService == nil {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||
"error": "Управление ценами доступно только в онлайн режиме",
|
||||
"offline": true,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
lotName := c.Param("lot_name")
|
||||
|
||||
component, err := h.componentRepo.GetByLotName(lotName)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "component not found"})
|
||||
return
|
||||
}
|
||||
|
||||
stats, err := h.pricingService.GetPriceStats(lotName, 0)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"component": component,
|
||||
"price_stats": stats,
|
||||
})
|
||||
}
|
||||
|
||||
// UpdatePriceRequest is the payload for UpdatePrice: pricing settings to
// apply to a single component identified by LotName.
type UpdatePriceRequest struct {
	LotName     string             `json:"lot_name" binding:"required"` // component to update
	Method      models.PriceMethod `json:"method"`                      // aggregation method; empty leaves it unchanged
	PeriodDays  int                `json:"period_days"`                 // quote lookback window; 0 = all time
	Coefficient float64            `json:"coefficient"`                 // percentage adjustment applied to the base price
	ManualPrice *float64           `json:"manual_price"`                // fixed price override; nil = none supplied
	ClearManual bool               `json:"clear_manual"`                // clears any existing manual price
	MetaEnabled bool               `json:"meta_enabled"`                // whether MetaPrices should be stored
	MetaPrices  string             `json:"meta_prices"`                 // comma-separated source lots (trailing * = prefix wildcard)
	MetaMethod  string             `json:"meta_method"`                 // NOTE(review): not read by UpdatePrice in this file
	MetaPeriod  int                `json:"meta_period"`                 // NOTE(review): not read by UpdatePrice in this file
	IsHidden    bool               `json:"is_hidden"`                   // hides the component from listings
}
|
||||
|
||||
// UpdatePrice updates the pricing configuration of a single component
// (method, period, coefficient, meta sources, visibility, manual price)
// and recalculates its current price unless a manual price was supplied.
// Responds with the resulting current_price.
func (h *PricingHandler) UpdatePrice(c *gin.Context) {
	// Check if we're in offline mode
	if h.db == nil {
		c.JSON(http.StatusServiceUnavailable, gin.H{
			"error":   "Обновление цен доступно только в онлайн режиме",
			"offline": true,
		})
		return
	}

	var req UpdatePriceRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	updates := map[string]interface{}{}

	// Update method if specified
	if req.Method != "" {
		updates["price_method"] = req.Method
	}

	// Update period days
	if req.PeriodDays >= 0 {
		updates["price_period_days"] = req.PeriodDays
	}

	// Update coefficient
	// NOTE(review): the coefficient is written unconditionally, so a
	// request that omits it resets the stored value to 0 — confirm intended.
	updates["price_coefficient"] = req.Coefficient

	// Handle meta prices
	if req.MetaEnabled && req.MetaPrices != "" {
		updates["meta_prices"] = req.MetaPrices
	} else {
		updates["meta_prices"] = ""
	}

	// Handle hidden flag
	updates["is_hidden"] = req.IsHidden

	// Handle manual price
	if req.ClearManual {
		updates["manual_price"] = nil
	} else if req.ManualPrice != nil {
		updates["manual_price"] = *req.ManualPrice
		// Also update current price immediately when setting manual
		updates["current_price"] = *req.ManualPrice
		updates["price_updated_at"] = time.Now()
	}

	err := h.db.Model(&models.LotMetadata{}).
		Where("lot_name = ?", req.LotName).
		Updates(updates).Error

	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	// Recalculate price if not using manual price
	if req.ManualPrice == nil {
		h.recalculateSinglePrice(req.LotName)
	}

	// Get updated component to return new price
	var comp models.LotMetadata
	h.db.Where("lot_name = ?", req.LotName).First(&comp)

	c.JSON(http.StatusOK, gin.H{
		"message":       "price updated",
		"current_price": comp.CurrentPrice,
	})
}
|
||||
|
||||
// recalculateSinglePrice recomputes and stores current_price for one
// component from its quote history in lot_log, honoring the component's
// price method, lookback period, meta-price sources, and coefficient.
// Components with a manual price are left untouched. All DB errors are
// silently ignored (best effort); only the price fields are updated so
// user settings are preserved.
func (h *PricingHandler) recalculateSinglePrice(lotName string) {
	var comp models.LotMetadata
	if err := h.db.Where("lot_name = ?", lotName).First(&comp).Error; err != nil {
		return
	}

	// Skip if manual price is set
	if comp.ManualPrice != nil && *comp.ManualPrice > 0 {
		return
	}

	periodDays := comp.PricePeriodDays
	method := comp.PriceMethod
	if method == "" {
		method = models.PriceMethodMedian
	}

	// Determine which lot names to use for price calculation
	lotNames := []string{lotName}
	if comp.MetaPrices != "" {
		lotNames = h.expandMetaPrices(comp.MetaPrices, lotName)
	}

	// Get prices based on period from all relevant lots
	var prices []float64
	for _, ln := range lotNames {
		var lotPrices []float64
		if strings.HasSuffix(ln, "*") {
			// Wildcard source: match lots by prefix with SQL LIKE.
			pattern := strings.TrimSuffix(ln, "*") + "%"
			if periodDays > 0 {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot LIKE ? AND date >= DATE_SUB(NOW(), INTERVAL ? DAY) ORDER BY price`,
					pattern, periodDays).Pluck("price", &lotPrices)
			} else {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot LIKE ? ORDER BY price`, pattern).Pluck("price", &lotPrices)
			}
		} else {
			if periodDays > 0 {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot = ? AND date >= DATE_SUB(NOW(), INTERVAL ? DAY) ORDER BY price`,
					ln, periodDays).Pluck("price", &lotPrices)
			} else {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot = ? ORDER BY price`, ln).Pluck("price", &lotPrices)
			}
		}
		prices = append(prices, lotPrices...)
	}

	// If no prices in period, try all time
	if len(prices) == 0 && periodDays > 0 {
		for _, ln := range lotNames {
			var lotPrices []float64
			if strings.HasSuffix(ln, "*") {
				pattern := strings.TrimSuffix(ln, "*") + "%"
				h.db.Raw(`SELECT price FROM lot_log WHERE lot LIKE ? ORDER BY price`, pattern).Pluck("price", &lotPrices)
			} else {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot = ? ORDER BY price`, ln).Pluck("price", &lotPrices)
			}
			prices = append(prices, lotPrices...)
		}
	}

	if len(prices) == 0 {
		return
	}

	// Calculate price based on method
	// NOTE(review): calculateMedian sorts again internally, so this
	// pre-sort is redundant but harmless.
	sortFloat64s(prices)
	var finalPrice float64
	switch method {
	case models.PriceMethodMedian:
		finalPrice = calculateMedian(prices)
	case models.PriceMethodAverage:
		finalPrice = calculateAverage(prices)
	default:
		finalPrice = calculateMedian(prices)
	}

	if finalPrice <= 0 {
		return
	}

	// Apply coefficient (treated as a percentage markup/markdown).
	if comp.PriceCoefficient != 0 {
		finalPrice = finalPrice * (1 + comp.PriceCoefficient/100)
	}

	now := time.Now()
	// Only update price, preserve all user settings
	h.db.Model(&models.LotMetadata{}).
		Where("lot_name = ?", lotName).
		Updates(map[string]interface{}{
			"current_price":    finalPrice,
			"price_updated_at": now,
		})
}
|
||||
|
||||
// RecalculateAll recomputes current prices for every component and
// streams progress to the client via Server-Sent Events. Per component:
// manual prices are skipped; components whose source lots have no quotes
// newer than their last update are counted as unchanged; otherwise the
// price is recomputed from lot_log using the component's own method,
// period, meta sources, and coefficient.
func (h *PricingHandler) RecalculateAll(c *gin.Context) {
	// Check if we're in offline mode
	if h.db == nil {
		c.JSON(http.StatusServiceUnavailable, gin.H{
			"error":   "Пересчёт цен доступен только в онлайн режиме",
			"offline": true,
		})
		return
	}

	// Set headers for SSE
	c.Header("Content-Type", "text/event-stream")
	c.Header("Cache-Control", "no-cache")
	c.Header("Connection", "keep-alive")

	// Get all components with their settings
	var components []models.LotMetadata
	h.db.Find(&components)
	total := int64(len(components))

	// Pre-load all lot names for efficient wildcard matching
	var allLotNames []string
	h.db.Model(&models.LotMetadata{}).Pluck("lot_name", &allLotNames)
	lotNameSet := make(map[string]bool, len(allLotNames))
	for _, ln := range allLotNames {
		lotNameSet[ln] = true
	}

	// Pre-load latest quote dates for all lots (for checking updates)
	type LotDate struct {
		Lot  string
		Date time.Time
	}
	var latestDates []LotDate
	h.db.Raw(`SELECT lot, MAX(date) as date FROM lot_log GROUP BY lot`).Scan(&latestDates)
	lotLatestDate := make(map[string]time.Time, len(latestDates))
	for _, ld := range latestDates {
		lotLatestDate[ld.Lot] = ld.Date
	}

	// Send initial progress
	c.SSEvent("progress", gin.H{"current": 0, "total": total, "status": "starting"})
	c.Writer.Flush()

	// Process components individually to respect their settings
	var updated, skipped, manual, unchanged, errors int
	now := time.Now()
	progressCounter := 0

	for _, comp := range components {
		progressCounter++

		// If manual price is set, skip recalculation
		// (goto jumps to the shared per-iteration progress report below).
		if comp.ManualPrice != nil && *comp.ManualPrice > 0 {
			manual++
			goto sendProgress
		}

		// Calculate price based on component's individual settings
		// (inner block keeps declarations out of the goto's jump range).
		{
			periodDays := comp.PricePeriodDays
			method := comp.PriceMethod
			if method == "" {
				method = models.PriceMethodMedian
			}

			// Determine source lots for price calculation (using cached lot names)
			var sourceLots []string
			if comp.MetaPrices != "" {
				sourceLots = expandMetaPricesWithCache(comp.MetaPrices, comp.LotName, allLotNames)
			} else {
				sourceLots = []string{comp.LotName}
			}

			if len(sourceLots) == 0 {
				skipped++
				goto sendProgress
			}

			// Check if there are new quotes since last update (using cached dates)
			if comp.PriceUpdatedAt != nil {
				hasNewData := false
				for _, lot := range sourceLots {
					if latestDate, ok := lotLatestDate[lot]; ok {
						if latestDate.After(*comp.PriceUpdatedAt) {
							hasNewData = true
							break
						}
					}
				}
				if !hasNewData {
					unchanged++
					goto sendProgress
				}
			}

			// Get prices from source lots
			var prices []float64
			if periodDays > 0 {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot IN ? AND date >= DATE_SUB(NOW(), INTERVAL ? DAY) ORDER BY price`,
					sourceLots, periodDays).Pluck("price", &prices)
			} else {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot IN ? ORDER BY price`,
					sourceLots).Pluck("price", &prices)
			}

			// If no prices in period, try all time
			if len(prices) == 0 && periodDays > 0 {
				h.db.Raw(`SELECT price FROM lot_log WHERE lot IN ? ORDER BY price`, sourceLots).Pluck("price", &prices)
			}

			if len(prices) == 0 {
				skipped++
				goto sendProgress
			}

			// Calculate price based on method
			var basePrice float64
			switch method {
			case models.PriceMethodMedian:
				basePrice = calculateMedian(prices)
			case models.PriceMethodAverage:
				basePrice = calculateAverage(prices)
			default:
				basePrice = calculateMedian(prices)
			}

			if basePrice <= 0 {
				skipped++
				goto sendProgress
			}

			finalPrice := basePrice

			// Apply coefficient
			if comp.PriceCoefficient != 0 {
				finalPrice = finalPrice * (1 + comp.PriceCoefficient/100)
			}

			// Update only price fields
			err := h.db.Model(&models.LotMetadata{}).
				Where("lot_name = ?", comp.LotName).
				Updates(map[string]interface{}{
					"current_price":    finalPrice,
					"price_updated_at": now,
				}).Error
			if err != nil {
				errors++
			} else {
				updated++
			}
		}

	sendProgress:
		// Send progress update every 10 components to reduce overhead
		if progressCounter%10 == 0 || progressCounter == int(total) {
			c.SSEvent("progress", gin.H{
				"current":   updated + skipped + manual + unchanged + errors,
				"total":     total,
				"updated":   updated,
				"skipped":   skipped,
				"manual":    manual,
				"unchanged": unchanged,
				"errors":    errors,
				"status":    "processing",
				"lot_name":  comp.LotName,
			})
			c.Writer.Flush()
		}
	}

	// Update popularity scores
	// NOTE(review): h.statsRepo is not nil-checked here (only h.db was) —
	// confirm statsRepo is always set whenever db is.
	h.statsRepo.UpdatePopularityScores()

	// Send completion
	c.SSEvent("progress", gin.H{
		"current":   updated + skipped + manual + unchanged + errors,
		"total":     total,
		"updated":   updated,
		"skipped":   skipped,
		"manual":    manual,
		"unchanged": unchanged,
		"errors":    errors,
		"status":    "completed",
	})
	c.Writer.Flush()
}
|
||||
|
||||
func (h *PricingHandler) ListAlerts(c *gin.Context) {
|
||||
// Check if we're in offline mode
|
||||
if h.db == nil {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"alerts": []interface{}{},
|
||||
"total": 0,
|
||||
"page": 1,
|
||||
"per_page": 20,
|
||||
"offline": true,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
||||
perPage, _ := strconv.Atoi(c.DefaultQuery("per_page", "20"))
|
||||
|
||||
filter := repository.AlertFilter{
|
||||
Status: models.AlertStatus(c.Query("status")),
|
||||
Severity: models.AlertSeverity(c.Query("severity")),
|
||||
Type: models.AlertType(c.Query("type")),
|
||||
LotName: c.Query("lot_name"),
|
||||
}
|
||||
|
||||
alertsList, total, err := h.alertService.List(filter, page, perPage)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"alerts": alertsList,
|
||||
"total": total,
|
||||
"page": page,
|
||||
"per_page": perPage,
|
||||
})
|
||||
}
|
||||
|
||||
func (h *PricingHandler) AcknowledgeAlert(c *gin.Context) {
|
||||
// Check if we're in offline mode
|
||||
if h.db == nil {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||
"error": "Управление алертами доступно только в онлайн режиме",
|
||||
"offline": true,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
id, err := strconv.ParseUint(c.Param("id"), 10, 32)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid alert id"})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.alertService.Acknowledge(uint(id)); err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"message": "acknowledged"})
|
||||
}
|
||||
|
||||
func (h *PricingHandler) ResolveAlert(c *gin.Context) {
|
||||
// Check if we're in offline mode
|
||||
if h.db == nil {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||
"error": "Управление алертами доступно только в онлайн режиме",
|
||||
"offline": true,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
id, err := strconv.ParseUint(c.Param("id"), 10, 32)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid alert id"})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.alertService.Resolve(uint(id)); err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"message": "resolved"})
|
||||
}
|
||||
|
||||
func (h *PricingHandler) IgnoreAlert(c *gin.Context) {
|
||||
// Check if we're in offline mode
|
||||
if h.db == nil {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||
"error": "Управление алертами доступно только в онлайн режиме",
|
||||
"offline": true,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
id, err := strconv.ParseUint(c.Param("id"), 10, 32)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid alert id"})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.alertService.Ignore(uint(id)); err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"message": "ignored"})
|
||||
}
|
||||
|
||||
// PreviewPriceRequest describes hypothetical pricing settings used by
// PreviewPrice to compute a price without persisting anything.
type PreviewPriceRequest struct {
	LotName     string  `json:"lot_name" binding:"required"` // component to preview
	Method      string  `json:"method"`                      // aggregation method (median/average)
	PeriodDays  int     `json:"period_days"`                 // quote lookback window; 0 = all time
	Coefficient float64 `json:"coefficient"`                 // percentage adjustment applied to the base price
	MetaEnabled bool    `json:"meta_enabled"`                // whether MetaPrices should be used
	MetaPrices  string  `json:"meta_prices"`                 // comma-separated source lots (trailing * = prefix wildcard)
}
|
||||
|
||||
func (h *PricingHandler) PreviewPrice(c *gin.Context) {
|
||||
// Check if we're in offline mode
|
||||
if h.db == nil {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||
"error": "Предпросмотр цены доступен только в онлайн режиме",
|
||||
"offline": true,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
var req PreviewPriceRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
// Get component
|
||||
var comp models.LotMetadata
|
||||
if err := h.db.Where("lot_name = ?", req.LotName).First(&comp).Error; err != nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "component not found"})
|
||||
return
|
||||
}
|
||||
|
||||
// Determine which lot names to use for price calculation
|
||||
lotNames := []string{req.LotName}
|
||||
if req.MetaEnabled && req.MetaPrices != "" {
|
||||
lotNames = h.expandMetaPrices(req.MetaPrices, req.LotName)
|
||||
}
|
||||
|
||||
// Get all prices for calculations (from all relevant lots)
|
||||
var allPrices []float64
|
||||
for _, lotName := range lotNames {
|
||||
var lotPrices []float64
|
||||
if strings.HasSuffix(lotName, "*") {
|
||||
// Wildcard pattern
|
||||
pattern := strings.TrimSuffix(lotName, "*") + "%"
|
||||
h.db.Raw(`SELECT price FROM lot_log WHERE lot LIKE ? ORDER BY price`, pattern).Pluck("price", &lotPrices)
|
||||
} else {
|
||||
h.db.Raw(`SELECT price FROM lot_log WHERE lot = ? ORDER BY price`, lotName).Pluck("price", &lotPrices)
|
||||
}
|
||||
allPrices = append(allPrices, lotPrices...)
|
||||
}
|
||||
|
||||
// Calculate median for all time
|
||||
var medianAllTime *float64
|
||||
if len(allPrices) > 0 {
|
||||
sortFloat64s(allPrices)
|
||||
median := calculateMedian(allPrices)
|
||||
medianAllTime = &median
|
||||
}
|
||||
|
||||
// Get quote count (from all relevant lots) - total count
|
||||
var quoteCountTotal int64
|
||||
for _, lotName := range lotNames {
|
||||
var count int64
|
||||
if strings.HasSuffix(lotName, "*") {
|
||||
pattern := strings.TrimSuffix(lotName, "*") + "%"
|
||||
h.db.Model(&models.LotLog{}).Where("lot LIKE ?", pattern).Count(&count)
|
||||
} else {
|
||||
h.db.Model(&models.LotLog{}).Where("lot = ?", lotName).Count(&count)
|
||||
}
|
||||
quoteCountTotal += count
|
||||
}
|
||||
|
||||
// Get quote count for specified period (if period is > 0)
|
||||
var quoteCountPeriod int64
|
||||
if req.PeriodDays > 0 {
|
||||
for _, lotName := range lotNames {
|
||||
var count int64
|
||||
if strings.HasSuffix(lotName, "*") {
|
||||
pattern := strings.TrimSuffix(lotName, "*") + "%"
|
||||
h.db.Raw(`SELECT COUNT(*) FROM lot_log WHERE lot LIKE ? AND date >= DATE_SUB(NOW(), INTERVAL ? DAY)`, pattern, req.PeriodDays).Scan(&count)
|
||||
} else {
|
||||
h.db.Raw(`SELECT COUNT(*) FROM lot_log WHERE lot = ? AND date >= DATE_SUB(NOW(), INTERVAL ? DAY)`, lotName, req.PeriodDays).Scan(&count)
|
||||
}
|
||||
quoteCountPeriod += count
|
||||
}
|
||||
} else {
|
||||
// If no period specified, period count equals total count
|
||||
quoteCountPeriod = quoteCountTotal
|
||||
}
|
||||
|
||||
// Get last received price (from the main lot only)
|
||||
var lastPrice struct {
|
||||
Price *float64
|
||||
Date *time.Time
|
||||
}
|
||||
h.db.Raw(`SELECT price, date FROM lot_log WHERE lot = ? ORDER BY date DESC, lot_log_id DESC LIMIT 1`, req.LotName).Scan(&lastPrice)
|
||||
|
||||
// Calculate new price based on parameters (method, period, coefficient)
|
||||
method := req.Method
|
||||
if method == "" {
|
||||
method = "median"
|
||||
}
|
||||
|
||||
var prices []float64
|
||||
if req.PeriodDays > 0 {
|
||||
for _, lotName := range lotNames {
|
||||
var lotPrices []float64
|
||||
if strings.HasSuffix(lotName, "*") {
|
||||
pattern := strings.TrimSuffix(lotName, "*") + "%"
|
||||
h.db.Raw(`SELECT price FROM lot_log WHERE lot LIKE ? AND date >= DATE_SUB(NOW(), INTERVAL ? DAY) ORDER BY price`,
|
||||
pattern, req.PeriodDays).Pluck("price", &lotPrices)
|
||||
} else {
|
||||
h.db.Raw(`SELECT price FROM lot_log WHERE lot = ? AND date >= DATE_SUB(NOW(), INTERVAL ? DAY) ORDER BY price`,
|
||||
lotName, req.PeriodDays).Pluck("price", &lotPrices)
|
||||
}
|
||||
prices = append(prices, lotPrices...)
|
||||
}
|
||||
// Fall back to all time if no prices in period
|
||||
if len(prices) == 0 {
|
||||
prices = allPrices
|
||||
}
|
||||
} else {
|
||||
prices = allPrices
|
||||
}
|
||||
|
||||
var newPrice *float64
|
||||
if len(prices) > 0 {
|
||||
sortFloat64s(prices)
|
||||
var basePrice float64
|
||||
if method == "average" {
|
||||
basePrice = calculateAverage(prices)
|
||||
} else {
|
||||
basePrice = calculateMedian(prices)
|
||||
}
|
||||
|
||||
if req.Coefficient != 0 {
|
||||
basePrice = basePrice * (1 + req.Coefficient/100)
|
||||
}
|
||||
newPrice = &basePrice
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"lot_name": req.LotName,
|
||||
"current_price": comp.CurrentPrice,
|
||||
"median_all_time": medianAllTime,
|
||||
"new_price": newPrice,
|
||||
"quote_count_total": quoteCountTotal,
|
||||
"quote_count_period": quoteCountPeriod,
|
||||
"manual_price": comp.ManualPrice,
|
||||
"last_price": lastPrice.Price,
|
||||
"last_price_date": lastPrice.Date,
|
||||
})
|
||||
}
|
||||
|
||||
// sortFloat64s sorts a slice of float64 in ascending order
|
||||
func sortFloat64s(data []float64) {
|
||||
sort.Float64s(data)
|
||||
}
|
||||
|
||||
// expandMetaPricesWithCache expands meta_prices using pre-loaded lot names (no DB queries)
|
||||
func expandMetaPricesWithCache(metaPrices, excludeLot string, allLotNames []string) []string {
|
||||
sources := strings.Split(metaPrices, ",")
|
||||
var result []string
|
||||
seen := make(map[string]bool)
|
||||
|
||||
for _, source := range sources {
|
||||
source = strings.TrimSpace(source)
|
||||
if source == "" || source == excludeLot {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasSuffix(source, "*") {
|
||||
// Wildcard pattern - find matching lots from cache
|
||||
prefix := strings.TrimSuffix(source, "*")
|
||||
for _, lot := range allLotNames {
|
||||
if strings.HasPrefix(lot, prefix) && lot != excludeLot && !seen[lot] {
|
||||
result = append(result, lot)
|
||||
seen[lot] = true
|
||||
}
|
||||
}
|
||||
} else if !seen[source] {
|
||||
result = append(result, source)
|
||||
seen[source] = true
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
@@ -3,8 +3,8 @@ package handlers
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||
"github.com/gin-gonic/gin"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||
)
|
||||
|
||||
type QuoteHandler struct {
|
||||
@@ -18,13 +18,13 @@ func NewQuoteHandler(quoteService *services.QuoteService) *QuoteHandler {
|
||||
func (h *QuoteHandler) Validate(c *gin.Context) {
|
||||
var req services.QuoteRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
result, err := h.quoteService.ValidateAndCalculate(&req)
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -34,13 +34,13 @@ func (h *QuoteHandler) Validate(c *gin.Context) {
|
||||
func (h *QuoteHandler) Calculate(c *gin.Context) {
|
||||
var req services.QuoteRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
result, err := h.quoteService.ValidateAndCalculate(&req)
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -49,19 +49,3 @@ func (h *QuoteHandler) Calculate(c *gin.Context) {
|
||||
"total": result.Total,
|
||||
})
|
||||
}
|
||||
|
||||
func (h *QuoteHandler) PriceLevels(c *gin.Context) {
|
||||
var req services.PriceLevelsRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
||||
return
|
||||
}
|
||||
|
||||
result, err := h.quoteService.CalculatePriceLevels(&req)
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, result)
|
||||
}
|
||||
|
||||
@@ -1,73 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RespondError(c *gin.Context, status int, fallback string, err error) {
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
}
|
||||
c.JSON(status, gin.H{"error": clientFacingErrorMessage(status, fallback, err)})
|
||||
}
|
||||
|
||||
func clientFacingErrorMessage(status int, fallback string, err error) string {
|
||||
if err == nil {
|
||||
return fallback
|
||||
}
|
||||
if status >= 500 {
|
||||
return fallback
|
||||
}
|
||||
if isRequestDecodeError(err) {
|
||||
return fallback
|
||||
}
|
||||
|
||||
message := strings.TrimSpace(err.Error())
|
||||
if message == "" {
|
||||
return fallback
|
||||
}
|
||||
if looksTechnicalError(message) {
|
||||
return fallback
|
||||
}
|
||||
return message
|
||||
}
|
||||
|
||||
func isRequestDecodeError(err error) bool {
|
||||
var syntaxErr *json.SyntaxError
|
||||
if errors.As(err, &syntaxErr) {
|
||||
return true
|
||||
}
|
||||
|
||||
var unmarshalTypeErr *json.UnmarshalTypeError
|
||||
if errors.As(err, &unmarshalTypeErr) {
|
||||
return true
|
||||
}
|
||||
|
||||
return errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF)
|
||||
}
|
||||
|
||||
func looksTechnicalError(message string) bool {
|
||||
lower := strings.ToLower(strings.TrimSpace(message))
|
||||
needles := []string{
|
||||
"sql",
|
||||
"gorm",
|
||||
"driver",
|
||||
"constraint",
|
||||
"syntax error",
|
||||
"unexpected eof",
|
||||
"record not found",
|
||||
"no such table",
|
||||
"stack trace",
|
||||
}
|
||||
for _, needle := range needles {
|
||||
if strings.Contains(lower, needle) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestClientFacingErrorMessageKeepsDomain4xx(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
got := clientFacingErrorMessage(400, "invalid request", &json.SyntaxError{Offset: 1})
|
||||
if got != "invalid request" {
|
||||
t.Fatalf("expected fallback for decode error, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientFacingErrorMessagePreservesBusinessMessage(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
err := errString("main project variant cannot be deleted")
|
||||
got := clientFacingErrorMessage(400, "invalid request", err)
|
||||
if got != err.Error() {
|
||||
t.Fatalf("expected business message, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientFacingErrorMessageHidesTechnical4xx(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
err := errString("sql: no rows in result set")
|
||||
got := clientFacingErrorMessage(404, "resource not found", err)
|
||||
if got != "resource not found" {
|
||||
t.Fatalf("expected fallback for technical error, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
type errString string
|
||||
|
||||
func (e errString) Error() string {
|
||||
return string(e)
|
||||
}
|
||||
@@ -1,12 +1,12 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
@@ -14,8 +14,7 @@ import (
|
||||
"git.mchus.pro/mchus/quoteforge/internal/db"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
"github.com/gin-gonic/gin"
|
||||
mysqlDriver "github.com/go-sql-driver/mysql"
|
||||
gormmysql "gorm.io/driver/mysql"
|
||||
"gorm.io/driver/mysql"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
)
|
||||
@@ -27,9 +26,7 @@ type SetupHandler struct {
|
||||
restartSig chan struct{}
|
||||
}
|
||||
|
||||
var errPermissionProbeRollback = errors.New("permission probe rollback")
|
||||
|
||||
func NewSetupHandler(localDB *localdb.LocalDB, connMgr *db.ConnectionManager, _ string, restartSig chan struct{}) (*SetupHandler, error) {
|
||||
func NewSetupHandler(localDB *localdb.LocalDB, connMgr *db.ConnectionManager, templatesPath string, restartSig chan struct{}) (*SetupHandler, error) {
|
||||
funcMap := template.FuncMap{
|
||||
"sub": func(a, b int) int { return a - b },
|
||||
"add": func(a, b int) int { return a + b },
|
||||
@@ -38,9 +35,14 @@ func NewSetupHandler(localDB *localdb.LocalDB, connMgr *db.ConnectionManager, _
|
||||
templates := make(map[string]*template.Template)
|
||||
|
||||
// Load setup template (standalone, no base needed)
|
||||
setupPath := filepath.Join(templatesPath, "setup.html")
|
||||
var tmpl *template.Template
|
||||
var err error
|
||||
tmpl, err = template.New("").Funcs(funcMap).ParseFS(qfassets.TemplatesFS, "web/templates/setup.html")
|
||||
if stat, statErr := os.Stat(templatesPath); statErr == nil && stat.IsDir() {
|
||||
tmpl, err = template.New("").Funcs(funcMap).ParseFiles(setupPath)
|
||||
} else {
|
||||
tmpl, err = template.New("").Funcs(funcMap).ParseFS(qfassets.TemplatesFS, "web/templates/setup.html")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing setup template: %w", err)
|
||||
}
|
||||
@@ -67,8 +69,7 @@ func (h *SetupHandler) ShowSetup(c *gin.Context) {
|
||||
|
||||
tmpl := h.templates["setup.html"]
|
||||
if err := tmpl.ExecuteTemplate(c.Writer, "setup.html", data); err != nil {
|
||||
_ = c.Error(err)
|
||||
c.String(http.StatusInternalServerError, "Template error")
|
||||
c.String(http.StatusInternalServerError, "Template error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,17 +93,51 @@ func (h *SetupHandler) TestConnection(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
dsn := buildMySQLDSN(host, port, database, user, password, 5*time.Second)
|
||||
lotCount, canWrite, err := validateMariaDBConnection(dsn)
|
||||
dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local&timeout=5s",
|
||||
user, password, host, port, database)
|
||||
|
||||
db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
"error": "Connection check failed",
|
||||
"error": fmt.Sprintf("Connection failed: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
sqlDB, err := db.DB()
|
||||
if err != nil {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
"error": fmt.Sprintf("Failed to get database handle: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
defer sqlDB.Close()
|
||||
|
||||
if err := sqlDB.Ping(); err != nil {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
"error": fmt.Sprintf("Ping failed: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Check for required tables
|
||||
var lotCount int64
|
||||
if err := db.Table("lot").Count(&lotCount).Error; err != nil {
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": false,
|
||||
"error": fmt.Sprintf("Table 'lot' not found or inaccessible: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Check write permission
|
||||
canWrite := testWritePermission(db)
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": true,
|
||||
"lot_count": lotCount,
|
||||
@@ -134,22 +169,28 @@ func (h *SetupHandler) SaveConnection(c *gin.Context) {
|
||||
}
|
||||
|
||||
// Test connection first
|
||||
dsn := buildMySQLDSN(host, port, database, user, password, 5*time.Second)
|
||||
if _, _, err := validateMariaDBConnection(dsn); err != nil {
|
||||
_ = c.Error(err)
|
||||
dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local&timeout=5s",
|
||||
user, password, host, port, database)
|
||||
|
||||
db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{
|
||||
"success": false,
|
||||
"error": "Connection check failed",
|
||||
"error": fmt.Sprintf("Connection failed: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
sqlDB, _ := db.DB()
|
||||
sqlDB.Close()
|
||||
|
||||
// Save settings
|
||||
if err := h.localDB.SaveSettings(host, port, database, user, password); err != nil {
|
||||
_ = c.Error(err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{
|
||||
"success": false,
|
||||
"error": "Failed to save settings",
|
||||
"error": fmt.Sprintf("Failed to save settings: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
@@ -198,62 +239,18 @@ func (h *SetupHandler) GetStatus(c *gin.Context) {
|
||||
})
|
||||
}
|
||||
|
||||
func buildMySQLDSN(host string, port int, database, user, password string, timeout time.Duration) string {
|
||||
cfg := mysqlDriver.NewConfig()
|
||||
cfg.User = user
|
||||
cfg.Passwd = password
|
||||
cfg.Net = "tcp"
|
||||
cfg.Addr = net.JoinHostPort(host, strconv.Itoa(port))
|
||||
cfg.DBName = database
|
||||
cfg.ParseTime = true
|
||||
cfg.Loc = time.Local
|
||||
cfg.Timeout = timeout
|
||||
cfg.Params = map[string]string{
|
||||
"charset": "utf8mb4",
|
||||
}
|
||||
return cfg.FormatDSN()
|
||||
}
|
||||
func testWritePermission(db *gorm.DB) bool {
|
||||
// Simple check: try to create a temporary table and drop it
|
||||
testTable := fmt.Sprintf("qt_write_test_%d", time.Now().UnixNano())
|
||||
|
||||
func validateMariaDBConnection(dsn string) (int64, bool, error) {
|
||||
db, err := gorm.Open(gormmysql.Open(dsn), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
// Try to create a test table
|
||||
err := db.Exec(fmt.Sprintf("CREATE TABLE %s (id INT)", testTable)).Error
|
||||
if err != nil {
|
||||
return 0, false, fmt.Errorf("open MariaDB connection: %w", err)
|
||||
return false
|
||||
}
|
||||
|
||||
sqlDB, err := db.DB()
|
||||
if err != nil {
|
||||
return 0, false, fmt.Errorf("get database handle: %w", err)
|
||||
}
|
||||
defer sqlDB.Close()
|
||||
// Drop it immediately
|
||||
db.Exec(fmt.Sprintf("DROP TABLE %s", testTable))
|
||||
|
||||
if err := sqlDB.Ping(); err != nil {
|
||||
return 0, false, fmt.Errorf("ping MariaDB: %w", err)
|
||||
}
|
||||
|
||||
var lotCount int64
|
||||
if err := db.Table("lot").Count(&lotCount).Error; err != nil {
|
||||
return 0, false, fmt.Errorf("check required table lot: %w", err)
|
||||
}
|
||||
|
||||
return lotCount, testSyncWritePermission(db), nil
|
||||
}
|
||||
|
||||
func testSyncWritePermission(db *gorm.DB) bool {
|
||||
sentinel := fmt.Sprintf("quoteforge-permission-check-%d", time.Now().UnixNano())
|
||||
err := db.Transaction(func(tx *gorm.DB) error {
|
||||
if err := tx.Exec(`
|
||||
INSERT INTO qt_client_schema_state (username, hostname, last_checked_at, updated_at)
|
||||
VALUES (?, ?, NOW(), NOW())
|
||||
ON DUPLICATE KEY UPDATE
|
||||
last_checked_at = VALUES(last_checked_at),
|
||||
updated_at = VALUES(updated_at)
|
||||
`, sentinel, "setup-check").Error; err != nil {
|
||||
return err
|
||||
}
|
||||
return errPermissionProbeRollback
|
||||
})
|
||||
|
||||
return errors.Is(err, errPermissionProbeRollback)
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -1,13 +1,11 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"strings"
|
||||
stdsync "sync"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
qfassets "git.mchus.pro/mchus/quoteforge"
|
||||
@@ -19,148 +17,87 @@ import (
|
||||
|
||||
// SyncHandler handles sync API endpoints
|
||||
type SyncHandler struct {
|
||||
localDB *localdb.LocalDB
|
||||
syncService *sync.Service
|
||||
connMgr *db.ConnectionManager
|
||||
autoSyncInterval time.Duration
|
||||
onlineGraceFactor float64
|
||||
tmpl *template.Template
|
||||
readinessMu stdsync.Mutex
|
||||
readinessCached *sync.SyncReadiness
|
||||
readinessCachedAt time.Time
|
||||
localDB *localdb.LocalDB
|
||||
syncService *sync.Service
|
||||
connMgr *db.ConnectionManager
|
||||
tmpl *template.Template
|
||||
}
|
||||
|
||||
// NewSyncHandler creates a new sync handler
|
||||
func NewSyncHandler(localDB *localdb.LocalDB, syncService *sync.Service, connMgr *db.ConnectionManager, _ string, autoSyncInterval time.Duration) (*SyncHandler, error) {
|
||||
func NewSyncHandler(localDB *localdb.LocalDB, syncService *sync.Service, connMgr *db.ConnectionManager, templatesPath string) (*SyncHandler, error) {
|
||||
// Load sync_status partial template
|
||||
tmpl, err := template.ParseFS(qfassets.TemplatesFS, "web/templates/partials/sync_status.html")
|
||||
partialPath := filepath.Join(templatesPath, "partials", "sync_status.html")
|
||||
var tmpl *template.Template
|
||||
var err error
|
||||
if stat, statErr := os.Stat(templatesPath); statErr == nil && stat.IsDir() {
|
||||
tmpl, err = template.ParseFiles(partialPath)
|
||||
} else {
|
||||
tmpl, err = template.ParseFS(qfassets.TemplatesFS, "web/templates/partials/sync_status.html")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &SyncHandler{
|
||||
localDB: localDB,
|
||||
syncService: syncService,
|
||||
connMgr: connMgr,
|
||||
autoSyncInterval: autoSyncInterval,
|
||||
onlineGraceFactor: 1.10,
|
||||
tmpl: tmpl,
|
||||
localDB: localDB,
|
||||
syncService: syncService,
|
||||
connMgr: connMgr,
|
||||
tmpl: tmpl,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SyncStatusResponse represents the sync status
|
||||
type SyncStatusResponse struct {
|
||||
LastComponentSync *time.Time `json:"last_component_sync"`
|
||||
LastPricelistSync *time.Time `json:"last_pricelist_sync"`
|
||||
LastPricelistAttemptAt *time.Time `json:"last_pricelist_attempt_at,omitempty"`
|
||||
LastPricelistSyncStatus string `json:"last_pricelist_sync_status,omitempty"`
|
||||
LastPricelistSyncError string `json:"last_pricelist_sync_error,omitempty"`
|
||||
HasIncompleteServerSync bool `json:"has_incomplete_server_sync"`
|
||||
KnownServerChangesMiss bool `json:"known_server_changes_missing"`
|
||||
IsOnline bool `json:"is_online"`
|
||||
ComponentsCount int64 `json:"components_count"`
|
||||
PricelistsCount int64 `json:"pricelists_count"`
|
||||
ServerPricelists int `json:"server_pricelists"`
|
||||
NeedComponentSync bool `json:"need_component_sync"`
|
||||
NeedPricelistSync bool `json:"need_pricelist_sync"`
|
||||
Readiness *sync.SyncReadiness `json:"readiness,omitempty"`
|
||||
}
|
||||
|
||||
type SyncReadinessResponse struct {
|
||||
Status string `json:"status"`
|
||||
Blocked bool `json:"blocked"`
|
||||
ReasonCode string `json:"reason_code,omitempty"`
|
||||
ReasonText string `json:"reason_text,omitempty"`
|
||||
RequiredMinAppVersion *string `json:"required_min_app_version,omitempty"`
|
||||
LastCheckedAt *time.Time `json:"last_checked_at,omitempty"`
|
||||
LastComponentSync *time.Time `json:"last_component_sync"`
|
||||
LastPricelistSync *time.Time `json:"last_pricelist_sync"`
|
||||
IsOnline bool `json:"is_online"`
|
||||
ComponentsCount int64 `json:"components_count"`
|
||||
PricelistsCount int64 `json:"pricelists_count"`
|
||||
ServerPricelists int `json:"server_pricelists"`
|
||||
NeedComponentSync bool `json:"need_component_sync"`
|
||||
NeedPricelistSync bool `json:"need_pricelist_sync"`
|
||||
}
|
||||
|
||||
// GetStatus returns current sync status
|
||||
// GET /api/sync/status
|
||||
func (h *SyncHandler) GetStatus(c *gin.Context) {
|
||||
connStatus := h.connMgr.GetStatus()
|
||||
isOnline := connStatus.IsConnected && strings.TrimSpace(connStatus.LastError) == ""
|
||||
// Check online status by pinging MariaDB
|
||||
isOnline := h.checkOnline()
|
||||
|
||||
// Get sync times
|
||||
lastComponentSync := h.localDB.GetComponentSyncTime()
|
||||
lastPricelistSync := h.localDB.GetLastSyncTime()
|
||||
|
||||
// Get counts
|
||||
componentsCount := h.localDB.CountLocalComponents()
|
||||
pricelistsCount := h.localDB.CountLocalPricelists()
|
||||
lastPricelistAttemptAt := h.localDB.GetLastPricelistSyncAttemptAt()
|
||||
lastPricelistSyncStatus := h.localDB.GetLastPricelistSyncStatus()
|
||||
lastPricelistSyncError := h.localDB.GetLastPricelistSyncError()
|
||||
hasFailedSync := strings.EqualFold(lastPricelistSyncStatus, "failed")
|
||||
|
||||
// Get server pricelist count if online
|
||||
serverPricelists := 0
|
||||
needPricelistSync := false
|
||||
if isOnline {
|
||||
status, err := h.syncService.GetStatus()
|
||||
if err == nil {
|
||||
serverPricelists = status.ServerPricelists
|
||||
needPricelistSync = status.NeedsSync
|
||||
}
|
||||
}
|
||||
|
||||
// Check if component sync is needed (older than 24 hours)
|
||||
needComponentSync := h.localDB.NeedComponentSync(24)
|
||||
readiness := h.getReadinessLocal()
|
||||
|
||||
c.JSON(http.StatusOK, SyncStatusResponse{
|
||||
LastComponentSync: lastComponentSync,
|
||||
LastPricelistSync: lastPricelistSync,
|
||||
LastPricelistAttemptAt: lastPricelistAttemptAt,
|
||||
LastPricelistSyncStatus: lastPricelistSyncStatus,
|
||||
LastPricelistSyncError: lastPricelistSyncError,
|
||||
HasIncompleteServerSync: hasFailedSync,
|
||||
KnownServerChangesMiss: hasFailedSync,
|
||||
IsOnline: isOnline,
|
||||
ComponentsCount: componentsCount,
|
||||
PricelistsCount: pricelistsCount,
|
||||
ServerPricelists: 0,
|
||||
NeedComponentSync: needComponentSync,
|
||||
NeedPricelistSync: lastPricelistSync == nil || hasFailedSync,
|
||||
Readiness: readiness,
|
||||
LastComponentSync: lastComponentSync,
|
||||
LastPricelistSync: lastPricelistSync,
|
||||
IsOnline: isOnline,
|
||||
ComponentsCount: componentsCount,
|
||||
PricelistsCount: pricelistsCount,
|
||||
ServerPricelists: serverPricelists,
|
||||
NeedComponentSync: needComponentSync,
|
||||
NeedPricelistSync: needPricelistSync,
|
||||
})
|
||||
}
|
||||
|
||||
// GetReadiness returns sync readiness guard status.
|
||||
// GET /api/sync/readiness
|
||||
func (h *SyncHandler) GetReadiness(c *gin.Context) {
|
||||
readiness, err := h.syncService.GetReadiness()
|
||||
if err != nil && readiness == nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
return
|
||||
}
|
||||
if readiness == nil {
|
||||
c.JSON(http.StatusOK, SyncReadinessResponse{Status: sync.ReadinessUnknown, Blocked: false})
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, SyncReadinessResponse{
|
||||
Status: readiness.Status,
|
||||
Blocked: readiness.Blocked,
|
||||
ReasonCode: readiness.ReasonCode,
|
||||
ReasonText: readiness.ReasonText,
|
||||
RequiredMinAppVersion: readiness.RequiredMinAppVersion,
|
||||
LastCheckedAt: readiness.LastCheckedAt,
|
||||
})
|
||||
}
|
||||
|
||||
func (h *SyncHandler) ensureSyncReadiness(c *gin.Context) bool {
|
||||
readiness, err := h.syncService.EnsureReadinessForSync()
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
blocked := &sync.SyncBlockedError{}
|
||||
if errors.As(err, &blocked) {
|
||||
c.JSON(http.StatusLocked, gin.H{
|
||||
"success": false,
|
||||
"error": blocked.Error(),
|
||||
"reason_code": blocked.Readiness.ReasonCode,
|
||||
"reason_text": blocked.Readiness.ReasonText,
|
||||
"required_min_app_version": blocked.Readiness.RequiredMinAppVersion,
|
||||
"status": blocked.Readiness.Status,
|
||||
"blocked": true,
|
||||
"last_checked_at": blocked.Readiness.LastCheckedAt,
|
||||
})
|
||||
return false
|
||||
}
|
||||
|
||||
c.JSON(http.StatusInternalServerError, gin.H{
|
||||
"success": false,
|
||||
"error": "internal server error",
|
||||
})
|
||||
_ = c.Error(err)
|
||||
_ = readiness
|
||||
return false
|
||||
}
|
||||
|
||||
// SyncResultResponse represents sync operation result
|
||||
type SyncResultResponse struct {
|
||||
Success bool `json:"success"`
|
||||
@@ -172,7 +109,11 @@ type SyncResultResponse struct {
|
||||
// SyncComponents syncs components from MariaDB to local SQLite
|
||||
// POST /api/sync/components
|
||||
func (h *SyncHandler) SyncComponents(c *gin.Context) {
|
||||
if !h.ensureSyncReadiness(c) {
|
||||
if !h.checkOnline() {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||
"success": false,
|
||||
"error": "Database is offline",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -181,9 +122,8 @@ func (h *SyncHandler) SyncComponents(c *gin.Context) {
|
||||
if err != nil {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||
"success": false,
|
||||
"error": "database connection failed",
|
||||
"error": "Database connection failed: " + err.Error(),
|
||||
})
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -192,9 +132,8 @@ func (h *SyncHandler) SyncComponents(c *gin.Context) {
|
||||
slog.Error("component sync failed", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{
|
||||
"success": false,
|
||||
"error": "component sync failed",
|
||||
"error": err.Error(),
|
||||
})
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -209,7 +148,11 @@ func (h *SyncHandler) SyncComponents(c *gin.Context) {
|
||||
// SyncPricelists syncs pricelists from MariaDB to local SQLite
|
||||
// POST /api/sync/pricelists
|
||||
func (h *SyncHandler) SyncPricelists(c *gin.Context) {
|
||||
if !h.ensureSyncReadiness(c) {
|
||||
if !h.checkOnline() {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||
"success": false,
|
||||
"error": "Database is offline",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -219,9 +162,8 @@ func (h *SyncHandler) SyncPricelists(c *gin.Context) {
|
||||
slog.Error("pricelist sync failed", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{
|
||||
"success": false,
|
||||
"error": "pricelist sync failed",
|
||||
"error": err.Error(),
|
||||
})
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -231,85 +173,38 @@ func (h *SyncHandler) SyncPricelists(c *gin.Context) {
|
||||
Synced: synced,
|
||||
Duration: time.Since(startTime).String(),
|
||||
})
|
||||
h.syncService.RecordSyncHeartbeat()
|
||||
}
|
||||
|
||||
// SyncPartnumberBooks syncs partnumber book snapshots from MariaDB to local SQLite.
|
||||
// POST /api/sync/partnumber-books
|
||||
func (h *SyncHandler) SyncPartnumberBooks(c *gin.Context) {
|
||||
if !h.ensureSyncReadiness(c) {
|
||||
return
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
pulled, err := h.syncService.PullPartnumberBooks()
|
||||
if err != nil {
|
||||
slog.Error("partnumber books pull failed", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{
|
||||
"success": false,
|
||||
"error": "partnumber books sync failed",
|
||||
})
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, SyncResultResponse{
|
||||
Success: true,
|
||||
Message: "Partnumber books synced successfully",
|
||||
Synced: pulled,
|
||||
Duration: time.Since(startTime).String(),
|
||||
})
|
||||
h.syncService.RecordSyncHeartbeat()
|
||||
}
|
||||
|
||||
// SyncAllResponse represents result of full sync
|
||||
type SyncAllResponse struct {
|
||||
Success bool `json:"success"`
|
||||
Message string `json:"message"`
|
||||
PendingPushed int `json:"pending_pushed"`
|
||||
ComponentsSynced int `json:"components_synced"`
|
||||
PricelistsSynced int `json:"pricelists_synced"`
|
||||
ProjectsImported int `json:"projects_imported"`
|
||||
ProjectsUpdated int `json:"projects_updated"`
|
||||
ProjectsSkipped int `json:"projects_skipped"`
|
||||
ConfigurationsImported int `json:"configurations_imported"`
|
||||
ConfigurationsUpdated int `json:"configurations_updated"`
|
||||
ConfigurationsSkipped int `json:"configurations_skipped"`
|
||||
Duration string `json:"duration"`
|
||||
Success bool `json:"success"`
|
||||
Message string `json:"message"`
|
||||
ComponentsSynced int `json:"components_synced"`
|
||||
PricelistsSynced int `json:"pricelists_synced"`
|
||||
Duration string `json:"duration"`
|
||||
}
|
||||
|
||||
// SyncAll performs full bidirectional sync:
|
||||
// - push pending local changes (projects/configurations) to server
|
||||
// - pull components, pricelists, projects, and configurations from server
|
||||
// SyncAll syncs both components and pricelists
|
||||
// POST /api/sync/all
|
||||
func (h *SyncHandler) SyncAll(c *gin.Context) {
|
||||
if !h.ensureSyncReadiness(c) {
|
||||
if !h.checkOnline() {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||
"success": false,
|
||||
"error": "Database is offline",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
var pendingPushed, componentsSynced, pricelistsSynced int
|
||||
|
||||
// Push local pending changes first (projects/configurations)
|
||||
pendingPushed, err := h.syncService.PushPendingChanges()
|
||||
if err != nil {
|
||||
slog.Error("pending push failed during full sync", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{
|
||||
"success": false,
|
||||
"error": "pending changes push failed",
|
||||
})
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
var componentsSynced, pricelistsSynced int
|
||||
|
||||
// Sync components
|
||||
mariaDB, err := h.connMgr.GetDB()
|
||||
if err != nil {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||
"success": false,
|
||||
"error": "database connection failed",
|
||||
"error": "Database connection failed: " + err.Error(),
|
||||
})
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -318,9 +213,8 @@ func (h *SyncHandler) SyncAll(c *gin.Context) {
|
||||
slog.Error("component sync failed during full sync", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{
|
||||
"success": false,
|
||||
"error": "component sync failed",
|
||||
"error": "Component sync failed: " + err.Error(),
|
||||
})
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
componentsSynced = compResult.TotalSynced
|
||||
@@ -331,60 +225,19 @@ func (h *SyncHandler) SyncAll(c *gin.Context) {
|
||||
slog.Error("pricelist sync failed during full sync", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{
|
||||
"success": false,
|
||||
"error": "pricelist sync failed",
|
||||
"pending_pushed": pendingPushed,
|
||||
"error": "Pricelist sync failed: " + err.Error(),
|
||||
"components_synced": componentsSynced,
|
||||
})
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
projectsResult, err := h.syncService.ImportProjectsToLocal()
|
||||
if err != nil {
|
||||
slog.Error("project import failed during full sync", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{
|
||||
"success": false,
|
||||
"error": "project import failed",
|
||||
"pending_pushed": pendingPushed,
|
||||
"components_synced": componentsSynced,
|
||||
"pricelists_synced": pricelistsSynced,
|
||||
})
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
configsResult, err := h.syncService.ImportConfigurationsToLocal()
|
||||
if err != nil {
|
||||
slog.Error("configuration import failed during full sync", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{
|
||||
"success": false,
|
||||
"error": "configuration import failed",
|
||||
"pending_pushed": pendingPushed,
|
||||
"components_synced": componentsSynced,
|
||||
"pricelists_synced": pricelistsSynced,
|
||||
"projects_imported": projectsResult.Imported,
|
||||
"projects_updated": projectsResult.Updated,
|
||||
"projects_skipped": projectsResult.Skipped,
|
||||
})
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, SyncAllResponse{
|
||||
Success: true,
|
||||
Message: "Full sync completed successfully",
|
||||
PendingPushed: pendingPushed,
|
||||
ComponentsSynced: componentsSynced,
|
||||
PricelistsSynced: pricelistsSynced,
|
||||
ProjectsImported: projectsResult.Imported,
|
||||
ProjectsUpdated: projectsResult.Updated,
|
||||
ProjectsSkipped: projectsResult.Skipped,
|
||||
ConfigurationsImported: configsResult.Imported,
|
||||
ConfigurationsUpdated: configsResult.Updated,
|
||||
ConfigurationsSkipped: configsResult.Skipped,
|
||||
Duration: time.Since(startTime).String(),
|
||||
Success: true,
|
||||
Message: "Full sync completed successfully",
|
||||
ComponentsSynced: componentsSynced,
|
||||
PricelistsSynced: pricelistsSynced,
|
||||
Duration: time.Since(startTime).String(),
|
||||
})
|
||||
h.syncService.RecordSyncHeartbeat()
|
||||
}
|
||||
|
||||
// checkOnline checks if MariaDB is accessible
|
||||
@@ -395,7 +248,11 @@ func (h *SyncHandler) checkOnline() bool {
|
||||
// PushPendingChanges pushes all pending changes to the server
|
||||
// POST /api/sync/push
|
||||
func (h *SyncHandler) PushPendingChanges(c *gin.Context) {
|
||||
if !h.ensureSyncReadiness(c) {
|
||||
if !h.checkOnline() {
|
||||
c.JSON(http.StatusServiceUnavailable, gin.H{
|
||||
"success": false,
|
||||
"error": "Database is offline",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -405,9 +262,8 @@ func (h *SyncHandler) PushPendingChanges(c *gin.Context) {
|
||||
slog.Error("push pending changes failed", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{
|
||||
"success": false,
|
||||
"error": "pending changes push failed",
|
||||
"error": err.Error(),
|
||||
})
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -417,7 +273,6 @@ func (h *SyncHandler) PushPendingChanges(c *gin.Context) {
|
||||
Synced: pushed,
|
||||
Duration: time.Since(startTime).String(),
|
||||
})
|
||||
h.syncService.RecordSyncHeartbeat()
|
||||
}
|
||||
|
||||
// GetPendingCount returns the number of pending changes
|
||||
@@ -434,7 +289,9 @@ func (h *SyncHandler) GetPendingCount(c *gin.Context) {
|
||||
func (h *SyncHandler) GetPendingChanges(c *gin.Context) {
|
||||
changes, err := h.localDB.GetPendingChanges()
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{
|
||||
"error": err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -443,66 +300,12 @@ func (h *SyncHandler) GetPendingChanges(c *gin.Context) {
|
||||
})
|
||||
}
|
||||
|
||||
// RepairPendingChanges attempts to repair errored pending changes
|
||||
// POST /api/sync/repair
|
||||
func (h *SyncHandler) RepairPendingChanges(c *gin.Context) {
|
||||
repaired, remainingErrors, err := h.localDB.RepairPendingChanges()
|
||||
if err != nil {
|
||||
slog.Error("repair pending changes failed", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{
|
||||
"success": false,
|
||||
"error": "pending changes repair failed",
|
||||
})
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"success": true,
|
||||
"repaired": repaired,
|
||||
"remaining_errors": remainingErrors,
|
||||
})
|
||||
}
|
||||
|
||||
// SyncInfoResponse represents sync information for the modal
|
||||
// SyncInfoResponse represents sync information
|
||||
type SyncInfoResponse struct {
|
||||
// Connection
|
||||
DBHost string `json:"db_host"`
|
||||
DBUser string `json:"db_user"`
|
||||
DBName string `json:"db_name"`
|
||||
|
||||
// Status
|
||||
IsOnline bool `json:"is_online"`
|
||||
LastSyncAt *time.Time `json:"last_sync_at"`
|
||||
LastPricelistAttemptAt *time.Time `json:"last_pricelist_attempt_at,omitempty"`
|
||||
LastPricelistSyncStatus string `json:"last_pricelist_sync_status,omitempty"`
|
||||
LastPricelistSyncError string `json:"last_pricelist_sync_error,omitempty"`
|
||||
NeedPricelistSync bool `json:"need_pricelist_sync"`
|
||||
HasIncompleteServerSync bool `json:"has_incomplete_server_sync"`
|
||||
|
||||
// Statistics
|
||||
LotCount int64 `json:"lot_count"`
|
||||
LotLogCount int64 `json:"lot_log_count"`
|
||||
ConfigCount int64 `json:"config_count"`
|
||||
ProjectCount int64 `json:"project_count"`
|
||||
|
||||
// Pending changes
|
||||
PendingChanges []localdb.PendingChange `json:"pending_changes"`
|
||||
|
||||
// Errors
|
||||
LastSyncAt *time.Time `json:"last_sync_at"`
|
||||
IsOnline bool `json:"is_online"`
|
||||
ErrorCount int `json:"error_count"`
|
||||
Errors []SyncError `json:"errors,omitempty"`
|
||||
|
||||
// Readiness guard
|
||||
Readiness *sync.SyncReadiness `json:"readiness,omitempty"`
|
||||
}
|
||||
|
||||
type SyncUsersStatusResponse struct {
|
||||
IsOnline bool `json:"is_online"`
|
||||
AutoSyncIntervalSeconds int64 `json:"auto_sync_interval_seconds"`
|
||||
OnlineThresholdSeconds int64 `json:"online_threshold_seconds"`
|
||||
GeneratedAt time.Time `json:"generated_at"`
|
||||
Users []sync.UserSyncStatus `json:"users"`
|
||||
}
|
||||
|
||||
// SyncError represents a sync error
|
||||
@@ -514,46 +317,34 @@ type SyncError struct {
|
||||
// GetInfo returns sync information for modal
|
||||
// GET /api/sync/info
|
||||
func (h *SyncHandler) GetInfo(c *gin.Context) {
|
||||
connStatus := h.connMgr.GetStatus()
|
||||
isOnline := connStatus.IsConnected && strings.TrimSpace(connStatus.LastError) == ""
|
||||
|
||||
// Get DB connection info
|
||||
var dbHost, dbUser, dbName string
|
||||
if settings, err := h.localDB.GetSettings(); err == nil {
|
||||
dbHost = settings.Host + ":" + fmt.Sprintf("%d", settings.Port)
|
||||
dbUser = settings.User
|
||||
dbName = settings.Database
|
||||
}
|
||||
// Check online status by pinging MariaDB
|
||||
isOnline := h.checkOnline()
|
||||
|
||||
// Get sync times
|
||||
lastPricelistSync := h.localDB.GetLastSyncTime()
|
||||
lastPricelistAttemptAt := h.localDB.GetLastPricelistSyncAttemptAt()
|
||||
lastPricelistSyncStatus := h.localDB.GetLastPricelistSyncStatus()
|
||||
lastPricelistSyncError := h.localDB.GetLastPricelistSyncError()
|
||||
hasFailedSync := strings.EqualFold(lastPricelistSyncStatus, "failed")
|
||||
needPricelistSync := lastPricelistSync == nil || hasFailedSync
|
||||
hasIncompleteServerSync := hasFailedSync
|
||||
|
||||
// Get local counts
|
||||
configCount := h.localDB.CountConfigurations()
|
||||
projectCount := h.localDB.CountProjects()
|
||||
componentCount := h.localDB.CountLocalComponents()
|
||||
pricelistCount := h.localDB.CountLocalPricelists()
|
||||
|
||||
// Get error count (only changes with LastError != "")
|
||||
errorCount := int(h.localDB.CountErroredChanges())
|
||||
|
||||
// Get pending changes
|
||||
// Get recent errors (last 10)
|
||||
changes, err := h.localDB.GetPendingChanges()
|
||||
if err != nil {
|
||||
slog.Error("failed to get pending changes for sync info", "error", err)
|
||||
changes = []localdb.PendingChange{}
|
||||
// Even if we can't get changes, we can still return the error count
|
||||
c.JSON(http.StatusOK, SyncInfoResponse{
|
||||
LastSyncAt: lastPricelistSync,
|
||||
IsOnline: isOnline,
|
||||
ErrorCount: errorCount,
|
||||
Errors: []SyncError{}, // Return empty errors list
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
var syncErrors []SyncError
|
||||
var errors []SyncError
|
||||
for _, change := range changes {
|
||||
// Check if there's a last error and it's not empty
|
||||
if change.LastError != "" {
|
||||
syncErrors = append(syncErrors, SyncError{
|
||||
errors = append(errors, SyncError{
|
||||
Timestamp: change.CreatedAt,
|
||||
Message: change.LastError,
|
||||
})
|
||||
@@ -561,66 +352,15 @@ func (h *SyncHandler) GetInfo(c *gin.Context) {
|
||||
}
|
||||
|
||||
// Limit to last 10 errors
|
||||
if len(syncErrors) > 10 {
|
||||
syncErrors = syncErrors[:10]
|
||||
if len(errors) > 10 {
|
||||
errors = errors[:10]
|
||||
}
|
||||
|
||||
readiness := h.getReadinessLocal()
|
||||
|
||||
c.JSON(http.StatusOK, SyncInfoResponse{
|
||||
DBHost: dbHost,
|
||||
DBUser: dbUser,
|
||||
DBName: dbName,
|
||||
IsOnline: isOnline,
|
||||
LastSyncAt: lastPricelistSync,
|
||||
LastPricelistAttemptAt: lastPricelistAttemptAt,
|
||||
LastPricelistSyncStatus: lastPricelistSyncStatus,
|
||||
LastPricelistSyncError: lastPricelistSyncError,
|
||||
NeedPricelistSync: needPricelistSync,
|
||||
HasIncompleteServerSync: hasIncompleteServerSync,
|
||||
LotCount: componentCount,
|
||||
LotLogCount: pricelistCount,
|
||||
ConfigCount: configCount,
|
||||
ProjectCount: projectCount,
|
||||
PendingChanges: changes,
|
||||
ErrorCount: errorCount,
|
||||
Errors: syncErrors,
|
||||
Readiness: readiness,
|
||||
})
|
||||
}
|
||||
|
||||
// GetUsersStatus returns last sync timestamps for users with sync heartbeats.
|
||||
// GET /api/sync/users-status
|
||||
func (h *SyncHandler) GetUsersStatus(c *gin.Context) {
|
||||
threshold := time.Duration(float64(h.autoSyncInterval) * h.onlineGraceFactor)
|
||||
isOnline := h.checkOnline()
|
||||
|
||||
if !isOnline {
|
||||
c.JSON(http.StatusOK, SyncUsersStatusResponse{
|
||||
IsOnline: false,
|
||||
AutoSyncIntervalSeconds: int64(h.autoSyncInterval.Seconds()),
|
||||
OnlineThresholdSeconds: int64(threshold.Seconds()),
|
||||
GeneratedAt: time.Now().UTC(),
|
||||
Users: []sync.UserSyncStatus{},
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Keep current client heartbeat fresh so app version is available in the table.
|
||||
h.syncService.RecordSyncHeartbeat()
|
||||
|
||||
users, err := h.syncService.ListUserSyncStatuses(threshold)
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, SyncUsersStatusResponse{
|
||||
IsOnline: true,
|
||||
AutoSyncIntervalSeconds: int64(h.autoSyncInterval.Seconds()),
|
||||
OnlineThresholdSeconds: int64(threshold.Seconds()),
|
||||
GeneratedAt: time.Now().UTC(),
|
||||
Users: users,
|
||||
LastSyncAt: lastPricelistSync,
|
||||
IsOnline: isOnline,
|
||||
ErrorCount: errorCount,
|
||||
Errors: errors,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -640,109 +380,17 @@ func (h *SyncHandler) SyncStatusPartial(c *gin.Context) {
|
||||
|
||||
// Get pending count
|
||||
pendingCount := h.localDB.GetPendingCount()
|
||||
readiness := h.getReadinessLocal()
|
||||
isBlocked := readiness != nil && readiness.Blocked
|
||||
lastPricelistSyncStatus := h.localDB.GetLastPricelistSyncStatus()
|
||||
lastPricelistSyncError := h.localDB.GetLastPricelistSyncError()
|
||||
hasFailedSync := strings.EqualFold(lastPricelistSyncStatus, "failed")
|
||||
hasIncompleteServerSync := hasFailedSync
|
||||
|
||||
slog.Debug("rendering sync status", "is_offline", isOffline, "pending_count", pendingCount, "sync_blocked", isBlocked)
|
||||
slog.Debug("rendering sync status", "is_offline", isOffline, "pending_count", pendingCount)
|
||||
|
||||
data := gin.H{
|
||||
"IsOffline": isOffline,
|
||||
"PendingCount": pendingCount,
|
||||
"IsBlocked": isBlocked,
|
||||
"HasFailedSync": hasFailedSync,
|
||||
"HasIncompleteServerSync": hasIncompleteServerSync,
|
||||
"SyncIssueTitle": func() string {
|
||||
if hasIncompleteServerSync {
|
||||
return "Последняя синхронизация прайслистов прервалась. На сервере есть изменения, которые не загружены локально."
|
||||
}
|
||||
if hasFailedSync {
|
||||
if lastPricelistSyncError != "" {
|
||||
return lastPricelistSyncError
|
||||
}
|
||||
return "Последняя синхронизация прайслистов завершилась ошибкой."
|
||||
}
|
||||
return ""
|
||||
}(),
|
||||
"BlockedReason": func() string {
|
||||
if readiness == nil {
|
||||
return ""
|
||||
}
|
||||
return readiness.ReasonText
|
||||
}(),
|
||||
"IsOffline": isOffline,
|
||||
"PendingCount": pendingCount,
|
||||
}
|
||||
|
||||
c.Header("Content-Type", "text/html; charset=utf-8")
|
||||
if err := h.tmpl.ExecuteTemplate(c.Writer, "sync_status", data); err != nil {
|
||||
slog.Error("failed to render sync_status template", "error", err)
|
||||
_ = c.Error(err)
|
||||
c.String(http.StatusInternalServerError, "Template error")
|
||||
c.String(http.StatusInternalServerError, "Template error: "+err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (h *SyncHandler) getReadinessLocal() *sync.SyncReadiness {
|
||||
h.readinessMu.Lock()
|
||||
if h.readinessCached != nil && time.Since(h.readinessCachedAt) < 10*time.Second {
|
||||
cached := *h.readinessCached
|
||||
h.readinessMu.Unlock()
|
||||
return &cached
|
||||
}
|
||||
h.readinessMu.Unlock()
|
||||
|
||||
state, err := h.localDB.GetSyncGuardState()
|
||||
if err != nil || state == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
readiness := &sync.SyncReadiness{
|
||||
Status: state.Status,
|
||||
Blocked: state.Status == sync.ReadinessBlocked,
|
||||
ReasonCode: state.ReasonCode,
|
||||
ReasonText: state.ReasonText,
|
||||
RequiredMinAppVersion: state.RequiredMinAppVersion,
|
||||
LastCheckedAt: state.LastCheckedAt,
|
||||
}
|
||||
|
||||
h.readinessMu.Lock()
|
||||
h.readinessCached = readiness
|
||||
h.readinessCachedAt = time.Now()
|
||||
h.readinessMu.Unlock()
|
||||
return readiness
|
||||
}
|
||||
|
||||
// ReportPartnumberSeen pushes unresolved vendor partnumbers to qt_vendor_partnumber_seen on MariaDB.
|
||||
// POST /api/sync/partnumber-seen
|
||||
func (h *SyncHandler) ReportPartnumberSeen(c *gin.Context) {
|
||||
var body struct {
|
||||
Items []struct {
|
||||
Partnumber string `json:"partnumber"`
|
||||
Description string `json:"description"`
|
||||
Ignored bool `json:"ignored"`
|
||||
} `json:"items"`
|
||||
}
|
||||
if err := c.ShouldBindJSON(&body); err != nil {
|
||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
||||
return
|
||||
}
|
||||
|
||||
items := make([]sync.SeenPartnumber, 0, len(body.Items))
|
||||
for _, it := range body.Items {
|
||||
if it.Partnumber != "" {
|
||||
items = append(items, sync.SeenPartnumber{
|
||||
Partnumber: it.Partnumber,
|
||||
Description: it.Description,
|
||||
Ignored: it.Ignored,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if err := h.syncService.PushPartnumberSeen(items); err != nil {
|
||||
RespondError(c, http.StatusServiceUnavailable, "service unavailable", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"reported": len(items)})
|
||||
}
|
||||
|
||||
@@ -1,64 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
syncsvc "git.mchus.pro/mchus/quoteforge/internal/services/sync"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func TestSyncReadinessOfflineBlocked(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
dir := t.TempDir()
|
||||
local, err := localdb.New(filepath.Join(dir, "qfs.db"))
|
||||
if err != nil {
|
||||
t.Fatalf("init local db: %v", err)
|
||||
}
|
||||
|
||||
service := syncsvc.NewService(nil, local)
|
||||
h, err := NewSyncHandler(local, service, nil, filepath.Join("web", "templates"), 5*time.Minute)
|
||||
if err != nil {
|
||||
t.Fatalf("new sync handler: %v", err)
|
||||
}
|
||||
|
||||
router := gin.New()
|
||||
router.GET("/api/sync/readiness", h.GetReadiness)
|
||||
router.POST("/api/sync/push", h.PushPendingChanges)
|
||||
|
||||
readinessResp := httptest.NewRecorder()
|
||||
readinessReq, _ := http.NewRequest(http.MethodGet, "/api/sync/readiness", nil)
|
||||
router.ServeHTTP(readinessResp, readinessReq)
|
||||
if readinessResp.Code != http.StatusOK {
|
||||
t.Fatalf("unexpected readiness status: %d", readinessResp.Code)
|
||||
}
|
||||
|
||||
var readinessBody map[string]any
|
||||
if err := json.Unmarshal(readinessResp.Body.Bytes(), &readinessBody); err != nil {
|
||||
t.Fatalf("decode readiness body: %v", err)
|
||||
}
|
||||
if blocked, _ := readinessBody["blocked"].(bool); !blocked {
|
||||
t.Fatalf("expected blocked readiness, got %v", readinessBody["blocked"])
|
||||
}
|
||||
|
||||
pushResp := httptest.NewRecorder()
|
||||
pushReq, _ := http.NewRequest(http.MethodPost, "/api/sync/push", nil)
|
||||
router.ServeHTTP(pushResp, pushReq)
|
||||
if pushResp.Code != http.StatusLocked {
|
||||
t.Fatalf("expected 423 for blocked sync push, got %d body=%s", pushResp.Code, pushResp.Body.String())
|
||||
}
|
||||
|
||||
var pushBody map[string]any
|
||||
if err := json.Unmarshal(pushResp.Body.Bytes(), &pushBody); err != nil {
|
||||
t.Fatalf("decode push body: %v", err)
|
||||
}
|
||||
if pushBody["reason_text"] == nil || pushBody["reason_text"] == "" {
|
||||
t.Fatalf("expected reason_text in blocked response, got %v", pushBody)
|
||||
}
|
||||
}
|
||||
@@ -1,201 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// VendorSpecHandler handles vendor BOM spec operations for a configuration.
|
||||
type VendorSpecHandler struct {
|
||||
localDB *localdb.LocalDB
|
||||
configService *services.LocalConfigurationService
|
||||
}
|
||||
|
||||
func NewVendorSpecHandler(localDB *localdb.LocalDB) *VendorSpecHandler {
|
||||
return &VendorSpecHandler{
|
||||
localDB: localDB,
|
||||
configService: services.NewLocalConfigurationService(localDB, nil, nil, func() bool { return false }),
|
||||
}
|
||||
}
|
||||
|
||||
// lookupConfig finds an active configuration by UUID using the standard localDB method.
|
||||
func (h *VendorSpecHandler) lookupConfig(uuid string) (*localdb.LocalConfiguration, error) {
|
||||
cfg, err := h.localDB.GetConfigurationByUUID(uuid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !cfg.IsActive {
|
||||
return nil, errors.New("not active")
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// GetVendorSpec returns the vendor spec (BOM) for a configuration.
|
||||
// GET /api/configs/:uuid/vendor-spec
|
||||
func (h *VendorSpecHandler) GetVendorSpec(c *gin.Context) {
|
||||
cfg, err := h.lookupConfig(c.Param("uuid"))
|
||||
if err != nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "configuration not found"})
|
||||
return
|
||||
}
|
||||
|
||||
spec := cfg.VendorSpec
|
||||
if spec == nil {
|
||||
spec = localdb.VendorSpec{}
|
||||
}
|
||||
c.JSON(http.StatusOK, gin.H{"vendor_spec": spec})
|
||||
}
|
||||
|
||||
// PutVendorSpec saves (replaces) the vendor spec for a configuration.
|
||||
// PUT /api/configs/:uuid/vendor-spec
|
||||
func (h *VendorSpecHandler) PutVendorSpec(c *gin.Context) {
|
||||
cfg, err := h.lookupConfig(c.Param("uuid"))
|
||||
if err != nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "configuration not found"})
|
||||
return
|
||||
}
|
||||
|
||||
var body struct {
|
||||
VendorSpec []localdb.VendorSpecItem `json:"vendor_spec"`
|
||||
}
|
||||
if err := c.ShouldBindJSON(&body); err != nil {
|
||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
||||
return
|
||||
}
|
||||
|
||||
for i := range body.VendorSpec {
|
||||
if body.VendorSpec[i].SortOrder == 0 {
|
||||
body.VendorSpec[i].SortOrder = (i + 1) * 10
|
||||
}
|
||||
// Persist canonical LOT mapping only.
|
||||
body.VendorSpec[i].LotMappings = normalizeLotMappings(body.VendorSpec[i].LotMappings)
|
||||
body.VendorSpec[i].ResolvedLotName = ""
|
||||
body.VendorSpec[i].ResolutionSource = ""
|
||||
body.VendorSpec[i].ManualLotSuggestion = ""
|
||||
body.VendorSpec[i].LotQtyPerPN = 0
|
||||
body.VendorSpec[i].LotAllocations = nil
|
||||
}
|
||||
|
||||
spec := localdb.VendorSpec(body.VendorSpec)
|
||||
if _, err := h.configService.UpdateVendorSpecNoAuth(cfg.UUID, spec); err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"vendor_spec": spec})
|
||||
}
|
||||
|
||||
func normalizeLotMappings(in []localdb.VendorSpecLotMapping) []localdb.VendorSpecLotMapping {
|
||||
if len(in) == 0 {
|
||||
return nil
|
||||
}
|
||||
merged := make(map[string]int, len(in))
|
||||
order := make([]string, 0, len(in))
|
||||
for _, m := range in {
|
||||
lot := strings.TrimSpace(m.LotName)
|
||||
if lot == "" {
|
||||
continue
|
||||
}
|
||||
qty := m.QuantityPerPN
|
||||
if qty < 1 {
|
||||
qty = 1
|
||||
}
|
||||
if _, exists := merged[lot]; !exists {
|
||||
order = append(order, lot)
|
||||
}
|
||||
merged[lot] += qty
|
||||
}
|
||||
out := make([]localdb.VendorSpecLotMapping, 0, len(order))
|
||||
for _, lot := range order {
|
||||
out = append(out, localdb.VendorSpecLotMapping{
|
||||
LotName: lot,
|
||||
QuantityPerPN: merged[lot],
|
||||
})
|
||||
}
|
||||
if len(out) == 0 {
|
||||
return nil
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// ResolveVendorSpec resolves vendor PN → LOT without modifying the cart.
|
||||
// POST /api/configs/:uuid/vendor-spec/resolve
|
||||
func (h *VendorSpecHandler) ResolveVendorSpec(c *gin.Context) {
|
||||
if _, err := h.lookupConfig(c.Param("uuid")); err != nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "configuration not found"})
|
||||
return
|
||||
}
|
||||
|
||||
var body struct {
|
||||
VendorSpec []localdb.VendorSpecItem `json:"vendor_spec"`
|
||||
}
|
||||
if err := c.ShouldBindJSON(&body); err != nil {
|
||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
||||
return
|
||||
}
|
||||
|
||||
bookRepo := repository.NewPartnumberBookRepository(h.localDB.DB())
|
||||
resolver := services.NewVendorSpecResolver(bookRepo)
|
||||
|
||||
resolved, err := resolver.Resolve(body.VendorSpec)
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
return
|
||||
}
|
||||
|
||||
book, _ := bookRepo.GetActiveBook()
|
||||
aggregated, err := services.AggregateLOTs(resolved, book, bookRepo)
|
||||
if err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"resolved": resolved,
|
||||
"aggregated": aggregated,
|
||||
})
|
||||
}
|
||||
|
||||
// ApplyVendorSpec applies the resolved BOM to the cart (Estimate items).
|
||||
// POST /api/configs/:uuid/vendor-spec/apply
|
||||
func (h *VendorSpecHandler) ApplyVendorSpec(c *gin.Context) {
|
||||
cfg, err := h.lookupConfig(c.Param("uuid"))
|
||||
if err != nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "configuration not found"})
|
||||
return
|
||||
}
|
||||
|
||||
var body struct {
|
||||
Items []struct {
|
||||
LotName string `json:"lot_name"`
|
||||
Quantity int `json:"quantity"`
|
||||
UnitPrice float64 `json:"unit_price"`
|
||||
} `json:"items"`
|
||||
}
|
||||
if err := c.ShouldBindJSON(&body); err != nil {
|
||||
RespondError(c, http.StatusBadRequest, "invalid request", err)
|
||||
return
|
||||
}
|
||||
|
||||
newItems := make(localdb.LocalConfigItems, 0, len(body.Items))
|
||||
for _, it := range body.Items {
|
||||
newItems = append(newItems, localdb.LocalConfigItem{
|
||||
LotName: it.LotName,
|
||||
Quantity: it.Quantity,
|
||||
UnitPrice: it.UnitPrice,
|
||||
})
|
||||
}
|
||||
|
||||
if _, err := h.configService.ApplyVendorSpecItemsNoAuth(cfg.UUID, newItems); err != nil {
|
||||
RespondError(c, http.StatusInternalServerError, "internal server error", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"items": newItems})
|
||||
}
|
||||
@@ -1,24 +1,23 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"html/template"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
qfassets "git.mchus.pro/mchus/quoteforge"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/appmeta"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
type WebHandler struct {
|
||||
templates map[string]*template.Template
|
||||
localDB *localdb.LocalDB
|
||||
templates map[string]*template.Template
|
||||
componentService *services.ComponentService
|
||||
}
|
||||
|
||||
func NewWebHandler(_ string, localDB *localdb.LocalDB) (*WebHandler, error) {
|
||||
func NewWebHandler(templatesPath string, componentService *services.ComponentService) (*WebHandler, error) {
|
||||
funcMap := template.FuncMap{
|
||||
"sub": func(a, b int) int { return a - b },
|
||||
"add": func(a, b int) int { return a + b },
|
||||
@@ -61,16 +60,27 @@ func NewWebHandler(_ string, localDB *localdb.LocalDB) (*WebHandler, error) {
|
||||
}
|
||||
|
||||
templates := make(map[string]*template.Template)
|
||||
basePath := filepath.Join(templatesPath, "base.html")
|
||||
useDisk := false
|
||||
if stat, statErr := os.Stat(templatesPath); statErr == nil && stat.IsDir() {
|
||||
useDisk = true
|
||||
}
|
||||
|
||||
// Load each page template with base
|
||||
simplePages := []string{"configs.html", "projects.html", "project_detail.html", "pricelists.html", "pricelist_detail.html", "config_revisions.html", "partnumber_books.html"}
|
||||
simplePages := []string{"login.html", "configs.html", "projects.html", "project_detail.html", "admin_pricing.html", "pricelists.html", "pricelist_detail.html"}
|
||||
for _, page := range simplePages {
|
||||
pagePath := filepath.Join(templatesPath, page)
|
||||
var tmpl *template.Template
|
||||
var err error
|
||||
tmpl, err = template.New("").Funcs(funcMap).ParseFS(
|
||||
qfassets.TemplatesFS,
|
||||
"web/templates/base.html",
|
||||
"web/templates/"+page,
|
||||
)
|
||||
if useDisk {
|
||||
tmpl, err = template.New("").Funcs(funcMap).ParseFiles(basePath, pagePath)
|
||||
} else {
|
||||
tmpl, err = template.New("").Funcs(funcMap).ParseFS(
|
||||
qfassets.TemplatesFS,
|
||||
"web/templates/base.html",
|
||||
"web/templates/"+page,
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -78,14 +88,20 @@ func NewWebHandler(_ string, localDB *localdb.LocalDB) (*WebHandler, error) {
|
||||
}
|
||||
|
||||
// Index page needs components_list.html as well
|
||||
indexPath := filepath.Join(templatesPath, "index.html")
|
||||
componentsListPath := filepath.Join(templatesPath, "components_list.html")
|
||||
var indexTmpl *template.Template
|
||||
var err error
|
||||
indexTmpl, err = template.New("").Funcs(funcMap).ParseFS(
|
||||
qfassets.TemplatesFS,
|
||||
"web/templates/base.html",
|
||||
"web/templates/index.html",
|
||||
"web/templates/components_list.html",
|
||||
)
|
||||
if useDisk {
|
||||
indexTmpl, err = template.New("").Funcs(funcMap).ParseFiles(basePath, indexPath, componentsListPath)
|
||||
} else {
|
||||
indexTmpl, err = template.New("").Funcs(funcMap).ParseFS(
|
||||
qfassets.TemplatesFS,
|
||||
"web/templates/base.html",
|
||||
"web/templates/index.html",
|
||||
"web/templates/components_list.html",
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -94,12 +110,17 @@ func NewWebHandler(_ string, localDB *localdb.LocalDB) (*WebHandler, error) {
|
||||
// Load partial templates (no base needed)
|
||||
partials := []string{"components_list.html"}
|
||||
for _, partial := range partials {
|
||||
partialPath := filepath.Join(templatesPath, partial)
|
||||
var tmpl *template.Template
|
||||
var err error
|
||||
tmpl, err = template.New("").Funcs(funcMap).ParseFS(
|
||||
qfassets.TemplatesFS,
|
||||
"web/templates/"+partial,
|
||||
)
|
||||
if useDisk {
|
||||
tmpl, err = template.New("").Funcs(funcMap).ParseFiles(partialPath)
|
||||
} else {
|
||||
tmpl, err = template.New("").Funcs(funcMap).ParseFS(
|
||||
qfassets.TemplatesFS,
|
||||
"web/templates/"+partial,
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -107,55 +128,60 @@ func NewWebHandler(_ string, localDB *localdb.LocalDB) (*WebHandler, error) {
|
||||
}
|
||||
|
||||
return &WebHandler{
|
||||
templates: templates,
|
||||
localDB: localDB,
|
||||
templates: templates,
|
||||
componentService: componentService,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (h *WebHandler) render(c *gin.Context, name string, data gin.H) {
|
||||
data["AppVersion"] = appmeta.Version()
|
||||
c.Header("Content-Type", "text/html; charset=utf-8")
|
||||
tmpl, ok := h.templates[name]
|
||||
if !ok {
|
||||
_ = c.Error(fmt.Errorf("template %q not found", name))
|
||||
c.String(500, "Template error")
|
||||
c.String(500, "Template not found: %s", name)
|
||||
return
|
||||
}
|
||||
// Execute the page template which will use base
|
||||
if err := tmpl.ExecuteTemplate(c.Writer, name, data); err != nil {
|
||||
_ = c.Error(err)
|
||||
c.String(500, "Template error")
|
||||
c.String(500, "Template error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *WebHandler) Index(c *gin.Context) {
|
||||
// Redirect to projects page - configurator is accessed via /configurator?uuid=...
|
||||
c.Redirect(302, "/projects")
|
||||
// Redirect to configs page - configurator is accessed via /configurator?uuid=...
|
||||
c.Redirect(302, "/configs")
|
||||
}
|
||||
|
||||
func (h *WebHandler) Configurator(c *gin.Context) {
|
||||
categories, _ := h.componentService.GetCategories()
|
||||
uuid := c.Query("uuid")
|
||||
categories, _ := h.localCategories()
|
||||
components, total, err := h.localDB.ListComponents(localdb.ComponentFilter{}, 0, 20)
|
||||
|
||||
filter := repository.ComponentFilter{}
|
||||
result, err := h.componentService.List(filter, 1, 20)
|
||||
|
||||
data := gin.H{
|
||||
"ActivePage": "configurator",
|
||||
"Categories": categories,
|
||||
"Components": []localComponentView{},
|
||||
"Components": []interface{}{},
|
||||
"Total": int64(0),
|
||||
"Page": 1,
|
||||
"PerPage": 20,
|
||||
"ConfigUUID": uuid,
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
data["Components"] = toLocalComponentViews(components)
|
||||
data["Total"] = total
|
||||
if err == nil && result != nil {
|
||||
data["Components"] = result.Components
|
||||
data["Total"] = result.Total
|
||||
data["Page"] = result.Page
|
||||
data["PerPage"] = result.PerPage
|
||||
}
|
||||
|
||||
h.render(c, "index.html", data)
|
||||
}
|
||||
|
||||
func (h *WebHandler) Login(c *gin.Context) {
|
||||
h.render(c, "login.html", nil)
|
||||
}
|
||||
|
||||
func (h *WebHandler) Configs(c *gin.Context) {
|
||||
h.render(c, "configs.html", gin.H{"ActivePage": "configs"})
|
||||
}
|
||||
@@ -171,11 +197,8 @@ func (h *WebHandler) ProjectDetail(c *gin.Context) {
|
||||
})
|
||||
}
|
||||
|
||||
func (h *WebHandler) ConfigRevisions(c *gin.Context) {
|
||||
h.render(c, "config_revisions.html", gin.H{
|
||||
"ActivePage": "configs",
|
||||
"ConfigUUID": c.Param("uuid"),
|
||||
})
|
||||
func (h *WebHandler) AdminPricing(c *gin.Context) {
|
||||
h.render(c, "admin_pricing.html", gin.H{"ActivePage": "admin"})
|
||||
}
|
||||
|
||||
func (h *WebHandler) Pricelists(c *gin.Context) {
|
||||
@@ -186,38 +209,29 @@ func (h *WebHandler) PricelistDetail(c *gin.Context) {
|
||||
h.render(c, "pricelist_detail.html", gin.H{"ActivePage": "pricelists"})
|
||||
}
|
||||
|
||||
func (h *WebHandler) PartnumberBooks(c *gin.Context) {
|
||||
h.render(c, "partnumber_books.html", gin.H{"ActivePage": "partnumber-books"})
|
||||
}
|
||||
|
||||
// Partials for htmx
|
||||
|
||||
func (h *WebHandler) ComponentsPartial(c *gin.Context) {
|
||||
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
|
||||
if page < 1 {
|
||||
page = 1
|
||||
}
|
||||
|
||||
filter := localdb.ComponentFilter{
|
||||
filter := repository.ComponentFilter{
|
||||
Category: c.Query("category"),
|
||||
Search: c.Query("search"),
|
||||
}
|
||||
if c.Query("has_price") == "true" {
|
||||
filter.HasPrice = true
|
||||
}
|
||||
offset := (page - 1) * 20
|
||||
|
||||
data := gin.H{
|
||||
"Components": []localComponentView{},
|
||||
"Components": []interface{}{},
|
||||
"Total": int64(0),
|
||||
"Page": page,
|
||||
"PerPage": 20,
|
||||
}
|
||||
|
||||
components, total, err := h.localDB.ListComponents(filter, offset, 20)
|
||||
if err == nil {
|
||||
data["Components"] = toLocalComponentViews(components)
|
||||
data["Total"] = total
|
||||
result, err := h.componentService.List(filter, page, 20)
|
||||
if err == nil && result != nil {
|
||||
data["Components"] = result.Components
|
||||
data["Total"] = result.Total
|
||||
data["Page"] = result.Page
|
||||
data["PerPage"] = result.PerPage
|
||||
}
|
||||
|
||||
c.Header("Content-Type", "text/html; charset=utf-8")
|
||||
@@ -225,46 +239,3 @@ func (h *WebHandler) ComponentsPartial(c *gin.Context) {
|
||||
tmpl.ExecuteTemplate(c.Writer, "components_list.html", data)
|
||||
}
|
||||
}
|
||||
|
||||
type localComponentView struct {
|
||||
LotName string
|
||||
Description string
|
||||
Category string
|
||||
CategoryName string
|
||||
Model string
|
||||
CurrentPrice *float64
|
||||
}
|
||||
|
||||
func toLocalComponentViews(items []localdb.LocalComponent) []localComponentView {
|
||||
result := make([]localComponentView, 0, len(items))
|
||||
for _, item := range items {
|
||||
result = append(result, localComponentView{
|
||||
LotName: item.LotName,
|
||||
Description: item.LotDescription,
|
||||
Category: item.Category,
|
||||
CategoryName: item.Category,
|
||||
Model: item.Model,
|
||||
})
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (h *WebHandler) localCategories() ([]models.Category, error) {
|
||||
codes, err := h.localDB.GetLocalComponentCategories()
|
||||
if err != nil || len(codes) == 0 {
|
||||
return []models.Category{}, err
|
||||
}
|
||||
|
||||
categories := make([]models.Category, 0, len(codes))
|
||||
for _, code := range codes {
|
||||
trimmed := strings.TrimSpace(code)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
categories = append(categories, models.Category{
|
||||
Code: trimmed,
|
||||
Name: trimmed,
|
||||
})
|
||||
}
|
||||
return categories, nil
|
||||
}
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"html/template"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func TestWebHandlerRenderHidesTemplateExecutionError(t *testing.T) {
|
||||
gin.SetMode(gin.TestMode)
|
||||
|
||||
tmpl := template.Must(template.New("broken.html").Funcs(template.FuncMap{
|
||||
"boom": func() (string, error) {
|
||||
return "", errors.New("secret template failure")
|
||||
},
|
||||
}).Parse(`{{define "broken.html"}}{{boom}}{{end}}`))
|
||||
|
||||
handler := &WebHandler{
|
||||
templates: map[string]*template.Template{
|
||||
"broken.html": tmpl,
|
||||
},
|
||||
}
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
ctx, _ := gin.CreateTestContext(rec)
|
||||
ctx.Request = httptest.NewRequest(http.MethodGet, "/broken", nil)
|
||||
|
||||
handler.render(ctx, "broken.html", gin.H{})
|
||||
|
||||
if rec.Code != http.StatusInternalServerError {
|
||||
t.Fatalf("expected 500, got %d", rec.Code)
|
||||
}
|
||||
if body := strings.TrimSpace(rec.Body.String()); body != "Template error" {
|
||||
t.Fatalf("expected generic template error, got %q", body)
|
||||
}
|
||||
if len(ctx.Errors) != 1 {
|
||||
t.Fatalf("expected logged template error, got %d", len(ctx.Errors))
|
||||
}
|
||||
if !strings.Contains(ctx.Errors.String(), "secret template failure") {
|
||||
t.Fatalf("expected original error in gin context, got %q", ctx.Errors.String())
|
||||
}
|
||||
}
|
||||
@@ -28,13 +28,14 @@ type ComponentSyncResult struct {
|
||||
func (l *LocalDB) SyncComponents(mariaDB *gorm.DB) (*ComponentSyncResult, error) {
|
||||
startTime := time.Now()
|
||||
|
||||
// Query to join lot with qt_lot_metadata (metadata only, no pricing)
|
||||
// Query to join lot with qt_lot_metadata
|
||||
// Use LEFT JOIN to include lots without metadata
|
||||
type componentRow struct {
|
||||
LotName string
|
||||
LotDescription string
|
||||
Category *string
|
||||
Model *string
|
||||
CurrentPrice *float64
|
||||
}
|
||||
|
||||
var rows []componentRow
|
||||
@@ -43,7 +44,8 @@ func (l *LocalDB) SyncComponents(mariaDB *gorm.DB) (*ComponentSyncResult, error)
|
||||
l.lot_name,
|
||||
l.lot_description,
|
||||
COALESCE(c.code, SUBSTRING_INDEX(l.lot_name, '_', 1)) as category,
|
||||
m.model
|
||||
m.model,
|
||||
m.current_price
|
||||
FROM lot l
|
||||
LEFT JOIN qt_lot_metadata m ON l.lot_name = m.lot_name
|
||||
LEFT JOIN qt_categories c ON m.category_id = c.id
|
||||
@@ -98,6 +100,8 @@ func (l *LocalDB) SyncComponents(mariaDB *gorm.DB) (*ComponentSyncResult, error)
|
||||
LotDescription: row.LotDescription,
|
||||
Category: category,
|
||||
Model: model,
|
||||
CurrentPrice: row.CurrentPrice,
|
||||
SyncedAt: syncTime,
|
||||
}
|
||||
components = append(components, comp)
|
||||
|
||||
@@ -217,6 +221,11 @@ func (l *LocalDB) ListComponents(filter ComponentFilter, offset, limit int) ([]L
|
||||
)
|
||||
}
|
||||
|
||||
// Apply price filter
|
||||
if filter.HasPrice {
|
||||
db = db.Where("current_price IS NOT NULL")
|
||||
}
|
||||
|
||||
// Get total count
|
||||
var total int64
|
||||
if err := db.Model(&LocalComponent{}).Count(&total).Error; err != nil {
|
||||
@@ -242,31 +251,6 @@ func (l *LocalDB) GetLocalComponent(lotName string) (*LocalComponent, error) {
|
||||
return &component, nil
|
||||
}
|
||||
|
||||
// GetLocalComponentCategoriesByLotNames returns category for each lot_name in the local component cache.
|
||||
// Missing lots are not included in the map; caller is responsible for strict validation.
|
||||
func (l *LocalDB) GetLocalComponentCategoriesByLotNames(lotNames []string) (map[string]string, error) {
|
||||
result := make(map[string]string, len(lotNames))
|
||||
if len(lotNames) == 0 {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
type row struct {
|
||||
LotName string `gorm:"column:lot_name"`
|
||||
Category string `gorm:"column:category"`
|
||||
}
|
||||
var rows []row
|
||||
if err := l.db.Model(&LocalComponent{}).
|
||||
Select("lot_name, category").
|
||||
Where("lot_name IN ?", lotNames).
|
||||
Find(&rows).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, r := range rows {
|
||||
result[r.LotName] = r.Category
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetLocalComponentCategories returns distinct categories from local components
|
||||
func (l *LocalDB) GetLocalComponentCategories() ([]string, error) {
|
||||
var categories []string
|
||||
@@ -327,3 +311,100 @@ func (l *LocalDB) NeedComponentSync(maxAgeHours int) bool {
|
||||
}
|
||||
return time.Since(*syncTime).Hours() > float64(maxAgeHours)
|
||||
}
|
||||
|
||||
// UpdateComponentPricesFromPricelist updates current_price in local_components from pricelist items
|
||||
// This allows offline price updates using synced pricelists without MariaDB connection
|
||||
func (l *LocalDB) UpdateComponentPricesFromPricelist(pricelistID uint) (int, error) {
|
||||
// Get all items from the specified pricelist
|
||||
var items []LocalPricelistItem
|
||||
if err := l.db.Where("pricelist_id = ?", pricelistID).Find(&items).Error; err != nil {
|
||||
return 0, fmt.Errorf("fetching pricelist items: %w", err)
|
||||
}
|
||||
|
||||
if len(items) == 0 {
|
||||
slog.Warn("no items found in pricelist", "pricelist_id", pricelistID)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Update current_price for each component
|
||||
updated := 0
|
||||
err := l.db.Transaction(func(tx *gorm.DB) error {
|
||||
for _, item := range items {
|
||||
result := tx.Model(&LocalComponent{}).
|
||||
Where("lot_name = ?", item.LotName).
|
||||
Update("current_price", item.Price)
|
||||
|
||||
if result.Error != nil {
|
||||
return fmt.Errorf("updating price for %s: %w", item.LotName, result.Error)
|
||||
}
|
||||
|
||||
if result.RowsAffected > 0 {
|
||||
updated++
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
slog.Info("updated component prices from pricelist",
|
||||
"pricelist_id", pricelistID,
|
||||
"total_items", len(items),
|
||||
"updated_components", updated)
|
||||
|
||||
return updated, nil
|
||||
}
|
||||
|
||||
// EnsureComponentPricesFromPricelists loads prices from the latest pricelist into local_components
|
||||
// if no components exist or all current prices are NULL
|
||||
func (l *LocalDB) EnsureComponentPricesFromPricelists() error {
|
||||
// Check if we have any components with prices
|
||||
var count int64
|
||||
if err := l.db.Model(&LocalComponent{}).Where("current_price IS NOT NULL").Count(&count).Error; err != nil {
|
||||
return fmt.Errorf("checking component prices: %w", err)
|
||||
}
|
||||
|
||||
// If we have components with prices, don't load from pricelists
|
||||
if count > 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if we have any components at all
|
||||
var totalComponents int64
|
||||
if err := l.db.Model(&LocalComponent{}).Count(&totalComponents).Error; err != nil {
|
||||
return fmt.Errorf("counting components: %w", err)
|
||||
}
|
||||
|
||||
// If we have no components, we need to load them from pricelists
|
||||
if totalComponents == 0 {
|
||||
slog.Info("no components found in local database, loading from latest pricelist")
|
||||
// This would typically be called from the sync service or setup process
|
||||
// For now, we'll just return nil to indicate no action needed
|
||||
return nil
|
||||
}
|
||||
|
||||
// If we have components but no prices, we should load prices from pricelists
|
||||
// Find the latest pricelist
|
||||
var latestPricelist LocalPricelist
|
||||
if err := l.db.Order("created_at DESC").First(&latestPricelist).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
slog.Warn("no pricelists found in local database")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("finding latest pricelist: %w", err)
|
||||
}
|
||||
|
||||
// Update prices from the latest pricelist
|
||||
updated, err := l.UpdateComponentPricesFromPricelist(latestPricelist.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("updating component prices from pricelist: %w", err)
|
||||
}
|
||||
|
||||
slog.Info("loaded component prices from latest pricelist",
|
||||
"pricelist_id", latestPricelist.ID,
|
||||
"updated_components", updated)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,154 +0,0 @@
|
||||
package localdb
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
)
|
||||
|
||||
func TestConfigurationConvertersPreserveBusinessFields(t *testing.T) {
|
||||
estimateID := uint(11)
|
||||
warehouseID := uint(22)
|
||||
competitorID := uint(33)
|
||||
|
||||
cfg := &models.Configuration{
|
||||
UUID: "cfg-1",
|
||||
OwnerUsername: "tester",
|
||||
Name: "Config",
|
||||
PricelistID: &estimateID,
|
||||
WarehousePricelistID: &warehouseID,
|
||||
CompetitorPricelistID: &competitorID,
|
||||
DisablePriceRefresh: true,
|
||||
OnlyInStock: true,
|
||||
}
|
||||
|
||||
local := ConfigurationToLocal(cfg)
|
||||
if local.WarehousePricelistID == nil || *local.WarehousePricelistID != warehouseID {
|
||||
t.Fatalf("warehouse pricelist lost in ConfigurationToLocal: %+v", local.WarehousePricelistID)
|
||||
}
|
||||
if local.CompetitorPricelistID == nil || *local.CompetitorPricelistID != competitorID {
|
||||
t.Fatalf("competitor pricelist lost in ConfigurationToLocal: %+v", local.CompetitorPricelistID)
|
||||
}
|
||||
if !local.DisablePriceRefresh {
|
||||
t.Fatalf("disable_price_refresh lost in ConfigurationToLocal")
|
||||
}
|
||||
|
||||
back := LocalToConfiguration(local)
|
||||
if back.WarehousePricelistID == nil || *back.WarehousePricelistID != warehouseID {
|
||||
t.Fatalf("warehouse pricelist lost in LocalToConfiguration: %+v", back.WarehousePricelistID)
|
||||
}
|
||||
if back.CompetitorPricelistID == nil || *back.CompetitorPricelistID != competitorID {
|
||||
t.Fatalf("competitor pricelist lost in LocalToConfiguration: %+v", back.CompetitorPricelistID)
|
||||
}
|
||||
if !back.DisablePriceRefresh {
|
||||
t.Fatalf("disable_price_refresh lost in LocalToConfiguration")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigurationSnapshotPreservesBusinessFields(t *testing.T) {
|
||||
estimateID := uint(11)
|
||||
warehouseID := uint(22)
|
||||
competitorID := uint(33)
|
||||
|
||||
cfg := &LocalConfiguration{
|
||||
UUID: "cfg-1",
|
||||
Name: "Config",
|
||||
PricelistID: &estimateID,
|
||||
WarehousePricelistID: &warehouseID,
|
||||
CompetitorPricelistID: &competitorID,
|
||||
DisablePriceRefresh: true,
|
||||
OnlyInStock: true,
|
||||
VendorSpec: VendorSpec{
|
||||
{
|
||||
SortOrder: 10,
|
||||
VendorPartnumber: "PN-1",
|
||||
Quantity: 1,
|
||||
LotMappings: []VendorSpecLotMapping{
|
||||
{LotName: "LOT_A", QuantityPerPN: 2},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
raw, err := BuildConfigurationSnapshot(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("BuildConfigurationSnapshot: %v", err)
|
||||
}
|
||||
|
||||
decoded, err := DecodeConfigurationSnapshot(raw)
|
||||
if err != nil {
|
||||
t.Fatalf("DecodeConfigurationSnapshot: %v", err)
|
||||
}
|
||||
if decoded.WarehousePricelistID == nil || *decoded.WarehousePricelistID != warehouseID {
|
||||
t.Fatalf("warehouse pricelist lost in snapshot: %+v", decoded.WarehousePricelistID)
|
||||
}
|
||||
if decoded.CompetitorPricelistID == nil || *decoded.CompetitorPricelistID != competitorID {
|
||||
t.Fatalf("competitor pricelist lost in snapshot: %+v", decoded.CompetitorPricelistID)
|
||||
}
|
||||
if !decoded.DisablePriceRefresh {
|
||||
t.Fatalf("disable_price_refresh lost in snapshot")
|
||||
}
|
||||
if len(decoded.VendorSpec) != 1 || decoded.VendorSpec[0].VendorPartnumber != "PN-1" {
|
||||
t.Fatalf("vendor_spec lost in snapshot: %+v", decoded.VendorSpec)
|
||||
}
|
||||
if len(decoded.VendorSpec[0].LotMappings) != 1 || decoded.VendorSpec[0].LotMappings[0].LotName != "LOT_A" {
|
||||
t.Fatalf("lot mappings lost in snapshot: %+v", decoded.VendorSpec)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigurationFingerprintIncludesPricingSelectorsAndVendorSpec(t *testing.T) {
|
||||
estimateID := uint(11)
|
||||
warehouseID := uint(22)
|
||||
competitorID := uint(33)
|
||||
|
||||
base := &LocalConfiguration{
|
||||
UUID: "cfg-1",
|
||||
Name: "Config",
|
||||
ServerCount: 1,
|
||||
Items: LocalConfigItems{{LotName: "LOT_A", Quantity: 1, UnitPrice: 100}},
|
||||
PricelistID: &estimateID,
|
||||
WarehousePricelistID: &warehouseID,
|
||||
CompetitorPricelistID: &competitorID,
|
||||
DisablePriceRefresh: true,
|
||||
OnlyInStock: true,
|
||||
VendorSpec: VendorSpec{
|
||||
{
|
||||
SortOrder: 10,
|
||||
VendorPartnumber: "PN-1",
|
||||
Quantity: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
baseFingerprint, err := BuildConfigurationSpecPriceFingerprint(base)
|
||||
if err != nil {
|
||||
t.Fatalf("base fingerprint: %v", err)
|
||||
}
|
||||
|
||||
changedPricelist := *base
|
||||
newEstimateID := uint(44)
|
||||
changedPricelist.PricelistID = &newEstimateID
|
||||
pricelistFingerprint, err := BuildConfigurationSpecPriceFingerprint(&changedPricelist)
|
||||
if err != nil {
|
||||
t.Fatalf("pricelist fingerprint: %v", err)
|
||||
}
|
||||
if pricelistFingerprint == baseFingerprint {
|
||||
t.Fatalf("expected pricelist selector to affect fingerprint")
|
||||
}
|
||||
|
||||
changedVendorSpec := *base
|
||||
changedVendorSpec.VendorSpec = VendorSpec{
|
||||
{
|
||||
SortOrder: 10,
|
||||
VendorPartnumber: "PN-2",
|
||||
Quantity: 1,
|
||||
},
|
||||
}
|
||||
vendorFingerprint, err := BuildConfigurationSpecPriceFingerprint(&changedVendorSpec)
|
||||
if err != nil {
|
||||
t.Fatalf("vendor fingerprint: %v", err)
|
||||
}
|
||||
if vendorFingerprint == baseFingerprint {
|
||||
t.Fatalf("expected vendor spec to affect fingerprint")
|
||||
}
|
||||
}
|
||||
@@ -18,32 +18,27 @@ func ConfigurationToLocal(cfg *models.Configuration) *LocalConfiguration {
|
||||
}
|
||||
|
||||
local := &LocalConfiguration{
|
||||
UUID: cfg.UUID,
|
||||
ProjectUUID: cfg.ProjectUUID,
|
||||
IsActive: true,
|
||||
Name: cfg.Name,
|
||||
Items: items,
|
||||
TotalPrice: cfg.TotalPrice,
|
||||
CustomPrice: cfg.CustomPrice,
|
||||
Notes: cfg.Notes,
|
||||
IsTemplate: cfg.IsTemplate,
|
||||
ServerCount: cfg.ServerCount,
|
||||
ServerModel: cfg.ServerModel,
|
||||
SupportCode: cfg.SupportCode,
|
||||
Article: cfg.Article,
|
||||
PricelistID: cfg.PricelistID,
|
||||
WarehousePricelistID: cfg.WarehousePricelistID,
|
||||
CompetitorPricelistID: cfg.CompetitorPricelistID,
|
||||
VendorSpec: modelVendorSpecToLocal(cfg.VendorSpec),
|
||||
DisablePriceRefresh: cfg.DisablePriceRefresh,
|
||||
OnlyInStock: cfg.OnlyInStock,
|
||||
Line: cfg.Line,
|
||||
PriceUpdatedAt: cfg.PriceUpdatedAt,
|
||||
CreatedAt: cfg.CreatedAt,
|
||||
UpdatedAt: time.Now(),
|
||||
SyncStatus: "pending",
|
||||
OriginalUserID: derefUint(cfg.UserID),
|
||||
OriginalUsername: cfg.OwnerUsername,
|
||||
UUID: cfg.UUID,
|
||||
ProjectUUID: cfg.ProjectUUID,
|
||||
IsActive: true,
|
||||
Name: cfg.Name,
|
||||
Items: items,
|
||||
TotalPrice: cfg.TotalPrice,
|
||||
CustomPrice: cfg.CustomPrice,
|
||||
Notes: cfg.Notes,
|
||||
IsTemplate: cfg.IsTemplate,
|
||||
ServerCount: cfg.ServerCount,
|
||||
PricelistID: cfg.PricelistID,
|
||||
PriceUpdatedAt: cfg.PriceUpdatedAt,
|
||||
CreatedAt: cfg.CreatedAt,
|
||||
UpdatedAt: time.Now(),
|
||||
SyncStatus: "pending",
|
||||
OriginalUserID: derefUint(cfg.UserID),
|
||||
OriginalUsername: cfg.OwnerUsername,
|
||||
}
|
||||
|
||||
if local.OriginalUsername == "" && cfg.User != nil {
|
||||
local.OriginalUsername = cfg.User.Username
|
||||
}
|
||||
|
||||
if cfg.ID > 0 {
|
||||
@@ -66,28 +61,19 @@ func LocalToConfiguration(local *LocalConfiguration) *models.Configuration {
|
||||
}
|
||||
|
||||
cfg := &models.Configuration{
|
||||
UUID: local.UUID,
|
||||
OwnerUsername: local.OriginalUsername,
|
||||
ProjectUUID: local.ProjectUUID,
|
||||
Name: local.Name,
|
||||
Items: items,
|
||||
TotalPrice: local.TotalPrice,
|
||||
CustomPrice: local.CustomPrice,
|
||||
Notes: local.Notes,
|
||||
IsTemplate: local.IsTemplate,
|
||||
ServerCount: local.ServerCount,
|
||||
ServerModel: local.ServerModel,
|
||||
SupportCode: local.SupportCode,
|
||||
Article: local.Article,
|
||||
PricelistID: local.PricelistID,
|
||||
WarehousePricelistID: local.WarehousePricelistID,
|
||||
CompetitorPricelistID: local.CompetitorPricelistID,
|
||||
VendorSpec: localVendorSpecToModel(local.VendorSpec),
|
||||
DisablePriceRefresh: local.DisablePriceRefresh,
|
||||
OnlyInStock: local.OnlyInStock,
|
||||
Line: local.Line,
|
||||
PriceUpdatedAt: local.PriceUpdatedAt,
|
||||
CreatedAt: local.CreatedAt,
|
||||
UUID: local.UUID,
|
||||
OwnerUsername: local.OriginalUsername,
|
||||
ProjectUUID: local.ProjectUUID,
|
||||
Name: local.Name,
|
||||
Items: items,
|
||||
TotalPrice: local.TotalPrice,
|
||||
CustomPrice: local.CustomPrice,
|
||||
Notes: local.Notes,
|
||||
IsTemplate: local.IsTemplate,
|
||||
ServerCount: local.ServerCount,
|
||||
PricelistID: local.PricelistID,
|
||||
PriceUpdatedAt: local.PriceUpdatedAt,
|
||||
CreatedAt: local.CreatedAt,
|
||||
}
|
||||
|
||||
if local.ServerID != nil {
|
||||
@@ -97,9 +83,6 @@ func LocalToConfiguration(local *LocalConfiguration) *models.Configuration {
|
||||
userID := local.OriginalUserID
|
||||
cfg.UserID = &userID
|
||||
}
|
||||
if local.CurrentVersion != nil {
|
||||
cfg.CurrentVersionNo = local.CurrentVersion.VersionNo
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
@@ -111,96 +94,11 @@ func derefUint(v *uint) uint {
|
||||
return *v
|
||||
}
|
||||
|
||||
func modelVendorSpecToLocal(spec models.VendorSpec) VendorSpec {
|
||||
if len(spec) == 0 {
|
||||
return nil
|
||||
}
|
||||
out := make(VendorSpec, 0, len(spec))
|
||||
for _, item := range spec {
|
||||
row := VendorSpecItem{
|
||||
SortOrder: item.SortOrder,
|
||||
VendorPartnumber: item.VendorPartnumber,
|
||||
Quantity: item.Quantity,
|
||||
Description: item.Description,
|
||||
UnitPrice: item.UnitPrice,
|
||||
TotalPrice: item.TotalPrice,
|
||||
ResolvedLotName: item.ResolvedLotName,
|
||||
ResolutionSource: item.ResolutionSource,
|
||||
ManualLotSuggestion: item.ManualLotSuggestion,
|
||||
LotQtyPerPN: item.LotQtyPerPN,
|
||||
}
|
||||
if len(item.LotAllocations) > 0 {
|
||||
row.LotAllocations = make([]VendorSpecLotAllocation, 0, len(item.LotAllocations))
|
||||
for _, alloc := range item.LotAllocations {
|
||||
row.LotAllocations = append(row.LotAllocations, VendorSpecLotAllocation{
|
||||
LotName: alloc.LotName,
|
||||
Quantity: alloc.Quantity,
|
||||
})
|
||||
}
|
||||
}
|
||||
if len(item.LotMappings) > 0 {
|
||||
row.LotMappings = make([]VendorSpecLotMapping, 0, len(item.LotMappings))
|
||||
for _, mapping := range item.LotMappings {
|
||||
row.LotMappings = append(row.LotMappings, VendorSpecLotMapping{
|
||||
LotName: mapping.LotName,
|
||||
QuantityPerPN: mapping.QuantityPerPN,
|
||||
})
|
||||
}
|
||||
}
|
||||
out = append(out, row)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func localVendorSpecToModel(spec VendorSpec) models.VendorSpec {
|
||||
if len(spec) == 0 {
|
||||
return nil
|
||||
}
|
||||
out := make(models.VendorSpec, 0, len(spec))
|
||||
for _, item := range spec {
|
||||
row := models.VendorSpecItem{
|
||||
SortOrder: item.SortOrder,
|
||||
VendorPartnumber: item.VendorPartnumber,
|
||||
Quantity: item.Quantity,
|
||||
Description: item.Description,
|
||||
UnitPrice: item.UnitPrice,
|
||||
TotalPrice: item.TotalPrice,
|
||||
ResolvedLotName: item.ResolvedLotName,
|
||||
ResolutionSource: item.ResolutionSource,
|
||||
ManualLotSuggestion: item.ManualLotSuggestion,
|
||||
LotQtyPerPN: item.LotQtyPerPN,
|
||||
}
|
||||
if len(item.LotAllocations) > 0 {
|
||||
row.LotAllocations = make([]models.VendorSpecLotAllocation, 0, len(item.LotAllocations))
|
||||
for _, alloc := range item.LotAllocations {
|
||||
row.LotAllocations = append(row.LotAllocations, models.VendorSpecLotAllocation{
|
||||
LotName: alloc.LotName,
|
||||
Quantity: alloc.Quantity,
|
||||
})
|
||||
}
|
||||
}
|
||||
if len(item.LotMappings) > 0 {
|
||||
row.LotMappings = make([]models.VendorSpecLotMapping, 0, len(item.LotMappings))
|
||||
for _, mapping := range item.LotMappings {
|
||||
row.LotMappings = append(row.LotMappings, models.VendorSpecLotMapping{
|
||||
LotName: mapping.LotName,
|
||||
QuantityPerPN: mapping.QuantityPerPN,
|
||||
})
|
||||
}
|
||||
}
|
||||
out = append(out, row)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func ProjectToLocal(project *models.Project) *LocalProject {
|
||||
local := &LocalProject{
|
||||
UUID: project.UUID,
|
||||
OwnerUsername: project.OwnerUsername,
|
||||
Code: project.Code,
|
||||
Variant: project.Variant,
|
||||
Name: project.Name,
|
||||
TrackerURL: project.TrackerURL,
|
||||
IsActive: project.IsActive,
|
||||
IsSystem: project.IsSystem,
|
||||
CreatedAt: project.CreatedAt,
|
||||
@@ -218,10 +116,7 @@ func LocalToProject(local *LocalProject) *models.Project {
|
||||
project := &models.Project{
|
||||
UUID: local.UUID,
|
||||
OwnerUsername: local.OwnerUsername,
|
||||
Code: local.Code,
|
||||
Variant: local.Variant,
|
||||
Name: local.Name,
|
||||
TrackerURL: local.TrackerURL,
|
||||
IsActive: local.IsActive,
|
||||
IsSystem: local.IsSystem,
|
||||
CreatedAt: local.CreatedAt,
|
||||
@@ -242,7 +137,6 @@ func PricelistToLocal(pl *models.Pricelist) *LocalPricelist {
|
||||
|
||||
return &LocalPricelist{
|
||||
ServerID: pl.ID,
|
||||
Source: pl.Source,
|
||||
Version: pl.Version,
|
||||
Name: name,
|
||||
CreatedAt: pl.CreatedAt,
|
||||
@@ -255,7 +149,6 @@ func PricelistToLocal(pl *models.Pricelist) *LocalPricelist {
|
||||
func LocalToPricelist(local *LocalPricelist) *models.Pricelist {
|
||||
return &models.Pricelist{
|
||||
ID: local.ServerID,
|
||||
Source: local.Source,
|
||||
Version: local.Version,
|
||||
Notification: local.Name,
|
||||
CreatedAt: local.CreatedAt,
|
||||
@@ -265,30 +158,20 @@ func LocalToPricelist(local *LocalPricelist) *models.Pricelist {
|
||||
|
||||
// PricelistItemToLocal converts models.PricelistItem to LocalPricelistItem
|
||||
func PricelistItemToLocal(item *models.PricelistItem, localPricelistID uint) *LocalPricelistItem {
|
||||
partnumbers := make(LocalStringList, 0, len(item.Partnumbers))
|
||||
partnumbers = append(partnumbers, item.Partnumbers...)
|
||||
return &LocalPricelistItem{
|
||||
PricelistID: localPricelistID,
|
||||
LotName: item.LotName,
|
||||
LotCategory: item.LotCategory,
|
||||
Price: item.Price,
|
||||
AvailableQty: item.AvailableQty,
|
||||
Partnumbers: partnumbers,
|
||||
PricelistID: localPricelistID,
|
||||
LotName: item.LotName,
|
||||
Price: item.Price,
|
||||
}
|
||||
}
|
||||
|
||||
// LocalToPricelistItem converts LocalPricelistItem to models.PricelistItem
|
||||
func LocalToPricelistItem(local *LocalPricelistItem, serverPricelistID uint) *models.PricelistItem {
|
||||
partnumbers := make([]string, 0, len(local.Partnumbers))
|
||||
partnumbers = append(partnumbers, local.Partnumbers...)
|
||||
return &models.PricelistItem{
|
||||
ID: local.ID,
|
||||
PricelistID: serverPricelistID,
|
||||
LotName: local.LotName,
|
||||
LotCategory: local.LotCategory,
|
||||
Price: local.Price,
|
||||
AvailableQty: local.AvailableQty,
|
||||
Partnumbers: partnumbers,
|
||||
ID: local.ID,
|
||||
PricelistID: serverPricelistID,
|
||||
LotName: local.LotName,
|
||||
Price: local.Price,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -316,14 +199,17 @@ func ComponentToLocal(meta *models.LotMetadata) *LocalComponent {
|
||||
LotDescription: lotDesc,
|
||||
Category: category,
|
||||
Model: meta.Model,
|
||||
CurrentPrice: meta.CurrentPrice,
|
||||
SyncedAt: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
// LocalToComponent converts LocalComponent to models.LotMetadata
|
||||
func LocalToComponent(local *LocalComponent) *models.LotMetadata {
|
||||
return &models.LotMetadata{
|
||||
LotName: local.LotName,
|
||||
Model: local.Model,
|
||||
LotName: local.LotName,
|
||||
Model: local.Model,
|
||||
CurrentPrice: local.CurrentPrice,
|
||||
Lot: &models.Lot{
|
||||
LotName: local.LotName,
|
||||
LotDescription: local.LotDescription,
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
package localdb
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
)
|
||||
|
||||
func TestPricelistItemToLocal_PreservesLotCategory(t *testing.T) {
|
||||
item := &models.PricelistItem{
|
||||
LotName: "CPU_A",
|
||||
LotCategory: "CPU",
|
||||
Price: 10,
|
||||
}
|
||||
|
||||
local := PricelistItemToLocal(item, 123)
|
||||
if local.LotCategory != "CPU" {
|
||||
t.Fatalf("expected LotCategory=CPU, got %q", local.LotCategory)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLocalToPricelistItem_PreservesLotCategory(t *testing.T) {
|
||||
local := &LocalPricelistItem{
|
||||
LotName: "CPU_A",
|
||||
LotCategory: "CPU",
|
||||
Price: 10,
|
||||
}
|
||||
|
||||
item := LocalToPricelistItem(local, 456)
|
||||
if item.LotCategory != "CPU" {
|
||||
t.Fatalf("expected LotCategory=CPU, got %q", item.LotCategory)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,104 +7,19 @@ import (
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/appstate"
|
||||
)
|
||||
|
||||
const encryptionKeyFileName = "local_encryption.key"
|
||||
|
||||
// getEncryptionKey resolves the active encryption key.
|
||||
// Preference order:
|
||||
// 1. QUOTEFORGE_ENCRYPTION_KEY env var
|
||||
// 2. application-managed random key file in the user state directory
|
||||
func getEncryptionKey() ([]byte, error) {
|
||||
// getEncryptionKey derives a 32-byte key from environment variable or machine ID
|
||||
func getEncryptionKey() []byte {
|
||||
key := os.Getenv("QUOTEFORGE_ENCRYPTION_KEY")
|
||||
if key != "" {
|
||||
hash := sha256.Sum256([]byte(key))
|
||||
return hash[:], nil
|
||||
if key == "" {
|
||||
// Fallback to a machine-based key (hostname + fixed salt)
|
||||
hostname, _ := os.Hostname()
|
||||
key = hostname + "quoteforge-salt-2024"
|
||||
}
|
||||
|
||||
stateDir, err := resolveEncryptionStateDir()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("resolve encryption state dir: %w", err)
|
||||
}
|
||||
|
||||
return loadOrCreateEncryptionKey(filepath.Join(stateDir, encryptionKeyFileName))
|
||||
}
|
||||
|
||||
func resolveEncryptionStateDir() (string, error) {
|
||||
configPath, err := appstate.ResolveConfigPath("")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Dir(configPath), nil
|
||||
}
|
||||
|
||||
func loadOrCreateEncryptionKey(path string) ([]byte, error) {
|
||||
if data, err := os.ReadFile(path); err == nil {
|
||||
return parseEncryptionKeyFile(data)
|
||||
} else if !errors.Is(err, os.ErrNotExist) {
|
||||
return nil, fmt.Errorf("read encryption key: %w", err)
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
|
||||
return nil, fmt.Errorf("create encryption key dir: %w", err)
|
||||
}
|
||||
|
||||
raw := make([]byte, 32)
|
||||
if _, err := io.ReadFull(rand.Reader, raw); err != nil {
|
||||
return nil, fmt.Errorf("generate encryption key: %w", err)
|
||||
}
|
||||
|
||||
encoded := base64.StdEncoding.EncodeToString(raw)
|
||||
if err := writeKeyFile(path, []byte(encoded+"\n")); err != nil {
|
||||
if errors.Is(err, os.ErrExist) {
|
||||
data, readErr := os.ReadFile(path)
|
||||
if readErr != nil {
|
||||
return nil, fmt.Errorf("read concurrent encryption key: %w", readErr)
|
||||
}
|
||||
return parseEncryptionKeyFile(data)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return raw, nil
|
||||
}
|
||||
|
||||
func writeKeyFile(path string, data []byte) error {
|
||||
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
if _, err := file.Write(data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return file.Sync()
|
||||
}
|
||||
|
||||
func parseEncryptionKeyFile(data []byte) ([]byte, error) {
|
||||
trimmed := strings.TrimSpace(string(data))
|
||||
decoded, err := base64.StdEncoding.DecodeString(trimmed)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decode encryption key file: %w", err)
|
||||
}
|
||||
if len(decoded) != 32 {
|
||||
return nil, fmt.Errorf("invalid encryption key length: %d", len(decoded))
|
||||
}
|
||||
return decoded, nil
|
||||
}
|
||||
|
||||
func getLegacyEncryptionKey() []byte {
|
||||
hostname, _ := os.Hostname()
|
||||
key := hostname + "quoteforge-salt-2024"
|
||||
// Hash to get exactly 32 bytes for AES-256
|
||||
hash := sha256.Sum256([]byte(key))
|
||||
return hash[:]
|
||||
}
|
||||
@@ -115,10 +30,7 @@ func Encrypt(plaintext string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
key, err := getEncryptionKey()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
key := getEncryptionKey()
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -144,50 +56,12 @@ func Decrypt(ciphertext string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
key, err := getEncryptionKey()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
plaintext, legacy, err := decryptWithKeys(ciphertext, key, getLegacyEncryptionKey())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
_ = legacy
|
||||
return plaintext, nil
|
||||
}
|
||||
|
||||
func DecryptWithMetadata(ciphertext string) (string, bool, error) {
|
||||
if ciphertext == "" {
|
||||
return "", false, nil
|
||||
}
|
||||
|
||||
key, err := getEncryptionKey()
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
return decryptWithKeys(ciphertext, key, getLegacyEncryptionKey())
|
||||
}
|
||||
|
||||
func decryptWithKeys(ciphertext string, primaryKey, legacyKey []byte) (string, bool, error) {
|
||||
key := getEncryptionKey()
|
||||
data, err := base64.StdEncoding.DecodeString(ciphertext)
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
return "", err
|
||||
}
|
||||
|
||||
plaintext, err := decryptWithKey(data, primaryKey)
|
||||
if err == nil {
|
||||
return plaintext, false, nil
|
||||
}
|
||||
|
||||
legacyPlaintext, legacyErr := decryptWithKey(data, legacyKey)
|
||||
if legacyErr == nil {
|
||||
return legacyPlaintext, true, nil
|
||||
}
|
||||
|
||||
return "", false, err
|
||||
}
|
||||
|
||||
func decryptWithKey(data, key []byte) (string, error) {
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
||||
@@ -1,97 +0,0 @@
|
||||
package localdb
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEncryptCreatesPersistentKeyFile(t *testing.T) {
|
||||
stateDir := t.TempDir()
|
||||
t.Setenv("QFS_STATE_DIR", stateDir)
|
||||
t.Setenv("QUOTEFORGE_ENCRYPTION_KEY", "")
|
||||
|
||||
ciphertext, err := Encrypt("secret-password")
|
||||
if err != nil {
|
||||
t.Fatalf("encrypt: %v", err)
|
||||
}
|
||||
if ciphertext == "" {
|
||||
t.Fatal("expected ciphertext")
|
||||
}
|
||||
|
||||
keyPath := filepath.Join(stateDir, encryptionKeyFileName)
|
||||
info, err := os.Stat(keyPath)
|
||||
if err != nil {
|
||||
t.Fatalf("stat key file: %v", err)
|
||||
}
|
||||
if info.Mode().Perm() != 0600 {
|
||||
t.Fatalf("expected 0600 key file, got %v", info.Mode().Perm())
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecryptMigratesLegacyCiphertext(t *testing.T) {
|
||||
stateDir := t.TempDir()
|
||||
t.Setenv("QFS_STATE_DIR", stateDir)
|
||||
t.Setenv("QUOTEFORGE_ENCRYPTION_KEY", "")
|
||||
|
||||
legacyCiphertext := encryptWithKeyForTest(t, getLegacyEncryptionKey(), "legacy-password")
|
||||
|
||||
plaintext, migrated, err := DecryptWithMetadata(legacyCiphertext)
|
||||
if err != nil {
|
||||
t.Fatalf("decrypt legacy: %v", err)
|
||||
}
|
||||
if plaintext != "legacy-password" {
|
||||
t.Fatalf("unexpected plaintext: %q", plaintext)
|
||||
}
|
||||
if !migrated {
|
||||
t.Fatal("expected legacy ciphertext to require migration")
|
||||
}
|
||||
|
||||
currentCiphertext, err := Encrypt("legacy-password")
|
||||
if err != nil {
|
||||
t.Fatalf("encrypt current: %v", err)
|
||||
}
|
||||
plaintext, migrated, err = DecryptWithMetadata(currentCiphertext)
|
||||
if err != nil {
|
||||
t.Fatalf("decrypt current: %v", err)
|
||||
}
|
||||
if migrated {
|
||||
t.Fatal("did not expect current ciphertext to require migration")
|
||||
}
|
||||
}
|
||||
|
||||
func encryptWithKeyForTest(t *testing.T, key []byte, plaintext string) string {
|
||||
t.Helper()
|
||||
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
t.Fatalf("new cipher: %v", err)
|
||||
}
|
||||
gcm, err := cipher.NewGCM(block)
|
||||
if err != nil {
|
||||
t.Fatalf("new gcm: %v", err)
|
||||
}
|
||||
|
||||
nonce := make([]byte, gcm.NonceSize())
|
||||
if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
|
||||
t.Fatalf("read nonce: %v", err)
|
||||
}
|
||||
|
||||
ciphertext := gcm.Seal(nonce, nonce, []byte(plaintext), nil)
|
||||
return base64.StdEncoding.EncodeToString(ciphertext)
|
||||
}
|
||||
|
||||
func TestLegacyEncryptionKeyRemainsDeterministic(t *testing.T) {
|
||||
hostname, _ := os.Hostname()
|
||||
expected := sha256.Sum256([]byte(hostname + "quoteforge-salt-2024"))
|
||||
actual := getLegacyEncryptionKey()
|
||||
if string(actual) != string(expected[:]) {
|
||||
t.Fatal("legacy key derivation changed")
|
||||
}
|
||||
}
|
||||
@@ -3,12 +3,6 @@ package localdb
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/glebarez/sqlite"
|
||||
"github.com/google/uuid"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
)
|
||||
|
||||
func TestRunLocalMigrationsBackfillsExistingConfigurations(t *testing.T) {
|
||||
@@ -76,520 +70,3 @@ func TestRunLocalMigrationsBackfillsExistingConfigurations(t *testing.T) {
|
||||
t.Fatalf("expected local migrations to be recorded")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunLocalMigrationsFixesPricelistVersionUniqueIndex(t *testing.T) {
|
||||
dbPath := filepath.Join(t.TempDir(), "pricelist_index_fix.db")
|
||||
|
||||
local, err := New(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("open localdb: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = local.Close() })
|
||||
|
||||
if err := local.SaveLocalPricelist(&LocalPricelist{
|
||||
ServerID: 10,
|
||||
Version: "2026-02-06-001",
|
||||
Name: "v1",
|
||||
CreatedAt: time.Now().Add(-time.Hour),
|
||||
SyncedAt: time.Now().Add(-time.Hour),
|
||||
}); err != nil {
|
||||
t.Fatalf("save first pricelist: %v", err)
|
||||
}
|
||||
|
||||
if err := local.DB().Exec(`
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_local_pricelists_version_legacy
|
||||
ON local_pricelists(version)
|
||||
`).Error; err != nil {
|
||||
t.Fatalf("create legacy unique version index: %v", err)
|
||||
}
|
||||
|
||||
if err := local.DB().Where("id = ?", "2026_02_06_pricelist_index_fix").
|
||||
Delete(&LocalSchemaMigration{}).Error; err != nil {
|
||||
t.Fatalf("delete migration record: %v", err)
|
||||
}
|
||||
|
||||
if err := runLocalMigrations(local.DB()); err != nil {
|
||||
t.Fatalf("rerun local migrations: %v", err)
|
||||
}
|
||||
|
||||
if err := local.SaveLocalPricelist(&LocalPricelist{
|
||||
ServerID: 11,
|
||||
Version: "2026-02-06-001",
|
||||
Name: "v1-duplicate-version",
|
||||
CreatedAt: time.Now(),
|
||||
SyncedAt: time.Now(),
|
||||
}); err != nil {
|
||||
t.Fatalf("save second pricelist with duplicate version: %v", err)
|
||||
}
|
||||
|
||||
var count int64
|
||||
if err := local.DB().Model(&LocalPricelist{}).Count(&count).Error; err != nil {
|
||||
t.Fatalf("count pricelists: %v", err)
|
||||
}
|
||||
if count != 2 {
|
||||
t.Fatalf("expected 2 pricelists, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunLocalMigrationsDeduplicatesConfigurationVersionsBySpecAndPrice(t *testing.T) {
|
||||
dbPath := filepath.Join(t.TempDir(), "versions_dedup.db")
|
||||
|
||||
local, err := New(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("open localdb: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = local.Close() })
|
||||
|
||||
cfg := &LocalConfiguration{
|
||||
UUID: "dedup-cfg",
|
||||
Name: "Dedup",
|
||||
Items: LocalConfigItems{{LotName: "CPU_A", Quantity: 1, UnitPrice: 100}},
|
||||
ServerCount: 1,
|
||||
SyncStatus: "pending",
|
||||
OriginalUsername: "tester",
|
||||
IsActive: true,
|
||||
}
|
||||
if err := local.SaveConfiguration(cfg); err != nil {
|
||||
t.Fatalf("save seed config: %v", err)
|
||||
}
|
||||
|
||||
baseV1Data, err := BuildConfigurationSnapshot(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("build v1 snapshot: %v", err)
|
||||
}
|
||||
baseV1 := LocalConfigurationVersion{
|
||||
ID: uuid.NewString(),
|
||||
ConfigurationUUID: cfg.UUID,
|
||||
VersionNo: 1,
|
||||
Data: baseV1Data,
|
||||
AppVersion: "test",
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
if err := local.DB().Create(&baseV1).Error; err != nil {
|
||||
t.Fatalf("insert base v1: %v", err)
|
||||
}
|
||||
if err := local.DB().Model(&LocalConfiguration{}).
|
||||
Where("uuid = ?", cfg.UUID).
|
||||
Update("current_version_id", baseV1.ID).Error; err != nil {
|
||||
t.Fatalf("set current_version_id to v1: %v", err)
|
||||
}
|
||||
|
||||
v2 := LocalConfigurationVersion{
|
||||
ID: uuid.NewString(),
|
||||
ConfigurationUUID: cfg.UUID,
|
||||
VersionNo: 2,
|
||||
Data: baseV1.Data,
|
||||
AppVersion: "test",
|
||||
CreatedAt: time.Now().Add(1 * time.Second),
|
||||
}
|
||||
if err := local.DB().Create(&v2).Error; err != nil {
|
||||
t.Fatalf("insert duplicate v2: %v", err)
|
||||
}
|
||||
|
||||
modified := *cfg
|
||||
modified.Items = LocalConfigItems{{LotName: "CPU_A", Quantity: 2, UnitPrice: 100}}
|
||||
total := modified.Items.Total()
|
||||
modified.TotalPrice = &total
|
||||
modified.UpdatedAt = time.Now()
|
||||
v3Data, err := BuildConfigurationSnapshot(&modified)
|
||||
if err != nil {
|
||||
t.Fatalf("build v3 snapshot: %v", err)
|
||||
}
|
||||
|
||||
v3 := LocalConfigurationVersion{
|
||||
ID: uuid.NewString(),
|
||||
ConfigurationUUID: cfg.UUID,
|
||||
VersionNo: 3,
|
||||
Data: v3Data,
|
||||
AppVersion: "test",
|
||||
CreatedAt: time.Now().Add(2 * time.Second),
|
||||
}
|
||||
if err := local.DB().Create(&v3).Error; err != nil {
|
||||
t.Fatalf("insert v3: %v", err)
|
||||
}
|
||||
|
||||
v4 := LocalConfigurationVersion{
|
||||
ID: uuid.NewString(),
|
||||
ConfigurationUUID: cfg.UUID,
|
||||
VersionNo: 4,
|
||||
Data: v3Data,
|
||||
AppVersion: "test",
|
||||
CreatedAt: time.Now().Add(3 * time.Second),
|
||||
}
|
||||
if err := local.DB().Create(&v4).Error; err != nil {
|
||||
t.Fatalf("insert duplicate v4: %v", err)
|
||||
}
|
||||
|
||||
if err := local.DB().Model(&LocalConfiguration{}).
|
||||
Where("uuid = ?", cfg.UUID).
|
||||
Update("current_version_id", v4.ID).Error; err != nil {
|
||||
t.Fatalf("point current_version_id to duplicate v4: %v", err)
|
||||
}
|
||||
|
||||
if err := local.DB().Where("id = ?", "2026_02_19_configuration_versions_dedup_spec_price").
|
||||
Delete(&LocalSchemaMigration{}).Error; err != nil {
|
||||
t.Fatalf("delete dedup migration record: %v", err)
|
||||
}
|
||||
|
||||
if err := runLocalMigrations(local.DB()); err != nil {
|
||||
t.Fatalf("rerun local migrations: %v", err)
|
||||
}
|
||||
|
||||
var versions []LocalConfigurationVersion
|
||||
if err := local.DB().Where("configuration_uuid = ?", cfg.UUID).
|
||||
Order("version_no ASC").
|
||||
Find(&versions).Error; err != nil {
|
||||
t.Fatalf("load versions after dedup: %v", err)
|
||||
}
|
||||
if len(versions) != 2 {
|
||||
t.Fatalf("expected 2 versions after dedup, got %d", len(versions))
|
||||
}
|
||||
if versions[0].VersionNo != 1 || versions[1].VersionNo != 3 {
|
||||
t.Fatalf("expected kept version numbers [1,3], got [%d,%d]", versions[0].VersionNo, versions[1].VersionNo)
|
||||
}
|
||||
|
||||
var after LocalConfiguration
|
||||
if err := local.DB().Where("uuid = ?", cfg.UUID).First(&after).Error; err != nil {
|
||||
t.Fatalf("load config after dedup: %v", err)
|
||||
}
|
||||
if after.CurrentVersionID == nil || *after.CurrentVersionID != v3.ID {
|
||||
t.Fatalf("expected current_version_id to point to kept latest version v3")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunLocalMigrationsBackfillsConfigurationLineNo(t *testing.T) {
|
||||
dbPath := filepath.Join(t.TempDir(), "line_no_backfill.db")
|
||||
|
||||
local, err := New(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("open localdb: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = local.Close() })
|
||||
|
||||
projectUUID := "project-line"
|
||||
cfg1 := &LocalConfiguration{
|
||||
UUID: "line-cfg-1",
|
||||
ProjectUUID: &projectUUID,
|
||||
Name: "Cfg 1",
|
||||
Items: LocalConfigItems{},
|
||||
SyncStatus: "pending",
|
||||
OriginalUsername: "tester",
|
||||
IsActive: true,
|
||||
CreatedAt: time.Now().Add(-2 * time.Hour),
|
||||
}
|
||||
cfg2 := &LocalConfiguration{
|
||||
UUID: "line-cfg-2",
|
||||
ProjectUUID: &projectUUID,
|
||||
Name: "Cfg 2",
|
||||
Items: LocalConfigItems{},
|
||||
SyncStatus: "pending",
|
||||
OriginalUsername: "tester",
|
||||
IsActive: true,
|
||||
CreatedAt: time.Now().Add(-1 * time.Hour),
|
||||
}
|
||||
if err := local.SaveConfiguration(cfg1); err != nil {
|
||||
t.Fatalf("save cfg1: %v", err)
|
||||
}
|
||||
if err := local.SaveConfiguration(cfg2); err != nil {
|
||||
t.Fatalf("save cfg2: %v", err)
|
||||
}
|
||||
|
||||
if err := local.DB().Model(&LocalConfiguration{}).Where("uuid IN ?", []string{cfg1.UUID, cfg2.UUID}).Update("line_no", 0).Error; err != nil {
|
||||
t.Fatalf("reset line_no: %v", err)
|
||||
}
|
||||
if err := local.DB().Where("id = ?", "2026_02_19_local_config_line_no").Delete(&LocalSchemaMigration{}).Error; err != nil {
|
||||
t.Fatalf("delete migration record: %v", err)
|
||||
}
|
||||
|
||||
if err := runLocalMigrations(local.DB()); err != nil {
|
||||
t.Fatalf("rerun local migrations: %v", err)
|
||||
}
|
||||
|
||||
var rows []LocalConfiguration
|
||||
if err := local.DB().Where("uuid IN ?", []string{cfg1.UUID, cfg2.UUID}).Order("created_at ASC").Find(&rows).Error; err != nil {
|
||||
t.Fatalf("load configurations: %v", err)
|
||||
}
|
||||
if len(rows) != 2 {
|
||||
t.Fatalf("expected 2 configurations, got %d", len(rows))
|
||||
}
|
||||
if rows[0].Line != 10 || rows[1].Line != 20 {
|
||||
t.Fatalf("expected line_no [10,20], got [%d,%d]", rows[0].Line, rows[1].Line)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunLocalMigrationsDeduplicatesCanonicalPartnumberCatalog(t *testing.T) {
|
||||
dbPath := filepath.Join(t.TempDir(), "partnumber_catalog_dedup.db")
|
||||
db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("open sqlite: %v", err)
|
||||
}
|
||||
|
||||
firstLots := LocalPartnumberBookLots{
|
||||
{LotName: "LOT-A", Qty: 1},
|
||||
}
|
||||
secondLots := LocalPartnumberBookLots{
|
||||
{LotName: "LOT-B", Qty: 2},
|
||||
}
|
||||
|
||||
if err := db.Exec(`
|
||||
CREATE TABLE local_partnumber_book_items (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
partnumber TEXT NOT NULL,
|
||||
lots_json TEXT NOT NULL,
|
||||
description TEXT
|
||||
)
|
||||
`).Error; err != nil {
|
||||
t.Fatalf("create dirty local_partnumber_book_items: %v", err)
|
||||
}
|
||||
|
||||
if err := db.Create(&LocalPartnumberBookItem{
|
||||
Partnumber: "PN-001",
|
||||
LotsJSON: firstLots,
|
||||
Description: "",
|
||||
}).Error; err != nil {
|
||||
t.Fatalf("insert first duplicate row: %v", err)
|
||||
}
|
||||
if err := db.Create(&LocalPartnumberBookItem{
|
||||
Partnumber: "PN-001",
|
||||
LotsJSON: secondLots,
|
||||
Description: "Canonical description",
|
||||
}).Error; err != nil {
|
||||
t.Fatalf("insert second duplicate row: %v", err)
|
||||
}
|
||||
|
||||
if err := migrateLocalPartnumberBookCatalog(db); err != nil {
|
||||
t.Fatalf("migrate local partnumber catalog: %v", err)
|
||||
}
|
||||
|
||||
var items []LocalPartnumberBookItem
|
||||
if err := db.Order("partnumber ASC").Find(&items).Error; err != nil {
|
||||
t.Fatalf("load migrated partnumber items: %v", err)
|
||||
}
|
||||
if len(items) != 1 {
|
||||
t.Fatalf("expected 1 deduplicated item, got %d", len(items))
|
||||
}
|
||||
if items[0].Partnumber != "PN-001" {
|
||||
t.Fatalf("unexpected partnumber: %s", items[0].Partnumber)
|
||||
}
|
||||
if items[0].Description != "Canonical description" {
|
||||
t.Fatalf("expected merged description, got %q", items[0].Description)
|
||||
}
|
||||
if len(items[0].LotsJSON) != 2 {
|
||||
t.Fatalf("expected merged lots from duplicates, got %d", len(items[0].LotsJSON))
|
||||
}
|
||||
|
||||
var duplicateCount int64
|
||||
if err := db.Model(&LocalPartnumberBookItem{}).
|
||||
Where("partnumber = ?", "PN-001").
|
||||
Count(&duplicateCount).Error; err != nil {
|
||||
t.Fatalf("count deduplicated partnumber: %v", err)
|
||||
}
|
||||
if duplicateCount != 1 {
|
||||
t.Fatalf("expected unique partnumber row after migration, got %d", duplicateCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSanitizeLocalPartnumberBookCatalogRemovesRowsWithoutPartnumber(t *testing.T) {
|
||||
dbPath := filepath.Join(t.TempDir(), "sanitize_partnumber_catalog.db")
|
||||
db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("open sqlite: %v", err)
|
||||
}
|
||||
|
||||
if err := db.Exec(`
|
||||
CREATE TABLE local_partnumber_book_items (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
partnumber TEXT NULL,
|
||||
lots_json TEXT NOT NULL,
|
||||
description TEXT
|
||||
)
|
||||
`).Error; err != nil {
|
||||
t.Fatalf("create local_partnumber_book_items: %v", err)
|
||||
}
|
||||
if err := db.Exec(`
|
||||
INSERT INTO local_partnumber_book_items (partnumber, lots_json, description) VALUES
|
||||
(NULL, '[]', 'null pn'),
|
||||
('', '[]', 'empty pn'),
|
||||
('PN-OK', '[]', 'valid pn')
|
||||
`).Error; err != nil {
|
||||
t.Fatalf("seed local_partnumber_book_items: %v", err)
|
||||
}
|
||||
|
||||
if err := sanitizeLocalPartnumberBookCatalog(db); err != nil {
|
||||
t.Fatalf("sanitize local partnumber catalog: %v", err)
|
||||
}
|
||||
|
||||
var items []LocalPartnumberBookItem
|
||||
if err := db.Order("id ASC").Find(&items).Error; err != nil {
|
||||
t.Fatalf("load sanitized items: %v", err)
|
||||
}
|
||||
if len(items) != 1 {
|
||||
t.Fatalf("expected 1 valid item after sanitize, got %d", len(items))
|
||||
}
|
||||
if items[0].Partnumber != "PN-OK" {
|
||||
t.Fatalf("expected remaining partnumber PN-OK, got %q", items[0].Partnumber)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewMigratesLegacyPartnumberBookCatalogBeforeAutoMigrate(t *testing.T) {
|
||||
dbPath := filepath.Join(t.TempDir(), "legacy_partnumber_catalog.db")
|
||||
db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("open sqlite: %v", err)
|
||||
}
|
||||
|
||||
if err := db.Exec(`
|
||||
CREATE TABLE local_partnumber_book_items (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
partnumber TEXT NOT NULL UNIQUE,
|
||||
lots_json TEXT NOT NULL,
|
||||
is_primary_pn INTEGER NOT NULL DEFAULT 0,
|
||||
description TEXT
|
||||
)
|
||||
`).Error; err != nil {
|
||||
t.Fatalf("create legacy local_partnumber_book_items: %v", err)
|
||||
}
|
||||
if err := db.Exec(`
|
||||
INSERT INTO local_partnumber_book_items (partnumber, lots_json, is_primary_pn, description)
|
||||
VALUES ('PN-001', '[{"lot_name":"CPU_A","qty":1}]', 0, 'Legacy row')
|
||||
`).Error; err != nil {
|
||||
t.Fatalf("seed legacy local_partnumber_book_items: %v", err)
|
||||
}
|
||||
|
||||
local, err := New(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("open localdb with legacy catalog: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = local.Close() })
|
||||
|
||||
var columns []struct {
|
||||
Name string `gorm:"column:name"`
|
||||
}
|
||||
if err := local.DB().Raw(`SELECT name FROM pragma_table_info('local_partnumber_book_items')`).Scan(&columns).Error; err != nil {
|
||||
t.Fatalf("load local_partnumber_book_items columns: %v", err)
|
||||
}
|
||||
for _, column := range columns {
|
||||
if column.Name == "is_primary_pn" {
|
||||
t.Fatalf("expected legacy is_primary_pn column to be removed before automigrate")
|
||||
}
|
||||
}
|
||||
|
||||
var items []LocalPartnumberBookItem
|
||||
if err := local.DB().Find(&items).Error; err != nil {
|
||||
t.Fatalf("load migrated local_partnumber_book_items: %v", err)
|
||||
}
|
||||
if len(items) != 1 || items[0].Partnumber != "PN-001" {
|
||||
t.Fatalf("unexpected migrated rows: %#v", items)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewRecoversBrokenPartnumberBookCatalogCache(t *testing.T) {
|
||||
dbPath := filepath.Join(t.TempDir(), "broken_partnumber_catalog.db")
|
||||
db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("open sqlite: %v", err)
|
||||
}
|
||||
|
||||
if err := db.Exec(`
|
||||
CREATE TABLE local_partnumber_book_items (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
partnumber TEXT NOT NULL UNIQUE,
|
||||
lots_json TEXT NOT NULL,
|
||||
description TEXT
|
||||
)
|
||||
`).Error; err != nil {
|
||||
t.Fatalf("create broken local_partnumber_book_items: %v", err)
|
||||
}
|
||||
if err := db.Exec(`
|
||||
INSERT INTO local_partnumber_book_items (partnumber, lots_json, description)
|
||||
VALUES ('PN-001', '{not-json}', 'Broken cache row')
|
||||
`).Error; err != nil {
|
||||
t.Fatalf("seed broken local_partnumber_book_items: %v", err)
|
||||
}
|
||||
|
||||
local, err := New(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("open localdb with broken catalog cache: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = local.Close() })
|
||||
|
||||
var count int64
|
||||
if err := local.DB().Model(&LocalPartnumberBookItem{}).Count(&count).Error; err != nil {
|
||||
t.Fatalf("count recovered local_partnumber_book_items: %v", err)
|
||||
}
|
||||
if count != 0 {
|
||||
t.Fatalf("expected empty recovered local_partnumber_book_items, got %d rows", count)
|
||||
}
|
||||
|
||||
var quarantineTables []struct {
|
||||
Name string `gorm:"column:name"`
|
||||
}
|
||||
if err := local.DB().Raw(`
|
||||
SELECT name
|
||||
FROM sqlite_master
|
||||
WHERE type = 'table' AND name LIKE 'local_partnumber_book_items_broken_%'
|
||||
`).Scan(&quarantineTables).Error; err != nil {
|
||||
t.Fatalf("load quarantine tables: %v", err)
|
||||
}
|
||||
if len(quarantineTables) != 1 {
|
||||
t.Fatalf("expected one quarantined broken catalog table, got %d", len(quarantineTables))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanupStaleReadOnlyCacheTempTablesDropsShadowTempWhenBaseExists(t *testing.T) {
|
||||
dbPath := filepath.Join(t.TempDir(), "stale_cache_temp.db")
|
||||
db, err := gorm.Open(sqlite.Open(dbPath), &gorm.Config{
|
||||
Logger: logger.Default.LogMode(logger.Silent),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("open sqlite: %v", err)
|
||||
}
|
||||
|
||||
if err := db.Exec(`
|
||||
CREATE TABLE local_pricelist_items (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
pricelist_id INTEGER NOT NULL,
|
||||
partnumber TEXT,
|
||||
brand TEXT NOT NULL DEFAULT '',
|
||||
lot_name TEXT NOT NULL,
|
||||
description TEXT,
|
||||
price REAL NOT NULL DEFAULT 0,
|
||||
quantity INTEGER NOT NULL DEFAULT 0,
|
||||
reserve INTEGER NOT NULL DEFAULT 0,
|
||||
available_qty REAL,
|
||||
partnumbers TEXT,
|
||||
lot_category TEXT,
|
||||
created_at DATETIME,
|
||||
updated_at DATETIME
|
||||
)
|
||||
`).Error; err != nil {
|
||||
t.Fatalf("create local_pricelist_items: %v", err)
|
||||
}
|
||||
if err := db.Exec(`
|
||||
CREATE TABLE local_pricelist_items__temp (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
legacy TEXT
|
||||
)
|
||||
`).Error; err != nil {
|
||||
t.Fatalf("create local_pricelist_items__temp: %v", err)
|
||||
}
|
||||
|
||||
if err := cleanupStaleReadOnlyCacheTempTables(db); err != nil {
|
||||
t.Fatalf("cleanup stale read-only cache temp tables: %v", err)
|
||||
}
|
||||
|
||||
if db.Migrator().HasTable("local_pricelist_items__temp") {
|
||||
t.Fatalf("expected stale temp table to be dropped")
|
||||
}
|
||||
if !db.Migrator().HasTable("local_pricelist_items") {
|
||||
t.Fatalf("expected base local_pricelist_items table to remain")
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -51,8 +51,8 @@ func TestRunLocalMigrationsBackfillsDefaultProject(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("get system project: %v", err)
|
||||
}
|
||||
if project.Name == nil || *project.Name != "Без проекта" {
|
||||
t.Fatalf("expected system project name, got %v", project.Name)
|
||||
if project.Name != "Без проекта" {
|
||||
t.Fatalf("expected system project name, got %q", project.Name)
|
||||
}
|
||||
if !project.IsSystem {
|
||||
t.Fatalf("expected system project flag")
|
||||
|
||||
@@ -4,8 +4,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -49,89 +47,6 @@ var localMigrations = []localMigration{
|
||||
name: "Attach existing configurations to latest local pricelist and recalc usage",
|
||||
run: backfillConfigurationPricelists,
|
||||
},
|
||||
{
|
||||
id: "2026_02_06_pricelist_index_fix",
|
||||
name: "Use unique server_id for local pricelists and allow duplicate versions",
|
||||
run: fixLocalPricelistIndexes,
|
||||
},
|
||||
{
|
||||
id: "2026_02_06_pricelist_source",
|
||||
name: "Backfill source for local pricelists and create source indexes",
|
||||
run: backfillLocalPricelistSource,
|
||||
},
|
||||
{
|
||||
id: "2026_02_09_drop_component_unused_fields",
|
||||
name: "Remove current_price and synced_at from local_components (unused fields)",
|
||||
run: dropComponentUnusedFields,
|
||||
},
|
||||
{
|
||||
id: "2026_02_09_add_warehouse_competitor_pricelists",
|
||||
name: "Add warehouse_pricelist_id and competitor_pricelist_id to local_configurations",
|
||||
run: addWarehouseCompetitorPriceLists,
|
||||
},
|
||||
{
|
||||
id: "2026_02_11_local_pricelist_item_category",
|
||||
name: "Add lot_category to local_pricelist_items and create indexes",
|
||||
run: addLocalPricelistItemCategoryAndIndexes,
|
||||
},
|
||||
{
|
||||
id: "2026_02_11_local_config_article",
|
||||
name: "Add article to local_configurations",
|
||||
run: addLocalConfigurationArticle,
|
||||
},
|
||||
{
|
||||
id: "2026_02_11_local_config_server_model",
|
||||
name: "Add server_model to local_configurations",
|
||||
run: addLocalConfigurationServerModel,
|
||||
},
|
||||
{
|
||||
id: "2026_02_11_local_config_support_code",
|
||||
name: "Add support_code to local_configurations",
|
||||
run: addLocalConfigurationSupportCode,
|
||||
},
|
||||
{
|
||||
id: "2026_02_13_local_project_code",
|
||||
name: "Add project code to local_projects and backfill",
|
||||
run: addLocalProjectCode,
|
||||
},
|
||||
{
|
||||
id: "2026_02_13_local_project_variant",
|
||||
name: "Add project variant to local_projects and backfill",
|
||||
run: addLocalProjectVariant,
|
||||
},
|
||||
{
|
||||
id: "2026_02_13_local_project_name_nullable",
|
||||
name: "Allow NULL project names in local_projects",
|
||||
run: allowLocalProjectNameNull,
|
||||
},
|
||||
{
|
||||
id: "2026_02_19_configuration_versions_dedup_spec_price",
|
||||
name: "Deduplicate configuration revisions by spec+price",
|
||||
run: deduplicateConfigurationVersionsBySpecAndPrice,
|
||||
},
|
||||
{
|
||||
id: "2026_02_19_local_config_line_no",
|
||||
name: "Add line_no to local_configurations and backfill ordering",
|
||||
run: addLocalConfigurationLineNo,
|
||||
},
|
||||
{
|
||||
id: "2026_03_07_local_partnumber_book_catalog",
|
||||
name: "Convert local partnumber book cache to book membership + deduplicated PN catalog",
|
||||
run: migrateLocalPartnumberBookCatalog,
|
||||
},
|
||||
{
|
||||
id: "2026_03_13_pricelist_items_dedup_unique",
|
||||
name: "Deduplicate local_pricelist_items and add unique index on (pricelist_id, lot_name)",
|
||||
run: deduplicatePricelistItemsAndAddUniqueIndex,
|
||||
},
|
||||
}
|
||||
|
||||
type localPartnumberCatalogRow struct {
|
||||
Partnumber string
|
||||
LotsJSON LocalPartnumberBookLots
|
||||
Description string
|
||||
CreatedAt time.Time
|
||||
ServerID int
|
||||
}
|
||||
|
||||
func runLocalMigrations(db *gorm.DB) error {
|
||||
@@ -268,8 +183,7 @@ func ensureDefaultProjectTx(tx *gorm.DB, ownerUsername string) (*LocalProject, e
|
||||
project = LocalProject{
|
||||
UUID: uuid.NewString(),
|
||||
OwnerUsername: ownerUsername,
|
||||
Code: "Без проекта",
|
||||
Name: ptrString("Без проекта"),
|
||||
Name: "Без проекта",
|
||||
IsActive: true,
|
||||
IsSystem: true,
|
||||
CreatedAt: now,
|
||||
@@ -283,142 +197,9 @@ func ensureDefaultProjectTx(tx *gorm.DB, ownerUsername string) (*LocalProject, e
|
||||
return &project, nil
|
||||
}
|
||||
|
||||
func addLocalProjectCode(tx *gorm.DB) error {
|
||||
if err := tx.Exec(`ALTER TABLE local_projects ADD COLUMN code TEXT`).Error; err != nil {
|
||||
if !strings.Contains(strings.ToLower(err.Error()), "duplicate") &&
|
||||
!strings.Contains(strings.ToLower(err.Error()), "exists") {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Drop unique index if it already exists to allow de-duplication updates.
|
||||
if err := tx.Exec(`DROP INDEX IF EXISTS idx_local_projects_code`).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Copy code from current project name.
|
||||
if err := tx.Exec(`
|
||||
UPDATE local_projects
|
||||
SET code = TRIM(COALESCE(name, ''))`).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Ensure any remaining blanks have a unique fallback.
|
||||
if err := tx.Exec(`
|
||||
UPDATE local_projects
|
||||
SET code = 'P-' || uuid
|
||||
WHERE code IS NULL OR TRIM(code) = ''`).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// De-duplicate codes: OPS-1948-2, OPS-1948-3...
|
||||
if err := tx.Exec(`
|
||||
WITH ranked AS (
|
||||
SELECT id, code,
|
||||
ROW_NUMBER() OVER (PARTITION BY code ORDER BY id) AS rn
|
||||
FROM local_projects
|
||||
)
|
||||
UPDATE local_projects
|
||||
SET code = code || '-' || (SELECT rn FROM ranked WHERE ranked.id = local_projects.id)
|
||||
WHERE id IN (SELECT id FROM ranked WHERE rn > 1)`).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create unique index for project codes (ignore if exists).
|
||||
if err := tx.Exec(`CREATE UNIQUE INDEX IF NOT EXISTS idx_local_projects_code ON local_projects(code)`).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func addLocalProjectVariant(tx *gorm.DB) error {
|
||||
if err := tx.Exec(`ALTER TABLE local_projects ADD COLUMN variant TEXT NOT NULL DEFAULT ''`).Error; err != nil {
|
||||
if !strings.Contains(strings.ToLower(err.Error()), "duplicate") &&
|
||||
!strings.Contains(strings.ToLower(err.Error()), "exists") {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Drop legacy code index if present.
|
||||
if err := tx.Exec(`DROP INDEX IF EXISTS idx_local_projects_code`).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Reset code from name and clear variant.
|
||||
if err := tx.Exec(`
|
||||
UPDATE local_projects
|
||||
SET code = TRIM(COALESCE(name, '')),
|
||||
variant = ''`).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// De-duplicate by assigning variant numbers: 2,3...
|
||||
if err := tx.Exec(`
|
||||
WITH ranked AS (
|
||||
SELECT id, code,
|
||||
ROW_NUMBER() OVER (PARTITION BY code ORDER BY id) AS rn
|
||||
FROM local_projects
|
||||
)
|
||||
UPDATE local_projects
|
||||
SET variant = CASE
|
||||
WHEN (SELECT rn FROM ranked WHERE ranked.id = local_projects.id) = 1 THEN ''
|
||||
ELSE '-' || CAST((SELECT rn FROM ranked WHERE ranked.id = local_projects.id) AS TEXT)
|
||||
END`).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tx.Exec(`CREATE UNIQUE INDEX IF NOT EXISTS idx_local_projects_code_variant ON local_projects(code, variant)`).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func allowLocalProjectNameNull(tx *gorm.DB) error {
|
||||
if err := tx.Exec(`ALTER TABLE local_projects RENAME TO local_projects_old`).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tx.Exec(`
|
||||
CREATE TABLE local_projects (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
uuid TEXT NOT NULL UNIQUE,
|
||||
server_id INTEGER NULL,
|
||||
owner_username TEXT NOT NULL,
|
||||
code TEXT NOT NULL,
|
||||
variant TEXT NOT NULL DEFAULT '',
|
||||
name TEXT NULL,
|
||||
tracker_url TEXT NULL,
|
||||
is_active INTEGER NOT NULL DEFAULT 1,
|
||||
is_system INTEGER NOT NULL DEFAULT 0,
|
||||
created_at DATETIME,
|
||||
updated_at DATETIME,
|
||||
synced_at DATETIME NULL,
|
||||
sync_status TEXT DEFAULT 'local'
|
||||
)`).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_ = tx.Exec(`CREATE INDEX IF NOT EXISTS idx_local_projects_owner_username ON local_projects(owner_username)`).Error
|
||||
_ = tx.Exec(`CREATE INDEX IF NOT EXISTS idx_local_projects_is_active ON local_projects(is_active)`).Error
|
||||
_ = tx.Exec(`CREATE INDEX IF NOT EXISTS idx_local_projects_is_system ON local_projects(is_system)`).Error
|
||||
_ = tx.Exec(`CREATE UNIQUE INDEX IF NOT EXISTS idx_local_projects_code_variant ON local_projects(code, variant)`).Error
|
||||
|
||||
if err := tx.Exec(`
|
||||
INSERT INTO local_projects (id, uuid, server_id, owner_username, code, variant, name, tracker_url, is_active, is_system, created_at, updated_at, synced_at, sync_status)
|
||||
SELECT id, uuid, server_id, owner_username, code, variant, name, tracker_url, is_active, is_system, created_at, updated_at, synced_at, sync_status
|
||||
FROM local_projects_old`).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_ = tx.Exec(`DROP TABLE local_projects_old`).Error
|
||||
return nil
|
||||
}
|
||||
|
||||
func backfillConfigurationPricelists(tx *gorm.DB) error {
|
||||
var latest LocalPricelist
|
||||
if err := tx.Where("source = ?", "estimate").Order("created_at DESC").First(&latest).Error; err != nil {
|
||||
if err := tx.Order("created_at DESC").First(&latest).Error; err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil
|
||||
}
|
||||
@@ -456,667 +237,3 @@ func chooseNonZeroTime(candidate time.Time, fallback time.Time) time.Time {
|
||||
}
|
||||
return candidate
|
||||
}
|
||||
|
||||
func deduplicateConfigurationVersionsBySpecAndPrice(tx *gorm.DB) error {
|
||||
var configs []LocalConfiguration
|
||||
if err := tx.Select("uuid", "current_version_id").Find(&configs).Error; err != nil {
|
||||
return fmt.Errorf("load configurations for revision deduplication: %w", err)
|
||||
}
|
||||
|
||||
var removedTotal int
|
||||
for i := range configs {
|
||||
cfg := configs[i]
|
||||
|
||||
var versions []LocalConfigurationVersion
|
||||
if err := tx.Where("configuration_uuid = ?", cfg.UUID).
|
||||
Order("version_no ASC, created_at ASC").
|
||||
Find(&versions).Error; err != nil {
|
||||
return fmt.Errorf("load versions for %s: %w", cfg.UUID, err)
|
||||
}
|
||||
if len(versions) < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
deleteIDs := make([]string, 0)
|
||||
deleteSet := make(map[string]struct{})
|
||||
kept := make([]LocalConfigurationVersion, 0, len(versions))
|
||||
var prevKey string
|
||||
hasPrev := false
|
||||
|
||||
for _, version := range versions {
|
||||
snapshotCfg, err := DecodeConfigurationSnapshot(version.Data)
|
||||
if err != nil {
|
||||
// Keep malformed snapshots untouched and reset chain to avoid accidental removals.
|
||||
kept = append(kept, version)
|
||||
hasPrev = false
|
||||
continue
|
||||
}
|
||||
|
||||
key, err := BuildConfigurationSpecPriceFingerprint(snapshotCfg)
|
||||
if err != nil {
|
||||
kept = append(kept, version)
|
||||
hasPrev = false
|
||||
continue
|
||||
}
|
||||
|
||||
if !hasPrev || key != prevKey {
|
||||
kept = append(kept, version)
|
||||
prevKey = key
|
||||
hasPrev = true
|
||||
continue
|
||||
}
|
||||
|
||||
deleteIDs = append(deleteIDs, version.ID)
|
||||
deleteSet[version.ID] = struct{}{}
|
||||
}
|
||||
|
||||
if len(deleteIDs) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := tx.Where("id IN ?", deleteIDs).Delete(&LocalConfigurationVersion{}).Error; err != nil {
|
||||
return fmt.Errorf("delete duplicate versions for %s: %w", cfg.UUID, err)
|
||||
}
|
||||
removedTotal += len(deleteIDs)
|
||||
|
||||
latestKeptID := kept[len(kept)-1].ID
|
||||
if cfg.CurrentVersionID == nil || *cfg.CurrentVersionID == "" {
|
||||
if err := tx.Model(&LocalConfiguration{}).
|
||||
Where("uuid = ?", cfg.UUID).
|
||||
Update("current_version_id", latestKeptID).Error; err != nil {
|
||||
return fmt.Errorf("set missing current_version_id for %s: %w", cfg.UUID, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if _, deleted := deleteSet[*cfg.CurrentVersionID]; deleted {
|
||||
if err := tx.Model(&LocalConfiguration{}).
|
||||
Where("uuid = ?", cfg.UUID).
|
||||
Update("current_version_id", latestKeptID).Error; err != nil {
|
||||
return fmt.Errorf("repair current_version_id for %s: %w", cfg.UUID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if removedTotal > 0 {
|
||||
slog.Info("deduplicated configuration revisions", "removed_versions", removedTotal)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func fixLocalPricelistIndexes(tx *gorm.DB) error {
|
||||
type indexRow struct {
|
||||
Name string `gorm:"column:name"`
|
||||
Unique int `gorm:"column:unique"`
|
||||
}
|
||||
var indexes []indexRow
|
||||
if err := tx.Raw("PRAGMA index_list('local_pricelists')").Scan(&indexes).Error; err != nil {
|
||||
return fmt.Errorf("list local_pricelists indexes: %w", err)
|
||||
}
|
||||
|
||||
for _, idx := range indexes {
|
||||
if idx.Unique == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
type indexInfoRow struct {
|
||||
Name string `gorm:"column:name"`
|
||||
}
|
||||
var info []indexInfoRow
|
||||
if err := tx.Raw(fmt.Sprintf("PRAGMA index_info('%s')", strings.ReplaceAll(idx.Name, "'", "''"))).Scan(&info).Error; err != nil {
|
||||
return fmt.Errorf("load index info for %s: %w", idx.Name, err)
|
||||
}
|
||||
if len(info) != 1 || info[0].Name != "version" {
|
||||
continue
|
||||
}
|
||||
|
||||
quoted := strings.ReplaceAll(idx.Name, `"`, `""`)
|
||||
if err := tx.Exec(fmt.Sprintf(`DROP INDEX IF EXISTS "%s"`, quoted)).Error; err != nil {
|
||||
return fmt.Errorf("drop unique version index %s: %w", idx.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := tx.Exec(`
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_local_pricelists_server_id
|
||||
ON local_pricelists(server_id)
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("ensure unique index local_pricelists(server_id): %w", err)
|
||||
}
|
||||
|
||||
if err := tx.Exec(`
|
||||
CREATE INDEX IF NOT EXISTS idx_local_pricelists_version
|
||||
ON local_pricelists(version)
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("ensure index local_pricelists(version): %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func backfillLocalPricelistSource(tx *gorm.DB) error {
|
||||
if err := tx.Exec(`
|
||||
UPDATE local_pricelists
|
||||
SET source = 'estimate'
|
||||
WHERE source IS NULL OR source = ''
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("backfill local_pricelists.source: %w", err)
|
||||
}
|
||||
|
||||
if err := tx.Exec(`
|
||||
CREATE INDEX IF NOT EXISTS idx_local_pricelists_source_created_at
|
||||
ON local_pricelists(source, created_at DESC)
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("ensure idx_local_pricelists_source_created_at: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func dropComponentUnusedFields(tx *gorm.DB) error {
|
||||
// Check if columns exist
|
||||
type columnInfo struct {
|
||||
Name string `gorm:"column:name"`
|
||||
}
|
||||
|
||||
var columns []columnInfo
|
||||
if err := tx.Raw(`
|
||||
SELECT name FROM pragma_table_info('local_components')
|
||||
WHERE name IN ('current_price', 'synced_at')
|
||||
`).Scan(&columns).Error; err != nil {
|
||||
return fmt.Errorf("check columns existence: %w", err)
|
||||
}
|
||||
|
||||
if len(columns) == 0 {
|
||||
slog.Info("unused fields already removed from local_components")
|
||||
return nil
|
||||
}
|
||||
|
||||
// SQLite: recreate table without current_price and synced_at
|
||||
if err := tx.Exec(`
|
||||
CREATE TABLE local_components_new (
|
||||
lot_name TEXT PRIMARY KEY,
|
||||
lot_description TEXT,
|
||||
category TEXT,
|
||||
model TEXT
|
||||
)
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("create new local_components table: %w", err)
|
||||
}
|
||||
|
||||
if err := tx.Exec(`
|
||||
INSERT INTO local_components_new (lot_name, lot_description, category, model)
|
||||
SELECT lot_name, lot_description, category, model
|
||||
FROM local_components
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("copy data to new table: %w", err)
|
||||
}
|
||||
|
||||
if err := tx.Exec(`DROP TABLE local_components`).Error; err != nil {
|
||||
return fmt.Errorf("drop old table: %w", err)
|
||||
}
|
||||
|
||||
if err := tx.Exec(`ALTER TABLE local_components_new RENAME TO local_components`).Error; err != nil {
|
||||
return fmt.Errorf("rename new table: %w", err)
|
||||
}
|
||||
|
||||
slog.Info("dropped current_price and synced_at columns from local_components")
|
||||
return nil
|
||||
}
|
||||
|
||||
func addWarehouseCompetitorPriceLists(tx *gorm.DB) error {
|
||||
// Check if columns exist
|
||||
type columnInfo struct {
|
||||
Name string `gorm:"column:name"`
|
||||
}
|
||||
|
||||
var columns []columnInfo
|
||||
if err := tx.Raw(`
|
||||
SELECT name FROM pragma_table_info('local_configurations')
|
||||
WHERE name IN ('warehouse_pricelist_id', 'competitor_pricelist_id')
|
||||
`).Scan(&columns).Error; err != nil {
|
||||
return fmt.Errorf("check columns existence: %w", err)
|
||||
}
|
||||
|
||||
if len(columns) == 2 {
|
||||
slog.Info("warehouse and competitor pricelist columns already exist")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add columns if they don't exist
|
||||
if err := tx.Exec(`
|
||||
ALTER TABLE local_configurations
|
||||
ADD COLUMN warehouse_pricelist_id INTEGER
|
||||
`).Error; err != nil {
|
||||
// Column might already exist, ignore
|
||||
if !strings.Contains(err.Error(), "duplicate column") {
|
||||
return fmt.Errorf("add warehouse_pricelist_id column: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := tx.Exec(`
|
||||
ALTER TABLE local_configurations
|
||||
ADD COLUMN competitor_pricelist_id INTEGER
|
||||
`).Error; err != nil {
|
||||
// Column might already exist, ignore
|
||||
if !strings.Contains(err.Error(), "duplicate column") {
|
||||
return fmt.Errorf("add competitor_pricelist_id column: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create indexes
|
||||
if err := tx.Exec(`
|
||||
CREATE INDEX IF NOT EXISTS idx_local_configurations_warehouse_pricelist
|
||||
ON local_configurations(warehouse_pricelist_id)
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("create warehouse pricelist index: %w", err)
|
||||
}
|
||||
|
||||
if err := tx.Exec(`
|
||||
CREATE INDEX IF NOT EXISTS idx_local_configurations_competitor_pricelist
|
||||
ON local_configurations(competitor_pricelist_id)
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("create competitor pricelist index: %w", err)
|
||||
}
|
||||
|
||||
slog.Info("added warehouse and competitor pricelist fields to local_configurations")
|
||||
return nil
|
||||
}
|
||||
|
||||
func addLocalPricelistItemCategoryAndIndexes(tx *gorm.DB) error {
|
||||
type columnInfo struct {
|
||||
Name string `gorm:"column:name"`
|
||||
}
|
||||
|
||||
var columns []columnInfo
|
||||
if err := tx.Raw(`
|
||||
SELECT name FROM pragma_table_info('local_pricelist_items')
|
||||
WHERE name IN ('lot_category')
|
||||
`).Scan(&columns).Error; err != nil {
|
||||
return fmt.Errorf("check local_pricelist_items(lot_category) existence: %w", err)
|
||||
}
|
||||
|
||||
if len(columns) == 0 {
|
||||
if err := tx.Exec(`
|
||||
ALTER TABLE local_pricelist_items
|
||||
ADD COLUMN lot_category TEXT
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("add local_pricelist_items.lot_category: %w", err)
|
||||
}
|
||||
slog.Info("added lot_category to local_pricelist_items")
|
||||
}
|
||||
|
||||
if err := tx.Exec(`
|
||||
CREATE INDEX IF NOT EXISTS idx_local_pricelist_items_pricelist_lot
|
||||
ON local_pricelist_items(pricelist_id, lot_name)
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("ensure idx_local_pricelist_items_pricelist_lot: %w", err)
|
||||
}
|
||||
|
||||
if err := tx.Exec(`
|
||||
CREATE INDEX IF NOT EXISTS idx_local_pricelist_items_lot_category
|
||||
ON local_pricelist_items(lot_category)
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("ensure idx_local_pricelist_items_lot_category: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func addLocalConfigurationArticle(tx *gorm.DB) error {
|
||||
type columnInfo struct {
|
||||
Name string `gorm:"column:name"`
|
||||
}
|
||||
var columns []columnInfo
|
||||
if err := tx.Raw(`
|
||||
SELECT name FROM pragma_table_info('local_configurations')
|
||||
WHERE name IN ('article')
|
||||
`).Scan(&columns).Error; err != nil {
|
||||
return fmt.Errorf("check local_configurations(article) existence: %w", err)
|
||||
}
|
||||
if len(columns) == 0 {
|
||||
if err := tx.Exec(`
|
||||
ALTER TABLE local_configurations
|
||||
ADD COLUMN article TEXT
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("add local_configurations.article: %w", err)
|
||||
}
|
||||
slog.Info("added article to local_configurations")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func addLocalConfigurationServerModel(tx *gorm.DB) error {
|
||||
type columnInfo struct {
|
||||
Name string `gorm:"column:name"`
|
||||
}
|
||||
var columns []columnInfo
|
||||
if err := tx.Raw(`
|
||||
SELECT name FROM pragma_table_info('local_configurations')
|
||||
WHERE name IN ('server_model')
|
||||
`).Scan(&columns).Error; err != nil {
|
||||
return fmt.Errorf("check local_configurations(server_model) existence: %w", err)
|
||||
}
|
||||
if len(columns) == 0 {
|
||||
if err := tx.Exec(`
|
||||
ALTER TABLE local_configurations
|
||||
ADD COLUMN server_model TEXT
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("add local_configurations.server_model: %w", err)
|
||||
}
|
||||
slog.Info("added server_model to local_configurations")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func addLocalConfigurationSupportCode(tx *gorm.DB) error {
|
||||
type columnInfo struct {
|
||||
Name string `gorm:"column:name"`
|
||||
}
|
||||
var columns []columnInfo
|
||||
if err := tx.Raw(`
|
||||
SELECT name FROM pragma_table_info('local_configurations')
|
||||
WHERE name IN ('support_code')
|
||||
`).Scan(&columns).Error; err != nil {
|
||||
return fmt.Errorf("check local_configurations(support_code) existence: %w", err)
|
||||
}
|
||||
if len(columns) == 0 {
|
||||
if err := tx.Exec(`
|
||||
ALTER TABLE local_configurations
|
||||
ADD COLUMN support_code TEXT
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("add local_configurations.support_code: %w", err)
|
||||
}
|
||||
slog.Info("added support_code to local_configurations")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func addLocalConfigurationLineNo(tx *gorm.DB) error {
|
||||
type columnInfo struct {
|
||||
Name string `gorm:"column:name"`
|
||||
}
|
||||
var columns []columnInfo
|
||||
if err := tx.Raw(`
|
||||
SELECT name FROM pragma_table_info('local_configurations')
|
||||
WHERE name IN ('line_no')
|
||||
`).Scan(&columns).Error; err != nil {
|
||||
return fmt.Errorf("check local_configurations(line_no) existence: %w", err)
|
||||
}
|
||||
if len(columns) == 0 {
|
||||
if err := tx.Exec(`
|
||||
ALTER TABLE local_configurations
|
||||
ADD COLUMN line_no INTEGER
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("add local_configurations.line_no: %w", err)
|
||||
}
|
||||
slog.Info("added line_no to local_configurations")
|
||||
}
|
||||
|
||||
if err := tx.Exec(`
|
||||
WITH ranked AS (
|
||||
SELECT
|
||||
id,
|
||||
ROW_NUMBER() OVER (
|
||||
PARTITION BY COALESCE(NULLIF(TRIM(project_uuid), ''), '__NO_PROJECT__')
|
||||
ORDER BY created_at ASC, id ASC
|
||||
) AS rn
|
||||
FROM local_configurations
|
||||
WHERE line_no IS NULL OR line_no <= 0
|
||||
)
|
||||
UPDATE local_configurations
|
||||
SET line_no = (
|
||||
SELECT rn * 10
|
||||
FROM ranked
|
||||
WHERE ranked.id = local_configurations.id
|
||||
)
|
||||
WHERE id IN (SELECT id FROM ranked)
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("backfill local_configurations.line_no: %w", err)
|
||||
}
|
||||
|
||||
if err := tx.Exec(`
|
||||
CREATE INDEX IF NOT EXISTS idx_local_configurations_project_line_no
|
||||
ON local_configurations(project_uuid, line_no)
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("ensure idx_local_configurations_project_line_no: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func migrateLocalPartnumberBookCatalog(tx *gorm.DB) error {
|
||||
type columnInfo struct {
|
||||
Name string `gorm:"column:name"`
|
||||
}
|
||||
|
||||
hasBooksTable := tx.Migrator().HasTable(&LocalPartnumberBook{})
|
||||
hasItemsTable := tx.Migrator().HasTable(&LocalPartnumberBookItem{})
|
||||
if !hasItemsTable {
|
||||
return nil
|
||||
}
|
||||
|
||||
if hasBooksTable {
|
||||
var bookCols []columnInfo
|
||||
if err := tx.Raw(`SELECT name FROM pragma_table_info('local_partnumber_books')`).Scan(&bookCols).Error; err != nil {
|
||||
return fmt.Errorf("load local_partnumber_books columns: %w", err)
|
||||
}
|
||||
hasPartnumbersJSON := false
|
||||
for _, c := range bookCols {
|
||||
if c.Name == "partnumbers_json" {
|
||||
hasPartnumbersJSON = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasPartnumbersJSON {
|
||||
if err := tx.Exec(`ALTER TABLE local_partnumber_books ADD COLUMN partnumbers_json TEXT NOT NULL DEFAULT '[]'`).Error; err != nil {
|
||||
return fmt.Errorf("add local_partnumber_books.partnumbers_json: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var itemCols []columnInfo
|
||||
if err := tx.Raw(`SELECT name FROM pragma_table_info('local_partnumber_book_items')`).Scan(&itemCols).Error; err != nil {
|
||||
return fmt.Errorf("load local_partnumber_book_items columns: %w", err)
|
||||
}
|
||||
hasBookID := false
|
||||
hasLotName := false
|
||||
hasLotsJSON := false
|
||||
for _, c := range itemCols {
|
||||
if c.Name == "book_id" {
|
||||
hasBookID = true
|
||||
}
|
||||
if c.Name == "lot_name" {
|
||||
hasLotName = true
|
||||
}
|
||||
if c.Name == "lots_json" {
|
||||
hasLotsJSON = true
|
||||
}
|
||||
}
|
||||
if !hasBookID && !hasLotName && !hasLotsJSON {
|
||||
return nil
|
||||
}
|
||||
|
||||
type legacyRow struct {
|
||||
BookID uint
|
||||
Partnumber string
|
||||
LotName string
|
||||
Description string
|
||||
CreatedAt time.Time
|
||||
ServerID int
|
||||
}
|
||||
bookPNs := make(map[uint]map[string]struct{})
|
||||
catalog := make(map[string]*localPartnumberCatalogRow)
|
||||
|
||||
if hasBookID || hasLotName {
|
||||
var rows []legacyRow
|
||||
if err := tx.Raw(`
|
||||
SELECT
|
||||
i.book_id,
|
||||
i.partnumber,
|
||||
i.lot_name,
|
||||
COALESCE(i.description, '') AS description,
|
||||
b.created_at,
|
||||
b.server_id
|
||||
FROM local_partnumber_book_items i
|
||||
INNER JOIN local_partnumber_books b ON b.id = i.book_id
|
||||
ORDER BY b.created_at DESC, b.id DESC, i.partnumber ASC, i.id ASC
|
||||
`).Scan(&rows).Error; err != nil {
|
||||
return fmt.Errorf("load legacy local partnumber book items: %w", err)
|
||||
}
|
||||
|
||||
for _, row := range rows {
|
||||
if _, ok := bookPNs[row.BookID]; !ok {
|
||||
bookPNs[row.BookID] = make(map[string]struct{})
|
||||
}
|
||||
bookPNs[row.BookID][row.Partnumber] = struct{}{}
|
||||
|
||||
entry, ok := catalog[row.Partnumber]
|
||||
if !ok {
|
||||
entry = &localPartnumberCatalogRow{
|
||||
Partnumber: row.Partnumber,
|
||||
Description: row.Description,
|
||||
CreatedAt: row.CreatedAt,
|
||||
ServerID: row.ServerID,
|
||||
}
|
||||
catalog[row.Partnumber] = entry
|
||||
}
|
||||
if row.CreatedAt.After(entry.CreatedAt) || (row.CreatedAt.Equal(entry.CreatedAt) && row.ServerID >= entry.ServerID) {
|
||||
entry.Description = row.Description
|
||||
entry.CreatedAt = row.CreatedAt
|
||||
entry.ServerID = row.ServerID
|
||||
}
|
||||
found := false
|
||||
for i := range entry.LotsJSON {
|
||||
if entry.LotsJSON[i].LotName == row.LotName {
|
||||
entry.LotsJSON[i].Qty += 1
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found && row.LotName != "" {
|
||||
entry.LotsJSON = append(entry.LotsJSON, LocalPartnumberBookLot{LotName: row.LotName, Qty: 1})
|
||||
}
|
||||
}
|
||||
|
||||
var books []LocalPartnumberBook
|
||||
if err := tx.Find(&books).Error; err != nil {
|
||||
return fmt.Errorf("load local partnumber books: %w", err)
|
||||
}
|
||||
for _, book := range books {
|
||||
pnSet := bookPNs[book.ID]
|
||||
partnumbers := make([]string, 0, len(pnSet))
|
||||
for pn := range pnSet {
|
||||
partnumbers = append(partnumbers, pn)
|
||||
}
|
||||
sort.Strings(partnumbers)
|
||||
if err := tx.Model(&LocalPartnumberBook{}).
|
||||
Where("id = ?", book.ID).
|
||||
Update("partnumbers_json", LocalStringList(partnumbers)).Error; err != nil {
|
||||
return fmt.Errorf("update partnumbers_json for local book %d: %w", book.ID, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
var items []LocalPartnumberBookItem
|
||||
if err := tx.Order("id DESC").Find(&items).Error; err != nil {
|
||||
return fmt.Errorf("load canonical local partnumber book items: %w", err)
|
||||
}
|
||||
for _, item := range items {
|
||||
entry, ok := catalog[item.Partnumber]
|
||||
if !ok {
|
||||
copiedLots := append(LocalPartnumberBookLots(nil), item.LotsJSON...)
|
||||
catalog[item.Partnumber] = &localPartnumberCatalogRow{
|
||||
Partnumber: item.Partnumber,
|
||||
LotsJSON: copiedLots,
|
||||
Description: item.Description,
|
||||
}
|
||||
continue
|
||||
}
|
||||
if entry.Description == "" && item.Description != "" {
|
||||
entry.Description = item.Description
|
||||
}
|
||||
for _, lot := range item.LotsJSON {
|
||||
merged := false
|
||||
for i := range entry.LotsJSON {
|
||||
if entry.LotsJSON[i].LotName == lot.LotName {
|
||||
if lot.Qty > entry.LotsJSON[i].Qty {
|
||||
entry.LotsJSON[i].Qty = lot.Qty
|
||||
}
|
||||
merged = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !merged {
|
||||
entry.LotsJSON = append(entry.LotsJSON, lot)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return rebuildLocalPartnumberBookCatalog(tx, catalog)
|
||||
}
|
||||
|
||||
func rebuildLocalPartnumberBookCatalog(tx *gorm.DB, catalog map[string]*localPartnumberCatalogRow) error {
|
||||
if err := tx.Exec(`
|
||||
CREATE TABLE local_partnumber_book_items_new (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
partnumber TEXT NOT NULL UNIQUE,
|
||||
lots_json TEXT NOT NULL,
|
||||
description TEXT
|
||||
)
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("create new local_partnumber_book_items table: %w", err)
|
||||
}
|
||||
|
||||
orderedPartnumbers := make([]string, 0, len(catalog))
|
||||
for pn := range catalog {
|
||||
orderedPartnumbers = append(orderedPartnumbers, pn)
|
||||
}
|
||||
sort.Strings(orderedPartnumbers)
|
||||
for _, pn := range orderedPartnumbers {
|
||||
row := catalog[pn]
|
||||
sort.Slice(row.LotsJSON, func(i, j int) bool {
|
||||
return row.LotsJSON[i].LotName < row.LotsJSON[j].LotName
|
||||
})
|
||||
if err := tx.Table("local_partnumber_book_items_new").Create(&LocalPartnumberBookItem{
|
||||
Partnumber: row.Partnumber,
|
||||
LotsJSON: row.LotsJSON,
|
||||
Description: row.Description,
|
||||
}).Error; err != nil {
|
||||
return fmt.Errorf("insert new local_partnumber_book_items row for %s: %w", pn, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := tx.Exec(`DROP TABLE local_partnumber_book_items`).Error; err != nil {
|
||||
return fmt.Errorf("drop legacy local_partnumber_book_items: %w", err)
|
||||
}
|
||||
if err := tx.Exec(`ALTER TABLE local_partnumber_book_items_new RENAME TO local_partnumber_book_items`).Error; err != nil {
|
||||
return fmt.Errorf("rename new local_partnumber_book_items table: %w", err)
|
||||
}
|
||||
if err := tx.Exec(`CREATE UNIQUE INDEX IF NOT EXISTS idx_local_partnumber_book_items_partnumber ON local_partnumber_book_items(partnumber)`).Error; err != nil {
|
||||
return fmt.Errorf("create local_partnumber_book_items partnumber index: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func deduplicatePricelistItemsAndAddUniqueIndex(tx *gorm.DB) error {
|
||||
// Remove duplicate (pricelist_id, lot_name) rows keeping only the row with the lowest id.
|
||||
if err := tx.Exec(`
|
||||
DELETE FROM local_pricelist_items
|
||||
WHERE id NOT IN (
|
||||
SELECT MIN(id) FROM local_pricelist_items
|
||||
GROUP BY pricelist_id, lot_name
|
||||
)
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("deduplicate local_pricelist_items: %w", err)
|
||||
}
|
||||
|
||||
// Add unique index to prevent future duplicates.
|
||||
if err := tx.Exec(`
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_local_pricelist_items_pricelist_lot_unique
|
||||
ON local_pricelist_items(pricelist_id, lot_name)
|
||||
`).Error; err != nil {
|
||||
return fmt.Errorf("create unique index on local_pricelist_items: %w", err)
|
||||
}
|
||||
slog.Info("deduplicated local_pricelist_items and added unique index")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -57,64 +57,31 @@ func (c LocalConfigItems) Total() float64 {
|
||||
return total
|
||||
}
|
||||
|
||||
// LocalStringList is a JSON-encoded list of strings stored as TEXT in SQLite.
type LocalStringList []string

// Value implements driver.Valuer by encoding the list as a JSON array.
func (s LocalStringList) Value() (driver.Value, error) {
	return json.Marshal(s)
}

// Scan implements sql.Scanner. A NULL column becomes an empty (non-nil) list;
// TEXT and BLOB values are decoded as JSON; any other driver type is rejected.
func (s *LocalStringList) Scan(value any) error {
	if value == nil {
		*s = make(LocalStringList, 0)
		return nil
	}
	// "raw" instead of "bytes" so the local does not shadow the standard
	// bytes package.
	var raw []byte
	switch v := value.(type) {
	case []byte:
		raw = v
	case string:
		raw = []byte(v)
	default:
		return errors.New("type assertion failed for LocalStringList")
	}
	return json.Unmarshal(raw, s)
}
|
||||
|
||||
// LocalConfiguration stores configurations in local SQLite
|
||||
type LocalConfiguration struct {
|
||||
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
|
||||
UUID string `gorm:"uniqueIndex;not null" json:"uuid"`
|
||||
ServerID *uint `json:"server_id"` // ID on MariaDB server, NULL if local only
|
||||
ProjectUUID *string `gorm:"index" json:"project_uuid,omitempty"`
|
||||
CurrentVersionID *string `gorm:"index" json:"current_version_id,omitempty"`
|
||||
IsActive bool `gorm:"default:true;index" json:"is_active"`
|
||||
Name string `gorm:"not null" json:"name"`
|
||||
Items LocalConfigItems `gorm:"type:text" json:"items"` // JSON stored as text in SQLite
|
||||
TotalPrice *float64 `json:"total_price"`
|
||||
CustomPrice *float64 `json:"custom_price"`
|
||||
Notes string `json:"notes"`
|
||||
IsTemplate bool `gorm:"default:false" json:"is_template"`
|
||||
ServerCount int `gorm:"default:1" json:"server_count"`
|
||||
ServerModel string `gorm:"size:100" json:"server_model,omitempty"`
|
||||
SupportCode string `gorm:"size:20" json:"support_code,omitempty"`
|
||||
Article string `gorm:"size:80" json:"article,omitempty"`
|
||||
PricelistID *uint `gorm:"index" json:"pricelist_id,omitempty"`
|
||||
WarehousePricelistID *uint `gorm:"index" json:"warehouse_pricelist_id,omitempty"`
|
||||
CompetitorPricelistID *uint `gorm:"index" json:"competitor_pricelist_id,omitempty"`
|
||||
DisablePriceRefresh bool `gorm:"default:false" json:"disable_price_refresh"`
|
||||
OnlyInStock bool `gorm:"default:false" json:"only_in_stock"`
|
||||
VendorSpec VendorSpec `gorm:"type:text" json:"vendor_spec,omitempty"`
|
||||
Line int `gorm:"column:line_no;index" json:"line"`
|
||||
PriceUpdatedAt *time.Time `gorm:"type:timestamp" json:"price_updated_at,omitempty"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
SyncedAt *time.Time `json:"synced_at"`
|
||||
SyncStatus string `gorm:"default:'local'" json:"sync_status"` // 'local', 'synced', 'modified'
|
||||
OriginalUserID uint `json:"original_user_id"` // UserID from MariaDB for reference
|
||||
OriginalUsername string `gorm:"not null;default:'';index" json:"original_username"`
|
||||
CurrentVersion *LocalConfigurationVersion `gorm:"foreignKey:CurrentVersionID;references:ID" json:"current_version,omitempty"`
|
||||
Versions []LocalConfigurationVersion `gorm:"foreignKey:ConfigurationUUID;references:UUID" json:"versions,omitempty"`
|
||||
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
|
||||
UUID string `gorm:"uniqueIndex;not null" json:"uuid"`
|
||||
ServerID *uint `json:"server_id"` // ID on MariaDB server, NULL if local only
|
||||
ProjectUUID *string `gorm:"index" json:"project_uuid,omitempty"`
|
||||
CurrentVersionID *string `gorm:"index" json:"current_version_id,omitempty"`
|
||||
IsActive bool `gorm:"default:true;index" json:"is_active"`
|
||||
Name string `gorm:"not null" json:"name"`
|
||||
Items LocalConfigItems `gorm:"type:text" json:"items"` // JSON stored as text in SQLite
|
||||
TotalPrice *float64 `json:"total_price"`
|
||||
CustomPrice *float64 `json:"custom_price"`
|
||||
Notes string `json:"notes"`
|
||||
IsTemplate bool `gorm:"default:false" json:"is_template"`
|
||||
ServerCount int `gorm:"default:1" json:"server_count"`
|
||||
PricelistID *uint `gorm:"index" json:"pricelist_id,omitempty"`
|
||||
PriceUpdatedAt *time.Time `gorm:"type:timestamp" json:"price_updated_at,omitempty"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
SyncedAt *time.Time `json:"synced_at"`
|
||||
SyncStatus string `gorm:"default:'local'" json:"sync_status"` // 'local', 'synced', 'modified'
|
||||
OriginalUserID uint `json:"original_user_id"` // UserID from MariaDB for reference
|
||||
OriginalUsername string `gorm:"not null;default:'';index" json:"original_username"`
|
||||
CurrentVersion *LocalConfigurationVersion `gorm:"foreignKey:CurrentVersionID;references:ID" json:"current_version,omitempty"`
|
||||
Versions []LocalConfigurationVersion `gorm:"foreignKey:ConfigurationUUID;references:UUID" json:"versions,omitempty"`
|
||||
}
|
||||
|
||||
func (LocalConfiguration) TableName() string {
|
||||
@@ -126,10 +93,7 @@ type LocalProject struct {
|
||||
UUID string `gorm:"uniqueIndex;not null" json:"uuid"`
|
||||
ServerID *uint `json:"server_id,omitempty"`
|
||||
OwnerUsername string `gorm:"not null;index" json:"owner_username"`
|
||||
Code string `gorm:"not null;index:idx_local_projects_code_variant,priority:1" json:"code"`
|
||||
Variant string `gorm:"default:'';index:idx_local_projects_code_variant,priority:2" json:"variant"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
TrackerURL string `json:"tracker_url"`
|
||||
Name string `gorm:"not null" json:"name"`
|
||||
IsActive bool `gorm:"default:true;index" json:"is_active"`
|
||||
IsSystem bool `gorm:"default:false;index" json:"is_system"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
@@ -162,11 +126,10 @@ func (LocalConfigurationVersion) TableName() string {
|
||||
// LocalPricelist stores cached pricelists from server
|
||||
type LocalPricelist struct {
|
||||
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
|
||||
ServerID uint `gorm:"not null;uniqueIndex" json:"server_id"` // ID on MariaDB server
|
||||
Source string `gorm:"not null;default:'estimate';index:idx_local_pricelists_source_created_at,priority:1" json:"source"`
|
||||
Version string `gorm:"not null;index" json:"version"`
|
||||
ServerID uint `gorm:"not null" json:"server_id"` // ID on MariaDB server
|
||||
Version string `gorm:"uniqueIndex;not null" json:"version"`
|
||||
Name string `json:"name"`
|
||||
CreatedAt time.Time `gorm:"index:idx_local_pricelists_source_created_at,priority:2,sort:desc" json:"created_at"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
SyncedAt time.Time `json:"synced_at"`
|
||||
IsUsed bool `gorm:"default:false" json:"is_used"` // Used by any local configuration
|
||||
}
|
||||
@@ -177,47 +140,30 @@ func (LocalPricelist) TableName() string {
|
||||
|
||||
// LocalPricelistItem stores pricelist items
|
||||
type LocalPricelistItem struct {
|
||||
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
|
||||
PricelistID uint `gorm:"not null;index" json:"pricelist_id"`
|
||||
LotName string `gorm:"not null" json:"lot_name"`
|
||||
LotCategory string `gorm:"column:lot_category" json:"lot_category,omitempty"`
|
||||
Price float64 `gorm:"not null" json:"price"`
|
||||
AvailableQty *float64 `json:"available_qty,omitempty"`
|
||||
Partnumbers LocalStringList `gorm:"type:text" json:"partnumbers,omitempty"`
|
||||
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
|
||||
PricelistID uint `gorm:"not null;index" json:"pricelist_id"`
|
||||
LotName string `gorm:"not null" json:"lot_name"`
|
||||
Price float64 `gorm:"not null" json:"price"`
|
||||
}
|
||||
|
||||
// TableName returns the SQLite table name GORM uses for LocalPricelistItem.
func (LocalPricelistItem) TableName() string {
	return "local_pricelist_items"
}
|
||||
|
||||
// LocalComponent stores cached components for offline search (metadata only)
|
||||
// All pricing is now sourced from local_pricelist_items based on configuration pricelist selection
|
||||
// LocalComponent stores cached components for offline search
|
||||
type LocalComponent struct {
|
||||
LotName string `gorm:"primaryKey" json:"lot_name"`
|
||||
LotDescription string `json:"lot_description"`
|
||||
Category string `json:"category"`
|
||||
Model string `json:"model"`
|
||||
LotName string `gorm:"primaryKey" json:"lot_name"`
|
||||
LotDescription string `json:"lot_description"`
|
||||
Category string `json:"category"`
|
||||
Model string `json:"model"`
|
||||
CurrentPrice *float64 `json:"current_price"`
|
||||
SyncedAt time.Time `json:"synced_at"`
|
||||
}
|
||||
|
||||
// TableName returns the SQLite table name GORM uses for LocalComponent.
func (LocalComponent) TableName() string {
	return "local_components"
}
|
||||
|
||||
// LocalSyncGuardState stores latest sync readiness decision for UI and preflight checks.
type LocalSyncGuardState struct {
	ID                    uint       `gorm:"primaryKey;autoIncrement" json:"id"`
	Status                string     `gorm:"size:32;not null;index" json:"status"` // ready|blocked|unknown
	ReasonCode            string     `gorm:"size:128" json:"reason_code,omitempty"`             // short code for the decision; empty when none recorded
	ReasonText            string     `gorm:"type:text" json:"reason_text,omitempty"`            // free-form explanation; empty when none recorded
	RequiredMinAppVersion *string    `gorm:"size:64" json:"required_min_app_version,omitempty"` // nil when no minimum app version applies
	LastCheckedAt         *time.Time `json:"last_checked_at,omitempty"`                         // presumably nil until a check has run — confirm against writer
	UpdatedAt             time.Time  `json:"updated_at"`
}
|
||||
|
||||
// TableName returns the SQLite table name GORM uses for LocalSyncGuardState.
func (LocalSyncGuardState) TableName() string {
	return "local_sync_guard_state"
}
|
||||
|
||||
// PendingChange stores changes that need to be synced to the server
|
||||
type PendingChange struct {
|
||||
ID int64 `gorm:"primaryKey;autoIncrement" json:"id"`
|
||||
@@ -233,112 +179,3 @@ type PendingChange struct {
|
||||
// TableName returns the SQLite table name GORM uses for PendingChange.
func (PendingChange) TableName() string {
	return "pending_changes"
}
|
||||
|
||||
// LocalPartnumberBook stores a version snapshot of the PN→LOT mapping book (pull-only from PriceForge).
type LocalPartnumberBook struct {
	ID              uint            `gorm:"primaryKey;autoIncrement" json:"id"`
	ServerID        int             `gorm:"uniqueIndex;not null" json:"server_id"` // book ID on the server; unique in the local cache
	Version         string          `gorm:"not null" json:"version"`
	CreatedAt       time.Time       `gorm:"not null" json:"created_at"`
	IsActive        bool            `gorm:"not null;default:true" json:"is_active"`
	PartnumbersJSON LocalStringList `gorm:"column:partnumbers_json;type:text" json:"partnumbers_json"` // list stored as JSON text
}
|
||||
|
||||
// TableName returns the SQLite table name GORM uses for LocalPartnumberBook.
func (LocalPartnumberBook) TableName() string {
	return "local_partnumber_books"
}
|
||||
|
||||
// LocalPartnumberBookLot is a single LOT entry in a partnumber composition.
type LocalPartnumberBookLot struct {
	LotName string  `json:"lot_name"`
	Qty     float64 `json:"qty"` // presumably quantity of this LOT per one partnumber — confirm against PriceForge
}
|
||||
|
||||
// LocalPartnumberBookLots is a JSON-serializable slice of LocalPartnumberBookLot.
type LocalPartnumberBookLots []LocalPartnumberBookLot
|
||||
|
||||
// Value implements driver.Valuer, serializing the slice to JSON for storage.
func (l LocalPartnumberBookLots) Value() (driver.Value, error) {
	return json.Marshal(l)
}
|
||||
|
||||
func (l *LocalPartnumberBookLots) Scan(value interface{}) error {
|
||||
if value == nil {
|
||||
*l = make(LocalPartnumberBookLots, 0)
|
||||
return nil
|
||||
}
|
||||
var bytes []byte
|
||||
switch v := value.(type) {
|
||||
case []byte:
|
||||
bytes = v
|
||||
case string:
|
||||
bytes = []byte(v)
|
||||
default:
|
||||
return errors.New("type assertion failed for LocalPartnumberBookLots")
|
||||
}
|
||||
return json.Unmarshal(bytes, l)
|
||||
}
|
||||
|
||||
// LocalPartnumberBookItem stores the canonical PN composition pulled from PriceForge.
type LocalPartnumberBookItem struct {
	ID          uint                    `gorm:"primaryKey;autoIncrement" json:"id"`
	Partnumber  string                  `gorm:"not null" json:"partnumber"`
	LotsJSON    LocalPartnumberBookLots `gorm:"column:lots_json;type:text" json:"lots_json"` // LOT composition stored as JSON text
	Description string                  `json:"description,omitempty"`
}
|
||||
|
||||
// TableName returns the SQLite table name GORM uses for LocalPartnumberBookItem.
func (LocalPartnumberBookItem) TableName() string {
	return "local_partnumber_book_items"
}
|
||||
|
||||
// VendorSpecItem represents a single row in a vendor BOM specification.
type VendorSpecItem struct {
	SortOrder           int                       `json:"sort_order"`
	VendorPartnumber    string                    `json:"vendor_partnumber"`
	Quantity            int                       `json:"quantity"`
	Description         string                    `json:"description,omitempty"`
	UnitPrice           *float64                  `json:"unit_price,omitempty"`
	TotalPrice          *float64                  `json:"total_price,omitempty"`
	ResolvedLotName     string                    `json:"resolved_lot_name,omitempty"`
	ResolutionSource    string                    `json:"resolution_source,omitempty"` // "book", "manual", "unresolved"
	ManualLotSuggestion string                    `json:"manual_lot_suggestion,omitempty"`
	LotQtyPerPN         int                       `json:"lot_qty_per_pn,omitempty"`
	LotAllocations      []VendorSpecLotAllocation `json:"lot_allocations,omitempty"`
	LotMappings         []VendorSpecLotMapping    `json:"lot_mappings,omitempty"` // canonical persisted mapping (see VendorSpecLotMapping)
}
|
||||
|
||||
// VendorSpecLotAllocation maps one LOT to its quantity per vendor partnumber.
type VendorSpecLotAllocation struct {
	LotName  string `json:"lot_name"`
	Quantity int    `json:"quantity"` // quantity of LOT per 1 vendor PN
}
|
||||
|
||||
// VendorSpecLotMapping is the canonical persisted LOT mapping for a vendor PN row.
// It stores all mapped LOTs (base + bundle) uniformly.
type VendorSpecLotMapping struct {
	LotName       string `json:"lot_name"`
	QuantityPerPN int    `json:"quantity_per_pn"`
}
|
||||
|
||||
// VendorSpec is a JSON-encodable slice of VendorSpecItem.
type VendorSpec []VendorSpecItem
|
||||
|
||||
// Value implements driver.Valuer. A nil spec is stored as SQL NULL; otherwise
// the slice is serialized to JSON.
func (v VendorSpec) Value() (driver.Value, error) {
	if v == nil {
		return nil, nil
	}
	return json.Marshal(v)
}
|
||||
|
||||
func (v *VendorSpec) Scan(value interface{}) error {
|
||||
if value == nil {
|
||||
*v = nil
|
||||
return nil
|
||||
}
|
||||
var bytes []byte
|
||||
switch val := value.(type) {
|
||||
case []byte:
|
||||
bytes = val
|
||||
case string:
|
||||
bytes = []byte(val)
|
||||
default:
|
||||
return errors.New("type assertion failed for VendorSpec")
|
||||
}
|
||||
return json.Unmarshal(bytes, v)
|
||||
}
|
||||
|
||||
@@ -1,128 +0,0 @@
|
||||
package localdb
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestGetLatestLocalPricelistBySource_SkipsPricelistWithoutItems verifies that
// the "latest pricelist" lookup ignores a newer pricelist that has no item
// rows and returns the most recent one that actually has items.
func TestGetLatestLocalPricelistBySource_SkipsPricelistWithoutItems(t *testing.T) {
	local, err := New(filepath.Join(t.TempDir(), "latest_without_items.db"))
	if err != nil {
		t.Fatalf("open localdb: %v", err)
	}
	t.Cleanup(func() { _ = local.Close() })

	base := time.Now().Add(-time.Minute)
	// Older pricelist; it receives item rows below.
	withItems := &LocalPricelist{
		ServerID:  1001,
		Source:    "estimate",
		Version:   "E-1",
		Name:      "with-items",
		CreatedAt: base,
		SyncedAt:  base,
	}
	if err := local.SaveLocalPricelist(withItems); err != nil {
		t.Fatalf("save pricelist with items: %v", err)
	}
	// Reload to obtain the locally assigned primary key for the item rows.
	storedWithItems, err := local.GetLocalPricelistByServerID(withItems.ServerID)
	if err != nil {
		t.Fatalf("load pricelist with items: %v", err)
	}
	if err := local.SaveLocalPricelistItems([]LocalPricelistItem{
		{
			PricelistID: storedWithItems.ID,
			LotName:     "CPU_A",
			Price:       100,
		},
	}); err != nil {
		t.Fatalf("save pricelist items: %v", err)
	}

	// Newer pricelist (created 2s later) deliberately left without items.
	withoutItems := &LocalPricelist{
		ServerID:  1002,
		Source:    "estimate",
		Version:   "E-2",
		Name:      "without-items",
		CreatedAt: base.Add(2 * time.Second),
		SyncedAt:  base.Add(2 * time.Second),
	}
	if err := local.SaveLocalPricelist(withoutItems); err != nil {
		t.Fatalf("save pricelist without items: %v", err)
	}

	got, err := local.GetLatestLocalPricelistBySource("estimate")
	if err != nil {
		t.Fatalf("GetLatestLocalPricelistBySource: %v", err)
	}
	// The empty-but-newer pricelist must be skipped in favor of the populated one.
	if got.ServerID != withItems.ServerID {
		t.Fatalf("expected server_id=%d, got %d", withItems.ServerID, got.ServerID)
	}
}
|
||||
|
||||
// TestGetLatestLocalPricelistBySource_TieBreaksByID verifies that when two
// pricelists share the same created_at, the later-inserted one (higher local
// ID) wins the "latest" lookup.
func TestGetLatestLocalPricelistBySource_TieBreaksByID(t *testing.T) {
	local, err := New(filepath.Join(t.TempDir(), "latest_tie_break.db"))
	if err != nil {
		t.Fatalf("open localdb: %v", err)
	}
	t.Cleanup(func() { _ = local.Close() })

	base := time.Now().Add(-time.Minute)
	first := &LocalPricelist{
		ServerID:  2001,
		Source:    "warehouse",
		Version:   "S-1",
		Name:      "first",
		CreatedAt: base,
		SyncedAt:  base,
	}
	if err := local.SaveLocalPricelist(first); err != nil {
		t.Fatalf("save first pricelist: %v", err)
	}
	// Reload to obtain the locally assigned primary key for the item row.
	storedFirst, err := local.GetLocalPricelistByServerID(first.ServerID)
	if err != nil {
		t.Fatalf("load first pricelist: %v", err)
	}
	if err := local.SaveLocalPricelistItems([]LocalPricelistItem{
		{
			PricelistID: storedFirst.ID,
			LotName:     "CPU_A",
			Price:       101,
		},
	}); err != nil {
		t.Fatalf("save first items: %v", err)
	}

	// Same CreatedAt as first: only the insertion order (local ID) differs.
	second := &LocalPricelist{
		ServerID:  2002,
		Source:    "warehouse",
		Version:   "S-2",
		Name:      "second",
		CreatedAt: base,
		SyncedAt:  base,
	}
	if err := local.SaveLocalPricelist(second); err != nil {
		t.Fatalf("save second pricelist: %v", err)
	}
	storedSecond, err := local.GetLocalPricelistByServerID(second.ServerID)
	if err != nil {
		t.Fatalf("load second pricelist: %v", err)
	}
	if err := local.SaveLocalPricelistItems([]LocalPricelistItem{
		{
			PricelistID: storedSecond.ID,
			LotName:     "CPU_A",
			Price:       102,
		},
	}); err != nil {
		t.Fatalf("save second items: %v", err)
	}

	got, err := local.GetLatestLocalPricelistBySource("warehouse")
	if err != nil {
		t.Fatalf("GetLatestLocalPricelistBySource: %v", err)
	}
	// Ties on created_at must resolve to the later-inserted pricelist.
	if got.ServerID != second.ServerID {
		t.Fatalf("expected server_id=%d, got %d", second.ServerID, got.ServerID)
	}
}
|
||||
@@ -1,53 +0,0 @@
|
||||
package localdb
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestSaveProjectPreservingUpdatedAtKeepsProvidedTimestamp verifies that
// SaveProjectPreservingUpdatedAt stores the caller-supplied updated_at
// unchanged (no auto-touch on save) while still persisting synced_at.
func TestSaveProjectPreservingUpdatedAtKeepsProvidedTimestamp(t *testing.T) {
	dbPath := filepath.Join(t.TempDir(), "project_sync_timestamp.db")

	local, err := New(dbPath)
	if err != nil {
		t.Fatalf("open localdb: %v", err)
	}
	t.Cleanup(func() { _ = local.Close() })

	createdAt := time.Date(2026, 2, 1, 10, 0, 0, 0, time.UTC)
	updatedAt := time.Date(2026, 2, 3, 12, 30, 0, 0, time.UTC)
	project := &LocalProject{
		UUID:          "project-1",
		OwnerUsername: "tester",
		Code:          "OPS-1",
		Variant:       "Lenovo",
		IsActive:      true,
		CreatedAt:     createdAt,
		UpdatedAt:     updatedAt,
		SyncStatus:    "synced",
	}

	if err := local.SaveProjectPreservingUpdatedAt(project); err != nil {
		t.Fatalf("save project: %v", err)
	}

	// Second save simulates a sync pass: synced_at changes, updated_at must not.
	syncedAt := time.Date(2026, 3, 16, 8, 45, 0, 0, time.UTC)
	project.SyncedAt = &syncedAt
	project.SyncStatus = "synced"

	if err := local.SaveProjectPreservingUpdatedAt(project); err != nil {
		t.Fatalf("save project second time: %v", err)
	}

	stored, err := local.GetProjectByUUID(project.UUID)
	if err != nil {
		t.Fatalf("get project: %v", err)
	}
	if !stored.UpdatedAt.Equal(updatedAt) {
		t.Fatalf("updated_at changed during sync save: got %s want %s", stored.UpdatedAt, updatedAt)
	}
	if stored.SyncedAt == nil || !stored.SyncedAt.Equal(syncedAt) {
		t.Fatalf("synced_at not updated correctly: got %+v want %s", stored.SyncedAt, syncedAt)
	}
}
|
||||
@@ -3,43 +3,33 @@ package localdb
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BuildConfigurationSnapshot serializes the full local configuration state.
|
||||
func BuildConfigurationSnapshot(localCfg *LocalConfiguration) (string, error) {
|
||||
snapshot := map[string]interface{}{
|
||||
"id": localCfg.ID,
|
||||
"uuid": localCfg.UUID,
|
||||
"server_id": localCfg.ServerID,
|
||||
"project_uuid": localCfg.ProjectUUID,
|
||||
"current_version_id": localCfg.CurrentVersionID,
|
||||
"is_active": localCfg.IsActive,
|
||||
"name": localCfg.Name,
|
||||
"items": localCfg.Items,
|
||||
"total_price": localCfg.TotalPrice,
|
||||
"custom_price": localCfg.CustomPrice,
|
||||
"notes": localCfg.Notes,
|
||||
"is_template": localCfg.IsTemplate,
|
||||
"server_count": localCfg.ServerCount,
|
||||
"server_model": localCfg.ServerModel,
|
||||
"support_code": localCfg.SupportCode,
|
||||
"article": localCfg.Article,
|
||||
"pricelist_id": localCfg.PricelistID,
|
||||
"warehouse_pricelist_id": localCfg.WarehousePricelistID,
|
||||
"competitor_pricelist_id": localCfg.CompetitorPricelistID,
|
||||
"disable_price_refresh": localCfg.DisablePriceRefresh,
|
||||
"only_in_stock": localCfg.OnlyInStock,
|
||||
"vendor_spec": localCfg.VendorSpec,
|
||||
"line": localCfg.Line,
|
||||
"price_updated_at": localCfg.PriceUpdatedAt,
|
||||
"created_at": localCfg.CreatedAt,
|
||||
"updated_at": localCfg.UpdatedAt,
|
||||
"synced_at": localCfg.SyncedAt,
|
||||
"sync_status": localCfg.SyncStatus,
|
||||
"original_user_id": localCfg.OriginalUserID,
|
||||
"original_username": localCfg.OriginalUsername,
|
||||
"id": localCfg.ID,
|
||||
"uuid": localCfg.UUID,
|
||||
"server_id": localCfg.ServerID,
|
||||
"project_uuid": localCfg.ProjectUUID,
|
||||
"current_version_id": localCfg.CurrentVersionID,
|
||||
"is_active": localCfg.IsActive,
|
||||
"name": localCfg.Name,
|
||||
"items": localCfg.Items,
|
||||
"total_price": localCfg.TotalPrice,
|
||||
"custom_price": localCfg.CustomPrice,
|
||||
"notes": localCfg.Notes,
|
||||
"is_template": localCfg.IsTemplate,
|
||||
"server_count": localCfg.ServerCount,
|
||||
"pricelist_id": localCfg.PricelistID,
|
||||
"price_updated_at": localCfg.PriceUpdatedAt,
|
||||
"created_at": localCfg.CreatedAt,
|
||||
"updated_at": localCfg.UpdatedAt,
|
||||
"synced_at": localCfg.SyncedAt,
|
||||
"sync_status": localCfg.SyncStatus,
|
||||
"original_user_id": localCfg.OriginalUserID,
|
||||
"original_username": localCfg.OriginalUsername,
|
||||
}
|
||||
|
||||
data, err := json.Marshal(snapshot)
|
||||
@@ -52,28 +42,19 @@ func BuildConfigurationSnapshot(localCfg *LocalConfiguration) (string, error) {
|
||||
// DecodeConfigurationSnapshot returns editable fields from one saved snapshot.
|
||||
func DecodeConfigurationSnapshot(data string) (*LocalConfiguration, error) {
|
||||
var snapshot struct {
|
||||
ProjectUUID *string `json:"project_uuid"`
|
||||
IsActive *bool `json:"is_active"`
|
||||
Name string `json:"name"`
|
||||
Items LocalConfigItems `json:"items"`
|
||||
TotalPrice *float64 `json:"total_price"`
|
||||
CustomPrice *float64 `json:"custom_price"`
|
||||
Notes string `json:"notes"`
|
||||
IsTemplate bool `json:"is_template"`
|
||||
ServerCount int `json:"server_count"`
|
||||
ServerModel string `json:"server_model"`
|
||||
SupportCode string `json:"support_code"`
|
||||
Article string `json:"article"`
|
||||
PricelistID *uint `json:"pricelist_id"`
|
||||
WarehousePricelistID *uint `json:"warehouse_pricelist_id"`
|
||||
CompetitorPricelistID *uint `json:"competitor_pricelist_id"`
|
||||
DisablePriceRefresh bool `json:"disable_price_refresh"`
|
||||
OnlyInStock bool `json:"only_in_stock"`
|
||||
VendorSpec VendorSpec `json:"vendor_spec"`
|
||||
Line int `json:"line"`
|
||||
PriceUpdatedAt *time.Time `json:"price_updated_at"`
|
||||
OriginalUserID uint `json:"original_user_id"`
|
||||
OriginalUsername string `json:"original_username"`
|
||||
ProjectUUID *string `json:"project_uuid"`
|
||||
IsActive *bool `json:"is_active"`
|
||||
Name string `json:"name"`
|
||||
Items LocalConfigItems `json:"items"`
|
||||
TotalPrice *float64 `json:"total_price"`
|
||||
CustomPrice *float64 `json:"custom_price"`
|
||||
Notes string `json:"notes"`
|
||||
IsTemplate bool `json:"is_template"`
|
||||
ServerCount int `json:"server_count"`
|
||||
PricelistID *uint `json:"pricelist_id"`
|
||||
PriceUpdatedAt *time.Time `json:"price_updated_at"`
|
||||
OriginalUserID uint `json:"original_user_id"`
|
||||
OriginalUsername string `json:"original_username"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal([]byte(data), &snapshot); err != nil {
|
||||
@@ -86,87 +67,18 @@ func DecodeConfigurationSnapshot(data string) (*LocalConfiguration, error) {
|
||||
}
|
||||
|
||||
return &LocalConfiguration{
|
||||
IsActive: isActive,
|
||||
ProjectUUID: snapshot.ProjectUUID,
|
||||
Name: snapshot.Name,
|
||||
Items: snapshot.Items,
|
||||
TotalPrice: snapshot.TotalPrice,
|
||||
CustomPrice: snapshot.CustomPrice,
|
||||
Notes: snapshot.Notes,
|
||||
IsTemplate: snapshot.IsTemplate,
|
||||
ServerCount: snapshot.ServerCount,
|
||||
ServerModel: snapshot.ServerModel,
|
||||
SupportCode: snapshot.SupportCode,
|
||||
Article: snapshot.Article,
|
||||
PricelistID: snapshot.PricelistID,
|
||||
WarehousePricelistID: snapshot.WarehousePricelistID,
|
||||
CompetitorPricelistID: snapshot.CompetitorPricelistID,
|
||||
DisablePriceRefresh: snapshot.DisablePriceRefresh,
|
||||
OnlyInStock: snapshot.OnlyInStock,
|
||||
VendorSpec: snapshot.VendorSpec,
|
||||
Line: snapshot.Line,
|
||||
PriceUpdatedAt: snapshot.PriceUpdatedAt,
|
||||
OriginalUserID: snapshot.OriginalUserID,
|
||||
OriginalUsername: snapshot.OriginalUsername,
|
||||
IsActive: isActive,
|
||||
ProjectUUID: snapshot.ProjectUUID,
|
||||
Name: snapshot.Name,
|
||||
Items: snapshot.Items,
|
||||
TotalPrice: snapshot.TotalPrice,
|
||||
CustomPrice: snapshot.CustomPrice,
|
||||
Notes: snapshot.Notes,
|
||||
IsTemplate: snapshot.IsTemplate,
|
||||
ServerCount: snapshot.ServerCount,
|
||||
PricelistID: snapshot.PricelistID,
|
||||
PriceUpdatedAt: snapshot.PriceUpdatedAt,
|
||||
OriginalUserID: snapshot.OriginalUserID,
|
||||
OriginalUsername: snapshot.OriginalUsername,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// configurationSpecPriceFingerprint is the JSON payload compared for revision
// deduplication; it contains only fields that affect the spec or the price.
type configurationSpecPriceFingerprint struct {
	Items                 []configurationSpecPriceFingerprintItem `json:"items"`
	ServerCount           int                                     `json:"server_count"`
	TotalPrice            *float64                                `json:"total_price,omitempty"`
	CustomPrice           *float64                                `json:"custom_price,omitempty"`
	PricelistID           *uint                                   `json:"pricelist_id,omitempty"`
	WarehousePricelistID  *uint                                   `json:"warehouse_pricelist_id,omitempty"`
	CompetitorPricelistID *uint                                   `json:"competitor_pricelist_id,omitempty"`
	DisablePriceRefresh   bool                                    `json:"disable_price_refresh"`
	OnlyInStock           bool                                    `json:"only_in_stock"`
	VendorSpec            VendorSpec                              `json:"vendor_spec,omitempty"`
}

// configurationSpecPriceFingerprintItem is one normalized configuration line
// (LOT, quantity, unit price) included in the fingerprint.
type configurationSpecPriceFingerprintItem struct {
	LotName   string  `json:"lot_name"`
	Quantity  int     `json:"quantity"`
	UnitPrice float64 `json:"unit_price"`
}
|
||||
|
||||
// BuildConfigurationSpecPriceFingerprint returns a stable JSON key based on
|
||||
// spec + price fields only, used for revision deduplication.
|
||||
func BuildConfigurationSpecPriceFingerprint(localCfg *LocalConfiguration) (string, error) {
|
||||
items := make([]configurationSpecPriceFingerprintItem, 0, len(localCfg.Items))
|
||||
for _, item := range localCfg.Items {
|
||||
items = append(items, configurationSpecPriceFingerprintItem{
|
||||
LotName: item.LotName,
|
||||
Quantity: item.Quantity,
|
||||
UnitPrice: item.UnitPrice,
|
||||
})
|
||||
}
|
||||
sort.Slice(items, func(i, j int) bool {
|
||||
if items[i].LotName != items[j].LotName {
|
||||
return items[i].LotName < items[j].LotName
|
||||
}
|
||||
if items[i].Quantity != items[j].Quantity {
|
||||
return items[i].Quantity < items[j].Quantity
|
||||
}
|
||||
return items[i].UnitPrice < items[j].UnitPrice
|
||||
})
|
||||
|
||||
payload := configurationSpecPriceFingerprint{
|
||||
Items: items,
|
||||
ServerCount: localCfg.ServerCount,
|
||||
TotalPrice: localCfg.TotalPrice,
|
||||
CustomPrice: localCfg.CustomPrice,
|
||||
PricelistID: localCfg.PricelistID,
|
||||
WarehousePricelistID: localCfg.WarehousePricelistID,
|
||||
CompetitorPricelistID: localCfg.CompetitorPricelistID,
|
||||
DisablePriceRefresh: localCfg.DisablePriceRefresh,
|
||||
OnlyInStock: localCfg.OnlyInStock,
|
||||
VendorSpec: localCfg.VendorSpec,
|
||||
}
|
||||
|
||||
raw, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("marshal spec+price fingerprint: %w", err)
|
||||
}
|
||||
return string(raw), nil
|
||||
}
|
||||
|
||||
110
internal/middleware/auth.go
Normal file
110
internal/middleware/auth.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/services"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// Context keys used by the auth middleware to stash the authenticated identity.
const (
	AuthUserKey   = "auth_user"   // NOTE(review): not written by the middleware in this file — confirm intended use
	AuthClaimsKey = "auth_claims" // holds the *services.Claims set by Auth
)
|
||||
|
||||
func Auth(authService *services.AuthService) gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
authHeader := c.GetHeader("Authorization")
|
||||
if authHeader == "" {
|
||||
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
|
||||
"error": "authorization header required",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
parts := strings.SplitN(authHeader, " ", 2)
|
||||
if len(parts) != 2 || parts[0] != "Bearer" {
|
||||
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
|
||||
"error": "invalid authorization header format",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
claims, err := authService.ValidateToken(parts[1])
|
||||
if err != nil {
|
||||
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
|
||||
"error": err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
c.Set(AuthClaimsKey, claims)
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func RequireRole(roles ...models.UserRole) gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
claims, exists := c.Get(AuthClaimsKey)
|
||||
if !exists {
|
||||
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
|
||||
"error": "authentication required",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
authClaims := claims.(*services.Claims)
|
||||
|
||||
for _, role := range roles {
|
||||
if authClaims.Role == role {
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
c.AbortWithStatusJSON(http.StatusForbidden, gin.H{
|
||||
"error": "insufficient permissions",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// RequireEditor allows the editor, pricing-admin, and admin roles.
func RequireEditor() gin.HandlerFunc {
	return RequireRole(models.RoleEditor, models.RolePricingAdmin, models.RoleAdmin)
}
|
||||
|
||||
// RequirePricingAdmin allows the pricing-admin and admin roles.
func RequirePricingAdmin() gin.HandlerFunc {
	return RequireRole(models.RolePricingAdmin, models.RoleAdmin)
}
|
||||
|
||||
// RequireAdmin allows only the admin role.
func RequireAdmin() gin.HandlerFunc {
	return RequireRole(models.RoleAdmin)
}
|
||||
|
||||
// GetClaims extracts auth claims from context
|
||||
func GetClaims(c *gin.Context) *services.Claims {
|
||||
claims, exists := c.Get(AuthClaimsKey)
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
return claims.(*services.Claims)
|
||||
}
|
||||
|
||||
// GetUserID extracts the authenticated user ID from context; it returns 0
// when the request carries no valid claims.
func GetUserID(c *gin.Context) uint {
	claims := GetClaims(c)
	if claims == nil {
		return 0
	}
	return claims.UserID
}
|
||||
|
||||
// GetUsername extracts the authenticated username from context; it returns an
// empty string when the request carries no valid claims.
func GetUsername(c *gin.Context) string {
	claims := GetClaims(c)
	if claims == nil {
		return ""
	}
	return claims.Username
}
|
||||
@@ -1,55 +1,22 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func CORS() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
origin := strings.TrimSpace(c.GetHeader("Origin"))
|
||||
if origin != "" {
|
||||
if isLoopbackOrigin(origin) {
|
||||
c.Header("Access-Control-Allow-Origin", origin)
|
||||
c.Header("Vary", "Origin")
|
||||
c.Header("Access-Control-Allow-Methods", "GET, POST, PUT, PATCH, DELETE, OPTIONS")
|
||||
c.Header("Access-Control-Allow-Headers", "Origin, Content-Type, Accept, Authorization")
|
||||
c.Header("Access-Control-Expose-Headers", "Content-Length, Content-Disposition")
|
||||
c.Header("Access-Control-Max-Age", "86400")
|
||||
} else if c.Request.Method == http.MethodOptions {
|
||||
c.AbortWithStatus(http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
}
|
||||
c.Header("Access-Control-Allow-Origin", "*")
|
||||
c.Header("Access-Control-Allow-Methods", "GET, POST, PUT, PATCH, DELETE, OPTIONS")
|
||||
c.Header("Access-Control-Allow-Headers", "Origin, Content-Type, Accept, Authorization")
|
||||
c.Header("Access-Control-Expose-Headers", "Content-Length, Content-Disposition")
|
||||
c.Header("Access-Control-Max-Age", "86400")
|
||||
|
||||
if c.Request.Method == http.MethodOptions {
|
||||
c.AbortWithStatus(http.StatusNoContent)
|
||||
if c.Request.Method == "OPTIONS" {
|
||||
c.AbortWithStatus(204)
|
||||
return
|
||||
}
|
||||
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// isLoopbackOrigin reports whether origin is an http(s) URL whose host names
// the local machine: "localhost" (any case) or a loopback IP such as
// 127.0.0.1 or ::1. Malformed origins and non-HTTP schemes are rejected.
func isLoopbackOrigin(origin string) bool {
	parsed, err := url.Parse(origin)
	if err != nil {
		return false
	}
	switch parsed.Scheme {
	case "http", "https":
		// supported schemes; fall through to host inspection
	default:
		return false
	}
	host := strings.TrimSpace(parsed.Hostname())
	switch {
	case host == "":
		return false
	case strings.EqualFold(host, "localhost"):
		return true
	default:
		ip := net.ParseIP(host)
		return ip != nil && ip.IsLoopback()
	}
}
|
||||
|
||||
@@ -39,84 +39,25 @@ func (c ConfigItems) Total() float64 {
|
||||
return total
|
||||
}
|
||||
|
||||
// VendorSpecLotAllocation maps one LOT to its quantity per vendor partnumber.
type VendorSpecLotAllocation struct {
	LotName  string `json:"lot_name"`
	Quantity int    `json:"quantity"` // quantity of this LOT per one vendor PN
}
|
||||
|
||||
// VendorSpecLotMapping is the canonical persisted LOT mapping for a vendor PN row.
type VendorSpecLotMapping struct {
	LotName       string `json:"lot_name"`
	QuantityPerPN int    `json:"quantity_per_pn"`
}
|
||||
|
||||
// VendorSpecItem represents a single row in a vendor BOM specification.
type VendorSpecItem struct {
	SortOrder           int                       `json:"sort_order"`
	VendorPartnumber    string                    `json:"vendor_partnumber"`
	Quantity            int                       `json:"quantity"`
	Description         string                    `json:"description,omitempty"`
	UnitPrice           *float64                  `json:"unit_price,omitempty"`
	TotalPrice          *float64                  `json:"total_price,omitempty"`
	ResolvedLotName     string                    `json:"resolved_lot_name,omitempty"`
	ResolutionSource    string                    `json:"resolution_source,omitempty"` // "book", "manual", "unresolved" — per the localdb mirror; confirm
	ManualLotSuggestion string                    `json:"manual_lot_suggestion,omitempty"`
	LotQtyPerPN         int                       `json:"lot_qty_per_pn,omitempty"`
	LotAllocations      []VendorSpecLotAllocation `json:"lot_allocations,omitempty"`
	LotMappings         []VendorSpecLotMapping    `json:"lot_mappings,omitempty"`
}
|
||||
|
||||
// VendorSpec is a JSON-encodable slice of VendorSpecItem.
type VendorSpec []VendorSpecItem
|
||||
|
||||
// Value implements driver.Valuer. A nil spec is stored as SQL NULL; otherwise
// the slice is serialized to JSON.
func (v VendorSpec) Value() (driver.Value, error) {
	if v == nil {
		return nil, nil
	}
	return json.Marshal(v)
}
|
||||
|
||||
func (v *VendorSpec) Scan(value interface{}) error {
|
||||
if value == nil {
|
||||
*v = nil
|
||||
return nil
|
||||
}
|
||||
var bytes []byte
|
||||
switch val := value.(type) {
|
||||
case []byte:
|
||||
bytes = val
|
||||
case string:
|
||||
bytes = []byte(val)
|
||||
default:
|
||||
return errors.New("type assertion failed for VendorSpec")
|
||||
}
|
||||
return json.Unmarshal(bytes, v)
|
||||
}
|
||||
|
||||
type Configuration struct {
|
||||
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
|
||||
UUID string `gorm:"size:36;uniqueIndex;not null" json:"uuid"`
|
||||
UserID *uint `json:"user_id,omitempty"` // Legacy field, no longer required for ownership
|
||||
OwnerUsername string `gorm:"size:100;not null;default:'';index" json:"owner_username"`
|
||||
ProjectUUID *string `gorm:"size:36;index" json:"project_uuid,omitempty"`
|
||||
AppVersion string `gorm:"size:64" json:"app_version,omitempty"`
|
||||
Name string `gorm:"size:200;not null" json:"name"`
|
||||
Items ConfigItems `gorm:"type:json;not null" json:"items"`
|
||||
TotalPrice *float64 `gorm:"type:decimal(12,2)" json:"total_price"`
|
||||
CustomPrice *float64 `gorm:"type:decimal(12,2)" json:"custom_price"`
|
||||
Notes string `gorm:"type:text" json:"notes"`
|
||||
IsTemplate bool `gorm:"default:false" json:"is_template"`
|
||||
ServerCount int `gorm:"default:1" json:"server_count"`
|
||||
ServerModel string `gorm:"size:100" json:"server_model,omitempty"`
|
||||
SupportCode string `gorm:"size:20" json:"support_code,omitempty"`
|
||||
Article string `gorm:"size:80" json:"article,omitempty"`
|
||||
PricelistID *uint `gorm:"index" json:"pricelist_id,omitempty"`
|
||||
WarehousePricelistID *uint `gorm:"index" json:"warehouse_pricelist_id,omitempty"`
|
||||
CompetitorPricelistID *uint `gorm:"index" json:"competitor_pricelist_id,omitempty"`
|
||||
VendorSpec VendorSpec `gorm:"type:json" json:"vendor_spec,omitempty"`
|
||||
DisablePriceRefresh bool `gorm:"default:false" json:"disable_price_refresh"`
|
||||
OnlyInStock bool `gorm:"default:false" json:"only_in_stock"`
|
||||
Line int `gorm:"column:line_no;index" json:"line"`
|
||||
PriceUpdatedAt *time.Time `gorm:"type:timestamp" json:"price_updated_at,omitempty"`
|
||||
CreatedAt time.Time `gorm:"autoCreateTime" json:"created_at"`
|
||||
CurrentVersionNo int `gorm:"-" json:"current_version_no,omitempty"`
|
||||
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
|
||||
UUID string `gorm:"size:36;uniqueIndex;not null" json:"uuid"`
|
||||
UserID *uint `json:"user_id,omitempty"` // Legacy field, no longer required for ownership
|
||||
OwnerUsername string `gorm:"size:100;not null;default:'';index" json:"owner_username"`
|
||||
ProjectUUID *string `gorm:"size:36;index" json:"project_uuid,omitempty"`
|
||||
AppVersion string `gorm:"size:64" json:"app_version,omitempty"`
|
||||
Name string `gorm:"size:200;not null" json:"name"`
|
||||
Items ConfigItems `gorm:"type:json;not null" json:"items"`
|
||||
TotalPrice *float64 `gorm:"type:decimal(12,2)" json:"total_price"`
|
||||
CustomPrice *float64 `gorm:"type:decimal(12,2)" json:"custom_price"`
|
||||
Notes string `gorm:"type:text" json:"notes"`
|
||||
IsTemplate bool `gorm:"default:false" json:"is_template"`
|
||||
ServerCount int `gorm:"default:1" json:"server_count"`
|
||||
PricelistID *uint `gorm:"index" json:"pricelist_id,omitempty"`
|
||||
PriceUpdatedAt *time.Time `gorm:"type:timestamp" json:"price_updated_at,omitempty"`
|
||||
CreatedAt time.Time `gorm:"autoCreateTime" json:"created_at"`
|
||||
|
||||
User *User `gorm:"foreignKey:UserID" json:"user,omitempty"`
|
||||
}
|
||||
|
||||
func (Configuration) TableName() string {
|
||||
@@ -131,6 +72,8 @@ type PriceOverride struct {
|
||||
ValidUntil *time.Time `gorm:"type:date" json:"valid_until"`
|
||||
Reason string `gorm:"type:text" json:"reason"`
|
||||
CreatedBy uint `gorm:"not null" json:"created_by"`
|
||||
|
||||
Creator *User `gorm:"foreignKey:CreatedBy" json:"creator,omitempty"`
|
||||
}
|
||||
|
||||
func (PriceOverride) TableName() string {
|
||||
|
||||
@@ -37,33 +37,3 @@ type Supplier struct {
|
||||
func (Supplier) TableName() string {
|
||||
return "supplier"
|
||||
}
|
||||
|
||||
// StockLog stores warehouse stock snapshots imported from external files.
|
||||
type StockLog struct {
|
||||
StockLogID uint `gorm:"column:stock_log_id;primaryKey;autoIncrement"`
|
||||
Partnumber string `gorm:"column:partnumber;size:255;not null"`
|
||||
Supplier *string `gorm:"column:supplier;size:255"`
|
||||
Date time.Time `gorm:"column:date;type:date;not null"`
|
||||
Price float64 `gorm:"column:price;not null"`
|
||||
Quality *string `gorm:"column:quality;size:255"`
|
||||
Comments *string `gorm:"column:comments;size:15000"`
|
||||
Vendor *string `gorm:"column:vendor;size:255"`
|
||||
Qty *float64 `gorm:"column:qty"`
|
||||
}
|
||||
|
||||
func (StockLog) TableName() string {
|
||||
return "stock_log"
|
||||
}
|
||||
|
||||
// StockIgnoreRule contains import ignore pattern rules.
|
||||
type StockIgnoreRule struct {
|
||||
ID uint `gorm:"column:id;primaryKey;autoIncrement" json:"id"`
|
||||
Target string `gorm:"column:target;size:20;not null" json:"target"` // partnumber|description
|
||||
MatchType string `gorm:"column:match_type;size:20;not null" json:"match_type"` // exact|prefix|suffix
|
||||
Pattern string `gorm:"column:pattern;size:500;not null" json:"pattern"`
|
||||
CreatedAt time.Time `gorm:"column:created_at;autoCreateTime" json:"created_at"`
|
||||
}
|
||||
|
||||
func (StockIgnoreRule) TableName() string {
|
||||
return "stock_ignore_rules"
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
// AllModels returns all models for auto-migration
|
||||
func AllModels() []interface{} {
|
||||
return []interface{}{
|
||||
&User{},
|
||||
&Category{},
|
||||
&LotMetadata{},
|
||||
&Project{},
|
||||
@@ -51,3 +52,54 @@ func SeedCategories(db *gorm.DB) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SeedAdminUser creates default admin user if not exists
|
||||
// Default credentials: admin / admin123
|
||||
func SeedAdminUser(db *gorm.DB, passwordHash string) error {
|
||||
var count int64
|
||||
db.Model(&User{}).Where("username = ?", "admin").Count(&count)
|
||||
if count > 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
admin := &User{
|
||||
Username: "admin",
|
||||
Email: "admin@example.com",
|
||||
PasswordHash: passwordHash,
|
||||
Role: RoleAdmin,
|
||||
IsActive: true,
|
||||
}
|
||||
return db.Create(admin).Error
|
||||
}
|
||||
|
||||
// EnsureDBUser creates or returns the user corresponding to the database connection username.
|
||||
// This is used when RBAC is disabled - configurations are owned by the DB user.
|
||||
// Returns the user ID that should be used for all operations.
|
||||
func EnsureDBUser(db *gorm.DB, dbUsername string) (uint, error) {
|
||||
if dbUsername == "" {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
var user User
|
||||
err := db.Where("username = ?", dbUsername).First(&user).Error
|
||||
if err == nil {
|
||||
return user.ID, nil
|
||||
}
|
||||
|
||||
// User doesn't exist, create it
|
||||
user = User{
|
||||
Username: dbUsername,
|
||||
Email: dbUsername + "@db.local",
|
||||
PasswordHash: "-", // No password - this is a DB user, not an app user
|
||||
Role: RoleAdmin,
|
||||
IsActive: true,
|
||||
}
|
||||
|
||||
if err := db.Create(&user).Error; err != nil {
|
||||
slog.Error("failed to create DB user", "username", dbUsername, "error", err)
|
||||
return 0, err
|
||||
}
|
||||
|
||||
slog.Info("created DB user for configurations", "username", dbUsername, "user_id", user.ID)
|
||||
return user.ID, nil
|
||||
}
|
||||
|
||||
@@ -4,41 +4,12 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type PricelistSource string
|
||||
|
||||
const (
|
||||
PricelistSourceEstimate PricelistSource = "estimate"
|
||||
PricelistSourceWarehouse PricelistSource = "warehouse"
|
||||
PricelistSourceCompetitor PricelistSource = "competitor"
|
||||
)
|
||||
|
||||
func (s PricelistSource) IsValid() bool {
|
||||
switch s {
|
||||
case PricelistSourceEstimate, PricelistSourceWarehouse, PricelistSourceCompetitor:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func NormalizePricelistSource(source string) PricelistSource {
|
||||
switch PricelistSource(source) {
|
||||
case PricelistSourceWarehouse:
|
||||
return PricelistSourceWarehouse
|
||||
case PricelistSourceCompetitor:
|
||||
return PricelistSourceCompetitor
|
||||
default:
|
||||
return PricelistSourceEstimate
|
||||
}
|
||||
}
|
||||
|
||||
// Pricelist represents a versioned snapshot of prices
|
||||
type Pricelist struct {
|
||||
ID uint `gorm:"primaryKey" json:"id"`
|
||||
Source string `gorm:"size:20;not null;default:'estimate';uniqueIndex:idx_qt_pricelists_source_version,priority:1;index:idx_qt_pricelists_source_created_at,priority:1" json:"source"`
|
||||
Version string `gorm:"size:20;not null;uniqueIndex:idx_qt_pricelists_source_version,priority:2" json:"version"` // Format: YYYY-MM-DD-NNN
|
||||
Notification string `gorm:"size:500" json:"notification"` // Notification shown in configurator
|
||||
CreatedAt time.Time `gorm:"index:idx_qt_pricelists_source_created_at,priority:2,sort:desc" json:"created_at"`
|
||||
Version string `gorm:"size:20;uniqueIndex;not null" json:"version"` // Format: YYYY-MM-DD-NNN
|
||||
Notification string `gorm:"size:500" json:"notification"` // Notification shown in configurator
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
CreatedBy string `gorm:"size:100" json:"created_by"`
|
||||
IsActive bool `gorm:"default:true" json:"is_active"`
|
||||
UsageCount int `gorm:"default:0" json:"usage_count"`
|
||||
@@ -55,7 +26,6 @@ type PricelistItem struct {
|
||||
ID uint `gorm:"primaryKey" json:"id"`
|
||||
PricelistID uint `gorm:"not null;index:idx_pricelist_lot" json:"pricelist_id"`
|
||||
LotName string `gorm:"size:255;not null;index:idx_pricelist_lot" json:"lot_name"`
|
||||
LotCategory string `gorm:"column:lot_category;size:50" json:"lot_category,omitempty"`
|
||||
Price float64 `gorm:"type:decimal(12,2);not null" json:"price"`
|
||||
PriceMethod string `gorm:"size:20" json:"price_method"`
|
||||
|
||||
@@ -66,10 +36,8 @@ type PricelistItem struct {
|
||||
MetaPrices string `gorm:"size:1000" json:"meta_prices,omitempty"`
|
||||
|
||||
// Virtual fields for display
|
||||
LotDescription string `gorm:"-" json:"lot_description,omitempty"`
|
||||
Category string `gorm:"-" json:"category,omitempty"`
|
||||
AvailableQty *float64 `gorm:"-" json:"available_qty,omitempty"`
|
||||
Partnumbers []string `gorm:"-" json:"partnumbers,omitempty"`
|
||||
LotDescription string `gorm:"-" json:"lot_description,omitempty"`
|
||||
Category string `gorm:"-" json:"category,omitempty"`
|
||||
}
|
||||
|
||||
func (PricelistItem) TableName() string {
|
||||
@@ -79,7 +47,6 @@ func (PricelistItem) TableName() string {
|
||||
// PricelistSummary is used for list views
|
||||
type PricelistSummary struct {
|
||||
ID uint `json:"id"`
|
||||
Source string `json:"source"`
|
||||
Version string `json:"version"`
|
||||
Notification string `json:"notification"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
|
||||
@@ -6,10 +6,7 @@ type Project struct {
|
||||
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
|
||||
UUID string `gorm:"size:36;uniqueIndex;not null" json:"uuid"`
|
||||
OwnerUsername string `gorm:"size:100;not null;index" json:"owner_username"`
|
||||
Code string `gorm:"size:100;not null;index:idx_qt_projects_code_variant,priority:1" json:"code"`
|
||||
Variant string `gorm:"size:100;not null;default:'';index:idx_qt_projects_code_variant,priority:2" json:"variant"`
|
||||
Name *string `gorm:"size:200" json:"name,omitempty"`
|
||||
TrackerURL string `gorm:"size:500" json:"tracker_url"`
|
||||
Name string `gorm:"size:200;not null" json:"name"`
|
||||
IsActive bool `gorm:"default:true;index" json:"is_active"`
|
||||
IsSystem bool `gorm:"default:false;index" json:"is_system"`
|
||||
CreatedAt time.Time `gorm:"autoCreateTime" json:"created_at"`
|
||||
|
||||
39
internal/models/user.go
Normal file
39
internal/models/user.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package models
|
||||
|
||||
import "time"
|
||||
|
||||
type UserRole string
|
||||
|
||||
const (
|
||||
RoleViewer UserRole = "viewer"
|
||||
RoleEditor UserRole = "editor"
|
||||
RolePricingAdmin UserRole = "pricing_admin"
|
||||
RoleAdmin UserRole = "admin"
|
||||
)
|
||||
|
||||
type User struct {
|
||||
ID uint `gorm:"primaryKey;autoIncrement" json:"id"`
|
||||
Username string `gorm:"size:100;uniqueIndex;not null" json:"username"`
|
||||
Email string `gorm:"size:255;uniqueIndex;not null" json:"email"`
|
||||
PasswordHash string `gorm:"size:255;not null" json:"-"`
|
||||
Role UserRole `gorm:"type:enum('viewer','editor','pricing_admin','admin');default:'viewer'" json:"role"`
|
||||
IsActive bool `gorm:"default:true" json:"is_active"`
|
||||
CreatedAt time.Time `gorm:"autoCreateTime" json:"created_at"`
|
||||
UpdatedAt time.Time `gorm:"autoUpdateTime" json:"updated_at"`
|
||||
}
|
||||
|
||||
func (User) TableName() string {
|
||||
return "qt_users"
|
||||
}
|
||||
|
||||
func (u *User) CanEdit() bool {
|
||||
return u.Role == RoleEditor || u.Role == RolePricingAdmin || u.Role == RoleAdmin
|
||||
}
|
||||
|
||||
func (u *User) CanManagePricing() bool {
|
||||
return u.Role == RolePricingAdmin || u.Role == RoleAdmin
|
||||
}
|
||||
|
||||
func (u *User) CanManageUsers() bool {
|
||||
return u.Role == RoleAdmin
|
||||
}
|
||||
@@ -1,8 +1,6 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
@@ -16,13 +14,7 @@ func NewConfigurationRepository(db *gorm.DB) *ConfigurationRepository {
|
||||
}
|
||||
|
||||
func (r *ConfigurationRepository) Create(config *models.Configuration) error {
|
||||
if err := r.db.Create(config).Error; err != nil {
|
||||
if isUnknownLineNoColumnError(err) {
|
||||
return r.db.Omit("line_no").Create(config).Error
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return r.db.Create(config).Error
|
||||
}
|
||||
|
||||
func (r *ConfigurationRepository) GetByID(id uint) (*models.Configuration, error) {
|
||||
@@ -44,21 +36,7 @@ func (r *ConfigurationRepository) GetByUUID(uuid string) (*models.Configuration,
|
||||
}
|
||||
|
||||
func (r *ConfigurationRepository) Update(config *models.Configuration) error {
|
||||
if err := r.db.Save(config).Error; err != nil {
|
||||
if isUnknownLineNoColumnError(err) {
|
||||
return r.db.Omit("line_no").Save(config).Error
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isUnknownLineNoColumnError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
msg := strings.ToLower(err.Error())
|
||||
return strings.Contains(msg, "unknown column 'line_no'") || strings.Contains(msg, "no column named line_no")
|
||||
return r.db.Save(config).Error
|
||||
}
|
||||
|
||||
func (r *ConfigurationRepository) Delete(id uint) error {
|
||||
|
||||
@@ -1,174 +0,0 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// PartnumberBookRepository provides read-only access to local partnumber book snapshots.
|
||||
type PartnumberBookRepository struct {
|
||||
db *gorm.DB
|
||||
}
|
||||
|
||||
func NewPartnumberBookRepository(db *gorm.DB) *PartnumberBookRepository {
|
||||
return &PartnumberBookRepository{db: db}
|
||||
}
|
||||
|
||||
// GetActiveBook returns the most recently active local partnumber book.
|
||||
func (r *PartnumberBookRepository) GetActiveBook() (*localdb.LocalPartnumberBook, error) {
|
||||
var book localdb.LocalPartnumberBook
|
||||
err := r.db.Where("is_active = 1").Order("created_at DESC, id DESC").First(&book).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &book, nil
|
||||
}
|
||||
|
||||
// GetBookItems returns all items for the given local book ID.
|
||||
func (r *PartnumberBookRepository) GetBookItems(bookID uint) ([]localdb.LocalPartnumberBookItem, error) {
|
||||
book, err := r.getBook(bookID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items, _, err := r.listCatalogItems(book.PartnumbersJSON, "", 0, 0)
|
||||
return items, err
|
||||
}
|
||||
|
||||
// GetBookItemsPage returns items for the given local book ID with optional search and pagination.
|
||||
func (r *PartnumberBookRepository) GetBookItemsPage(bookID uint, search string, page, perPage int) ([]localdb.LocalPartnumberBookItem, int64, error) {
|
||||
if page < 1 {
|
||||
page = 1
|
||||
}
|
||||
if perPage < 1 {
|
||||
perPage = 100
|
||||
}
|
||||
|
||||
book, err := r.getBook(bookID)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
return r.listCatalogItems(book.PartnumbersJSON, search, page, perPage)
|
||||
}
|
||||
|
||||
// FindLotByPartnumber looks up a partnumber in the active book and returns the matching items.
|
||||
func (r *PartnumberBookRepository) FindLotByPartnumber(bookID uint, partnumber string) ([]localdb.LocalPartnumberBookItem, error) {
|
||||
book, err := r.getBook(bookID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
found := false
|
||||
for _, pn := range book.PartnumbersJSON {
|
||||
if pn == partnumber {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return nil, nil
|
||||
}
|
||||
var items []localdb.LocalPartnumberBookItem
|
||||
err = r.db.Where("partnumber = ?", partnumber).Find(&items).Error
|
||||
return items, err
|
||||
}
|
||||
|
||||
// ListBooks returns all local partnumber books ordered newest first.
|
||||
func (r *PartnumberBookRepository) ListBooks() ([]localdb.LocalPartnumberBook, error) {
|
||||
var books []localdb.LocalPartnumberBook
|
||||
err := r.db.Order("created_at DESC, id DESC").Find(&books).Error
|
||||
return books, err
|
||||
}
|
||||
|
||||
// SaveBook saves a new partnumber book snapshot.
|
||||
func (r *PartnumberBookRepository) SaveBook(book *localdb.LocalPartnumberBook) error {
|
||||
return r.db.Save(book).Error
|
||||
}
|
||||
|
||||
// SaveBookItems upserts canonical PN catalog rows.
|
||||
func (r *PartnumberBookRepository) SaveBookItems(items []localdb.LocalPartnumberBookItem) error {
|
||||
if len(items) == 0 {
|
||||
return nil
|
||||
}
|
||||
return r.db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "partnumber"}},
|
||||
DoUpdates: clause.AssignmentColumns([]string{
|
||||
"lots_json",
|
||||
"description",
|
||||
}),
|
||||
}).CreateInBatches(items, 500).Error
|
||||
}
|
||||
|
||||
// CountBookItems returns the number of items for a given local book ID.
|
||||
func (r *PartnumberBookRepository) CountBookItems(bookID uint) int64 {
|
||||
book, err := r.getBook(bookID)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return int64(len(book.PartnumbersJSON))
|
||||
}
|
||||
|
||||
func (r *PartnumberBookRepository) CountDistinctLots(bookID uint) int64 {
|
||||
items, err := r.GetBookItems(bookID)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
seen := make(map[string]struct{})
|
||||
for _, item := range items {
|
||||
for _, lot := range item.LotsJSON {
|
||||
if lot.LotName == "" {
|
||||
continue
|
||||
}
|
||||
seen[lot.LotName] = struct{}{}
|
||||
}
|
||||
}
|
||||
return int64(len(seen))
|
||||
}
|
||||
|
||||
func (r *PartnumberBookRepository) HasAllBookItems(bookID uint) bool {
|
||||
book, err := r.getBook(bookID)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if len(book.PartnumbersJSON) == 0 {
|
||||
return true
|
||||
}
|
||||
var count int64
|
||||
if err := r.db.Model(&localdb.LocalPartnumberBookItem{}).
|
||||
Where("partnumber IN ?", []string(book.PartnumbersJSON)).
|
||||
Count(&count).Error; err != nil {
|
||||
return false
|
||||
}
|
||||
return count == int64(len(book.PartnumbersJSON))
|
||||
}
|
||||
|
||||
func (r *PartnumberBookRepository) getBook(bookID uint) (*localdb.LocalPartnumberBook, error) {
|
||||
var book localdb.LocalPartnumberBook
|
||||
if err := r.db.First(&book, bookID).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &book, nil
|
||||
}
|
||||
|
||||
func (r *PartnumberBookRepository) listCatalogItems(partnumbers localdb.LocalStringList, search string, page, perPage int) ([]localdb.LocalPartnumberBookItem, int64, error) {
|
||||
if len(partnumbers) == 0 {
|
||||
return []localdb.LocalPartnumberBookItem{}, 0, nil
|
||||
}
|
||||
|
||||
query := r.db.Model(&localdb.LocalPartnumberBookItem{}).Where("partnumber IN ?", []string(partnumbers))
|
||||
if search != "" {
|
||||
trimmedSearch := "%" + search + "%"
|
||||
query = query.Where("partnumber LIKE ? OR lots_json LIKE ? OR description LIKE ?", trimmedSearch, trimmedSearch, trimmedSearch)
|
||||
}
|
||||
|
||||
var total int64
|
||||
if err := query.Count(&total).Error; err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
var items []localdb.LocalPartnumberBookItem
|
||||
if page > 0 && perPage > 0 {
|
||||
query = query.Offset((page - 1) * perPage).Limit(perPage)
|
||||
}
|
||||
err := query.Order("partnumber ASC, id ASC").Find(&items).Error
|
||||
return items, total, err
|
||||
}
|
||||
@@ -21,24 +21,13 @@ func NewPricelistRepository(db *gorm.DB) *PricelistRepository {
|
||||
|
||||
// List returns pricelists with pagination
|
||||
func (r *PricelistRepository) List(offset, limit int) ([]models.PricelistSummary, int64, error) {
|
||||
return r.ListBySource("", offset, limit)
|
||||
}
|
||||
|
||||
// ListBySource returns pricelists filtered by source when provided.
|
||||
func (r *PricelistRepository) ListBySource(source string, offset, limit int) ([]models.PricelistSummary, int64, error) {
|
||||
query := r.db.Model(&models.Pricelist{}).
|
||||
Where("EXISTS (SELECT 1 FROM qt_pricelist_items WHERE qt_pricelist_items.pricelist_id = qt_pricelists.id)")
|
||||
if source != "" {
|
||||
query = query.Where("source = ?", source)
|
||||
}
|
||||
|
||||
var total int64
|
||||
if err := query.Count(&total).Error; err != nil {
|
||||
if err := r.db.Model(&models.Pricelist{}).Count(&total).Error; err != nil {
|
||||
return nil, 0, fmt.Errorf("counting pricelists: %w", err)
|
||||
}
|
||||
|
||||
var pricelists []models.Pricelist
|
||||
if err := query.Order("created_at DESC, id DESC").Offset(offset).Limit(limit).Find(&pricelists).Error; err != nil {
|
||||
if err := r.db.Order("created_at DESC").Offset(offset).Limit(limit).Find(&pricelists).Error; err != nil {
|
||||
return nil, 0, fmt.Errorf("listing pricelists: %w", err)
|
||||
}
|
||||
|
||||
@@ -47,25 +36,13 @@ func (r *PricelistRepository) ListBySource(source string, offset, limit int) ([]
|
||||
|
||||
// ListActive returns active pricelists with pagination.
|
||||
func (r *PricelistRepository) ListActive(offset, limit int) ([]models.PricelistSummary, int64, error) {
|
||||
return r.ListActiveBySource("", offset, limit)
|
||||
}
|
||||
|
||||
// ListActiveBySource returns active pricelists filtered by source when provided.
|
||||
func (r *PricelistRepository) ListActiveBySource(source string, offset, limit int) ([]models.PricelistSummary, int64, error) {
|
||||
query := r.db.Model(&models.Pricelist{}).
|
||||
Where("is_active = ?", true).
|
||||
Where("EXISTS (SELECT 1 FROM qt_pricelist_items WHERE qt_pricelist_items.pricelist_id = qt_pricelists.id)")
|
||||
if source != "" {
|
||||
query = query.Where("source = ?", source)
|
||||
}
|
||||
|
||||
var total int64
|
||||
if err := query.Count(&total).Error; err != nil {
|
||||
if err := r.db.Model(&models.Pricelist{}).Where("is_active = ?", true).Count(&total).Error; err != nil {
|
||||
return nil, 0, fmt.Errorf("counting active pricelists: %w", err)
|
||||
}
|
||||
|
||||
var pricelists []models.Pricelist
|
||||
if err := query.Order("created_at DESC, id DESC").Offset(offset).Limit(limit).Find(&pricelists).Error; err != nil {
|
||||
if err := r.db.Where("is_active = ?", true).Order("created_at DESC").Offset(offset).Limit(limit).Find(&pricelists).Error; err != nil {
|
||||
return nil, 0, fmt.Errorf("listing active pricelists: %w", err)
|
||||
}
|
||||
|
||||
@@ -91,7 +68,6 @@ func (r *PricelistRepository) toSummaries(pricelists []models.Pricelist) []model
|
||||
|
||||
summaries[i] = models.PricelistSummary{
|
||||
ID: pl.ID,
|
||||
Source: pl.Source,
|
||||
Version: pl.Version,
|
||||
Notification: pl.Notification,
|
||||
CreatedAt: pl.CreatedAt,
|
||||
@@ -126,13 +102,8 @@ func (r *PricelistRepository) GetByID(id uint) (*models.Pricelist, error) {
|
||||
|
||||
// GetByVersion returns a pricelist by version string
|
||||
func (r *PricelistRepository) GetByVersion(version string) (*models.Pricelist, error) {
|
||||
return r.GetBySourceAndVersion(string(models.PricelistSourceEstimate), version)
|
||||
}
|
||||
|
||||
// GetBySourceAndVersion returns a pricelist by source/version.
|
||||
func (r *PricelistRepository) GetBySourceAndVersion(source, version string) (*models.Pricelist, error) {
|
||||
var pricelist models.Pricelist
|
||||
if err := r.db.Where("source = ? AND version = ?", source, version).First(&pricelist).Error; err != nil {
|
||||
if err := r.db.Where("version = ?", version).First(&pricelist).Error; err != nil {
|
||||
return nil, fmt.Errorf("getting pricelist by version: %w", err)
|
||||
}
|
||||
return &pricelist, nil
|
||||
@@ -140,17 +111,8 @@ func (r *PricelistRepository) GetBySourceAndVersion(source, version string) (*mo
|
||||
|
||||
// GetLatestActive returns the most recent active pricelist
|
||||
func (r *PricelistRepository) GetLatestActive() (*models.Pricelist, error) {
|
||||
return r.GetLatestActiveBySource(string(models.PricelistSourceEstimate))
|
||||
}
|
||||
|
||||
// GetLatestActiveBySource returns the most recent active pricelist by source.
|
||||
func (r *PricelistRepository) GetLatestActiveBySource(source string) (*models.Pricelist, error) {
|
||||
var pricelist models.Pricelist
|
||||
if err := r.db.
|
||||
Where("is_active = ? AND source = ?", true, source).
|
||||
Where("EXISTS (SELECT 1 FROM qt_pricelist_items WHERE qt_pricelist_items.pricelist_id = qt_pricelists.id)").
|
||||
Order("created_at DESC, id DESC").
|
||||
First(&pricelist).Error; err != nil {
|
||||
if err := r.db.Where("is_active = ?", true).Order("created_at DESC").First(&pricelist).Error; err != nil {
|
||||
return nil, fmt.Errorf("getting latest pricelist: %w", err)
|
||||
}
|
||||
return &pricelist, nil
|
||||
@@ -240,25 +202,16 @@ func (r *PricelistRepository) GetItems(pricelistID uint, offset, limit int, sear
|
||||
if err := r.db.Where("lot_name = ?", items[i].LotName).First(&lot).Error; err == nil {
|
||||
items[i].LotDescription = lot.LotDescription
|
||||
}
|
||||
items[i].Category = strings.TrimSpace(items[i].LotCategory)
|
||||
// Parse category from lot_name (e.g., "CPU_AMD_9654" -> "CPU")
|
||||
parts := strings.SplitN(items[i].LotName, "_", 2)
|
||||
if len(parts) >= 1 {
|
||||
items[i].Category = parts[0]
|
||||
}
|
||||
}
|
||||
|
||||
return items, total, nil
|
||||
}
|
||||
|
||||
// GetLotNames returns distinct lot names from pricelist items.
|
||||
func (r *PricelistRepository) GetLotNames(pricelistID uint) ([]string, error) {
|
||||
var lotNames []string
|
||||
if err := r.db.Model(&models.PricelistItem{}).
|
||||
Where("pricelist_id = ?", pricelistID).
|
||||
Distinct("lot_name").
|
||||
Order("lot_name ASC").
|
||||
Pluck("lot_name", &lotNames).Error; err != nil {
|
||||
return nil, fmt.Errorf("listing pricelist lot names: %w", err)
|
||||
}
|
||||
return lotNames, nil
|
||||
}
|
||||
|
||||
// GetPriceForLot returns item price for a lot within a pricelist.
|
||||
func (r *PricelistRepository) GetPriceForLot(pricelistID uint, lotName string) (float64, error) {
|
||||
var item models.PricelistItem
|
||||
@@ -268,28 +221,6 @@ func (r *PricelistRepository) GetPriceForLot(pricelistID uint, lotName string) (
|
||||
return item.Price, nil
|
||||
}
|
||||
|
||||
// GetPricesForLots returns price map for given lots within a pricelist.
|
||||
func (r *PricelistRepository) GetPricesForLots(pricelistID uint, lotNames []string) (map[string]float64, error) {
|
||||
result := make(map[string]float64, len(lotNames))
|
||||
if pricelistID == 0 || len(lotNames) == 0 {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
var rows []models.PricelistItem
|
||||
if err := r.db.Select("lot_name, price").
|
||||
Where("pricelist_id = ? AND lot_name IN ?", pricelistID, lotNames).
|
||||
Find(&rows).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, row := range rows {
|
||||
if row.Price > 0 {
|
||||
result[row.LotName] = row.Price
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// SetActive toggles active flag on a pricelist.
|
||||
func (r *PricelistRepository) SetActive(id uint, isActive bool) error {
|
||||
return r.db.Model(&models.Pricelist{}).Where("id = ?", id).Update("is_active", isActive).Error
|
||||
@@ -297,24 +228,18 @@ func (r *PricelistRepository) SetActive(id uint, isActive bool) error {
|
||||
|
||||
// GenerateVersion generates a new version string in format YYYY-MM-DD-NNN
|
||||
func (r *PricelistRepository) GenerateVersion() (string, error) {
|
||||
return r.GenerateVersionBySource(string(models.PricelistSourceEstimate))
|
||||
}
|
||||
|
||||
// GenerateVersionBySource generates a new version string in format YYYY-MM-DD-NNN scoped by source.
|
||||
func (r *PricelistRepository) GenerateVersionBySource(source string) (string, error) {
|
||||
today := time.Now().Format("2006-01-02")
|
||||
prefix := versionPrefixBySource(source)
|
||||
|
||||
var last models.Pricelist
|
||||
err := r.db.Model(&models.Pricelist{}).
|
||||
Select("version").
|
||||
Where("source = ? AND version LIKE ?", source, prefix+"-"+today+"-%").
|
||||
Where("version LIKE ?", today+"-%").
|
||||
Order("version DESC").
|
||||
Limit(1).
|
||||
Take(&last).Error
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return fmt.Sprintf("%s-%s-001", prefix, today), nil
|
||||
return fmt.Sprintf("%s-001", today), nil
|
||||
}
|
||||
return "", fmt.Errorf("loading latest today's pricelist version: %w", err)
|
||||
}
|
||||
@@ -329,31 +254,7 @@ func (r *PricelistRepository) GenerateVersionBySource(source string) (string, er
|
||||
return "", fmt.Errorf("parsing pricelist sequence %q: %w", parts[len(parts)-1], err)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s-%s-%03d", prefix, today, n+1), nil
|
||||
}
|
||||
|
||||
func versionPrefixBySource(source string) string {
|
||||
switch models.NormalizePricelistSource(source) {
|
||||
case models.PricelistSourceWarehouse:
|
||||
return "S"
|
||||
case models.PricelistSourceCompetitor:
|
||||
return "B"
|
||||
default:
|
||||
return "E"
|
||||
}
|
||||
}
|
||||
|
||||
// GetPriceForLotBySource returns item price for a lot from latest active pricelist of source.
|
||||
func (r *PricelistRepository) GetPriceForLotBySource(source, lotName string) (float64, uint, error) {
|
||||
latest, err := r.GetLatestActiveBySource(source)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
price, err := r.GetPriceForLot(latest.ID, lotName)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
return price, latest.ID, nil
|
||||
return fmt.Sprintf("%s-%03d", today, n+1), nil
|
||||
}
|
||||
|
||||
// CanWrite checks if the current database user has INSERT permission on qt_pricelists
|
||||
|
||||
@@ -13,13 +13,13 @@ import (
|
||||
func TestGenerateVersion_FirstOfDay(t *testing.T) {
|
||||
repo := newTestPricelistRepository(t)
|
||||
|
||||
version, err := repo.GenerateVersionBySource(string(models.PricelistSourceEstimate))
|
||||
version, err := repo.GenerateVersion()
|
||||
if err != nil {
|
||||
t.Fatalf("GenerateVersionBySource returned error: %v", err)
|
||||
t.Fatalf("GenerateVersion returned error: %v", err)
|
||||
}
|
||||
|
||||
today := time.Now().Format("2006-01-02")
|
||||
want := fmt.Sprintf("E-%s-001", today)
|
||||
want := fmt.Sprintf("%s-001", today)
|
||||
if version != want {
|
||||
t.Fatalf("expected %s, got %s", want, version)
|
||||
}
|
||||
@@ -30,8 +30,8 @@ func TestGenerateVersion_UsesMaxSuffixNotCount(t *testing.T) {
|
||||
today := time.Now().Format("2006-01-02")
|
||||
|
||||
seed := []models.Pricelist{
|
||||
{Source: string(models.PricelistSourceEstimate), Version: fmt.Sprintf("E-%s-001", today), CreatedBy: "test", IsActive: true},
|
||||
{Source: string(models.PricelistSourceEstimate), Version: fmt.Sprintf("E-%s-003", today), CreatedBy: "test", IsActive: true},
|
||||
{Version: fmt.Sprintf("%s-001", today), CreatedBy: "test", IsActive: true},
|
||||
{Version: fmt.Sprintf("%s-003", today), CreatedBy: "test", IsActive: true},
|
||||
}
|
||||
for _, pl := range seed {
|
||||
if err := repo.Create(&pl); err != nil {
|
||||
@@ -39,137 +39,17 @@ func TestGenerateVersion_UsesMaxSuffixNotCount(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
version, err := repo.GenerateVersionBySource(string(models.PricelistSourceEstimate))
|
||||
version, err := repo.GenerateVersion()
|
||||
if err != nil {
|
||||
t.Fatalf("GenerateVersionBySource returned error: %v", err)
|
||||
t.Fatalf("GenerateVersion returned error: %v", err)
|
||||
}
|
||||
|
||||
want := fmt.Sprintf("E-%s-004", today)
|
||||
want := fmt.Sprintf("%s-004", today)
|
||||
if version != want {
|
||||
t.Fatalf("expected %s, got %s", want, version)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateVersion_IsolatedBySource(t *testing.T) {
|
||||
repo := newTestPricelistRepository(t)
|
||||
today := time.Now().Format("2006-01-02")
|
||||
|
||||
seed := []models.Pricelist{
|
||||
{Source: string(models.PricelistSourceEstimate), Version: fmt.Sprintf("E-%s-009", today), CreatedBy: "test", IsActive: true},
|
||||
{Source: string(models.PricelistSourceWarehouse), Version: fmt.Sprintf("S-%s-002", today), CreatedBy: "test", IsActive: true},
|
||||
}
|
||||
for _, pl := range seed {
|
||||
if err := repo.Create(&pl); err != nil {
|
||||
t.Fatalf("seed insert failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
version, err := repo.GenerateVersionBySource(string(models.PricelistSourceWarehouse))
|
||||
if err != nil {
|
||||
t.Fatalf("GenerateVersionBySource returned error: %v", err)
|
||||
}
|
||||
|
||||
want := fmt.Sprintf("S-%s-003", today)
|
||||
if version != want {
|
||||
t.Fatalf("expected %s, got %s", want, version)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetLatestActiveBySource_SkipsPricelistsWithoutItems(t *testing.T) {
|
||||
repo := newTestPricelistRepository(t)
|
||||
db := repo.db
|
||||
ts := time.Now().Add(-time.Minute)
|
||||
source := "test-estimate-skip-empty"
|
||||
|
||||
emptyLatest := models.Pricelist{
|
||||
Source: source,
|
||||
Version: "E-empty",
|
||||
CreatedBy: "test",
|
||||
IsActive: true,
|
||||
CreatedAt: ts.Add(2 * time.Second),
|
||||
}
|
||||
if err := db.Create(&emptyLatest).Error; err != nil {
|
||||
t.Fatalf("create empty pricelist: %v", err)
|
||||
}
|
||||
|
||||
withItems := models.Pricelist{
|
||||
Source: source,
|
||||
Version: "E-with-items",
|
||||
CreatedBy: "test",
|
||||
IsActive: true,
|
||||
CreatedAt: ts,
|
||||
}
|
||||
if err := db.Create(&withItems).Error; err != nil {
|
||||
t.Fatalf("create pricelist with items: %v", err)
|
||||
}
|
||||
if err := db.Create(&models.PricelistItem{
|
||||
PricelistID: withItems.ID,
|
||||
LotName: "CPU_A",
|
||||
Price: 100,
|
||||
}).Error; err != nil {
|
||||
t.Fatalf("create pricelist item: %v", err)
|
||||
}
|
||||
|
||||
got, err := repo.GetLatestActiveBySource(source)
|
||||
if err != nil {
|
||||
t.Fatalf("GetLatestActiveBySource: %v", err)
|
||||
}
|
||||
if got.ID != withItems.ID {
|
||||
t.Fatalf("expected pricelist with items id=%d, got id=%d", withItems.ID, got.ID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetLatestActiveBySource_TieBreaksByID(t *testing.T) {
|
||||
repo := newTestPricelistRepository(t)
|
||||
db := repo.db
|
||||
ts := time.Now().Add(-time.Minute)
|
||||
source := "test-warehouse-tie-break"
|
||||
|
||||
first := models.Pricelist{
|
||||
Source: source,
|
||||
Version: "S-1",
|
||||
CreatedBy: "test",
|
||||
IsActive: true,
|
||||
CreatedAt: ts,
|
||||
}
|
||||
if err := db.Create(&first).Error; err != nil {
|
||||
t.Fatalf("create first pricelist: %v", err)
|
||||
}
|
||||
if err := db.Create(&models.PricelistItem{
|
||||
PricelistID: first.ID,
|
||||
LotName: "CPU_A",
|
||||
Price: 101,
|
||||
}).Error; err != nil {
|
||||
t.Fatalf("create first item: %v", err)
|
||||
}
|
||||
|
||||
second := models.Pricelist{
|
||||
Source: source,
|
||||
Version: "S-2",
|
||||
CreatedBy: "test",
|
||||
IsActive: true,
|
||||
CreatedAt: ts,
|
||||
}
|
||||
if err := db.Create(&second).Error; err != nil {
|
||||
t.Fatalf("create second pricelist: %v", err)
|
||||
}
|
||||
if err := db.Create(&models.PricelistItem{
|
||||
PricelistID: second.ID,
|
||||
LotName: "CPU_A",
|
||||
Price: 102,
|
||||
}).Error; err != nil {
|
||||
t.Fatalf("create second item: %v", err)
|
||||
}
|
||||
|
||||
got, err := repo.GetLatestActiveBySource(source)
|
||||
if err != nil {
|
||||
t.Fatalf("GetLatestActiveBySource: %v", err)
|
||||
}
|
||||
if got.ID != second.ID {
|
||||
t.Fatalf("expected later inserted pricelist id=%d, got id=%d", second.ID, got.ID)
|
||||
}
|
||||
}
|
||||
|
||||
func newTestPricelistRepository(t *testing.T) *PricelistRepository {
|
||||
t.Helper()
|
||||
|
||||
@@ -177,7 +57,7 @@ func newTestPricelistRepository(t *testing.T) *PricelistRepository {
|
||||
if err != nil {
|
||||
t.Fatalf("open sqlite: %v", err)
|
||||
}
|
||||
if err := db.AutoMigrate(&models.Pricelist{}, &models.PricelistItem{}, &models.Lot{}, &models.StockLog{}); err != nil {
|
||||
if err := db.AutoMigrate(&models.Pricelist{}); err != nil {
|
||||
t.Fatalf("migrate: %v", err)
|
||||
}
|
||||
return NewPricelistRepository(db)
|
||||
|
||||
@@ -3,7 +3,6 @@ package repository
|
||||
import (
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
type ProjectRepository struct {
|
||||
@@ -22,32 +21,6 @@ func (r *ProjectRepository) Update(project *models.Project) error {
|
||||
return r.db.Save(project).Error
|
||||
}
|
||||
|
||||
func (r *ProjectRepository) UpsertByUUID(project *models.Project) error {
|
||||
if err := r.db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "uuid"}},
|
||||
DoUpdates: clause.AssignmentColumns([]string{
|
||||
"owner_username",
|
||||
"code",
|
||||
"variant",
|
||||
"name",
|
||||
"tracker_url",
|
||||
"is_active",
|
||||
"is_system",
|
||||
"updated_at",
|
||||
}),
|
||||
}).Create(project).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Ensure caller always gets canonical server ID.
|
||||
var persisted models.Project
|
||||
if err := r.db.Where("uuid = ?", project.UUID).First(&persisted).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
project.ID = persisted.ID
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ProjectRepository) GetByUUID(uuid string) (*models.Project, error) {
|
||||
var project models.Project
|
||||
if err := r.db.Where("uuid = ?", uuid).First(&project).Error; err != nil {
|
||||
|
||||
@@ -83,6 +83,10 @@ func (r *UnifiedRepo) getComponentsOffline(filter ComponentFilter, offset, limit
|
||||
search := "%" + filter.Search + "%"
|
||||
query = query.Where("lot_name LIKE ? OR lot_description LIKE ? OR model LIKE ?", search, search, search)
|
||||
}
|
||||
if filter.HasPrice {
|
||||
query = query.Where("current_price IS NOT NULL AND current_price > 0")
|
||||
}
|
||||
|
||||
var total int64
|
||||
query.Count(&total)
|
||||
|
||||
@@ -92,6 +96,8 @@ func (r *UnifiedRepo) getComponentsOffline(filter ComponentFilter, offset, limit
|
||||
sortDir = "DESC"
|
||||
}
|
||||
switch filter.SortField {
|
||||
case "current_price":
|
||||
query = query.Order("current_price " + sortDir)
|
||||
case "lot_name":
|
||||
query = query.Order("lot_name " + sortDir)
|
||||
default:
|
||||
@@ -106,8 +112,9 @@ func (r *UnifiedRepo) getComponentsOffline(filter ComponentFilter, offset, limit
|
||||
result := make([]models.LotMetadata, len(components))
|
||||
for i, comp := range components {
|
||||
result[i] = models.LotMetadata{
|
||||
LotName: comp.LotName,
|
||||
Model: comp.Model,
|
||||
LotName: comp.LotName,
|
||||
Model: comp.Model,
|
||||
CurrentPrice: comp.CurrentPrice,
|
||||
Lot: &models.Lot{
|
||||
LotName: comp.LotName,
|
||||
LotDescription: comp.LotDescription,
|
||||
@@ -131,8 +138,9 @@ func (r *UnifiedRepo) GetComponent(lotName string) (*models.LotMetadata, error)
|
||||
}
|
||||
|
||||
return &models.LotMetadata{
|
||||
LotName: comp.LotName,
|
||||
Model: comp.Model,
|
||||
LotName: comp.LotName,
|
||||
Model: comp.Model,
|
||||
CurrentPrice: comp.CurrentPrice,
|
||||
Lot: &models.Lot{
|
||||
LotName: comp.LotName,
|
||||
LotDescription: comp.LotDescription,
|
||||
|
||||
62
internal/repository/user.go
Normal file
62
internal/repository/user.go
Normal file
@@ -0,0 +1,62 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
type UserRepository struct {
|
||||
db *gorm.DB
|
||||
}
|
||||
|
||||
func NewUserRepository(db *gorm.DB) *UserRepository {
|
||||
return &UserRepository{db: db}
|
||||
}
|
||||
|
||||
func (r *UserRepository) Create(user *models.User) error {
|
||||
return r.db.Create(user).Error
|
||||
}
|
||||
|
||||
func (r *UserRepository) GetByID(id uint) (*models.User, error) {
|
||||
var user models.User
|
||||
err := r.db.First(&user, id).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &user, nil
|
||||
}
|
||||
|
||||
func (r *UserRepository) GetByUsername(username string) (*models.User, error) {
|
||||
var user models.User
|
||||
err := r.db.Where("username = ?", username).First(&user).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &user, nil
|
||||
}
|
||||
|
||||
func (r *UserRepository) GetByEmail(email string) (*models.User, error) {
|
||||
var user models.User
|
||||
err := r.db.Where("email = ?", email).First(&user).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &user, nil
|
||||
}
|
||||
|
||||
func (r *UserRepository) Update(user *models.User) error {
|
||||
return r.db.Save(user).Error
|
||||
}
|
||||
|
||||
func (r *UserRepository) Delete(id uint) error {
|
||||
return r.db.Delete(&models.User{}, id).Error
|
||||
}
|
||||
|
||||
func (r *UserRepository) List(offset, limit int) ([]models.User, int64, error) {
|
||||
var users []models.User
|
||||
var total int64
|
||||
|
||||
r.db.Model(&models.User{}).Count(&total)
|
||||
err := r.db.Offset(offset).Limit(limit).Find(&users).Error
|
||||
return users, total, err
|
||||
}
|
||||
199
internal/services/alerts/service.go
Normal file
199
internal/services/alerts/service.go
Normal file
@@ -0,0 +1,199 @@
|
||||
package alerts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||
)
|
||||
|
||||
type Service struct {
|
||||
alertRepo *repository.AlertRepository
|
||||
componentRepo *repository.ComponentRepository
|
||||
priceRepo *repository.PriceRepository
|
||||
statsRepo *repository.StatsRepository
|
||||
config config.AlertsConfig
|
||||
pricingConfig config.PricingConfig
|
||||
}
|
||||
|
||||
func NewService(
|
||||
alertRepo *repository.AlertRepository,
|
||||
componentRepo *repository.ComponentRepository,
|
||||
priceRepo *repository.PriceRepository,
|
||||
statsRepo *repository.StatsRepository,
|
||||
alertCfg config.AlertsConfig,
|
||||
pricingCfg config.PricingConfig,
|
||||
) *Service {
|
||||
return &Service{
|
||||
alertRepo: alertRepo,
|
||||
componentRepo: componentRepo,
|
||||
priceRepo: priceRepo,
|
||||
statsRepo: statsRepo,
|
||||
config: alertCfg,
|
||||
pricingConfig: pricingCfg,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) List(filter repository.AlertFilter, page, perPage int) ([]models.PricingAlert, int64, error) {
|
||||
if page < 1 {
|
||||
page = 1
|
||||
}
|
||||
if perPage < 1 || perPage > 100 {
|
||||
perPage = 20
|
||||
}
|
||||
offset := (page - 1) * perPage
|
||||
|
||||
return s.alertRepo.List(filter, offset, perPage)
|
||||
}
|
||||
|
||||
func (s *Service) Acknowledge(id uint) error {
|
||||
return s.alertRepo.UpdateStatus(id, models.AlertStatusAcknowledged)
|
||||
}
|
||||
|
||||
func (s *Service) Resolve(id uint) error {
|
||||
return s.alertRepo.UpdateStatus(id, models.AlertStatusResolved)
|
||||
}
|
||||
|
||||
func (s *Service) Ignore(id uint) error {
|
||||
return s.alertRepo.UpdateStatus(id, models.AlertStatusIgnored)
|
||||
}
|
||||
|
||||
func (s *Service) GetNewAlertsCount() (int64, error) {
|
||||
return s.alertRepo.CountByStatus(models.AlertStatusNew)
|
||||
}
|
||||
|
||||
// CheckAndGenerateAlerts scans components and creates alerts
|
||||
func (s *Service) CheckAndGenerateAlerts() error {
|
||||
if !s.config.Enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get top components by usage
|
||||
topComponents, err := s.statsRepo.GetTopComponents(100)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, stats := range topComponents {
|
||||
component, err := s.componentRepo.GetByLotName(stats.LotName)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check high demand + stale price
|
||||
if err := s.checkHighDemandStalePrice(component, &stats); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check trending without price
|
||||
if err := s.checkTrendingNoPrice(component, &stats); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check no recent quotes
|
||||
if err := s.checkNoRecentQuotes(component, &stats); err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) checkHighDemandStalePrice(comp *models.LotMetadata, stats *models.ComponentUsageStats) error {
|
||||
// high_demand_stale_price: >= 5 quotes/month AND price > 60 days old
|
||||
if stats.QuotesLast30d < s.config.HighDemandThreshold {
|
||||
return nil
|
||||
}
|
||||
|
||||
if comp.PriceUpdatedAt == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
daysSinceUpdate := int(time.Since(*comp.PriceUpdatedAt).Hours() / 24)
|
||||
if daysSinceUpdate <= s.pricingConfig.FreshnessYellowDays {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if alert already exists
|
||||
exists, _ := s.alertRepo.ExistsByLotAndType(comp.LotName, models.AlertHighDemandStalePrice)
|
||||
if exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
alert := &models.PricingAlert{
|
||||
LotName: comp.LotName,
|
||||
AlertType: models.AlertHighDemandStalePrice,
|
||||
Severity: models.SeverityCritical,
|
||||
Message: fmt.Sprintf("Компонент %s: высокий спрос (%d КП/мес), но цена устарела (%d дней)", comp.LotName, stats.QuotesLast30d, daysSinceUpdate),
|
||||
Details: models.AlertDetails{
|
||||
"quotes_30d": stats.QuotesLast30d,
|
||||
"days_since_update": daysSinceUpdate,
|
||||
},
|
||||
}
|
||||
|
||||
return s.alertRepo.Create(alert)
|
||||
}
|
||||
|
||||
func (s *Service) checkTrendingNoPrice(comp *models.LotMetadata, stats *models.ComponentUsageStats) error {
|
||||
// trending_no_price: trend > 50% AND no price
|
||||
if stats.TrendDirection != models.TrendUp || stats.TrendPercent < float64(s.config.TrendingThresholdPercent) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if comp.CurrentPrice != nil && *comp.CurrentPrice > 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
exists, _ := s.alertRepo.ExistsByLotAndType(comp.LotName, models.AlertTrendingNoPrice)
|
||||
if exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
alert := &models.PricingAlert{
|
||||
LotName: comp.LotName,
|
||||
AlertType: models.AlertTrendingNoPrice,
|
||||
Severity: models.SeverityHigh,
|
||||
Message: fmt.Sprintf("Компонент %s: рост спроса +%.0f%%, но цена не установлена", comp.LotName, stats.TrendPercent),
|
||||
Details: models.AlertDetails{
|
||||
"trend_percent": stats.TrendPercent,
|
||||
},
|
||||
}
|
||||
|
||||
return s.alertRepo.Create(alert)
|
||||
}
|
||||
|
||||
func (s *Service) checkNoRecentQuotes(comp *models.LotMetadata, stats *models.ComponentUsageStats) error {
|
||||
// no_recent_quotes: popular component, no supplier quotes > 90 days
|
||||
if stats.QuotesLast30d < 3 {
|
||||
return nil
|
||||
}
|
||||
|
||||
quoteCount, err := s.priceRepo.GetQuoteCount(comp.LotName, s.pricingConfig.FreshnessRedDays)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if quoteCount > 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
exists, _ := s.alertRepo.ExistsByLotAndType(comp.LotName, models.AlertNoRecentQuotes)
|
||||
if exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
alert := &models.PricingAlert{
|
||||
LotName: comp.LotName,
|
||||
AlertType: models.AlertNoRecentQuotes,
|
||||
Severity: models.SeverityMedium,
|
||||
Message: fmt.Sprintf("Компонент %s: популярный (%d КП), но нет новых котировок >%d дней", comp.LotName, stats.QuotesLast30d, s.pricingConfig.FreshnessRedDays),
|
||||
Details: models.AlertDetails{
|
||||
"quotes_30d": stats.QuotesLast30d,
|
||||
"no_quotes_days": s.pricingConfig.FreshnessRedDays,
|
||||
},
|
||||
}
|
||||
|
||||
return s.alertRepo.Create(alert)
|
||||
}
|
||||
180
internal/services/auth.go
Normal file
180
internal/services/auth.go
Normal file
@@ -0,0 +1,180 @@
|
||||
package services
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/golang-jwt/jwt/v5"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrInvalidCredentials = errors.New("invalid username or password")
|
||||
ErrUserNotFound = errors.New("user not found")
|
||||
ErrUserInactive = errors.New("user account is inactive")
|
||||
ErrInvalidToken = errors.New("invalid token")
|
||||
ErrTokenExpired = errors.New("token expired")
|
||||
)
|
||||
|
||||
type AuthService struct {
|
||||
userRepo *repository.UserRepository
|
||||
config config.AuthConfig
|
||||
}
|
||||
|
||||
func NewAuthService(userRepo *repository.UserRepository, cfg config.AuthConfig) *AuthService {
|
||||
return &AuthService{
|
||||
userRepo: userRepo,
|
||||
config: cfg,
|
||||
}
|
||||
}
|
||||
|
||||
type TokenPair struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresAt int64 `json:"expires_at"`
|
||||
}
|
||||
|
||||
type Claims struct {
|
||||
UserID uint `json:"user_id"`
|
||||
Username string `json:"username"`
|
||||
Role models.UserRole `json:"role"`
|
||||
jwt.RegisteredClaims
|
||||
}
|
||||
|
||||
func (s *AuthService) Login(username, password string) (*TokenPair, *models.User, error) {
|
||||
user, err := s.userRepo.GetByUsername(username)
|
||||
if err != nil {
|
||||
return nil, nil, ErrInvalidCredentials
|
||||
}
|
||||
|
||||
if !user.IsActive {
|
||||
return nil, nil, ErrUserInactive
|
||||
}
|
||||
|
||||
if err := bcrypt.CompareHashAndPassword([]byte(user.PasswordHash), []byte(password)); err != nil {
|
||||
return nil, nil, ErrInvalidCredentials
|
||||
}
|
||||
|
||||
tokens, err := s.generateTokenPair(user)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return tokens, user, nil
|
||||
}
|
||||
|
||||
func (s *AuthService) RefreshTokens(refreshToken string) (*TokenPair, error) {
|
||||
claims, err := s.ValidateToken(refreshToken)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
user, err := s.userRepo.GetByID(claims.UserID)
|
||||
if err != nil {
|
||||
return nil, ErrUserNotFound
|
||||
}
|
||||
|
||||
if !user.IsActive {
|
||||
return nil, ErrUserInactive
|
||||
}
|
||||
|
||||
return s.generateTokenPair(user)
|
||||
}
|
||||
|
||||
func (s *AuthService) ValidateToken(tokenString string) (*Claims, error) {
|
||||
token, err := jwt.ParseWithClaims(tokenString, &Claims{}, func(token *jwt.Token) (interface{}, error) {
|
||||
return []byte(s.config.JWTSecret), nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if errors.Is(err, jwt.ErrTokenExpired) {
|
||||
return nil, ErrTokenExpired
|
||||
}
|
||||
return nil, ErrInvalidToken
|
||||
}
|
||||
|
||||
claims, ok := token.Claims.(*Claims)
|
||||
if !ok || !token.Valid {
|
||||
return nil, ErrInvalidToken
|
||||
}
|
||||
|
||||
return claims, nil
|
||||
}
|
||||
|
||||
func (s *AuthService) generateTokenPair(user *models.User) (*TokenPair, error) {
|
||||
now := time.Now()
|
||||
accessExpiry := now.Add(s.config.TokenExpiry)
|
||||
refreshExpiry := now.Add(s.config.RefreshExpiry)
|
||||
|
||||
accessClaims := &Claims{
|
||||
UserID: user.ID,
|
||||
Username: user.Username,
|
||||
Role: user.Role,
|
||||
RegisteredClaims: jwt.RegisteredClaims{
|
||||
ExpiresAt: jwt.NewNumericDate(accessExpiry),
|
||||
IssuedAt: jwt.NewNumericDate(now),
|
||||
Subject: user.Username,
|
||||
},
|
||||
}
|
||||
|
||||
accessToken := jwt.NewWithClaims(jwt.SigningMethodHS256, accessClaims)
|
||||
accessTokenString, err := accessToken.SignedString([]byte(s.config.JWTSecret))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
refreshClaims := &Claims{
|
||||
UserID: user.ID,
|
||||
Username: user.Username,
|
||||
Role: user.Role,
|
||||
RegisteredClaims: jwt.RegisteredClaims{
|
||||
ExpiresAt: jwt.NewNumericDate(refreshExpiry),
|
||||
IssuedAt: jwt.NewNumericDate(now),
|
||||
Subject: user.Username,
|
||||
},
|
||||
}
|
||||
|
||||
refreshToken := jwt.NewWithClaims(jwt.SigningMethodHS256, refreshClaims)
|
||||
refreshTokenString, err := refreshToken.SignedString([]byte(s.config.JWTSecret))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &TokenPair{
|
||||
AccessToken: accessTokenString,
|
||||
RefreshToken: refreshTokenString,
|
||||
ExpiresAt: accessExpiry.Unix(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *AuthService) HashPassword(password string) (string, error) {
|
||||
hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(hash), nil
|
||||
}
|
||||
|
||||
func (s *AuthService) CreateUser(username, email, password string, role models.UserRole) (*models.User, error) {
|
||||
hash, err := s.HashPassword(password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
user := &models.User{
|
||||
Username: username,
|
||||
Email: email,
|
||||
PasswordHash: hash,
|
||||
Role: role,
|
||||
IsActive: true,
|
||||
}
|
||||
|
||||
if err := s.userRepo.Create(user); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return user, nil
|
||||
}
|
||||
@@ -53,6 +53,7 @@ type ComponentView struct {
|
||||
Category string `json:"category"`
|
||||
CategoryName string `json:"category_name"`
|
||||
Model string `json:"model"`
|
||||
CurrentPrice *float64 `json:"current_price"`
|
||||
PriceFreshness models.PriceFreshness `json:"price_freshness"`
|
||||
PopularityScore float64 `json:"popularity_score"`
|
||||
Specs models.Specs `json:"specs,omitempty"`
|
||||
@@ -91,6 +92,7 @@ func (s *ComponentService) List(filter repository.ComponentFilter, page, perPage
|
||||
view := ComponentView{
|
||||
LotName: c.LotName,
|
||||
Model: c.Model,
|
||||
CurrentPrice: c.CurrentPrice,
|
||||
PriceFreshness: c.GetPriceFreshness(30, 60, 90, 3),
|
||||
PopularityScore: c.PopularityScore,
|
||||
Specs: c.Specs,
|
||||
@@ -132,6 +134,7 @@ func (s *ComponentService) GetByLotName(lotName string) (*ComponentView, error)
|
||||
view := &ComponentView{
|
||||
LotName: c.LotName,
|
||||
Model: c.Model,
|
||||
CurrentPrice: c.CurrentPrice,
|
||||
PriceFreshness: c.GetPriceFreshness(30, 60, 90, 3),
|
||||
PopularityScore: c.PopularityScore,
|
||||
Specs: c.Specs,
|
||||
|
||||
@@ -45,27 +45,13 @@ func NewConfigurationService(
|
||||
}
|
||||
|
||||
type CreateConfigRequest struct {
|
||||
Name string `json:"name"`
|
||||
Items models.ConfigItems `json:"items"`
|
||||
ProjectUUID *string `json:"project_uuid,omitempty"`
|
||||
CustomPrice *float64 `json:"custom_price"`
|
||||
Notes string `json:"notes"`
|
||||
IsTemplate bool `json:"is_template"`
|
||||
ServerCount int `json:"server_count"`
|
||||
ServerModel string `json:"server_model,omitempty"`
|
||||
SupportCode string `json:"support_code,omitempty"`
|
||||
Article string `json:"article,omitempty"`
|
||||
PricelistID *uint `json:"pricelist_id,omitempty"`
|
||||
WarehousePricelistID *uint `json:"warehouse_pricelist_id,omitempty"`
|
||||
CompetitorPricelistID *uint `json:"competitor_pricelist_id,omitempty"`
|
||||
DisablePriceRefresh bool `json:"disable_price_refresh"`
|
||||
OnlyInStock bool `json:"only_in_stock"`
|
||||
}
|
||||
|
||||
type ArticlePreviewRequest struct {
|
||||
Name string `json:"name"`
|
||||
Items models.ConfigItems `json:"items"`
|
||||
ServerModel string `json:"server_model"`
|
||||
SupportCode string `json:"support_code,omitempty"`
|
||||
ProjectUUID *string `json:"project_uuid,omitempty"`
|
||||
CustomPrice *float64 `json:"custom_price"`
|
||||
Notes string `json:"notes"`
|
||||
IsTemplate bool `json:"is_template"`
|
||||
ServerCount int `json:"server_count"`
|
||||
PricelistID *uint `json:"pricelist_id,omitempty"`
|
||||
}
|
||||
|
||||
@@ -87,24 +73,17 @@ func (s *ConfigurationService) Create(ownerUsername string, req *CreateConfigReq
|
||||
}
|
||||
|
||||
config := &models.Configuration{
|
||||
UUID: uuid.New().String(),
|
||||
OwnerUsername: ownerUsername,
|
||||
ProjectUUID: projectUUID,
|
||||
Name: req.Name,
|
||||
Items: req.Items,
|
||||
TotalPrice: &total,
|
||||
CustomPrice: req.CustomPrice,
|
||||
Notes: req.Notes,
|
||||
IsTemplate: req.IsTemplate,
|
||||
ServerCount: req.ServerCount,
|
||||
ServerModel: req.ServerModel,
|
||||
SupportCode: req.SupportCode,
|
||||
Article: req.Article,
|
||||
PricelistID: pricelistID,
|
||||
WarehousePricelistID: req.WarehousePricelistID,
|
||||
CompetitorPricelistID: req.CompetitorPricelistID,
|
||||
DisablePriceRefresh: req.DisablePriceRefresh,
|
||||
OnlyInStock: req.OnlyInStock,
|
||||
UUID: uuid.New().String(),
|
||||
OwnerUsername: ownerUsername,
|
||||
ProjectUUID: projectUUID,
|
||||
Name: req.Name,
|
||||
Items: req.Items,
|
||||
TotalPrice: &total,
|
||||
CustomPrice: req.CustomPrice,
|
||||
Notes: req.Notes,
|
||||
IsTemplate: req.IsTemplate,
|
||||
ServerCount: req.ServerCount,
|
||||
PricelistID: pricelistID,
|
||||
}
|
||||
|
||||
if err := s.configRepo.Create(config); err != nil {
|
||||
@@ -165,14 +144,7 @@ func (s *ConfigurationService) Update(uuid string, ownerUsername string, req *Cr
|
||||
config.Notes = req.Notes
|
||||
config.IsTemplate = req.IsTemplate
|
||||
config.ServerCount = req.ServerCount
|
||||
config.ServerModel = req.ServerModel
|
||||
config.SupportCode = req.SupportCode
|
||||
config.Article = req.Article
|
||||
config.PricelistID = pricelistID
|
||||
config.WarehousePricelistID = req.WarehousePricelistID
|
||||
config.CompetitorPricelistID = req.CompetitorPricelistID
|
||||
config.DisablePriceRefresh = req.DisablePriceRefresh
|
||||
config.OnlyInStock = req.OnlyInStock
|
||||
|
||||
if err := s.configRepo.Update(config); err != nil {
|
||||
return nil, err
|
||||
@@ -239,24 +211,17 @@ func (s *ConfigurationService) CloneToProject(configUUID string, ownerUsername s
|
||||
}
|
||||
|
||||
clone := &models.Configuration{
|
||||
UUID: uuid.New().String(),
|
||||
OwnerUsername: ownerUsername,
|
||||
ProjectUUID: resolvedProjectUUID,
|
||||
Name: newName,
|
||||
Items: original.Items,
|
||||
TotalPrice: &total,
|
||||
CustomPrice: original.CustomPrice,
|
||||
Notes: original.Notes,
|
||||
IsTemplate: false, // Clone is never a template
|
||||
ServerCount: original.ServerCount,
|
||||
ServerModel: original.ServerModel,
|
||||
SupportCode: original.SupportCode,
|
||||
Article: original.Article,
|
||||
PricelistID: original.PricelistID,
|
||||
WarehousePricelistID: original.WarehousePricelistID,
|
||||
CompetitorPricelistID: original.CompetitorPricelistID,
|
||||
DisablePriceRefresh: original.DisablePriceRefresh,
|
||||
OnlyInStock: original.OnlyInStock,
|
||||
UUID: uuid.New().String(),
|
||||
OwnerUsername: ownerUsername,
|
||||
ProjectUUID: resolvedProjectUUID,
|
||||
Name: newName,
|
||||
Items: original.Items,
|
||||
TotalPrice: &total,
|
||||
CustomPrice: original.CustomPrice,
|
||||
Notes: original.Notes,
|
||||
IsTemplate: false, // Clone is never a template
|
||||
ServerCount: original.ServerCount,
|
||||
PricelistID: original.PricelistID,
|
||||
}
|
||||
|
||||
if err := s.configRepo.Create(clone); err != nil {
|
||||
@@ -329,14 +294,7 @@ func (s *ConfigurationService) UpdateNoAuth(uuid string, req *CreateConfigReques
|
||||
config.Notes = req.Notes
|
||||
config.IsTemplate = req.IsTemplate
|
||||
config.ServerCount = req.ServerCount
|
||||
config.ServerModel = req.ServerModel
|
||||
config.SupportCode = req.SupportCode
|
||||
config.Article = req.Article
|
||||
config.PricelistID = pricelistID
|
||||
config.WarehousePricelistID = req.WarehousePricelistID
|
||||
config.CompetitorPricelistID = req.CompetitorPricelistID
|
||||
config.DisablePriceRefresh = req.DisablePriceRefresh
|
||||
config.OnlyInStock = req.OnlyInStock
|
||||
|
||||
if err := s.configRepo.Update(config); err != nil {
|
||||
return nil, err
|
||||
@@ -404,7 +362,6 @@ func (s *ConfigurationService) CloneNoAuthToProject(configUUID string, newName s
|
||||
IsTemplate: false,
|
||||
ServerCount: original.ServerCount,
|
||||
PricelistID: original.PricelistID,
|
||||
OnlyInStock: original.OnlyInStock,
|
||||
}
|
||||
|
||||
if err := s.configRepo.Create(clone); err != nil {
|
||||
@@ -608,7 +565,13 @@ func (s *ConfigurationService) isOwner(config *models.Configuration, ownerUserna
|
||||
if config == nil || ownerUsername == "" {
|
||||
return false
|
||||
}
|
||||
return config.OwnerUsername == ownerUsername
|
||||
if config.OwnerUsername != "" {
|
||||
return config.OwnerUsername == ownerUsername
|
||||
}
|
||||
if config.User != nil {
|
||||
return config.User.Username == ownerUsername
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// // Export configuration as JSON
|
||||
|
||||
@@ -4,33 +4,33 @@ import (
|
||||
"bytes"
|
||||
"encoding/csv"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/localdb"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/repository"
|
||||
)
|
||||
|
||||
type ExportService struct {
|
||||
config config.ExportConfig
|
||||
config config.ExportConfig
|
||||
categoryRepo *repository.CategoryRepository
|
||||
localDB *localdb.LocalDB
|
||||
}
|
||||
|
||||
func NewExportService(cfg config.ExportConfig, categoryRepo *repository.CategoryRepository, local *localdb.LocalDB) *ExportService {
|
||||
func NewExportService(cfg config.ExportConfig, categoryRepo *repository.CategoryRepository) *ExportService {
|
||||
return &ExportService{
|
||||
config: cfg,
|
||||
categoryRepo: categoryRepo,
|
||||
localDB: local,
|
||||
}
|
||||
}
|
||||
|
||||
// ExportItem represents a single component in an export block.
|
||||
type ExportData struct {
|
||||
Name string
|
||||
Items []ExportItem
|
||||
Total float64
|
||||
Notes string
|
||||
CreatedAt time.Time
|
||||
}
|
||||
|
||||
type ExportItem struct {
|
||||
LotName string
|
||||
Description string
|
||||
@@ -40,91 +40,14 @@ type ExportItem struct {
|
||||
TotalPrice float64
|
||||
}
|
||||
|
||||
// ConfigExportBlock represents one configuration (server) in the export.
|
||||
type ConfigExportBlock struct {
|
||||
Article string
|
||||
Line int
|
||||
ServerCount int
|
||||
UnitPrice float64 // sum of component prices for one server
|
||||
Items []ExportItem
|
||||
}
|
||||
|
||||
// ProjectExportData holds all configuration blocks for a project-level export.
|
||||
type ProjectExportData struct {
|
||||
Configs []ConfigExportBlock
|
||||
CreatedAt time.Time
|
||||
}
|
||||
|
||||
type ProjectPricingExportOptions struct {
|
||||
IncludeLOT bool `json:"include_lot"`
|
||||
IncludeBOM bool `json:"include_bom"`
|
||||
IncludeEstimate bool `json:"include_estimate"`
|
||||
IncludeStock bool `json:"include_stock"`
|
||||
IncludeCompetitor bool `json:"include_competitor"`
|
||||
Basis string `json:"basis"` // "fob" or "ddp"; empty defaults to "fob"
|
||||
SaleMarkup float64 `json:"sale_markup"` // DDP multiplier; 0 defaults to 1.3
|
||||
}
|
||||
|
||||
func (o ProjectPricingExportOptions) saleMarkupFactor() float64 {
|
||||
if o.SaleMarkup > 0 {
|
||||
return o.SaleMarkup
|
||||
}
|
||||
return 1.3
|
||||
}
|
||||
|
||||
func (o ProjectPricingExportOptions) isDDP() bool {
|
||||
return strings.EqualFold(strings.TrimSpace(o.Basis), "ddp")
|
||||
}
|
||||
|
||||
type ProjectPricingExportData struct {
|
||||
Configs []ProjectPricingExportConfig
|
||||
CreatedAt time.Time
|
||||
}
|
||||
|
||||
type ProjectPricingExportConfig struct {
|
||||
Name string
|
||||
Article string
|
||||
Line int
|
||||
ServerCount int
|
||||
Rows []ProjectPricingExportRow
|
||||
}
|
||||
|
||||
type ProjectPricingExportRow struct {
|
||||
LotDisplay string
|
||||
VendorPN string
|
||||
Description string
|
||||
Quantity int
|
||||
BOMTotal *float64
|
||||
Estimate *float64
|
||||
Stock *float64
|
||||
Competitor *float64
|
||||
}
|
||||
|
||||
// ToCSV writes project export data in the new structured CSV format.
|
||||
//
|
||||
// Format:
|
||||
//
|
||||
// Line;Type;p/n;Description;Qty (1 pcs.);Qty (total);Price (1 pcs.);Price (total)
|
||||
// 10;;DL380-ARTICLE;;;10;10470;104 700
|
||||
// ;;MB_INTEL_...;;1;;2074,5;
|
||||
// ...
|
||||
// (empty row)
|
||||
// 20;;DL380-ARTICLE-2;;;2;10470;20 940
|
||||
// ...
|
||||
func (s *ExportService) ToCSV(w io.Writer, data *ProjectExportData) error {
|
||||
// Write UTF-8 BOM for Excel compatibility
|
||||
if _, err := w.Write([]byte{0xEF, 0xBB, 0xBF}); err != nil {
|
||||
return fmt.Errorf("failed to write BOM: %w", err)
|
||||
}
|
||||
|
||||
csvWriter := csv.NewWriter(w)
|
||||
csvWriter.Comma = ';'
|
||||
defer csvWriter.Flush()
|
||||
func (s *ExportService) ToCSV(data *ExportData) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
w := csv.NewWriter(&buf)
|
||||
|
||||
// Header
|
||||
headers := []string{"Line", "Type", "p/n", "Description", "Qty (1 pcs.)", "Qty (total)", "Price (1 pcs.)", "Price (total)"}
|
||||
if err := csvWriter.Write(headers); err != nil {
|
||||
return fmt.Errorf("failed to write header: %w", err)
|
||||
headers := []string{"Артикул", "Описание", "Категория", "Количество", "Цена за единицу", "Сумма"}
|
||||
if err := w.Write(headers); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get category hierarchy for sorting
|
||||
@@ -138,766 +61,87 @@ func (s *ExportService) ToCSV(w io.Writer, data *ProjectExportData) error {
|
||||
}
|
||||
}
|
||||
|
||||
for i, block := range data.Configs {
|
||||
lineNo := block.Line
|
||||
if lineNo <= 0 {
|
||||
lineNo = (i + 1) * 10
|
||||
}
|
||||
// Sort items by category display order
|
||||
sortedItems := make([]ExportItem, len(data.Items))
|
||||
copy(sortedItems, data.Items)
|
||||
|
||||
serverCount := block.ServerCount
|
||||
if serverCount < 1 {
|
||||
serverCount = 1
|
||||
}
|
||||
// Sort using category display order (items without category go to the end)
|
||||
for i := 0; i < len(sortedItems)-1; i++ {
|
||||
for j := i + 1; j < len(sortedItems); j++ {
|
||||
orderI, hasI := categoryOrder[sortedItems[i].Category]
|
||||
orderJ, hasJ := categoryOrder[sortedItems[j].Category]
|
||||
|
||||
totalPrice := block.UnitPrice * float64(serverCount)
|
||||
|
||||
// Server summary row
|
||||
serverRow := []string{
|
||||
fmt.Sprintf("%d", lineNo), // Line
|
||||
"", // Type
|
||||
block.Article, // p/n
|
||||
"", // Description
|
||||
"", // Qty (1 pcs.)
|
||||
fmt.Sprintf("%d", serverCount), // Qty (total)
|
||||
formatPriceInt(block.UnitPrice), // Price (1 pcs.)
|
||||
formatPriceWithSpace(totalPrice), // Price (total)
|
||||
}
|
||||
if err := csvWriter.Write(serverRow); err != nil {
|
||||
return fmt.Errorf("failed to write server row: %w", err)
|
||||
}
|
||||
|
||||
// Sort items by category display order
|
||||
sortedItems := make([]ExportItem, len(block.Items))
|
||||
copy(sortedItems, block.Items)
|
||||
sortItemsByCategory(sortedItems, categoryOrder)
|
||||
|
||||
// Component rows
|
||||
for _, item := range sortedItems {
|
||||
componentRow := []string{
|
||||
"", // Line
|
||||
item.Category, // Type
|
||||
item.LotName, // p/n
|
||||
"", // Description
|
||||
fmt.Sprintf("%d", item.Quantity), // Qty (1 pcs.)
|
||||
"", // Qty (total)
|
||||
formatPriceComma(item.UnitPrice), // Price (1 pcs.)
|
||||
"", // Price (total)
|
||||
}
|
||||
if err := csvWriter.Write(componentRow); err != nil {
|
||||
return fmt.Errorf("failed to write component row: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Empty separator row between blocks (skip after last)
|
||||
if i < len(data.Configs)-1 {
|
||||
if err := csvWriter.Write([]string{"", "", "", "", "", "", "", ""}); err != nil {
|
||||
return fmt.Errorf("failed to write separator row: %w", err)
|
||||
// Items without category go to the end
|
||||
if !hasI && hasJ {
|
||||
sortedItems[i], sortedItems[j] = sortedItems[j], sortedItems[i]
|
||||
} else if hasI && hasJ {
|
||||
// Both have categories, sort by display order
|
||||
if orderI > orderJ {
|
||||
sortedItems[i], sortedItems[j] = sortedItems[j], sortedItems[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
csvWriter.Flush()
|
||||
if err := csvWriter.Error(); err != nil {
|
||||
return fmt.Errorf("csv writer error: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ToCSVBytes is a backward-compatible wrapper that returns CSV data as bytes.
|
||||
func (s *ExportService) ToCSVBytes(data *ProjectExportData) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
if err := s.ToCSV(&buf, data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (s *ExportService) ProjectToPricingExportData(configs []models.Configuration, opts ProjectPricingExportOptions) (*ProjectPricingExportData, error) {
|
||||
sortedConfigs := make([]models.Configuration, len(configs))
|
||||
copy(sortedConfigs, configs)
|
||||
sort.Slice(sortedConfigs, func(i, j int) bool {
|
||||
leftLine := sortedConfigs[i].Line
|
||||
rightLine := sortedConfigs[j].Line
|
||||
|
||||
if leftLine <= 0 {
|
||||
leftLine = int(^uint(0) >> 1)
|
||||
// Items
|
||||
for _, item := range sortedItems {
|
||||
row := []string{
|
||||
item.LotName,
|
||||
item.Description,
|
||||
item.Category,
|
||||
fmt.Sprintf("%d", item.Quantity),
|
||||
fmt.Sprintf("%.2f", item.UnitPrice),
|
||||
fmt.Sprintf("%.2f", item.TotalPrice),
|
||||
}
|
||||
if rightLine <= 0 {
|
||||
rightLine = int(^uint(0) >> 1)
|
||||
}
|
||||
if leftLine != rightLine {
|
||||
return leftLine < rightLine
|
||||
}
|
||||
if !sortedConfigs[i].CreatedAt.Equal(sortedConfigs[j].CreatedAt) {
|
||||
return sortedConfigs[i].CreatedAt.After(sortedConfigs[j].CreatedAt)
|
||||
}
|
||||
return sortedConfigs[i].UUID > sortedConfigs[j].UUID
|
||||
})
|
||||
|
||||
blocks := make([]ProjectPricingExportConfig, 0, len(sortedConfigs))
|
||||
for i := range sortedConfigs {
|
||||
block, err := s.buildPricingExportBlock(&sortedConfigs[i], opts)
|
||||
if err != nil {
|
||||
if err := w.Write(row); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blocks = append(blocks, block)
|
||||
}
|
||||
|
||||
return &ProjectPricingExportData{
|
||||
Configs: blocks,
|
||||
CreatedAt: time.Now(),
|
||||
}, nil
|
||||
// Total row
|
||||
if err := w.Write([]string{"", "", "", "", "ИТОГО:", fmt.Sprintf("%.2f", data.Total)}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w.Flush()
|
||||
return buf.Bytes(), w.Error()
|
||||
}
|
||||
|
||||
func (s *ExportService) ToPricingCSV(w io.Writer, data *ProjectPricingExportData, opts ProjectPricingExportOptions) error {
|
||||
if _, err := w.Write([]byte{0xEF, 0xBB, 0xBF}); err != nil {
|
||||
return fmt.Errorf("failed to write BOM: %w", err)
|
||||
}
|
||||
func (s *ExportService) ConfigToExportData(config *models.Configuration, componentService *ComponentService) *ExportData {
|
||||
items := make([]ExportItem, len(config.Items))
|
||||
var total float64
|
||||
|
||||
csvWriter := csv.NewWriter(w)
|
||||
csvWriter.Comma = ';'
|
||||
defer csvWriter.Flush()
|
||||
|
||||
headers := pricingCSVHeaders(opts)
|
||||
if err := csvWriter.Write(headers); err != nil {
|
||||
return fmt.Errorf("failed to write pricing header: %w", err)
|
||||
}
|
||||
|
||||
writeRows := opts.IncludeLOT || opts.IncludeBOM
|
||||
for _, cfg := range data.Configs {
|
||||
if err := csvWriter.Write(pricingConfigSummaryRow(cfg, opts)); err != nil {
|
||||
return fmt.Errorf("failed to write config summary row: %w", err)
|
||||
}
|
||||
if writeRows {
|
||||
for _, row := range cfg.Rows {
|
||||
if err := csvWriter.Write(pricingCSVRow(row, opts)); err != nil {
|
||||
return fmt.Errorf("failed to write pricing row: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
csvWriter.Flush()
|
||||
if err := csvWriter.Error(); err != nil {
|
||||
return fmt.Errorf("csv writer error: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConfigToExportData converts a single configuration into ProjectExportData.
|
||||
func (s *ExportService) ConfigToExportData(cfg *models.Configuration) *ProjectExportData {
|
||||
block := s.buildExportBlock(cfg)
|
||||
return &ProjectExportData{
|
||||
Configs: []ConfigExportBlock{block},
|
||||
CreatedAt: cfg.CreatedAt,
|
||||
}
|
||||
}
|
||||
|
||||
// ProjectToExportData converts multiple configurations into ProjectExportData.
|
||||
func (s *ExportService) ProjectToExportData(configs []models.Configuration) *ProjectExportData {
|
||||
sortedConfigs := make([]models.Configuration, len(configs))
|
||||
copy(sortedConfigs, configs)
|
||||
sort.Slice(sortedConfigs, func(i, j int) bool {
|
||||
leftLine := sortedConfigs[i].Line
|
||||
rightLine := sortedConfigs[j].Line
|
||||
|
||||
if leftLine <= 0 {
|
||||
leftLine = int(^uint(0) >> 1)
|
||||
}
|
||||
if rightLine <= 0 {
|
||||
rightLine = int(^uint(0) >> 1)
|
||||
}
|
||||
if leftLine != rightLine {
|
||||
return leftLine < rightLine
|
||||
}
|
||||
if !sortedConfigs[i].CreatedAt.Equal(sortedConfigs[j].CreatedAt) {
|
||||
return sortedConfigs[i].CreatedAt.After(sortedConfigs[j].CreatedAt)
|
||||
}
|
||||
return sortedConfigs[i].UUID > sortedConfigs[j].UUID
|
||||
})
|
||||
|
||||
blocks := make([]ConfigExportBlock, 0, len(configs))
|
||||
for i := range sortedConfigs {
|
||||
blocks = append(blocks, s.buildExportBlock(&sortedConfigs[i]))
|
||||
}
|
||||
return &ProjectExportData{
|
||||
Configs: blocks,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ExportService) buildExportBlock(cfg *models.Configuration) ConfigExportBlock {
|
||||
// Batch-fetch categories from local data (pricelist items → local_components fallback)
|
||||
lotNames := make([]string, len(cfg.Items))
|
||||
for i, item := range cfg.Items {
|
||||
lotNames[i] = item.LotName
|
||||
}
|
||||
categories := s.resolveCategories(cfg.PricelistID, lotNames)
|
||||
|
||||
items := make([]ExportItem, len(cfg.Items))
|
||||
var unitTotal float64
|
||||
|
||||
for i, item := range cfg.Items {
|
||||
for i, item := range config.Items {
|
||||
itemTotal := item.UnitPrice * float64(item.Quantity)
|
||||
items[i] = ExportItem{
|
||||
LotName: item.LotName,
|
||||
Category: categories[item.LotName],
|
||||
Quantity: item.Quantity,
|
||||
UnitPrice: item.UnitPrice,
|
||||
TotalPrice: itemTotal,
|
||||
}
|
||||
unitTotal += itemTotal
|
||||
}
|
||||
|
||||
serverCount := cfg.ServerCount
|
||||
if serverCount < 1 {
|
||||
serverCount = 1
|
||||
}
|
||||
|
||||
return ConfigExportBlock{
|
||||
Article: cfg.Article,
|
||||
Line: cfg.Line,
|
||||
ServerCount: serverCount,
|
||||
UnitPrice: unitTotal,
|
||||
Items: items,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ExportService) buildPricingExportBlock(cfg *models.Configuration, opts ProjectPricingExportOptions) (ProjectPricingExportConfig, error) {
|
||||
block := ProjectPricingExportConfig{
|
||||
Name: cfg.Name,
|
||||
Article: cfg.Article,
|
||||
Line: cfg.Line,
|
||||
ServerCount: exportPositiveInt(cfg.ServerCount, 1),
|
||||
Rows: make([]ProjectPricingExportRow, 0),
|
||||
}
|
||||
if s.localDB == nil {
|
||||
for _, item := range cfg.Items {
|
||||
block.Rows = append(block.Rows, ProjectPricingExportRow{
|
||||
LotDisplay: item.LotName,
|
||||
VendorPN: "—",
|
||||
Quantity: item.Quantity,
|
||||
Estimate: floatPtr(item.UnitPrice * float64(item.Quantity)),
|
||||
})
|
||||
}
|
||||
return block, nil
|
||||
}
|
||||
|
||||
localCfg, err := s.localDB.GetConfigurationByUUID(cfg.UUID)
|
||||
if err != nil {
|
||||
localCfg = nil
|
||||
}
|
||||
|
||||
priceMap := s.resolvePricingTotals(cfg, localCfg, opts)
|
||||
componentDescriptions := s.resolveLotDescriptions(cfg, localCfg)
|
||||
if opts.IncludeBOM && localCfg != nil && len(localCfg.VendorSpec) > 0 {
|
||||
coveredLots := make(map[string]struct{})
|
||||
for _, row := range localCfg.VendorSpec {
|
||||
rowMappings := normalizeLotMappings(row.LotMappings)
|
||||
for _, mapping := range rowMappings {
|
||||
coveredLots[mapping.LotName] = struct{}{}
|
||||
}
|
||||
|
||||
description := strings.TrimSpace(row.Description)
|
||||
if description == "" && len(rowMappings) > 0 {
|
||||
description = componentDescriptions[rowMappings[0].LotName]
|
||||
}
|
||||
|
||||
pricingRow := ProjectPricingExportRow{
|
||||
LotDisplay: formatLotDisplay(rowMappings),
|
||||
VendorPN: row.VendorPartnumber,
|
||||
Description: description,
|
||||
Quantity: exportPositiveInt(row.Quantity, 1),
|
||||
BOMTotal: vendorRowTotal(row),
|
||||
Estimate: computeMappingTotal(priceMap, rowMappings, row.Quantity, func(p pricingLevels) *float64 { return p.Estimate }),
|
||||
Stock: computeMappingTotal(priceMap, rowMappings, row.Quantity, func(p pricingLevels) *float64 { return p.Stock }),
|
||||
Competitor: computeMappingTotal(priceMap, rowMappings, row.Quantity, func(p pricingLevels) *float64 { return p.Competitor }),
|
||||
}
|
||||
block.Rows = append(block.Rows, pricingRow)
|
||||
}
|
||||
|
||||
for _, item := range cfg.Items {
|
||||
if item.LotName == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := coveredLots[item.LotName]; ok {
|
||||
continue
|
||||
}
|
||||
estimate := estimateOnlyTotal(priceMap[item.LotName].Estimate, item.UnitPrice, item.Quantity)
|
||||
block.Rows = append(block.Rows, ProjectPricingExportRow{
|
||||
LotDisplay: item.LotName,
|
||||
VendorPN: "—",
|
||||
Description: componentDescriptions[item.LotName],
|
||||
Quantity: exportPositiveInt(item.Quantity, 1),
|
||||
Estimate: estimate,
|
||||
Stock: totalForUnitPrice(priceMap[item.LotName].Stock, item.Quantity),
|
||||
Competitor: totalForUnitPrice(priceMap[item.LotName].Competitor, item.Quantity),
|
||||
})
|
||||
}
|
||||
if opts.isDDP() {
|
||||
applyDDPMarkup(block.Rows, opts.saleMarkupFactor())
|
||||
}
|
||||
return block, nil
|
||||
}
|
||||
|
||||
for _, item := range cfg.Items {
|
||||
if item.LotName == "" {
|
||||
continue
|
||||
}
|
||||
estimate := estimateOnlyTotal(priceMap[item.LotName].Estimate, item.UnitPrice, item.Quantity)
|
||||
block.Rows = append(block.Rows, ProjectPricingExportRow{
|
||||
LotDisplay: item.LotName,
|
||||
VendorPN: "—",
|
||||
Description: componentDescriptions[item.LotName],
|
||||
Quantity: exportPositiveInt(item.Quantity, 1),
|
||||
Estimate: estimate,
|
||||
Stock: totalForUnitPrice(priceMap[item.LotName].Stock, item.Quantity),
|
||||
Competitor: totalForUnitPrice(priceMap[item.LotName].Competitor, item.Quantity),
|
||||
})
|
||||
}
|
||||
|
||||
if opts.isDDP() {
|
||||
applyDDPMarkup(block.Rows, opts.saleMarkupFactor())
|
||||
}
|
||||
|
||||
return block, nil
|
||||
}
|
||||
|
||||
func applyDDPMarkup(rows []ProjectPricingExportRow, factor float64) {
|
||||
for i := range rows {
|
||||
rows[i].Estimate = scaleFloatPtr(rows[i].Estimate, factor)
|
||||
rows[i].Stock = scaleFloatPtr(rows[i].Stock, factor)
|
||||
rows[i].Competitor = scaleFloatPtr(rows[i].Competitor, factor)
|
||||
}
|
||||
}
|
||||
|
||||
func scaleFloatPtr(v *float64, factor float64) *float64 {
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
result := *v * factor
|
||||
return &result
|
||||
}
|
||||
|
||||
// resolveCategories returns lot_name → category map.
|
||||
// Primary source: pricelist items (lot_category). Fallback: local_components table.
|
||||
func (s *ExportService) resolveCategories(pricelistID *uint, lotNames []string) map[string]string {
|
||||
if len(lotNames) == 0 || s.localDB == nil {
|
||||
return map[string]string{}
|
||||
}
|
||||
|
||||
categories := make(map[string]string, len(lotNames))
|
||||
|
||||
// Primary: pricelist items
|
||||
if pricelistID != nil && *pricelistID > 0 {
|
||||
if cats, err := s.localDB.GetLocalLotCategoriesByServerPricelistID(*pricelistID, lotNames); err == nil {
|
||||
for lot, cat := range cats {
|
||||
if strings.TrimSpace(cat) != "" {
|
||||
categories[lot] = cat
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: local_components for any still missing
|
||||
var missing []string
|
||||
for _, lot := range lotNames {
|
||||
if categories[lot] == "" {
|
||||
missing = append(missing, lot)
|
||||
}
|
||||
}
|
||||
if len(missing) > 0 {
|
||||
if fallback, err := s.localDB.GetLocalComponentCategoriesByLotNames(missing); err == nil {
|
||||
for lot, cat := range fallback {
|
||||
if strings.TrimSpace(cat) != "" {
|
||||
categories[lot] = cat
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return categories
|
||||
}
|
||||
|
||||
// sortItemsByCategory sorts items by category display order (items without category go to the end).
|
||||
func sortItemsByCategory(items []ExportItem, categoryOrder map[string]int) {
|
||||
for i := 0; i < len(items)-1; i++ {
|
||||
for j := i + 1; j < len(items); j++ {
|
||||
orderI, hasI := categoryOrder[items[i].Category]
|
||||
orderJ, hasJ := categoryOrder[items[j].Category]
|
||||
|
||||
if !hasI && hasJ {
|
||||
items[i], items[j] = items[j], items[i]
|
||||
} else if hasI && hasJ && orderI > orderJ {
|
||||
items[i], items[j] = items[j], items[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type pricingLevels struct {
|
||||
Estimate *float64
|
||||
Stock *float64
|
||||
Competitor *float64
|
||||
}
|
||||
|
||||
func (s *ExportService) resolvePricingTotals(cfg *models.Configuration, localCfg *localdb.LocalConfiguration, opts ProjectPricingExportOptions) map[string]pricingLevels {
|
||||
result := map[string]pricingLevels{}
|
||||
lots := collectPricingLots(cfg, localCfg, opts.IncludeBOM)
|
||||
if len(lots) == 0 || s.localDB == nil {
|
||||
return result
|
||||
}
|
||||
|
||||
estimateID := cfg.PricelistID
|
||||
if estimateID == nil || *estimateID == 0 {
|
||||
if latest, err := s.localDB.GetLatestLocalPricelistBySource("estimate"); err == nil && latest != nil {
|
||||
estimateID = &latest.ServerID
|
||||
}
|
||||
}
|
||||
|
||||
var warehouseID *uint
|
||||
var competitorID *uint
|
||||
if localCfg != nil {
|
||||
warehouseID = localCfg.WarehousePricelistID
|
||||
competitorID = localCfg.CompetitorPricelistID
|
||||
}
|
||||
if warehouseID == nil || *warehouseID == 0 {
|
||||
if latest, err := s.localDB.GetLatestLocalPricelistBySource("warehouse"); err == nil && latest != nil {
|
||||
warehouseID = &latest.ServerID
|
||||
}
|
||||
}
|
||||
if competitorID == nil || *competitorID == 0 {
|
||||
if latest, err := s.localDB.GetLatestLocalPricelistBySource("competitor"); err == nil && latest != nil {
|
||||
competitorID = &latest.ServerID
|
||||
}
|
||||
}
|
||||
|
||||
for _, lot := range lots {
|
||||
level := pricingLevels{}
|
||||
level.Estimate = s.lookupPricePointer(estimateID, lot)
|
||||
level.Stock = s.lookupPricePointer(warehouseID, lot)
|
||||
level.Competitor = s.lookupPricePointer(competitorID, lot)
|
||||
result[lot] = level
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (s *ExportService) lookupPricePointer(serverPricelistID *uint, lotName string) *float64 {
|
||||
if s.localDB == nil || serverPricelistID == nil || *serverPricelistID == 0 || strings.TrimSpace(lotName) == "" {
|
||||
return nil
|
||||
}
|
||||
localPL, err := s.localDB.GetLocalPricelistByServerID(*serverPricelistID)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
price, err := s.localDB.GetLocalPriceForLot(localPL.ID, lotName)
|
||||
if err != nil || price <= 0 {
|
||||
return nil
|
||||
}
|
||||
return floatPtr(price)
|
||||
}
|
||||
|
||||
func (s *ExportService) resolveLotDescriptions(cfg *models.Configuration, localCfg *localdb.LocalConfiguration) map[string]string {
|
||||
lots := collectPricingLots(cfg, localCfg, true)
|
||||
result := make(map[string]string, len(lots))
|
||||
if s.localDB == nil {
|
||||
return result
|
||||
}
|
||||
for _, lot := range lots {
|
||||
component, err := s.localDB.GetLocalComponent(lot)
|
||||
// Получаем информацию о компоненте для заполнения категории
|
||||
componentView, err := componentService.GetByLotName(item.LotName)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
result[lot] = component.LotDescription
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func collectPricingLots(cfg *models.Configuration, localCfg *localdb.LocalConfiguration, includeBOM bool) []string {
|
||||
seen := map[string]struct{}{}
|
||||
out := make([]string, 0)
|
||||
if includeBOM && localCfg != nil {
|
||||
for _, row := range localCfg.VendorSpec {
|
||||
for _, mapping := range normalizeLotMappings(row.LotMappings) {
|
||||
if _, ok := seen[mapping.LotName]; ok {
|
||||
continue
|
||||
}
|
||||
seen[mapping.LotName] = struct{}{}
|
||||
out = append(out, mapping.LotName)
|
||||
// Если не удалось получить информацию о компоненте, используем только основные данные
|
||||
items[i] = ExportItem{
|
||||
LotName: item.LotName,
|
||||
Quantity: item.Quantity,
|
||||
UnitPrice: item.UnitPrice,
|
||||
TotalPrice: itemTotal,
|
||||
}
|
||||
} else {
|
||||
items[i] = ExportItem{
|
||||
LotName: item.LotName,
|
||||
Description: componentView.Description,
|
||||
Category: componentView.Category,
|
||||
Quantity: item.Quantity,
|
||||
UnitPrice: item.UnitPrice,
|
||||
TotalPrice: itemTotal,
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, item := range cfg.Items {
|
||||
lot := strings.TrimSpace(item.LotName)
|
||||
if lot == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[lot]; ok {
|
||||
continue
|
||||
}
|
||||
seen[lot] = struct{}{}
|
||||
out = append(out, lot)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func normalizeLotMappings(mappings []localdb.VendorSpecLotMapping) []localdb.VendorSpecLotMapping {
|
||||
if len(mappings) == 0 {
|
||||
return nil
|
||||
}
|
||||
out := make([]localdb.VendorSpecLotMapping, 0, len(mappings))
|
||||
for _, mapping := range mappings {
|
||||
lot := strings.TrimSpace(mapping.LotName)
|
||||
if lot == "" {
|
||||
continue
|
||||
}
|
||||
qty := mapping.QuantityPerPN
|
||||
if qty < 1 {
|
||||
qty = 1
|
||||
}
|
||||
out = append(out, localdb.VendorSpecLotMapping{
|
||||
LotName: lot,
|
||||
QuantityPerPN: qty,
|
||||
})
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func vendorRowTotal(row localdb.VendorSpecItem) *float64 {
|
||||
if row.TotalPrice != nil {
|
||||
return floatPtr(*row.TotalPrice)
|
||||
}
|
||||
if row.UnitPrice == nil {
|
||||
return nil
|
||||
}
|
||||
return floatPtr(*row.UnitPrice * float64(exportPositiveInt(row.Quantity, 1)))
|
||||
}
|
||||
|
||||
func computeMappingTotal(priceMap map[string]pricingLevels, mappings []localdb.VendorSpecLotMapping, pnQty int, selector func(pricingLevels) *float64) *float64 {
|
||||
if len(mappings) == 0 {
|
||||
return nil
|
||||
}
|
||||
total := 0.0
|
||||
hasValue := false
|
||||
qty := exportPositiveInt(pnQty, 1)
|
||||
for _, mapping := range mappings {
|
||||
price := selector(priceMap[mapping.LotName])
|
||||
if price == nil || *price <= 0 {
|
||||
continue
|
||||
}
|
||||
total += *price * float64(qty*mapping.QuantityPerPN)
|
||||
hasValue = true
|
||||
}
|
||||
if !hasValue {
|
||||
return nil
|
||||
}
|
||||
return floatPtr(total)
|
||||
}
|
||||
|
||||
func totalForUnitPrice(unitPrice *float64, quantity int) *float64 {
|
||||
if unitPrice == nil || *unitPrice <= 0 {
|
||||
return nil
|
||||
}
|
||||
total := *unitPrice * float64(exportPositiveInt(quantity, 1))
|
||||
return &total
|
||||
}
|
||||
|
||||
func estimateOnlyTotal(estimatePrice *float64, fallbackUnitPrice float64, quantity int) *float64 {
|
||||
if estimatePrice != nil && *estimatePrice > 0 {
|
||||
return totalForUnitPrice(estimatePrice, quantity)
|
||||
}
|
||||
if fallbackUnitPrice <= 0 {
|
||||
return nil
|
||||
}
|
||||
total := fallbackUnitPrice * float64(maxInt(quantity, 1))
|
||||
return &total
|
||||
}
|
||||
|
||||
func pricingCSVHeaders(opts ProjectPricingExportOptions) []string {
|
||||
headers := make([]string, 0, 8)
|
||||
headers = append(headers, "Line Item")
|
||||
if opts.IncludeLOT {
|
||||
headers = append(headers, "LOT")
|
||||
}
|
||||
headers = append(headers, "PN вендора", "Описание", "Кол-во")
|
||||
if opts.IncludeBOM {
|
||||
headers = append(headers, "BOM")
|
||||
}
|
||||
if opts.IncludeEstimate {
|
||||
headers = append(headers, "Estimate")
|
||||
}
|
||||
if opts.IncludeStock {
|
||||
headers = append(headers, "Stock")
|
||||
}
|
||||
if opts.IncludeCompetitor {
|
||||
headers = append(headers, "Конкуренты")
|
||||
}
|
||||
return headers
|
||||
}
|
||||
|
||||
func pricingCSVRow(row ProjectPricingExportRow, opts ProjectPricingExportOptions) []string {
|
||||
record := make([]string, 0, 8)
|
||||
record = append(record, "")
|
||||
if opts.IncludeLOT {
|
||||
record = append(record, emptyDash(row.LotDisplay))
|
||||
}
|
||||
record = append(record,
|
||||
emptyDash(row.VendorPN),
|
||||
emptyDash(row.Description),
|
||||
fmt.Sprintf("%d", exportPositiveInt(row.Quantity, 1)),
|
||||
)
|
||||
if opts.IncludeBOM {
|
||||
record = append(record, formatMoneyValue(row.BOMTotal))
|
||||
}
|
||||
if opts.IncludeEstimate {
|
||||
record = append(record, formatMoneyValue(row.Estimate))
|
||||
}
|
||||
if opts.IncludeStock {
|
||||
record = append(record, formatMoneyValue(row.Stock))
|
||||
}
|
||||
if opts.IncludeCompetitor {
|
||||
record = append(record, formatMoneyValue(row.Competitor))
|
||||
}
|
||||
return record
|
||||
}
|
||||
|
||||
func pricingConfigSummaryRow(cfg ProjectPricingExportConfig, opts ProjectPricingExportOptions) []string {
|
||||
record := make([]string, 0, 8)
|
||||
record = append(record, fmt.Sprintf("%d", cfg.Line))
|
||||
if opts.IncludeLOT {
|
||||
record = append(record, "")
|
||||
}
|
||||
record = append(record,
|
||||
emptyDash(cfg.Article),
|
||||
emptyDash(cfg.Name),
|
||||
fmt.Sprintf("%d", exportPositiveInt(cfg.ServerCount, 1)),
|
||||
)
|
||||
if opts.IncludeBOM {
|
||||
record = append(record, formatMoneyValue(sumPricingColumn(cfg.Rows, func(row ProjectPricingExportRow) *float64 { return row.BOMTotal })))
|
||||
}
|
||||
if opts.IncludeEstimate {
|
||||
record = append(record, formatMoneyValue(sumPricingColumn(cfg.Rows, func(row ProjectPricingExportRow) *float64 { return row.Estimate })))
|
||||
}
|
||||
if opts.IncludeStock {
|
||||
record = append(record, formatMoneyValue(sumPricingColumn(cfg.Rows, func(row ProjectPricingExportRow) *float64 { return row.Stock })))
|
||||
}
|
||||
if opts.IncludeCompetitor {
|
||||
record = append(record, formatMoneyValue(sumPricingColumn(cfg.Rows, func(row ProjectPricingExportRow) *float64 { return row.Competitor })))
|
||||
}
|
||||
return record
|
||||
}
|
||||
|
||||
func formatLotDisplay(mappings []localdb.VendorSpecLotMapping) string {
|
||||
switch len(mappings) {
|
||||
case 0:
|
||||
return "н/д"
|
||||
case 1:
|
||||
return mappings[0].LotName
|
||||
default:
|
||||
return fmt.Sprintf("%s +%d", mappings[0].LotName, len(mappings)-1)
|
||||
}
|
||||
}
|
||||
|
||||
func formatMoneyValue(value *float64) string {
|
||||
if value == nil {
|
||||
return "—"
|
||||
}
|
||||
n := math.Round(*value*100) / 100
|
||||
sign := ""
|
||||
if n < 0 {
|
||||
sign = "-"
|
||||
n = -n
|
||||
}
|
||||
whole := int64(n)
|
||||
fraction := int(math.Round((n - float64(whole)) * 100))
|
||||
if fraction == 100 {
|
||||
whole++
|
||||
fraction = 0
|
||||
}
|
||||
return fmt.Sprintf("%s%s,%02d", sign, formatIntWithSpace(whole), fraction)
|
||||
}
|
||||
|
||||
func emptyDash(value string) string {
|
||||
if strings.TrimSpace(value) == "" {
|
||||
return "—"
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func sumPricingColumn(rows []ProjectPricingExportRow, selector func(ProjectPricingExportRow) *float64) *float64 {
|
||||
total := 0.0
|
||||
hasValue := false
|
||||
for _, row := range rows {
|
||||
value := selector(row)
|
||||
if value == nil {
|
||||
continue
|
||||
}
|
||||
total += *value
|
||||
hasValue = true
|
||||
}
|
||||
if !hasValue {
|
||||
return nil
|
||||
}
|
||||
return floatPtr(total)
|
||||
}
|
||||
|
||||
func floatPtr(value float64) *float64 {
|
||||
v := value
|
||||
return &v
|
||||
}
|
||||
|
||||
func exportPositiveInt(value, fallback int) int {
|
||||
if value < 1 {
|
||||
return fallback
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
// formatPriceComma formats a price with comma as decimal separator (e.g., "2074,5").
|
||||
// Trailing zeros after the comma are trimmed, and if the value is an integer, no comma is shown.
|
||||
func formatPriceComma(value float64) string {
|
||||
if value == math.Trunc(value) {
|
||||
return fmt.Sprintf("%.0f", value)
|
||||
}
|
||||
s := fmt.Sprintf("%.2f", value)
|
||||
s = strings.ReplaceAll(s, ".", ",")
|
||||
// Trim trailing zero: "2074,50" -> "2074,5"
|
||||
s = strings.TrimRight(s, "0")
|
||||
s = strings.TrimRight(s, ",")
|
||||
return s
|
||||
}
|
||||
|
||||
// formatPriceInt formats price as integer (rounded), no decimal.
|
||||
func formatPriceInt(value float64) string {
|
||||
return fmt.Sprintf("%.0f", math.Round(value))
|
||||
}
|
||||
|
||||
// formatPriceWithSpace formats a price as an integer with space as thousands separator (e.g., "104 700").
|
||||
func formatPriceWithSpace(value float64) string {
|
||||
intVal := int64(math.Round(value))
|
||||
if intVal < 0 {
|
||||
return "-" + formatIntWithSpace(-intVal)
|
||||
}
|
||||
return formatIntWithSpace(intVal)
|
||||
}
|
||||
|
||||
func formatIntWithSpace(n int64) string {
|
||||
s := fmt.Sprintf("%d", n)
|
||||
if len(s) <= 3 {
|
||||
return s
|
||||
total += itemTotal
|
||||
}
|
||||
|
||||
var result strings.Builder
|
||||
remainder := len(s) % 3
|
||||
if remainder > 0 {
|
||||
result.WriteString(s[:remainder])
|
||||
return &ExportData{
|
||||
Name: config.Name,
|
||||
Items: items,
|
||||
Total: total,
|
||||
Notes: config.Notes,
|
||||
CreatedAt: config.CreatedAt,
|
||||
}
|
||||
for i := remainder; i < len(s); i += 3 {
|
||||
if result.Len() > 0 {
|
||||
result.WriteByte(' ')
|
||||
}
|
||||
result.WriteString(s[i : i+3])
|
||||
}
|
||||
return result.String()
|
||||
}
|
||||
|
||||
@@ -1,563 +0,0 @@
|
||||
package services
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/csv"
|
||||
"io"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.mchus.pro/mchus/quoteforge/internal/config"
|
||||
"git.mchus.pro/mchus/quoteforge/internal/models"
|
||||
)
|
||||
|
||||
func newTestProjectData(items []ExportItem, article string, serverCount int) *ProjectExportData {
|
||||
var unitTotal float64
|
||||
for _, item := range items {
|
||||
unitTotal += item.UnitPrice * float64(item.Quantity)
|
||||
}
|
||||
if serverCount < 1 {
|
||||
serverCount = 1
|
||||
}
|
||||
return &ProjectExportData{
|
||||
Configs: []ConfigExportBlock{
|
||||
{
|
||||
Article: article,
|
||||
ServerCount: serverCount,
|
||||
UnitPrice: unitTotal,
|
||||
Items: items,
|
||||
},
|
||||
},
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
func TestToCSV_UTF8BOM(t *testing.T) {
|
||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
||||
|
||||
data := newTestProjectData([]ExportItem{
|
||||
{
|
||||
LotName: "LOT-001",
|
||||
Category: "CAT",
|
||||
Quantity: 1,
|
||||
UnitPrice: 100.0,
|
||||
TotalPrice: 100.0,
|
||||
},
|
||||
}, "TEST-ARTICLE", 1)
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := svc.ToCSV(&buf, data); err != nil {
|
||||
t.Fatalf("ToCSV failed: %v", err)
|
||||
}
|
||||
|
||||
csvBytes := buf.Bytes()
|
||||
if len(csvBytes) < 3 {
|
||||
t.Fatalf("CSV too short to contain BOM")
|
||||
}
|
||||
|
||||
expectedBOM := []byte{0xEF, 0xBB, 0xBF}
|
||||
actualBOM := csvBytes[:3]
|
||||
if !bytes.Equal(actualBOM, expectedBOM) {
|
||||
t.Errorf("UTF-8 BOM mismatch. Expected %v, got %v", expectedBOM, actualBOM)
|
||||
}
|
||||
}
|
||||
|
||||
func TestToCSV_SemicolonDelimiter(t *testing.T) {
|
||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
||||
|
||||
data := newTestProjectData([]ExportItem{
|
||||
{
|
||||
LotName: "LOT-001",
|
||||
Category: "CAT",
|
||||
Quantity: 2,
|
||||
UnitPrice: 100.50,
|
||||
TotalPrice: 201.00,
|
||||
},
|
||||
}, "TEST-ARTICLE", 1)
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := svc.ToCSV(&buf, data); err != nil {
|
||||
t.Fatalf("ToCSV failed: %v", err)
|
||||
}
|
||||
|
||||
csvBytes := buf.Bytes()
|
||||
reader := csv.NewReader(bytes.NewReader(csvBytes[3:]))
|
||||
reader.Comma = ';'
|
||||
|
||||
// Read header
|
||||
header, err := reader.Read()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read header: %v", err)
|
||||
}
|
||||
|
||||
if len(header) != 8 {
|
||||
t.Errorf("Expected 8 columns, got %d", len(header))
|
||||
}
|
||||
|
||||
expectedHeader := []string{"Line", "Type", "p/n", "Description", "Qty (1 pcs.)", "Qty (total)", "Price (1 pcs.)", "Price (total)"}
|
||||
for i, col := range expectedHeader {
|
||||
if i < len(header) && header[i] != col {
|
||||
t.Errorf("Column %d: expected %q, got %q", i, col, header[i])
|
||||
}
|
||||
}
|
||||
|
||||
// Read server row
|
||||
serverRow, err := reader.Read()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read server row: %v", err)
|
||||
}
|
||||
if serverRow[0] != "10" {
|
||||
t.Errorf("Expected line number 10, got %s", serverRow[0])
|
||||
}
|
||||
if serverRow[2] != "TEST-ARTICLE" {
|
||||
t.Errorf("Expected article TEST-ARTICLE, got %s", serverRow[2])
|
||||
}
|
||||
|
||||
// Read component row
|
||||
itemRow, err := reader.Read()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read item row: %v", err)
|
||||
}
|
||||
if itemRow[2] != "LOT-001" {
|
||||
t.Errorf("Lot name mismatch: expected LOT-001, got %s", itemRow[2])
|
||||
}
|
||||
if itemRow[4] != "2" {
|
||||
t.Errorf("Quantity mismatch: expected 2, got %s", itemRow[4])
|
||||
}
|
||||
if itemRow[6] != "100,5" {
|
||||
t.Errorf("Unit price mismatch: expected 100,5, got %s", itemRow[6])
|
||||
}
|
||||
}
|
||||
|
||||
func TestToCSV_ServerRow(t *testing.T) {
|
||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
||||
|
||||
data := newTestProjectData([]ExportItem{
|
||||
{LotName: "LOT-001", Category: "CAT", Quantity: 1, UnitPrice: 100.0, TotalPrice: 100.0},
|
||||
{LotName: "LOT-002", Category: "CAT", Quantity: 2, UnitPrice: 50.0, TotalPrice: 100.0},
|
||||
}, "DL380-ART", 10)
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := svc.ToCSV(&buf, data); err != nil {
|
||||
t.Fatalf("ToCSV failed: %v", err)
|
||||
}
|
||||
|
||||
csvBytes := buf.Bytes()
|
||||
reader := csv.NewReader(bytes.NewReader(csvBytes[3:]))
|
||||
reader.Comma = ';'
|
||||
|
||||
// Skip header
|
||||
reader.Read()
|
||||
|
||||
// Read server row
|
||||
serverRow, err := reader.Read()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read server row: %v", err)
|
||||
}
|
||||
|
||||
if serverRow[0] != "10" {
|
||||
t.Errorf("Expected line 10, got %s", serverRow[0])
|
||||
}
|
||||
if serverRow[2] != "DL380-ART" {
|
||||
t.Errorf("Expected article DL380-ART, got %s", serverRow[2])
|
||||
}
|
||||
if serverRow[5] != "10" {
|
||||
t.Errorf("Expected server count 10, got %s", serverRow[5])
|
||||
}
|
||||
// UnitPrice = 100 + 100 = 200
|
||||
if serverRow[6] != "200" {
|
||||
t.Errorf("Expected unit price 200, got %s", serverRow[6])
|
||||
}
|
||||
// TotalPrice = 200 * 10 = 2000
|
||||
if serverRow[7] != "2 000" {
|
||||
t.Errorf("Expected total price '2 000', got %q", serverRow[7])
|
||||
}
|
||||
}
|
||||
|
||||
func TestToCSV_CategorySorting(t *testing.T) {
|
||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
||||
|
||||
data := newTestProjectData([]ExportItem{
|
||||
{LotName: "LOT-001", Category: "CAT-A", Quantity: 1, UnitPrice: 100.0, TotalPrice: 100.0},
|
||||
{LotName: "LOT-002", Category: "CAT-C", Quantity: 1, UnitPrice: 100.0, TotalPrice: 100.0},
|
||||
{LotName: "LOT-003", Category: "CAT-B", Quantity: 1, UnitPrice: 100.0, TotalPrice: 100.0},
|
||||
}, "ART", 1)
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := svc.ToCSV(&buf, data); err != nil {
|
||||
t.Fatalf("ToCSV failed: %v", err)
|
||||
}
|
||||
|
||||
csvBytes := buf.Bytes()
|
||||
reader := csv.NewReader(bytes.NewReader(csvBytes[3:]))
|
||||
reader.Comma = ';'
|
||||
|
||||
// Skip header and server row
|
||||
reader.Read()
|
||||
reader.Read()
|
||||
|
||||
// Without category repo, items maintain original order
|
||||
row1, _ := reader.Read()
|
||||
if row1[2] != "LOT-001" {
|
||||
t.Errorf("Expected LOT-001 first, got %s", row1[2])
|
||||
}
|
||||
|
||||
row2, _ := reader.Read()
|
||||
if row2[2] != "LOT-002" {
|
||||
t.Errorf("Expected LOT-002 second, got %s", row2[2])
|
||||
}
|
||||
|
||||
row3, _ := reader.Read()
|
||||
if row3[2] != "LOT-003" {
|
||||
t.Errorf("Expected LOT-003 third, got %s", row3[2])
|
||||
}
|
||||
}
|
||||
|
||||
func TestToCSV_EmptyData(t *testing.T) {
|
||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
||||
|
||||
data := &ProjectExportData{
|
||||
Configs: []ConfigExportBlock{},
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := svc.ToCSV(&buf, data); err != nil {
|
||||
t.Fatalf("ToCSV failed: %v", err)
|
||||
}
|
||||
|
||||
csvBytes := buf.Bytes()
|
||||
reader := csv.NewReader(bytes.NewReader(csvBytes[3:]))
|
||||
reader.Comma = ';'
|
||||
|
||||
header, err := reader.Read()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read header: %v", err)
|
||||
}
|
||||
|
||||
if len(header) != 8 {
|
||||
t.Errorf("Expected 8 columns, got %d", len(header))
|
||||
}
|
||||
|
||||
// No more rows expected
|
||||
_, err = reader.Read()
|
||||
if err != io.EOF {
|
||||
t.Errorf("Expected EOF after header, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestToCSVBytes_BackwardCompat(t *testing.T) {
|
||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
||||
|
||||
data := newTestProjectData([]ExportItem{
|
||||
{LotName: "LOT-001", Category: "CAT", Quantity: 1, UnitPrice: 100.0, TotalPrice: 100.0},
|
||||
}, "ART", 1)
|
||||
|
||||
csvBytes, err := svc.ToCSVBytes(data)
|
||||
if err != nil {
|
||||
t.Fatalf("ToCSVBytes failed: %v", err)
|
||||
}
|
||||
|
||||
if len(csvBytes) < 3 {
|
||||
t.Fatalf("CSV bytes too short")
|
||||
}
|
||||
|
||||
expectedBOM := []byte{0xEF, 0xBB, 0xBF}
|
||||
actualBOM := csvBytes[:3]
|
||||
if !bytes.Equal(actualBOM, expectedBOM) {
|
||||
t.Errorf("UTF-8 BOM mismatch in ToCSVBytes")
|
||||
}
|
||||
}
|
||||
|
||||
func TestToCSV_WriterError(t *testing.T) {
|
||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
||||
|
||||
data := newTestProjectData([]ExportItem{
|
||||
{LotName: "LOT-001", Category: "CAT", Quantity: 1, UnitPrice: 100.0, TotalPrice: 100.0},
|
||||
}, "ART", 1)
|
||||
|
||||
failingWriter := &failingWriter{}
|
||||
|
||||
if err := svc.ToCSV(failingWriter, data); err == nil {
|
||||
t.Errorf("Expected error from failing writer, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestToCSV_MultipleBlocks(t *testing.T) {
|
||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
||||
|
||||
data := &ProjectExportData{
|
||||
Configs: []ConfigExportBlock{
|
||||
{
|
||||
Article: "ART-1",
|
||||
ServerCount: 2,
|
||||
UnitPrice: 500.0,
|
||||
Items: []ExportItem{
|
||||
{LotName: "LOT-A", Category: "CPU", Quantity: 1, UnitPrice: 500.0, TotalPrice: 500.0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Article: "ART-2",
|
||||
ServerCount: 3,
|
||||
UnitPrice: 1000.0,
|
||||
Items: []ExportItem{
|
||||
{LotName: "LOT-B", Category: "MEM", Quantity: 2, UnitPrice: 500.0, TotalPrice: 1000.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := svc.ToCSV(&buf, data); err != nil {
|
||||
t.Fatalf("ToCSV failed: %v", err)
|
||||
}
|
||||
|
||||
csvBytes := buf.Bytes()
|
||||
reader := csv.NewReader(bytes.NewReader(csvBytes[3:]))
|
||||
reader.Comma = ';'
|
||||
reader.FieldsPerRecord = -1 // allow variable fields
|
||||
|
||||
// Header
|
||||
reader.Read()
|
||||
|
||||
// Block 1: server row
|
||||
srv1, _ := reader.Read()
|
||||
if srv1[0] != "10" {
|
||||
t.Errorf("Block 1 line: expected 10, got %s", srv1[0])
|
||||
}
|
||||
if srv1[7] != "1 000" {
|
||||
t.Errorf("Block 1 total: expected '1 000', got %q", srv1[7])
|
||||
}
|
||||
|
||||
// Block 1: component row
|
||||
comp1, _ := reader.Read()
|
||||
if comp1[2] != "LOT-A" {
|
||||
t.Errorf("Block 1 component: expected LOT-A, got %s", comp1[2])
|
||||
}
|
||||
|
||||
// Separator row
|
||||
sep, _ := reader.Read()
|
||||
allEmpty := true
|
||||
for _, v := range sep {
|
||||
if v != "" {
|
||||
allEmpty = false
|
||||
}
|
||||
}
|
||||
if !allEmpty {
|
||||
t.Errorf("Expected empty separator row, got %v", sep)
|
||||
}
|
||||
|
||||
// Block 2: server row
|
||||
srv2, _ := reader.Read()
|
||||
if srv2[0] != "20" {
|
||||
t.Errorf("Block 2 line: expected 20, got %s", srv2[0])
|
||||
}
|
||||
if srv2[7] != "3 000" {
|
||||
t.Errorf("Block 2 total: expected '3 000', got %q", srv2[7])
|
||||
}
|
||||
}
|
||||
|
||||
func TestProjectToExportData_SortsByLine(t *testing.T) {
|
||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
||||
|
||||
configs := []models.Configuration{
|
||||
{
|
||||
UUID: "cfg-1",
|
||||
Line: 30,
|
||||
Article: "ART-30",
|
||||
ServerCount: 1,
|
||||
Items: models.ConfigItems{{LotName: "LOT-30", Quantity: 1, UnitPrice: 300}},
|
||||
CreatedAt: time.Now().Add(-1 * time.Hour),
|
||||
},
|
||||
{
|
||||
UUID: "cfg-2",
|
||||
Line: 10,
|
||||
Article: "ART-10",
|
||||
ServerCount: 1,
|
||||
Items: models.ConfigItems{{LotName: "LOT-10", Quantity: 1, UnitPrice: 100}},
|
||||
CreatedAt: time.Now().Add(-2 * time.Hour),
|
||||
},
|
||||
{
|
||||
UUID: "cfg-3",
|
||||
Line: 20,
|
||||
Article: "ART-20",
|
||||
ServerCount: 1,
|
||||
Items: models.ConfigItems{{LotName: "LOT-20", Quantity: 1, UnitPrice: 200}},
|
||||
CreatedAt: time.Now().Add(-3 * time.Hour),
|
||||
},
|
||||
}
|
||||
|
||||
data := svc.ProjectToExportData(configs)
|
||||
if len(data.Configs) != 3 {
|
||||
t.Fatalf("expected 3 blocks, got %d", len(data.Configs))
|
||||
}
|
||||
if data.Configs[0].Article != "ART-10" || data.Configs[0].Line != 10 {
|
||||
t.Fatalf("first block must be line 10, got article=%s line=%d", data.Configs[0].Article, data.Configs[0].Line)
|
||||
}
|
||||
if data.Configs[1].Article != "ART-20" || data.Configs[1].Line != 20 {
|
||||
t.Fatalf("second block must be line 20, got article=%s line=%d", data.Configs[1].Article, data.Configs[1].Line)
|
||||
}
|
||||
if data.Configs[2].Article != "ART-30" || data.Configs[2].Line != 30 {
|
||||
t.Fatalf("third block must be line 30, got article=%s line=%d", data.Configs[2].Article, data.Configs[2].Line)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatPriceWithSpace(t *testing.T) {
|
||||
tests := []struct {
|
||||
input float64
|
||||
expected string
|
||||
}{
|
||||
{0, "0"},
|
||||
{100, "100"},
|
||||
{1000, "1 000"},
|
||||
{10470, "10 470"},
|
||||
{104700, "104 700"},
|
||||
{1000000, "1 000 000"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
result := formatPriceWithSpace(tt.input)
|
||||
if result != tt.expected {
|
||||
t.Errorf("formatPriceWithSpace(%v): expected %q, got %q", tt.input, tt.expected, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatPriceComma(t *testing.T) {
|
||||
tests := []struct {
|
||||
input float64
|
||||
expected string
|
||||
}{
|
||||
{100.0, "100"},
|
||||
{2074.5, "2074,5"},
|
||||
{100.50, "100,5"},
|
||||
{99.99, "99,99"},
|
||||
{0, "0"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
result := formatPriceComma(tt.input)
|
||||
if result != tt.expected {
|
||||
t.Errorf("formatPriceComma(%v): expected %q, got %q", tt.input, tt.expected, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestToPricingCSV_UsesSelectedColumns(t *testing.T) {
|
||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
||||
data := &ProjectPricingExportData{
|
||||
Configs: []ProjectPricingExportConfig{
|
||||
{
|
||||
Name: "Config A",
|
||||
Article: "ART-1",
|
||||
Line: 10,
|
||||
ServerCount: 2,
|
||||
Rows: []ProjectPricingExportRow{
|
||||
{
|
||||
LotDisplay: "LOT_A +1",
|
||||
VendorPN: "PN-001",
|
||||
Description: "Bundle row",
|
||||
Quantity: 2,
|
||||
BOMTotal: floatPtr(2400.5),
|
||||
Estimate: floatPtr(2000),
|
||||
Stock: floatPtr(1800.25),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
opts := ProjectPricingExportOptions{
|
||||
IncludeLOT: true,
|
||||
IncludeBOM: true,
|
||||
IncludeEstimate: true,
|
||||
IncludeStock: true,
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := svc.ToPricingCSV(&buf, data, opts); err != nil {
|
||||
t.Fatalf("ToPricingCSV failed: %v", err)
|
||||
}
|
||||
|
||||
reader := csv.NewReader(bytes.NewReader(buf.Bytes()[3:]))
|
||||
reader.Comma = ';'
|
||||
reader.FieldsPerRecord = -1
|
||||
|
||||
header, err := reader.Read()
|
||||
if err != nil {
|
||||
t.Fatalf("read header row: %v", err)
|
||||
}
|
||||
expectedHeader := []string{"Line Item", "LOT", "PN вендора", "Описание", "Кол-во", "BOM", "Estimate", "Stock"}
|
||||
for i, want := range expectedHeader {
|
||||
if header[i] != want {
|
||||
t.Fatalf("header[%d]: expected %q, got %q", i, want, header[i])
|
||||
}
|
||||
}
|
||||
|
||||
summary, err := reader.Read()
|
||||
if err != nil {
|
||||
t.Fatalf("read summary row: %v", err)
|
||||
}
|
||||
expectedSummary := []string{"10", "", "", "Config A", "2", "2 400,50", "2 000,00", "1 800,25"}
|
||||
for i, want := range expectedSummary {
|
||||
if summary[i] != want {
|
||||
t.Fatalf("summary[%d]: expected %q, got %q", i, want, summary[i])
|
||||
}
|
||||
}
|
||||
|
||||
row, err := reader.Read()
|
||||
if err != nil {
|
||||
t.Fatalf("read data row: %v", err)
|
||||
}
|
||||
expectedRow := []string{"", "LOT_A +1", "PN-001", "Bundle row", "2", "2 400,50", "2 000,00", "1 800,25"}
|
||||
for i, want := range expectedRow {
|
||||
if row[i] != want {
|
||||
t.Fatalf("row[%d]: expected %q, got %q", i, want, row[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProjectToPricingExportData_UsesCartRowsWithoutBOM(t *testing.T) {
|
||||
svc := NewExportService(config.ExportConfig{}, nil, nil)
|
||||
configs := []models.Configuration{
|
||||
{
|
||||
UUID: "cfg-1",
|
||||
Name: "Config A",
|
||||
Article: "ART-1",
|
||||
ServerCount: 1,
|
||||
Items: models.ConfigItems{
|
||||
{LotName: "LOT_A", Quantity: 2, UnitPrice: 300},
|
||||
},
|
||||
CreatedAt: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
data, err := svc.ProjectToPricingExportData(configs, ProjectPricingExportOptions{
|
||||
IncludeLOT: true,
|
||||
IncludeEstimate: true,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("ProjectToPricingExportData failed: %v", err)
|
||||
}
|
||||
if len(data.Configs) != 1 || len(data.Configs[0].Rows) != 1 {
|
||||
t.Fatalf("unexpected rows count: %+v", data.Configs)
|
||||
}
|
||||
row := data.Configs[0].Rows[0]
|
||||
if row.LotDisplay != "LOT_A" {
|
||||
t.Fatalf("expected LOT_A, got %q", row.LotDisplay)
|
||||
}
|
||||
if row.VendorPN != "—" {
|
||||
t.Fatalf("expected vendor dash, got %q", row.VendorPN)
|
||||
}
|
||||
if row.Estimate == nil || *row.Estimate != 600 {
|
||||
t.Fatalf("expected estimate total 600, got %+v", row.Estimate)
|
||||
}
|
||||
}
|
||||
|
||||
// failingWriter always returns an error
|
||||
type failingWriter struct{}
|
||||
|
||||
func (fw *failingWriter) Write(p []byte) (int, error) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user