Files
logpile/internal/server/handlers.go
2026-02-25 12:16:31 +03:00

1232 lines
33 KiB
Go
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
package server
import (
"bytes"
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"html/template"
"io"
"net/http"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"git.mchus.pro/mchus/logpile/internal/collector"
"git.mchus.pro/mchus/logpile/internal/exporter"
"git.mchus.pro/mchus/logpile/internal/models"
"git.mchus.pro/mchus/logpile/internal/parser"
)
// handleIndex serves the embedded single-page UI at the site root. Any
// other path gets a 404 so the template is not served for unknown URLs.
func (s *Server) handleIndex(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path != "/" {
		http.NotFound(w, r)
		return
	}
	tmplContent, err := WebFS.ReadFile("templates/index.html")
	if err != nil {
		http.Error(w, "Template not found", http.StatusInternalServerError)
		return
	}
	tmpl, err := template.New("index").Parse(string(tmplContent))
	if err != nil {
		http.Error(w, "Template parse error", http.StatusInternalServerError)
		return
	}
	// Render into a buffer first: executing straight into w would already
	// have sent a 200 and partial HTML by the time an execution error
	// surfaced, making the failure impossible to report. (The original
	// ignored Execute's error entirely.)
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, nil); err != nil {
		http.Error(w, "Template execute error", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	// Headers are sent here; a short write can no longer be reported.
	_, _ = buf.WriteTo(w)
}
// handleUpload accepts a multipart upload in the "archive" form field and
// replaces the server's current analysis state with the parsed content.
// Three payload shapes are recognized, tried in order:
//  1. a raw export bundle — replayed through the original collector/parser;
//  2. a JSON payload — either a raw export package (replayable) or a plain
//     snapshot of a previous analysis;
//  3. anything else — treated as a vendor BMC log archive.
// On success the result, detected vendor and raw export state are stored
// and basic counters are returned to the client as JSON.
func (s *Server) handleUpload(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseMultipartForm(uploadMultipartMaxBytes()); err != nil {
		jsonError(w, "File too large", http.StatusBadRequest)
		return
	}
	file, header, err := r.FormFile("archive")
	if err != nil {
		jsonError(w, "Failed to read file", http.StatusBadRequest)
		return
	}
	defer file.Close()
	// The whole payload is buffered in memory; its size is bounded by the
	// multipart limit applied above.
	payload, err := io.ReadAll(file)
	if err != nil {
		jsonError(w, "Failed to read file", http.StatusBadRequest)
		return
	}
	var (
		result *models.AnalysisResult
		vendor string
	)
	if rawPkg, ok, err := parseRawExportBundle(payload); err != nil {
		jsonError(w, "Failed to parse raw export bundle: "+err.Error(), http.StatusBadRequest)
		return
	} else if ok {
		// Case 1: raw export bundle — rebuild the result by replaying the
		// stored raw data.
		replayed, replayVendor, replayErr := s.reanalyzeRawExportPackage(rawPkg)
		if replayErr != nil {
			jsonError(w, "Failed to reanalyze raw export package: "+replayErr.Error(), http.StatusBadRequest)
			return
		}
		result = replayed
		vendor = replayVendor
		if strings.TrimSpace(vendor) == "" {
			vendor = "snapshot"
		}
		s.SetRawExport(rawPkg)
	} else if looksLikeJSONSnapshot(header.Filename, payload) {
		// Case 2: JSON payload — raw export package or plain snapshot.
		if rawPkg, ok, err := parseRawExportPackage(payload); err != nil {
			jsonError(w, "Failed to parse raw export package: "+err.Error(), http.StatusBadRequest)
			return
		} else if ok {
			replayed, replayVendor, replayErr := s.reanalyzeRawExportPackage(rawPkg)
			if replayErr != nil {
				jsonError(w, "Failed to reanalyze raw export package: "+replayErr.Error(), http.StatusBadRequest)
				return
			}
			result = replayed
			vendor = replayVendor
			if strings.TrimSpace(vendor) == "" {
				vendor = "snapshot"
			}
			s.SetRawExport(rawPkg)
		} else {
			snapshotResult, snapshotErr := parseUploadedSnapshot(payload)
			if snapshotErr != nil {
				jsonError(w, "Failed to parse snapshot: "+snapshotErr.Error(), http.StatusBadRequest)
				return
			}
			result = snapshotResult
			vendor = strings.TrimSpace(snapshotResult.Protocol)
			if vendor == "" {
				vendor = "snapshot"
			}
			s.SetRawExport(newRawExportFromUploadedFile(header.Filename, header.Header.Get("Content-Type"), payload, result))
		}
	} else {
		// Case 3: vendor BMC log archive.
		p := parser.NewBMCParser()
		if err := p.ParseFromReader(bytes.NewReader(payload), header.Filename); err != nil {
			jsonError(w, "Failed to parse archive: "+err.Error(), http.StatusBadRequest)
			return
		}
		result = p.Result()
		applyArchiveSourceMetadata(result)
		vendor = p.DetectedVendor()
		s.SetRawExport(newRawExportFromUploadedFile(header.Filename, header.Header.Get("Content-Type"), payload, result))
	}
	s.SetResult(result)
	s.SetDetectedVendor(vendor)
	jsonResponse(w, map[string]interface{}{
		"status":   "ok",
		"message":  "File uploaded and parsed successfully",
		"filename": header.Filename,
		"vendor":   vendor,
		"stats": map[string]int{
			"events":  len(result.Events),
			"sensors": len(result.Sensors),
			"fru":     len(result.FRU),
		},
	})
}
// uploadMultipartMaxBytes returns the multipart upload size limit in bytes.
// The default is 512 MiB; LOGPILE_UPLOAD_MAX_MB overrides it, clamped to
// [100, 2048] MiB so a typo can neither break uploads nor exhaust memory.
func uploadMultipartMaxBytes() int64 {
	// Large Redfish raw bundles can easily exceed 100 MiB once raw trees and logs
	// are embedded. Keep the default high but bounded for a normal workstation.
	const (
		defMB = 512
		minMB = 100
		maxMB = 2048
	)
	mb := defMB
	if v := strings.TrimSpace(os.Getenv("LOGPILE_UPLOAD_MAX_MB")); v != "" {
		if n, err := strconv.Atoi(v); err == nil {
			// Clamp into the supported range using the Go 1.21+ builtins
			// instead of two hand-rolled comparisons.
			mb = min(max(n, minMB), maxMB)
		}
	}
	return int64(mb) << 20
}
// reanalyzeRawExportPackage rebuilds an AnalysisResult from a previously
// exported raw package. Two source kinds are supported:
//   - "file_bytes": the original upload is stored base64-encoded and is
//     re-parsed as if freshly uploaded;
//   - "live_redfish": the recorded Redfish responses are replayed through
//     the collector, then missing metadata is backfilled.
// Returns the rebuilt result and a vendor/protocol label.
func (s *Server) reanalyzeRawExportPackage(pkg *RawExportPackage) (*models.AnalysisResult, string, error) {
	if pkg == nil {
		return nil, "", fmt.Errorf("empty package")
	}
	switch pkg.Source.Kind {
	case "file_bytes":
		if strings.TrimSpace(pkg.Source.Encoding) != "base64" {
			return nil, "", fmt.Errorf("unsupported file_bytes encoding: %s", pkg.Source.Encoding)
		}
		data, err := base64.StdEncoding.DecodeString(pkg.Source.Data)
		if err != nil {
			return nil, "", fmt.Errorf("decode source.data: %w", err)
		}
		return s.parseUploadedPayload(pkg.Source.Filename, data)
	case "live_redfish":
		if !strings.EqualFold(strings.TrimSpace(pkg.Source.Protocol), "redfish") {
			return nil, "", fmt.Errorf("unsupported live protocol: %s", pkg.Source.Protocol)
		}
		result, err := collector.ReplayRedfishFromRawPayloads(pkg.Source.RawPayloads, nil)
		if err != nil {
			return nil, "", err
		}
		if result != nil {
			// Replayed results may lack metadata that a live collection
			// would have set; fill in sensible defaults without clobbering
			// values the replay produced.
			if strings.TrimSpace(result.Protocol) == "" {
				result.Protocol = "redfish"
			}
			if strings.TrimSpace(result.SourceType) == "" {
				result.SourceType = models.SourceTypeAPI
			}
			if strings.TrimSpace(result.TargetHost) == "" {
				result.TargetHost = strings.TrimSpace(pkg.Source.TargetHost)
			}
			if result.CollectedAt.IsZero() {
				result.CollectedAt = time.Now().UTC()
			}
			if strings.TrimSpace(result.Filename) == "" {
				target := result.TargetHost
				if target == "" {
					target = "snapshot"
				}
				result.Filename = "redfish://" + target
			}
		}
		return result, "redfish", nil
	default:
		return nil, "", fmt.Errorf("unsupported raw export source kind: %s", pkg.Source.Kind)
	}
}
// parseUploadedPayload turns an uploaded file into an analysis result plus
// a vendor label. JSON-looking payloads are decoded as snapshots; anything
// else goes through the BMC archive parser.
func (s *Server) parseUploadedPayload(filename string, payload []byte) (*models.AnalysisResult, string, error) {
	if !looksLikeJSONSnapshot(filename, payload) {
		bmcParser := parser.NewBMCParser()
		if err := bmcParser.ParseFromReader(bytes.NewReader(payload), filename); err != nil {
			return nil, "", err
		}
		parsed := bmcParser.Result()
		applyArchiveSourceMetadata(parsed)
		return parsed, bmcParser.DetectedVendor(), nil
	}
	snapshot, err := parseUploadedSnapshot(payload)
	if err != nil {
		return nil, "", err
	}
	// The snapshot's recorded protocol doubles as the vendor label.
	vendor := strings.TrimSpace(snapshot.Protocol)
	if vendor == "" {
		vendor = "snapshot"
	}
	return snapshot, vendor, nil
}
// handleGetParsers lists the registered log parsers and their metadata.
func (s *Server) handleGetParsers(w http.ResponseWriter, r *http.Request) {
	payload := map[string]interface{}{
		"parsers": parser.ListParsersInfo(),
	}
	jsonResponse(w, payload)
}
// handleGetEvents returns all parsed events sorted newest-first. With no
// loaded result it responds with an empty JSON array.
func (s *Server) handleGetEvents(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	if result == nil {
		jsonResponse(w, []interface{}{})
		return
	}
	// Sort a copy so the stored result keeps its original order.
	events := make([]models.Event, len(result.Events))
	copy(events, result.Events)
	// Stable sort (the original used the unstable sort.Slice): events that
	// share a timestamp keep their source order, making the API output
	// deterministic across requests.
	sort.SliceStable(events, func(i, j int) bool {
		return events[i].Timestamp.After(events[j].Timestamp)
	})
	jsonResponse(w, events)
}
// handleGetSensors returns the sensor readings, augmented with synthetic
// PSU input-voltage sensors derived from hardware data. Responds with an
// empty array when no result is loaded.
func (s *Server) handleGetSensors(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	if result == nil {
		jsonResponse(w, []interface{}{})
		return
	}
	readings := make([]models.SensorReading, 0, len(result.Sensors))
	readings = append(readings, result.Sensors...)
	readings = append(readings, synthesizePSUVoltageSensors(result.Hardware)...)
	jsonResponse(w, readings)
}
// synthesizePSUVoltageSensors derives voltage sensor readings from PSU
// input-voltage measurements, flagging values outside ±10% of the 230V
// nominal as "warn". Supplies without a positive reading are skipped.
func synthesizePSUVoltageSensors(hw *models.HardwareConfig) []models.SensorReading {
	if hw == nil || len(hw.PowerSupply) == 0 {
		return nil
	}
	const (
		nominalV = 230.0
		minV     = nominalV * 0.9 // 207V
		maxV     = nominalV * 1.1 // 253V
	)
	readings := make([]models.SensorReading, 0, len(hw.PowerSupply))
	for _, psu := range hw.PowerSupply {
		if psu.InputVoltage <= 0 {
			// No measurement reported for this supply.
			continue
		}
		slot := strings.TrimSpace(psu.Slot)
		sensorName := "PSU input voltage"
		if slot != "" {
			sensorName = "PSU " + slot + " input voltage"
		}
		state := "ok"
		if psu.InputVoltage < minV || psu.InputVoltage > maxV {
			state = "warn"
		}
		readings = append(readings, models.SensorReading{
			Name:   sensorName,
			Type:   "voltage",
			Value:  psu.InputVoltage,
			Unit:   "V",
			Status: state,
		})
	}
	return readings
}
// handleGetConfig returns collection metadata plus the hardware
// configuration and a human-readable specification. Responds with an empty
// object when nothing is loaded; hardware/specification are empty when the
// result carries no hardware section.
func (s *Server) handleGetConfig(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	if result == nil {
		jsonResponse(w, map[string]any{})
		return
	}
	// Use `any` consistently (this block previously mixed interface{} and any).
	response := map[string]any{
		"source_type":  result.SourceType,
		"protocol":     result.Protocol,
		"target_host":  result.TargetHost,
		"collected_at": result.CollectedAt,
	}
	// Surface Redfish fetch errors so the UI can warn about partial data.
	if result.RawPayloads != nil {
		if fetchErrors, ok := result.RawPayloads["redfish_fetch_errors"]; ok {
			response["redfish_fetch_errors"] = fetchErrors
		}
	}
	if result.Hardware == nil {
		response["hardware"] = map[string]any{}
		response["specification"] = []SpecLine{}
		jsonResponse(w, response)
		return
	}
	devices := canonicalDevices(result.Hardware)
	response["hardware"] = map[string]any{
		"board":    result.Hardware.BoardInfo,
		"firmware": result.Hardware.Firmware,
		"devices":  devices,
	}
	response["specification"] = buildSpecification(result.Hardware)
	jsonResponse(w, response)
}
// SpecLine represents a single line in the specification table shown in
// the UI: a localized device category, a grouped human-readable name, and
// the number of identical units found.
type SpecLine struct {
	Category string `json:"category"`
	Name string `json:"name"`
	Quantity int `json:"quantity"`
}
// canonicalDevices rebuilds and returns the canonical device list for hw.
// NOTE: this is not a pure getter — it overwrites hw.Devices with a freshly
// built list on every call, so all views (config, serials, firmware) see
// the same current data.
func canonicalDevices(hw *models.HardwareConfig) []models.HardwareDevice {
	if hw == nil {
		return nil
	}
	hw.Devices = BuildHardwareDevices(hw)
	return hw.Devices
}
// buildSpecification flattens the hardware inventory into grouped,
// localized specification lines: identical CPUs, installed memory modules,
// drives, PCIe devices and PSUs are each collapsed into one line with a
// quantity.
func buildSpecification(hw *models.HardwareConfig) []SpecLine {
	var spec []SpecLine
	if hw == nil {
		return spec
	}
	devices := canonicalDevices(hw)
	// CPUs - group by model
	cpuGroups := make(map[string]int)
	cpuDetails := make(map[string]models.HardwareDevice)
	for _, cpu := range devices {
		if cpu.Kind != models.DeviceKindCPU {
			continue
		}
		cpuGroups[cpu.Model]++
		cpuDetails[cpu.Model] = cpu
	}
	for model, count := range cpuGroups {
		cpu := cpuDetails[model]
		// NOTE(review): the vendor is hard-coded as "Intel" — confirm how
		// non-Intel CPUs should be labeled before relying on this string.
		name := fmt.Sprintf("Intel %s (%.1fGHz %dC %dW)",
			model,
			float64(cpu.FrequencyMHz)/1000,
			cpu.Cores,
			intFromDetails(cpu.Details, "tdp_w"))
		spec = append(spec, SpecLine{Category: "Процессор", Name: name, Quantity: count})
	}
	// Memory - group by size, type and frequency (only installed modules)
	memGroups := make(map[string]int)
	for _, mem := range devices {
		if mem.Kind != models.DeviceKindMemory {
			continue
		}
		present := mem.Present != nil && *mem.Present
		// Skip empty slots (not present or 0 size)
		if !present || mem.SizeMB == 0 {
			continue
		}
		// Include frequency if available
		key := ""
		currentSpeed := intFromDetails(mem.Details, "current_speed_mhz")
		if currentSpeed > 0 {
			key = fmt.Sprintf("%s %dGB %dMHz", mem.Type, mem.SizeMB/1024, currentSpeed)
		} else {
			key = fmt.Sprintf("%s %dGB", mem.Type, mem.SizeMB/1024)
		}
		memGroups[key]++
	}
	for key, count := range memGroups {
		spec = append(spec, SpecLine{Category: "Память", Name: key, Quantity: count})
	}
	// Storage - group by type and capacity (TB formatting from 1000 GB up)
	storGroups := make(map[string]int)
	for _, stor := range devices {
		if stor.Kind != models.DeviceKindStorage {
			continue
		}
		var key string
		if stor.SizeGB >= 1000 {
			key = fmt.Sprintf("%s %s %.2fTB", stor.Type, stor.Interface, float64(stor.SizeGB)/1000)
		} else {
			key = fmt.Sprintf("%s %s %dGB", stor.Type, stor.Interface, stor.SizeGB)
		}
		storGroups[key]++
	}
	for key, count := range storGroups {
		spec = append(spec, SpecLine{Category: "Накопитель", Name: key, Quantity: count})
	}
	// PCIe devices - group by device class/name and manufacturer
	pcieGroups := make(map[string]int)
	pcieDetails := make(map[string]models.HardwareDevice)
	for _, pcie := range devices {
		if pcie.Kind != models.DeviceKindPCIe && pcie.Kind != models.DeviceKindGPU && pcie.Kind != models.DeviceKindNetwork {
			continue
		}
		// Create unique key from manufacturer + device class/name
		key := pcie.DeviceClass
		if pcie.Manufacturer != "" {
			key = pcie.Manufacturer + " " + pcie.DeviceClass
		}
		if pcie.PartNumber != "" && pcie.PartNumber != pcie.DeviceClass {
			key = key + " (" + pcie.PartNumber + ")"
		}
		pcieGroups[key]++
		pcieDetails[key] = pcie
	}
	for key, count := range pcieGroups {
		pcie := pcieDetails[key]
		category := "PCIe устройство"
		name := key
		// Determine category based on device class or known GPU names
		deviceClass := pcie.DeviceClass
		isGPU := pcie.Kind == models.DeviceKindGPU || isGPUDevice(deviceClass)
		isNetwork := deviceClass == "Network" || strings.Contains(deviceClass, "ConnectX")
		if isGPU {
			category = "Графический процессор"
		} else if isNetwork {
			category = "Сетевой адаптер"
		} else if deviceClass == "NVMe" || deviceClass == "RAID" || deviceClass == "SAS" || deviceClass == "SATA" || deviceClass == "Storage" {
			category = "Контроллер"
		}
		spec = append(spec, SpecLine{Category: category, Name: name, Quantity: count})
	}
	// Power supplies - group by model, falling back to wattage; PSUs with
	// neither are omitted from the specification.
	psuGroups := make(map[string]int)
	for _, psu := range devices {
		if psu.Kind != models.DeviceKindPSU {
			continue
		}
		key := psu.Model
		if key == "" && psu.WattageW > 0 {
			key = fmt.Sprintf("%dW", psu.WattageW)
		}
		if key != "" {
			psuGroups[key]++
		}
	}
	for key, count := range psuGroups {
		spec = append(spec, SpecLine{Category: "Блок питания", Name: key, Quantity: count})
	}
	return spec
}
// handleGetSerials returns serial numbers gathered from FRU records and
// from the canonical hardware device list. Entries whose serial is a
// placeholder ("N/A", "none", "-", ...) are filtered out. Responds with an
// empty array when no result is loaded.
func (s *Server) handleGetSerials(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	if result == nil {
		jsonResponse(w, []interface{}{})
		return
	}
	// SerialEntry is one row of the serials table in the UI.
	type SerialEntry struct {
		Component string `json:"component"`
		Location string `json:"location,omitempty"`
		SerialNumber string `json:"serial_number"`
		Manufacturer string `json:"manufacturer,omitempty"`
		PartNumber string `json:"part_number,omitempty"`
		Category string `json:"category"`
	}
	var serials []SerialEntry
	// FRU records: display the product name, falling back to description.
	for _, fru := range result.FRU {
		if !hasUsableSerial(fru.SerialNumber) {
			continue
		}
		name := fru.ProductName
		if name == "" {
			name = fru.Description
		}
		serials = append(serials, SerialEntry{
			Component: name,
			SerialNumber: strings.TrimSpace(fru.SerialNumber),
			Manufacturer: fru.Manufacturer,
			PartNumber: fru.PartNumber,
			Category: "FRU",
		})
	}
	// Hardware devices: use the canonical list so serials line up with the
	// configuration and firmware views.
	if result.Hardware != nil {
		for _, d := range canonicalDevices(result.Hardware) {
			if !hasUsableSerial(d.SerialNumber) {
				continue
			}
			serials = append(serials, SerialEntry{
				Component: serialComponent(d),
				Location: strings.TrimSpace(coalesce(d.Location, d.Slot)),
				SerialNumber: strings.TrimSpace(d.SerialNumber),
				Manufacturer: strings.TrimSpace(d.Manufacturer),
				PartNumber: strings.TrimSpace(d.PartNumber),
				Category: serialCategory(d.Kind),
			})
		}
	}
	jsonResponse(w, serials)
}
// normalizePCIeSerialComponentName picks the best display name for a PCIe
// device in the serials table: prefer the part number unless it merely
// repeats the device class, fall back to the class name, and finally to a
// generic "PCIe device" label.
func normalizePCIeSerialComponentName(p models.PCIeDevice) string {
	class := strings.TrimSpace(p.DeviceClass)
	part := strings.TrimSpace(p.PartNumber)
	if part != "" {
		if !strings.EqualFold(part, class) {
			return part
		}
		// Part equals the class; still prefer it when the class is only a
		// generic bucket that carries no model information.
		switch strings.ToLower(class) {
		case "display", "display controller", "3d controller", "vga", "network", "network controller", "pcie device", "other", "unknown", "":
			return part
		}
	}
	if class != "" {
		return class
	}
	if part != "" {
		return part
	}
	return "PCIe device"
}
// hasUsableSerial reports whether serial carries real information, i.e. it
// is non-blank and not one of the common placeholder values vendors emit.
func hasUsableSerial(serial string) bool {
	trimmed := strings.TrimSpace(serial)
	if trimmed == "" {
		return false
	}
	placeholders := map[string]bool{
		"N/A":     true,
		"NA":      true,
		"NONE":    true,
		"NULL":    true,
		"UNKNOWN": true,
		"-":       true,
	}
	return !placeholders[strings.ToUpper(trimmed)]
}
// handleGetFirmware returns the deduplicated firmware inventory, or an
// empty array when no result (or no hardware section) is loaded.
func (s *Server) handleGetFirmware(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	if result != nil && result.Hardware != nil {
		jsonResponse(w, buildFirmwareEntries(result.Hardware))
		return
	}
	jsonResponse(w, []interface{}{})
}
// firmwareEntry is one row of the firmware inventory table: the component
// type, its model (or "-" when unknown), and the firmware version.
type firmwareEntry struct {
	Component string `json:"component"`
	Model string `json:"model"`
	Version string `json:"version"`
}
// buildFirmwareEntries flattens firmware information from the dedicated
// firmware list and from per-device firmware fields into deduplicated
// (component, model, version) rows.
// E.g., "PSU0 (AP-CR3000F12BY)" and "PSU1 (AP-CR3000F12BY)" with the same
// version collapse into a single entry.
func buildFirmwareEntries(hw *models.HardwareConfig) []firmwareEntry {
	if hw == nil {
		return nil
	}
	seen := make(map[string]bool)
	var deduplicated []firmwareEntry
	// appendEntry normalizes the fields, substitutes "-" for a missing
	// model, and drops incomplete rows and exact duplicates.
	appendEntry := func(component, model, version string) {
		component = strings.TrimSpace(component)
		model = strings.TrimSpace(model)
		version = strings.TrimSpace(version)
		if component == "" || version == "" {
			return
		}
		if model == "" {
			model = "-"
		}
		key := component + "|" + model + "|" + version
		if seen[key] {
			return
		}
		seen[key] = true
		deduplicated = append(deduplicated, firmwareEntry{
			Component: component,
			Model: model,
			Version: version,
		})
	}
	// Explicit firmware inventory entries.
	for _, fw := range hw.Firmware {
		component, model := extractFirmwareComponentAndModel(fw.DeviceName)
		appendEntry(component, model, fw.Version)
	}
	// Firmware versions carried on devices themselves; display model is the
	// part number, then model, then slot — whichever is available first.
	for _, d := range canonicalDevices(hw) {
		version := strings.TrimSpace(d.Firmware)
		if version == "" {
			continue
		}
		model := strings.TrimSpace(d.PartNumber)
		if model == "" {
			model = strings.TrimSpace(d.Model)
		}
		if model == "" {
			model = strings.TrimSpace(d.Slot)
		}
		appendEntry(serialCategory(d.Kind), model, version)
	}
	return deduplicated
}
// serialComponent chooses the display name for a device in the serials
// table: model first, then part number, then a class-derived label, and
// finally the upper-cased device kind.
func serialComponent(d models.HardwareDevice) string {
	if model := strings.TrimSpace(d.Model); model != "" {
		return model
	}
	if part := strings.TrimSpace(d.PartNumber); part != "" {
		return part
	}
	// PCIe devices get the dedicated normalizer so generic class names are
	// replaced with something more specific where possible.
	if d.Kind == models.DeviceKindPCIe {
		return normalizePCIeSerialComponentName(models.PCIeDevice{
			DeviceClass: d.DeviceClass,
			PartNumber:  d.PartNumber,
		})
	}
	if class := strings.TrimSpace(d.DeviceClass); class != "" {
		return class
	}
	return strings.ToUpper(d.Kind)
}
// serialCategory maps a device-kind constant to the category label shown
// in the serials and firmware tables; unrecognized kinds fall back to
// "PCIe".
func serialCategory(kind string) string {
	labels := map[string]string{
		models.DeviceKindBoard:   "Board",
		models.DeviceKindCPU:     "CPU",
		models.DeviceKindMemory:  "Memory",
		models.DeviceKindStorage: "Storage",
		models.DeviceKindGPU:     "GPU",
		models.DeviceKindNetwork: "Network",
		models.DeviceKindPSU:     "PSU",
	}
	if label, ok := labels[kind]; ok {
		return label
	}
	return "PCIe"
}
// intFromDetails extracts an integer from a device details map, accepting
// both int and float64 (JSON numbers decode as float64). Missing keys, nil
// maps and other value types all yield 0.
func intFromDetails(details map[string]any, key string) int {
	// Indexing a nil map safely yields the zero value, so no nil guard is
	// needed; a missing key produces a nil interface and falls through.
	switch n := details[key].(type) {
	case int:
		return n
	case float64:
		return int(n)
	}
	return 0
}
// coalesce returns the first value that is not blank (empty or only
// whitespace). The value is returned as-is, without trimming; "" when all
// candidates are blank.
func coalesce(values ...string) string {
	for _, candidate := range values {
		if strings.TrimSpace(candidate) == "" {
			continue
		}
		return candidate
	}
	return ""
}
// extractFirmwareComponentAndModel splits a firmware inventory device name
// into a component label and a model string. Examples:
//
//	"PSU0 (AP-CR3000F12BY)"          -> ("PSU", "AP-CR3000F12BY")
//	"CPU0 Microcode"                 -> ("CPU Microcode", "-")
//	"NIC #CPU1_PCIE9 (MCX512A-ACAT)" -> ("NIC", "MCX512A-ACAT")
//	"GPU GPUSXM5 (692-2G520-0280-501)" -> ("GPU", "692-2G520-0280-501")
//	"HDD Samsung MZ7L33T8HBNA-00A07" -> ("HDD", "Samsung MZ7L33T8HBNA-00A07")
//	"BIOS"                           -> ("BIOS", "-")
//
// When no model can be extracted, model is "-" — never the empty string.
// (The original could return "" for a PSU/NIC with empty parentheses and
// did not trim whitespace inside them; both are fixed here.)
func extractFirmwareComponentAndModel(deviceName string) (component, model string) {
	// parenModel extracts the trimmed content of the first "(...)" group,
	// or "" when there is none.
	parenModel := func(name string) string {
		if idx := strings.Index(name, "("); idx != -1 {
			return strings.TrimSpace(strings.Trim(name[idx:], "()"))
		}
		return ""
	}
	// tailModel returns the trimmed remainder after prefix, or "-" when
	// nothing is left.
	tailModel := func(name, prefix string) string {
		if rest := strings.TrimSpace(strings.TrimPrefix(name, prefix)); rest != "" {
			return rest
		}
		return "-"
	}
	switch {
	case strings.HasPrefix(deviceName, "PSU"):
		if m := parenModel(deviceName); m != "" {
			return "PSU", m
		}
		return "PSU", "-"
	case strings.HasPrefix(deviceName, "CPU") && strings.Contains(deviceName, "Microcode"):
		return "CPU Microcode", "-"
	case strings.HasPrefix(deviceName, "NIC "):
		if m := parenModel(deviceName); m != "" {
			return "NIC", m
		}
		return "NIC", "-"
	case strings.HasPrefix(deviceName, "GPU "):
		if m := parenModel(deviceName); m != "" {
			return "GPU", m
		}
		return "GPU", tailModel(deviceName, "GPU ")
	case strings.HasPrefix(deviceName, "NVSwitch "):
		if m := parenModel(deviceName); m != "" {
			return "NVSwitch", m
		}
		return "NVSwitch", tailModel(deviceName, "NVSwitch ")
	case strings.HasPrefix(deviceName, "HDD "):
		return "HDD", strings.TrimPrefix(deviceName, "HDD ")
	case strings.HasPrefix(deviceName, "SSD "):
		return "SSD", strings.TrimPrefix(deviceName, "SSD ")
	case strings.HasPrefix(deviceName, "NVMe "):
		return "NVMe", strings.TrimPrefix(deviceName, "NVMe ")
	}
	// Simple names like "BIOS", "ME", "BKC" are the component themselves.
	return deviceName, "-"
}
// handleGetStatus reports whether a result is loaded and, when one is,
// basic metadata and counters for the UI header.
func (s *Server) handleGetStatus(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	if result == nil {
		jsonResponse(w, map[string]interface{}{"loaded": false})
		return
	}
	counters := map[string]int{
		"events":  len(result.Events),
		"sensors": len(result.Sensors),
		"fru":     len(result.FRU),
	}
	jsonResponse(w, map[string]interface{}{
		"loaded":       true,
		"filename":     result.Filename,
		"vendor":       s.GetDetectedVendor(),
		"source_type":  result.SourceType,
		"protocol":     result.Protocol,
		"target_host":  result.TargetHost,
		"collected_at": result.CollectedAt,
		"stats":        counters,
	})
}
// handleExportCSV streams the current result as a CSV download.
func (s *Server) handleExportCSV(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	w.Header().Set("Content-Type", "text/csv; charset=utf-8")
	disposition := fmt.Sprintf("attachment; filename=%q", exportFilename(result, "csv"))
	w.Header().Set("Content-Disposition", disposition)
	exporter.New(result).ExportCSV(w)
}
// handleExportJSON downloads the current data set. When a raw export
// package is available it is wrapped into a zip bundle; otherwise a plain
// JSON export of the analysis result is produced.
func (s *Server) handleExportJSON(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	rawPkg := s.GetRawExport()
	if rawPkg == nil {
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", exportFilename(result, "json")))
		// Encoding errors cannot be reported once the body has started.
		_ = exporter.New(result).ExportJSON(w)
		return
	}
	bundle, err := buildRawExportBundle(rawPkg, result, s.ClientVersionString())
	if err != nil {
		jsonError(w, "Failed to build raw export bundle: "+err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/zip")
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", exportFilename(result, "zip")))
	_, _ = w.Write(bundle)
}
// handleExportReanimator converts the loaded hardware data to the
// Reanimator JSON format and serves it as a download. Requires a loaded
// result with a hardware section.
func (s *Server) handleExportReanimator(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	if result == nil || result.Hardware == nil {
		jsonError(w, "No hardware data available for export", http.StatusBadRequest)
		return
	}
	reanimatorData, err := exporter.ConvertToReanimator(result)
	if err != nil {
		// Missing mandatory source fields are the client's problem;
		// everything else is an internal failure.
		statusCode := http.StatusInternalServerError
		if strings.Contains(err.Error(), "required for Reanimator export") {
			statusCode = http.StatusBadRequest
		}
		jsonError(w, fmt.Sprintf("Export failed: %v", err), statusCode)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", exportFilename(result, "reanimator.json")))
	encoder := json.NewEncoder(w)
	encoder.SetIndent("", " ")
	// Headers are already sent; an encode failure cannot be reported to the
	// client, so the error is deliberately dropped.
	_ = encoder.Encode(reanimatorData)
}
// handleClear drops the loaded result, the detected vendor and the raw
// export state, returning the server to its empty state.
func (s *Server) handleClear(w http.ResponseWriter, r *http.Request) {
	s.SetResult(nil)
	s.SetDetectedVendor("")
	s.SetRawExport(nil)
	response := map[string]string{
		"status":  "ok",
		"message": "Data cleared",
	}
	jsonResponse(w, response)
}
// handleShutdown acknowledges the request and then stops the process
// shortly afterwards, so the HTTP response has time to reach the client.
func (s *Server) handleShutdown(w http.ResponseWriter, r *http.Request) {
	jsonResponse(w, map[string]string{
		"status":  "ok",
		"message": "Server shutting down",
	})
	go func() {
		// Give the response a moment to flush before exiting.
		time.Sleep(100 * time.Millisecond)
		s.Shutdown()
		os.Exit(0)
	}()
}
// handleCollectStart validates a collection request, registers a job and
// kicks off asynchronous collection, replying 202 with the job descriptor.
// Unknown JSON fields are rejected to catch client typos early.
func (s *Server) handleCollectStart(w http.ResponseWriter, r *http.Request) {
	var req CollectRequest
	dec := json.NewDecoder(r.Body)
	dec.DisallowUnknownFields()
	if err := dec.Decode(&req); err != nil {
		jsonError(w, "Invalid JSON body", http.StatusBadRequest)
		return
	}
	if err := validateCollectRequest(req); err != nil {
		jsonError(w, err.Error(), http.StatusUnprocessableEntity)
		return
	}
	job := s.jobManager.CreateJob(req)
	s.jobManager.AppendJobLog(job.ID, "Клиент: "+s.ClientVersionString())
	s.startCollectionJob(job.ID, req)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusAccepted)
	_ = json.NewEncoder(w).Encode(job.toJobResponse("Collection job accepted"))
}
// handleCollectStatus returns the current state of a collect job looked up
// by its path id.
func (s *Server) handleCollectStatus(w http.ResponseWriter, r *http.Request) {
	id := strings.TrimSpace(r.PathValue("id"))
	if !isValidCollectJobID(id) {
		jsonError(w, "Invalid collect job id", http.StatusBadRequest)
		return
	}
	if job, ok := s.jobManager.GetJob(id); ok {
		jsonResponse(w, job.toStatusResponse())
		return
	}
	jsonError(w, "Collect job not found", http.StatusNotFound)
}
// handleCollectCancel requests cancellation of a collect job by its path
// id and returns the resulting job state.
func (s *Server) handleCollectCancel(w http.ResponseWriter, r *http.Request) {
	id := strings.TrimSpace(r.PathValue("id"))
	if !isValidCollectJobID(id) {
		jsonError(w, "Invalid collect job id", http.StatusBadRequest)
		return
	}
	if job, ok := s.jobManager.CancelJob(id); ok {
		jsonResponse(w, job.toStatusResponse())
		return
	}
	jsonError(w, "Collect job not found", http.StatusNotFound)
}
// startCollectionJob launches asynchronous collection for jobID. The
// goroutine's lifetime is bounded by a cancelable context attached to the
// job, so handleCollectCancel can stop it; progress updates and the final
// result are applied only while the job is still in a non-terminal state.
func (s *Server) startCollectionJob(jobID string, req CollectRequest) {
	ctx, cancel := context.WithCancel(context.Background())
	// If the cancel function cannot be attached (job gone or already has
	// one), do not start the goroutine at all.
	if attached := s.jobManager.AttachJobCancel(jobID, cancel); !attached {
		cancel()
		return
	}
	go func() {
		connector, ok := s.getCollector(req.Protocol)
		if !ok {
			s.jobManager.UpdateJobStatus(jobID, CollectStatusFailed, 100, "Коннектор для протокола не зарегистрирован")
			s.jobManager.AppendJobLog(jobID, "Сбор завершен с ошибкой")
			return
		}
		// emitProgress forwards collector progress into the job state, but
		// never resurrects a job that already reached a terminal state.
		emitProgress := func(update collector.Progress) {
			if job, ok := s.jobManager.GetJob(jobID); !ok || isTerminalCollectStatus(job.Status) {
				return
			}
			status := update.Status
			if status == "" {
				status = CollectStatusRunning
			}
			s.jobManager.UpdateJobStatus(jobID, status, update.Progress, "")
			if update.Message != "" {
				s.jobManager.AppendJobLog(jobID, update.Message)
			}
		}
		result, err := connector.Collect(ctx, toCollectorRequest(req), emitProgress)
		if err != nil {
			// Cancellation is reported by the cancel path, not treated as
			// a collection failure here.
			if ctx.Err() != nil {
				return
			}
			if job, ok := s.jobManager.GetJob(jobID); !ok || isTerminalCollectStatus(job.Status) {
				return
			}
			s.jobManager.UpdateJobStatus(jobID, CollectStatusFailed, 100, err.Error())
			s.jobManager.AppendJobLog(jobID, "Сбор завершен с ошибкой")
			return
		}
		// Re-check terminal state before publishing the successful result.
		if job, ok := s.jobManager.GetJob(jobID); !ok || isTerminalCollectStatus(job.Status) {
			return
		}
		applyCollectSourceMetadata(result, req)
		s.jobManager.UpdateJobStatus(jobID, CollectStatusSuccess, 100, "")
		s.jobManager.AppendJobLog(jobID, "Сбор завершен")
		s.SetResult(result)
		s.SetDetectedVendor(req.Protocol)
		// Preserve the job log together with the raw export package.
		if job, ok := s.jobManager.GetJob(jobID); ok {
			s.SetRawExport(newRawExportFromLiveCollect(result, req, job.Logs))
		}
	}()
}
// validateCollectRequest checks the collect request fields and returns a
// descriptive error for the first violation found. Validation order
// matches the form layout: host, protocol, port, username, auth, TLS mode.
func validateCollectRequest(req CollectRequest) error {
	requireNonEmpty := func(value, field string) error {
		if strings.TrimSpace(value) == "" {
			return fmt.Errorf("field '%s' is required", field)
		}
		return nil
	}
	if err := requireNonEmpty(req.Host, "host"); err != nil {
		return err
	}
	if req.Protocol != "redfish" && req.Protocol != "ipmi" {
		return fmt.Errorf("field 'protocol' must be one of: redfish, ipmi")
	}
	if req.Port < 1 || req.Port > 65535 {
		return fmt.Errorf("field 'port' must be in range 1..65535")
	}
	if err := requireNonEmpty(req.Username, "username"); err != nil {
		return err
	}
	switch req.AuthType {
	case "password":
		if strings.TrimSpace(req.Password) == "" {
			return fmt.Errorf("field 'password' is required when auth_type=password")
		}
	case "token":
		if strings.TrimSpace(req.Token) == "" {
			return fmt.Errorf("field 'token' is required when auth_type=token")
		}
	default:
		return fmt.Errorf("field 'auth_type' must be one of: password, token")
	}
	if req.TLSMode != "strict" && req.TLSMode != "insecure" {
		return fmt.Errorf("field 'tls_mode' must be one of: strict, insecure")
	}
	return nil
}
// collectJobIDPattern matches identifiers produced by generateJobID:
// "job_" followed by at least eight characters from [a-zA-Z0-9_-].
var collectJobIDPattern = regexp.MustCompile(`^job_[a-zA-Z0-9_-]{8,}$`)

// isValidCollectJobID reports whether id is a well-formed collect job
// identifier; it guards the path parameter before any job lookup.
func isValidCollectJobID(id string) bool {
	return collectJobIDPattern.MatchString(id)
}
// generateJobID returns a random identifier of the form "job_" followed by
// 16 hex characters. If the system randomness source fails (practically
// never), a timestamp-derived identifier is used instead.
func generateJobID() string {
	var entropy [8]byte
	if _, err := rand.Read(entropy[:]); err != nil {
		return fmt.Sprintf("job_%d", time.Now().UnixNano())
	}
	return fmt.Sprintf("job_%x", entropy[:])
}
// applyArchiveSourceMetadata stamps a result parsed from an uploaded
// archive file: archive source type, no protocol or target host, and the
// current UTC time as the collection moment. No-op on a nil result.
func applyArchiveSourceMetadata(result *models.AnalysisResult) {
	if result == nil {
		return
	}
	result.SourceType = models.SourceTypeArchive
	result.Protocol = ""
	result.TargetHost = ""
	result.CollectedAt = time.Now().UTC()
}
// applyCollectSourceMetadata stamps a result obtained from a live
// collection: API source type, the request's protocol and host, the
// current UTC time, and a synthetic "proto://host" filename when the
// collector did not set one. No-op on a nil result.
func applyCollectSourceMetadata(result *models.AnalysisResult, req CollectRequest) {
	if result == nil {
		return
	}
	result.SourceType = models.SourceTypeAPI
	result.Protocol = req.Protocol
	result.TargetHost = req.Host
	result.CollectedAt = time.Now().UTC()
	if strings.TrimSpace(result.Filename) != "" {
		return
	}
	result.Filename = req.Protocol + "://" + req.Host
}
// toCollectorRequest converts the HTTP-layer CollectRequest into the
// collector package's request type (a one-to-one field mapping).
func toCollectorRequest(req CollectRequest) collector.Request {
	return collector.Request{
		Host: req.Host,
		Protocol: req.Protocol,
		Port: req.Port,
		Username: req.Username,
		AuthType: req.AuthType,
		Password: req.Password,
		Token: req.Token,
		TLSMode: req.TLSMode,
	}
}
// looksLikeJSONSnapshot reports whether an uploaded file should be treated
// as a JSON snapshot: either its extension is .json (case-insensitive) or
// the payload, after whitespace, starts with a JSON object/array opener.
func looksLikeJSONSnapshot(filename string, payload []byte) bool {
	if strings.ToLower(filepath.Ext(filename)) == ".json" {
		return true
	}
	body := bytes.TrimSpace(payload)
	if len(body) == 0 {
		return false
	}
	first := body[0]
	return first == '{' || first == '['
}
// parseUploadedSnapshot decodes a previously exported analysis snapshot
// and backfills missing metadata (source type, collection time, filename).
// Payloads that decode but contain no recognizable data are rejected.
func parseUploadedSnapshot(payload []byte) (*models.AnalysisResult, error) {
	var snapshot models.AnalysisResult
	if err := json.Unmarshal(payload, &snapshot); err != nil {
		return nil, err
	}
	empty := snapshot.Hardware == nil &&
		len(snapshot.Events) == 0 &&
		len(snapshot.Sensors) == 0 &&
		len(snapshot.FRU) == 0
	if empty {
		return nil, fmt.Errorf("unsupported snapshot format")
	}
	if strings.TrimSpace(snapshot.SourceType) == "" {
		// A recorded protocol implies the data came from a live API pull.
		snapshot.SourceType = models.SourceTypeArchive
		if snapshot.Protocol != "" {
			snapshot.SourceType = models.SourceTypeAPI
		}
	}
	if snapshot.CollectedAt.IsZero() {
		snapshot.CollectedAt = time.Now().UTC()
	}
	if strings.TrimSpace(snapshot.Filename) == "" {
		snapshot.Filename = "uploaded_snapshot.json"
	}
	return &snapshot, nil
}
// getCollector returns the registered connector for protocol, lazily
// creating the default registry on first use.
// NOTE(review): the lazy init is not synchronized; concurrent requests
// could build the registry twice. Confirm the server serializes access
// here, or move the init into server construction.
func (s *Server) getCollector(protocol string) (collector.Connector, bool) {
	if s.collectors == nil {
		s.collectors = collector.NewDefaultRegistry()
	}
	return s.collectors.Get(protocol)
}
// jsonResponse writes data as a JSON body with the appropriate content
// type. Encoding errors cannot be reported once the body has started, so
// they are deliberately discarded.
func jsonResponse(w http.ResponseWriter, data interface{}) {
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(data)
}
// jsonError writes a JSON error envelope {"error": message} with the given
// HTTP status code.
func jsonError(w http.ResponseWriter, message string, code int) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	payload := map[string]string{"error": message}
	_ = json.NewEncoder(w).Encode(payload)
}
// isGPUDevice reports whether a device class string denotes a GPU: either
// a standard PCI display class name or a substring match against known GPU
// model families (case-insensitive).
func isGPUDevice(deviceClass string) bool {
	// Standard PCI class names.
	switch deviceClass {
	case "VGA", "3D Controller", "Display":
		return true
	}
	// Known GPU model markers, pre-uppercased for the comparison below.
	needle := strings.ToUpper(deviceClass)
	for _, marker := range []string{
		"L40", "A100", "A10", "A16", "A30", "H100", "H200", "V100",
		"RTX", "GTX", "QUADRO", "TESLA",
		"INSTINCT", "RADEON",
		"AST2500", "AST2600", // ASPEED BMC VGA
	} {
		if strings.Contains(needle, marker) {
			return true
		}
	}
	return false
}
// exportFilename builds a download name of the form
// "YYYY-MM-DD (MODEL) - SERIAL.ext". Placeholders are used when the loaded
// result carries no board identity; both fields are sanitized for use in a
// filename and a blank/dotted extension defaults to "json".
func exportFilename(result *models.AnalysisResult, ext string) string {
	date := time.Now().UTC().Format("2006-01-02")
	model := "SERVER MODEL"
	sn := "SERVER SN"
	if result != nil {
		if !result.CollectedAt.IsZero() {
			date = result.CollectedAt.UTC().Format("2006-01-02")
		}
		if hw := result.Hardware; hw != nil {
			if v := strings.TrimSpace(hw.BoardInfo.ProductName); v != "" {
				model = v
			}
			if v := strings.TrimSpace(hw.BoardInfo.SerialNumber); v != "" {
				sn = v
			}
		}
	}
	ext = strings.TrimPrefix(strings.TrimSpace(ext), ".")
	if ext == "" {
		ext = "json"
	}
	return fmt.Sprintf("%s (%s) - %s.%s", date, sanitizeFilenamePart(model), sanitizeFilenamePart(sn), ext)
}
// sanitizeFilenamePart makes v safe for use inside a download filename:
// path separators and characters forbidden on common filesystems become
// underscores, control whitespace becomes spaces, and whitespace runs
// collapse to a single space. Blank input (before or after cleaning)
// yields "-".
func sanitizeFilenamePart(v string) string {
	v = strings.TrimSpace(v)
	if v == "" {
		return "-"
	}
	cleaned := strings.Map(func(r rune) rune {
		switch r {
		case '/', '\\', ':', '*', '?', '"', '<', '>', '|':
			return '_'
		case '\n', '\r', '\t':
			return ' '
		}
		return r
	}, v)
	cleaned = strings.Join(strings.Fields(cleaned), " ")
	if cleaned == "" {
		return "-"
	}
	return cleaned
}