Files
logpile/internal/server/handlers.go
2026-03-26 18:42:54 +03:00

2150 lines
59 KiB
Go
Raw Permalink Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
package server
import (
"archive/zip"
"bytes"
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"html/template"
"io"
"net"
"net/http"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"sync/atomic"
"time"
"git.mchus.pro/mchus/logpile/internal/collector"
"git.mchus.pro/mchus/logpile/internal/exporter"
"git.mchus.pro/mchus/logpile/internal/ingest"
"git.mchus.pro/mchus/logpile/internal/models"
"git.mchus.pro/mchus/logpile/internal/parser"
chartviewer "reanimator/chart/viewer"
)
// handleIndex serves the main index page. Only the exact root path is
// handled; any other path is a 404 so unknown URLs don't render the shell.
func (s *Server) handleIndex(w http.ResponseWriter, r *http.Request) {
	if r.URL.Path != "/" {
		http.NotFound(w, r)
		return
	}
	tmplContent, err := WebFS.ReadFile("templates/index.html")
	if err != nil {
		http.Error(w, "Template not found", http.StatusInternalServerError)
		return
	}
	tmpl, err := template.New("index").Parse(string(tmplContent))
	if err != nil {
		http.Error(w, "Template parse error", http.StatusInternalServerError)
		return
	}
	// Render into a buffer first: the original ignored Execute's error, which
	// could leave a partially written page. Buffering lets an execution error
	// still produce a proper 500 response.
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, map[string]string{
		"AppVersion": s.config.AppVersion,
		"AppCommit":  s.config.AppCommit,
	}); err != nil {
		http.Error(w, "Template execute error", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	_, _ = buf.WriteTo(w)
}
// handleChartCurrent renders the Reanimator chart viewer for the currently
// loaded result. Without hardware data an empty viewer page is served.
func (s *Server) handleChartCurrent(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	title := chartTitle(result)
	serve := func(page []byte) {
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		_, _ = w.Write(rewriteChartStaticPaths(page))
	}
	if result == nil || result.Hardware == nil {
		// No data yet: render the viewer shell without a snapshot.
		page, err := chartviewer.RenderHTML(nil, title)
		if err != nil {
			http.Error(w, "failed to render viewer", http.StatusInternalServerError)
			return
		}
		serve(page)
		return
	}
	snapshot, err := currentReanimatorSnapshotBytes(result)
	if err != nil {
		http.Error(w, "failed to build reanimator snapshot: "+err.Error(), http.StatusInternalServerError)
		return
	}
	page, err := chartviewer.RenderHTML(snapshot, title)
	if err != nil {
		http.Error(w, "failed to render chart: "+err.Error(), http.StatusInternalServerError)
		return
	}
	serve(page)
}
// currentReanimatorSnapshotBytes converts the result to Reanimator format
// and serializes it as indented JSON.
func currentReanimatorSnapshotBytes(result *models.AnalysisResult) ([]byte, error) {
	converted, err := exporter.ConvertToReanimator(result)
	if err != nil {
		return nil, err
	}
	var out bytes.Buffer
	enc := json.NewEncoder(&out)
	enc.SetIndent("", " ")
	if err := enc.Encode(converted); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}
// chartTitle derives the viewer page title from the loaded result,
// preferring board product/serial, then target host, then filename.
func chartTitle(result *models.AnalysisResult) string {
	const fallback = "LOGPile Reanimator Viewer"
	if result == nil {
		return fallback
	}
	if hw := result.Hardware; hw != nil {
		product := strings.TrimSpace(hw.BoardInfo.ProductName)
		serial := strings.TrimSpace(hw.BoardInfo.SerialNumber)
		if product != "" && serial != "" {
			return product + " - " + serial
		}
		if product != "" {
			return product
		}
		if serial != "" {
			return serial
		}
	}
	// Board info was unusable; fall back to host, then filename.
	for _, candidate := range []string{result.TargetHost, result.Filename} {
		if trimmed := strings.TrimSpace(candidate); trimmed != "" {
			return trimmed
		}
	}
	return fallback
}
// rewriteChartStaticPaths remaps the viewer stylesheet link so it is served
// from under the /chart/ route prefix.
func rewriteChartStaticPaths(html []byte) []byte {
	const (
		from = `href="/static/view.css"`
		to   = `href="/chart/static/view.css"`
	)
	return bytes.ReplaceAll(html, []byte(from), []byte(to))
}
// handleUpload accepts a multipart upload (field "archive"), parses it as a
// raw-export bundle, JSON snapshot, or vendor archive, and installs the
// parsed result as the server's current analysis state.
//
// Fix: the original declared result/vendor in a var block and then
// immediately re-bound them with := on the next line, leaving dead
// declarations; the var block is removed.
func (s *Server) handleUpload(w http.ResponseWriter, r *http.Request) {
	// Cap the request body so oversized uploads fail fast.
	r.Body = http.MaxBytesReader(w, r.Body, uploadMultipartMaxBytes())
	if err := r.ParseMultipartForm(uploadMultipartFormMemoryBytes()); err != nil {
		jsonError(w, "File too large", http.StatusBadRequest)
		return
	}
	file, header, err := r.FormFile("archive")
	if err != nil {
		jsonError(w, "Failed to read file", http.StatusBadRequest)
		return
	}
	defer file.Close()
	payload, err := io.ReadAll(file)
	if err != nil {
		jsonError(w, "Failed to read file", http.StatusBadRequest)
		return
	}
	result, vendor, rawPkg, err := s.analyzeUploadedFile(header.Filename, header.Header.Get("Content-Type"), payload)
	if err != nil {
		jsonError(w, "Failed to parse uploaded file: "+err.Error(), http.StatusBadRequest)
		return
	}
	if strings.TrimSpace(vendor) == "" {
		vendor = "snapshot"
	}
	if rawPkg != nil {
		s.SetRawExport(rawPkg)
	}
	s.SetResult(result)
	s.SetDetectedVendor(vendor)
	jsonResponse(w, map[string]interface{}{
		"status":   "ok",
		"message":  "File uploaded and parsed successfully",
		"filename": header.Filename,
		"vendor":   vendor,
		"stats": map[string]int{
			"events":  len(result.Events),
			"sensors": len(result.Sensors),
			"fru":     len(result.FRU),
		},
	})
}
// analyzeUploadedFile dispatches an uploaded payload to the matching parser:
// a raw-export bundle (zip), a raw-export package or snapshot (JSON), or a
// vendor log archive. It returns the parsed result, the detected vendor and,
// when available, a raw-export package kept for later re-export.
//
// Fix: both raw-export branches duplicated the reanalyze + vendor-fallback
// sequence; the shared logic is extracted into reanalyzedRawPackage.
func (s *Server) analyzeUploadedFile(filename, mimeType string, payload []byte) (*models.AnalysisResult, string, *RawExportPackage, error) {
	if rawPkg, ok, err := parseRawExportBundle(payload); err != nil {
		return nil, "", nil, err
	} else if ok {
		return s.reanalyzedRawPackage(rawPkg)
	}
	if looksLikeJSONSnapshot(filename, payload) {
		if rawPkg, ok, err := parseRawExportPackage(payload); err != nil {
			return nil, "", nil, err
		} else if ok {
			return s.reanalyzedRawPackage(rawPkg)
		}
		snapshotResult, err := parseUploadedSnapshot(payload)
		if err != nil {
			return nil, "", nil, err
		}
		vendor := strings.TrimSpace(snapshotResult.Protocol)
		if vendor == "" {
			vendor = "snapshot"
		}
		return snapshotResult, vendor, newRawExportFromUploadedFile(filename, mimeType, payload, snapshotResult), nil
	}
	if !parser.IsSupportedArchiveFilename(filename) {
		return nil, "", nil, fmt.Errorf("unsupported archive format: %s", strings.ToLower(filepath.Ext(filename)))
	}
	result, vendor, err := s.ingestService().AnalyzeArchivePayload(filename, payload)
	if err != nil {
		return nil, "", nil, err
	}
	applyArchiveSourceMetadata(result)
	return result, vendor, newRawExportFromUploadedFile(filename, mimeType, payload, result), nil
}

// reanalyzedRawPackage re-runs analysis for a raw-export package and applies
// the "snapshot" vendor fallback; shared by both raw-export upload paths.
func (s *Server) reanalyzedRawPackage(rawPkg *RawExportPackage) (*models.AnalysisResult, string, *RawExportPackage, error) {
	result, vendor, err := s.reanalyzeRawExportPackage(rawPkg)
	if err != nil {
		return nil, "", nil, err
	}
	if strings.TrimSpace(vendor) == "" {
		vendor = "snapshot"
	}
	return result, vendor, rawPkg, nil
}
// uploadMultipartMaxBytes returns the byte cap for an incoming multipart
// upload body. Defaults to 2048 MB and may be overridden through the
// LOGPILE_UPLOAD_MAX_MB environment variable, clamped to [100, 8192] MB.
// Unparseable values keep the default.
func uploadMultipartMaxBytes() int64 {
	const (
		defMB = 2048
		minMB = 100
		maxMB = 8192
	)
	mb := defMB
	raw := strings.TrimSpace(os.Getenv("LOGPILE_UPLOAD_MAX_MB"))
	if raw != "" {
		if parsed, err := strconv.Atoi(raw); err == nil {
			switch {
			case parsed < minMB:
				mb = minMB
			case parsed > maxMB:
				mb = maxMB
			default:
				mb = parsed
			}
		}
	}
	return int64(mb) << 20
}
// convertMultipartMaxBytes returns the byte cap for convert-mode uploads.
// Convert mode typically receives a whole folder of files, so its limit is
// independent and larger: default 16384 MB, overridable via
// LOGPILE_CONVERT_MAX_MB, clamped to [512, 65536] MB.
func convertMultipartMaxBytes() int64 {
	const (
		defMB = 16384
		minMB = 512
		maxMB = 65536
	)
	mb := defMB
	raw := strings.TrimSpace(os.Getenv("LOGPILE_CONVERT_MAX_MB"))
	if raw != "" {
		if parsed, err := strconv.Atoi(raw); err == nil {
			switch {
			case parsed < minMB:
				mb = minMB
			case parsed > maxMB:
				mb = maxMB
			default:
				mb = parsed
			}
		}
	}
	return int64(mb) << 20
}
// uploadMultipartFormMemoryBytes returns the in-memory threshold handed to
// ParseMultipartForm; parts beyond it spill to temp files on disk.
func uploadMultipartFormMemoryBytes() int64 {
	return 32 << 20 // 32 MB
}
// reanalyzeRawExportPackage re-runs the analysis pipeline for a previously
// exported raw package. Supported source kinds:
//   - "file_bytes":   base64-encoded original upload, re-parsed as a file
//   - "live_redfish": stored raw Redfish payloads, re-analyzed offline
// Returns the analysis result and the detected vendor.
func (s *Server) reanalyzeRawExportPackage(pkg *RawExportPackage) (*models.AnalysisResult, string, error) {
	if pkg == nil {
		return nil, "", fmt.Errorf("empty package")
	}
	switch pkg.Source.Kind {
	case "file_bytes":
		// base64 is the only supported byte encoding for embedded files.
		if strings.TrimSpace(pkg.Source.Encoding) != "base64" {
			return nil, "", fmt.Errorf("unsupported file_bytes encoding: %s", pkg.Source.Encoding)
		}
		data, err := base64.StdEncoding.DecodeString(pkg.Source.Data)
		if err != nil {
			return nil, "", fmt.Errorf("decode source.data: %w", err)
		}
		return s.parseUploadedPayload(pkg.Source.Filename, data)
	case "live_redfish":
		if !strings.EqualFold(strings.TrimSpace(pkg.Source.Protocol), "redfish") {
			return nil, "", fmt.Errorf("unsupported live protocol: %s", pkg.Source.Protocol)
		}
		result, vendor, err := s.ingestService().AnalyzeRedfishRawPayloads(pkg.Source.RawPayloads, ingest.RedfishSourceMetadata{
			TargetHost:     pkg.Source.TargetHost,
			SourceTimezone: pkg.Source.SourceTimezone,
			Filename:       pkg.Source.Filename,
		})
		if err != nil {
			return nil, "", err
		}
		if result != nil {
			// Keep the original collection timestamp carried by the package
			// rather than the time of this re-analysis.
			result.CollectedAt = inferRawExportCollectedAt(result, pkg)
		}
		return result, vendor, nil
	default:
		return nil, "", fmt.Errorf("unsupported raw export source kind: %s", pkg.Source.Kind)
	}
}
// parseUploadedPayload parses a payload as either a JSON snapshot or a
// vendor archive and returns the result plus the detected vendor.
func (s *Server) parseUploadedPayload(filename string, payload []byte) (*models.AnalysisResult, string, error) {
	if !looksLikeJSONSnapshot(filename, payload) {
		// Not a snapshot: treat as a vendor log archive.
		result, vendor, err := s.ingestService().AnalyzeArchivePayload(filename, payload)
		if err != nil {
			return nil, "", err
		}
		applyArchiveSourceMetadata(result)
		return result, vendor, nil
	}
	snapshot, err := parseUploadedSnapshot(payload)
	if err != nil {
		return nil, "", err
	}
	vendor := strings.TrimSpace(snapshot.Protocol)
	if vendor == "" {
		vendor = "snapshot"
	}
	return snapshot, vendor, nil
}
// handleGetParsers returns metadata about all registered log parsers.
func (s *Server) handleGetParsers(w http.ResponseWriter, r *http.Request) {
	payload := map[string]interface{}{
		"parsers": parser.ListParsersInfo(),
	}
	jsonResponse(w, payload)
}
// handleGetFileTypes reports which file extensions each mode accepts:
// archives alone, and archives plus ".json" for upload/convert.
func (s *Server) handleGetFileTypes(w http.ResponseWriter, r *http.Request) {
	archiveExt := parser.SupportedArchiveExtensions()
	withJSON := append(append([]string{}, archiveExt...), ".json")
	acceptedExt := uniqueSortedExtensions(withJSON)
	jsonResponse(w, map[string]any{
		"archive_extensions": archiveExt,
		"upload_extensions":  acceptedExt,
		"convert_extensions": acceptedExt,
	})
}
// uniqueSortedExtensions lowercases, trims, deduplicates and sorts a list
// of file extensions, dropping blanks. A nil/empty input yields nil.
func uniqueSortedExtensions(exts []string) []string {
	if len(exts) == 0 {
		return nil
	}
	set := make(map[string]struct{}, len(exts))
	for _, raw := range exts {
		if ext := strings.ToLower(strings.TrimSpace(raw)); ext != "" {
			set[ext] = struct{}{}
		}
	}
	out := make([]string, 0, len(set))
	for ext := range set {
		out = append(out, ext)
	}
	sort.Strings(out)
	return out
}
// handleGetEvents returns all events ordered newest-first. An empty JSON
// array is returned when no result is loaded.
func (s *Server) handleGetEvents(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	if result == nil {
		jsonResponse(w, []interface{}{})
		return
	}
	// Sort a copy so the shared result's event order is left untouched.
	sorted := append([]models.Event(nil), result.Events...)
	sort.Slice(sorted, func(a, b int) bool {
		return sorted[a].Timestamp.After(sorted[b].Timestamp)
	})
	jsonResponse(w, sorted)
}
// handleGetSensors returns all sensor readings, including synthetic PSU
// input-voltage readings derived from the hardware inventory.
func (s *Server) handleGetSensors(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	if result == nil {
		jsonResponse(w, []interface{}{})
		return
	}
	combined := make([]models.SensorReading, 0, len(result.Sensors))
	combined = append(combined, result.Sensors...)
	combined = append(combined, synthesizePSUVoltageSensors(result.Hardware)...)
	jsonResponse(w, combined)
}
// synthesizePSUVoltageSensors derives input-voltage sensor readings from
// the PSU inventory. Values outside ±10% of the 230V nominal are marked
// "warn"; PSUs without a positive input voltage are skipped.
func synthesizePSUVoltageSensors(hw *models.HardwareConfig) []models.SensorReading {
	if hw == nil || len(hw.PowerSupply) == 0 {
		return nil
	}
	const (
		nominalV = 230.0
		minV     = nominalV * 0.9 // 207V
		maxV     = nominalV * 1.1 // 253V
	)
	readings := make([]models.SensorReading, 0, len(hw.PowerSupply))
	for _, psu := range hw.PowerSupply {
		voltage := psu.InputVoltage
		if voltage <= 0 {
			continue
		}
		slot := strings.TrimSpace(psu.Slot)
		name := "PSU input voltage"
		if slot != "" {
			name = "PSU " + slot + " input voltage"
		}
		status := "ok"
		if voltage < minV || voltage > maxV {
			status = "warn"
		}
		readings = append(readings, models.SensorReading{
			Name:   name,
			Type:   "voltage",
			Value:  voltage,
			Unit:   "V",
			Status: status,
		})
	}
	return readings
}
// handleGetConfig returns source metadata, the hardware inventory and the
// derived specification for the currently loaded result.
func (s *Server) handleGetConfig(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	if result == nil {
		jsonResponse(w, map[string]interface{}{})
		return
	}
	payload := map[string]interface{}{
		"source_type":     result.SourceType,
		"protocol":        result.Protocol,
		"target_host":     result.TargetHost,
		"source_timezone": result.SourceTimezone,
		"collected_at":    result.CollectedAt,
	}
	// Surface collection-time fetch errors when present.
	if raw := result.RawPayloads; raw != nil {
		if fetchErrors, ok := raw["redfish_fetch_errors"]; ok {
			payload["redfish_fetch_errors"] = fetchErrors
		}
	}
	hw := result.Hardware
	if hw == nil {
		payload["hardware"] = map[string]interface{}{}
		payload["specification"] = []SpecLine{}
		jsonResponse(w, payload)
		return
	}
	payload["hardware"] = map[string]any{
		"board":    hw.BoardInfo,
		"firmware": hw.Firmware,
		"devices":  canonicalDevices(hw),
	}
	payload["specification"] = buildSpecification(hw)
	jsonResponse(w, payload)
}
// SpecLine represents a single line in the hardware specification table
// returned by handleGetConfig.
type SpecLine struct {
	Category string `json:"category"` // user-facing category label (Russian)
	Name     string `json:"name"`     // human-readable device description
	Quantity int    `json:"quantity"` // number of identical devices grouped into this line
}
// canonicalDevices rebuilds the canonical device list for hw, caches it on
// hw.Devices, and returns it. A nil hw yields nil.
func canonicalDevices(hw *models.HardwareConfig) []models.HardwareDevice {
	if hw == nil {
		return nil
	}
	devices := BuildHardwareDevices(hw)
	hw.Devices = devices
	return devices
}
// buildSpecification aggregates the canonical hardware device list into
// grouped specification lines with quantities. Devices are grouped by a
// per-kind key (CPU model, memory size/speed, storage type/capacity, PCIe
// class/part, PSU model/wattage). Category labels are user-facing Russian
// strings consumed by the UI. Group iteration order is map order, so line
// ordering within a category is not deterministic.
func buildSpecification(hw *models.HardwareConfig) []SpecLine {
	var spec []SpecLine
	if hw == nil {
		return spec
	}
	devices := canonicalDevices(hw)
	// CPUs - group by model; keep one representative device per model to
	// supply frequency/cores/TDP for the label.
	cpuGroups := make(map[string]int)
	cpuDetails := make(map[string]models.HardwareDevice)
	for _, cpu := range devices {
		if cpu.Kind != models.DeviceKindCPU {
			continue
		}
		cpuGroups[cpu.Model]++
		cpuDetails[cpu.Model] = cpu
	}
	for model, count := range cpuGroups {
		cpu := cpuDetails[model]
		// NOTE(review): the "Intel" vendor prefix is hard-coded — confirm the
		// label is acceptable for non-Intel CPUs.
		name := fmt.Sprintf("Intel %s (%.1fGHz %dC %dW)",
			model,
			float64(cpu.FrequencyMHz)/1000,
			cpu.Cores,
			intFromDetails(cpu.Details, "tdp_w"))
		spec = append(spec, SpecLine{Category: "Процессор", Name: name, Quantity: count})
	}
	// Memory - group by size, type and frequency (installed modules only).
	memGroups := make(map[string]int)
	for _, mem := range devices {
		if mem.Kind != models.DeviceKindMemory {
			continue
		}
		present := mem.Present != nil && *mem.Present
		// Skip empty slots (not present or 0 size).
		if !present || mem.SizeMB == 0 {
			continue
		}
		// Include the current speed in the key when it is known.
		key := ""
		currentSpeed := intFromDetails(mem.Details, "current_speed_mhz")
		if currentSpeed > 0 {
			key = fmt.Sprintf("%s %dGB %dMHz", mem.Type, mem.SizeMB/1024, currentSpeed)
		} else {
			key = fmt.Sprintf("%s %dGB", mem.Type, mem.SizeMB/1024)
		}
		memGroups[key]++
	}
	for key, count := range memGroups {
		spec = append(spec, SpecLine{Category: "Память", Name: key, Quantity: count})
	}
	// Storage - group by type and capacity; >=1000GB is rendered as TB.
	storGroups := make(map[string]int)
	for _, stor := range devices {
		if stor.Kind != models.DeviceKindStorage {
			continue
		}
		var key string
		if stor.SizeGB >= 1000 {
			key = fmt.Sprintf("%s %s %.2fTB", stor.Type, stor.Interface, float64(stor.SizeGB)/1000)
		} else {
			key = fmt.Sprintf("%s %s %dGB", stor.Type, stor.Interface, stor.SizeGB)
		}
		storGroups[key]++
	}
	for key, count := range storGroups {
		spec = append(spec, SpecLine{Category: "Накопитель", Name: key, Quantity: count})
	}
	// PCIe devices (incl. GPU and network) - group by manufacturer + class,
	// with the part number appended when it adds information.
	pcieGroups := make(map[string]int)
	pcieDetails := make(map[string]models.HardwareDevice)
	for _, pcie := range devices {
		if pcie.Kind != models.DeviceKindPCIe && pcie.Kind != models.DeviceKindGPU && pcie.Kind != models.DeviceKindNetwork {
			continue
		}
		// Build a unique key from manufacturer + device class/name.
		key := pcie.DeviceClass
		if pcie.Manufacturer != "" {
			key = pcie.Manufacturer + " " + pcie.DeviceClass
		}
		if pcie.PartNumber != "" && pcie.PartNumber != pcie.DeviceClass {
			key = key + " (" + pcie.PartNumber + ")"
		}
		pcieGroups[key]++
		pcieDetails[key] = pcie
	}
	for key, count := range pcieGroups {
		pcie := pcieDetails[key]
		category := "PCIe устройство"
		name := key
		// Pick the category from the device kind/class: GPU, network, or
		// storage controller; everything else stays a generic PCIe device.
		deviceClass := pcie.DeviceClass
		isGPU := pcie.Kind == models.DeviceKindGPU || isGPUDevice(deviceClass)
		isNetwork := deviceClass == "Network" || strings.Contains(deviceClass, "ConnectX")
		if isGPU {
			category = "Графический процессор"
		} else if isNetwork {
			category = "Сетевой адаптер"
		} else if deviceClass == "NVMe" || deviceClass == "RAID" || deviceClass == "SAS" || deviceClass == "SATA" || deviceClass == "Storage" {
			category = "Контроллер"
		}
		spec = append(spec, SpecLine{Category: category, Name: name, Quantity: count})
	}
	// Power supplies - group by model, falling back to wattage when the
	// model string is empty; unnamed zero-wattage PSUs are dropped.
	psuGroups := make(map[string]int)
	for _, psu := range devices {
		if psu.Kind != models.DeviceKindPSU {
			continue
		}
		key := psu.Model
		if key == "" && psu.WattageW > 0 {
			key = fmt.Sprintf("%dW", psu.WattageW)
		}
		if key != "" {
			psuGroups[key]++
		}
	}
	for key, count := range psuGroups {
		spec = append(spec, SpecLine{Category: "Блок питания", Name: key, Quantity: count})
	}
	return spec
}
// handleGetSerials collects serial numbers from FRU records and from the
// canonical hardware device list into one flat list for the UI. Entries
// whose serial is blank or a known placeholder ("N/A", "NONE", ...) are
// skipped via hasUsableSerial.
func (s *Server) handleGetSerials(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	if result == nil {
		jsonResponse(w, []interface{}{})
		return
	}
	// SerialEntry is the response row shape (local to this handler).
	type SerialEntry struct {
		Component    string `json:"component"`
		Location     string `json:"location,omitempty"`
		SerialNumber string `json:"serial_number"`
		Manufacturer string `json:"manufacturer,omitempty"`
		PartNumber   string `json:"part_number,omitempty"`
		Category     string `json:"category"`
	}
	var serials []SerialEntry
	// FRU records first; the component name falls back to the description.
	for _, fru := range result.FRU {
		if !hasUsableSerial(fru.SerialNumber) {
			continue
		}
		name := fru.ProductName
		if name == "" {
			name = fru.Description
		}
		serials = append(serials, SerialEntry{
			Component:    name,
			SerialNumber: strings.TrimSpace(fru.SerialNumber),
			Manufacturer: fru.Manufacturer,
			PartNumber:   fru.PartNumber,
			Category:     "FRU",
		})
	}
	// Then hardware devices, including location (location or slot).
	if result.Hardware != nil {
		for _, d := range canonicalDevices(result.Hardware) {
			if !hasUsableSerial(d.SerialNumber) {
				continue
			}
			serials = append(serials, SerialEntry{
				Component:    serialComponent(d),
				Location:     strings.TrimSpace(coalesce(d.Location, d.Slot)),
				SerialNumber: strings.TrimSpace(d.SerialNumber),
				Manufacturer: strings.TrimSpace(d.Manufacturer),
				PartNumber:   strings.TrimSpace(d.PartNumber),
				Category:     serialCategory(d.Kind),
			})
		}
	}
	jsonResponse(w, serials)
}
// normalizePCIeSerialComponentName picks the most specific label for a PCIe
// device: a distinct part number wins, generic class names defer to the part
// number, then the class name, then "PCIe device".
func normalizePCIeSerialComponentName(p models.PCIeDevice) string {
	class := strings.TrimSpace(p.DeviceClass)
	part := strings.TrimSpace(p.PartNumber)
	// A part number that differs from the class is the most specific label.
	if part != "" && !strings.EqualFold(part, class) {
		return part
	}
	genericClasses := map[string]bool{
		"display": true, "display controller": true, "3d controller": true,
		"vga": true, "network": true, "network controller": true,
		"pcie device": true, "other": true, "unknown": true, "": true,
	}
	if genericClasses[strings.ToLower(class)] && part != "" {
		return part
	}
	if class != "" {
		return class
	}
	if part != "" {
		return part
	}
	return "PCIe device"
}
// hasUsableSerial reports whether serial carries real information: it must
// be non-blank and not one of the well-known placeholder values.
func hasUsableSerial(serial string) bool {
	trimmed := strings.TrimSpace(serial)
	if trimmed == "" {
		return false
	}
	upper := strings.ToUpper(trimmed)
	for _, placeholder := range []string{"N/A", "NA", "NONE", "NULL", "UNKNOWN", "-"} {
		if upper == placeholder {
			return false
		}
	}
	return true
}
// hasUsableFirmwareVersion reports whether version is meaningful: non-blank
// and not a well-known placeholder value.
func hasUsableFirmwareVersion(version string) bool {
	trimmed := strings.TrimSpace(version)
	if trimmed == "" {
		return false
	}
	placeholders := map[string]bool{
		"N/A": true, "NA": true, "NONE": true, "NULL": true, "UNKNOWN": true, "-": true,
	}
	return !placeholders[strings.ToUpper(trimmed)]
}
// handleGetFirmware returns the deduplicated firmware inventory, or an
// empty array when no result/hardware data is loaded.
func (s *Server) handleGetFirmware(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	if result == nil || result.Hardware == nil {
		jsonResponse(w, []interface{}{})
		return
	}
	entries := buildFirmwareEntries(result.Hardware)
	jsonResponse(w, entries)
}
// parseErrorEntry is a single row in the parse-errors report produced by
// handleGetParseErrors.
type parseErrorEntry struct {
	Source   string `json:"source"`             // redfish | parser | file | collect_log
	Category string `json:"category"`           // fetch | partial_inventory | collect_log
	Severity string `json:"severity,omitempty"` // error | warning | info
	Path     string `json:"path,omitempty"`     // endpoint or file path the problem relates to
	Message  string `json:"message"`            // short human-readable description
	Detail   string `json:"detail,omitempty"`   // optional extended detail text
}
// handleGetParseErrors aggregates collection/parse problems from three
// sources — Redfish fetch errors recorded in RawPayloads, error-looking
// lines in the raw-export collect log, and inferred partial-inventory
// warnings — deduplicates them, and returns them sorted by severity.
func (s *Server) handleGetParseErrors(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	rawPkg := s.GetRawExport()
	items := make([]parseErrorEntry, 0)
	// Deduplicate on source|category|path|message.
	seen := make(map[string]struct{})
	add := func(e parseErrorEntry) {
		key := strings.TrimSpace(e.Source) + "|" + strings.TrimSpace(e.Category) + "|" + strings.TrimSpace(e.Path) + "|" + strings.TrimSpace(e.Message)
		if _, ok := seen[key]; ok {
			return
		}
		seen[key] = struct{}{}
		items = append(items, e)
	}
	// 1) Redfish endpoint fetch errors captured during collection.
	var fetchErrMap map[string]string
	if result != nil && result.RawPayloads != nil {
		fetchErrMap = extractRedfishFetchErrors(result.RawPayloads["redfish_fetch_errors"])
		for path, msg := range fetchErrMap {
			add(parseErrorEntry{
				Source:   "redfish",
				Category: "fetch",
				Severity: parseErrorSeverityFromMessage(msg),
				Path:     path,
				Message:  msg,
			})
		}
	}
	// 2) Error-looking lines from the collector log bundled with the export.
	if rawPkg != nil && len(rawPkg.Source.CollectLogs) > 0 {
		for _, line := range rawPkg.Source.CollectLogs {
			if !looksLikeErrorLogLine(line) {
				continue
			}
			add(parseErrorEntry{
				Source:   "collect_log",
				Category: "collect_log",
				Severity: parseErrorSeverityFromMessage(line),
				Message:  strings.TrimSpace(line),
			})
		}
	}
	// 3) Heuristic partial-inventory warnings: empty inventory sections that
	// correlate with a failed Redfish endpoint fetch.
	if result != nil && result.Protocol == "redfish" && result.Hardware != nil {
		hw := result.Hardware
		if len(hw.Memory) == 0 && hasFetchErrorSuffix(fetchErrMap, "/Memory") {
			add(parseErrorEntry{
				Source:   "parser",
				Category: "partial_inventory",
				Severity: "warning",
				Path:     "/redfish/v1/Systems/*/Memory",
				Message:  "Memory inventory is empty because Redfish Memory endpoint failed during collection",
			})
		}
		if len(hw.CPUs) == 0 && hasFetchErrorSuffix(fetchErrMap, "/Processors") {
			add(parseErrorEntry{
				Source:   "parser",
				Category: "partial_inventory",
				Severity: "warning",
				Path:     "/redfish/v1/Systems/*/Processors",
				Message:  "CPU inventory is empty because Redfish Processors endpoint failed during collection",
			})
		}
		if len(hw.Firmware) == 0 && (hasFetchErrorSuffix(fetchErrMap, "/Managers/1") || hasFetchErrorSuffix(fetchErrMap, "/UpdateService")) {
			add(parseErrorEntry{
				Source:   "parser",
				Category: "partial_inventory",
				Severity: "warning",
				Message:  "Firmware inventory may be incomplete due to Redfish Manager/UpdateService endpoint failures",
			})
		}
	}
	// Sort by severity (error > warning > info), then source, category, path.
	sort.Slice(items, func(i, j int) bool {
		if items[i].Severity != items[j].Severity {
			// error > warning > info
			return parseErrorSeverityRank(items[i].Severity) < parseErrorSeverityRank(items[j].Severity)
		}
		if items[i].Source != items[j].Source {
			return items[i].Source < items[j].Source
		}
		if items[i].Category != items[j].Category {
			return items[i].Category < items[j].Category
		}
		return items[i].Path < items[j].Path
	})
	jsonResponse(w, map[string]any{
		"items": items,
		"summary": map[string]any{
			"total": len(items),
			"source_kind": func() string {
				if rawPkg != nil {
					return rawPkg.Source.Kind
				}
				return ""
			}(),
		},
	})
}
// extractRedfishFetchErrors converts a raw "redfish_fetch_errors" payload
// into a path->message map. Two list shapes are accepted: a typed
// []map[string]interface{} (in-memory) and a generic []interface{} (as
// produced by JSON decoding); any other shape yields an empty map.
//
// Fix: the two branches duplicated identical extraction logic; the shared
// per-item handling is folded into one closure.
func extractRedfishFetchErrors(raw any) map[string]string {
	out := make(map[string]string)
	// Per-item extraction: entries without a usable "path" are dropped.
	// (Formatting with %v mirrors the original behavior, including rendering
	// a missing key as "<nil>".)
	add := func(item map[string]interface{}) {
		p := strings.TrimSpace(fmt.Sprintf("%v", item["path"]))
		if p == "" {
			return
		}
		out[p] = strings.TrimSpace(fmt.Sprintf("%v", item["error"]))
	}
	switch list := raw.(type) {
	case []map[string]interface{}:
		for _, item := range list {
			add(item)
		}
	case []interface{}:
		for _, itemAny := range list {
			if item, ok := itemAny.(map[string]interface{}); ok {
				add(item)
			}
		}
	}
	return out
}
// looksLikeErrorLogLine reports whether a collector log line looks like an
// error, by substring match against known markers (English and Russian).
func looksLikeErrorLogLine(line string) bool {
	normalized := strings.ToLower(strings.TrimSpace(line))
	if normalized == "" {
		return false
	}
	for _, marker := range []string{"ошибка", "error", "failed", "timeout", "deadline exceeded"} {
		if strings.Contains(normalized, marker) {
			return true
		}
	}
	return false
}
// hasFetchErrorSuffix reports whether any recorded fetch-error path ends
// with suffix. A nil or empty map yields false.
func hasFetchErrorSuffix(fetchErrs map[string]string, suffix string) bool {
	for path := range fetchErrs {
		if strings.HasSuffix(path, suffix) {
			return true
		}
	}
	return false
}
// parseErrorSeverityFromMessage classifies a fetch/log message into
// "error", "warning" or "info" using keyword and HTTP-status heuristics.
// Status prefixes require the exact "status NNN " shape (trailing space).
func parseErrorSeverityFromMessage(msg string) string {
	text := strings.ToLower(strings.TrimSpace(msg))
	if text == "" {
		return "info"
	}
	containsAny := func(subs ...string) bool {
		for _, sub := range subs {
			if strings.Contains(text, sub) {
				return true
			}
		}
		return false
	}
	statusIs := func(codes ...string) bool {
		for _, code := range codes {
			if strings.HasPrefix(text, "status "+code+" ") {
				return true
			}
		}
		return false
	}
	switch {
	case containsAny("timeout", "deadline exceeded"):
		return "error"
	case statusIs("500", "502", "503", "504"):
		return "error"
	case statusIs("401", "403"):
		return "error"
	case statusIs("404", "405", "410", "501"):
		// Missing/unimplemented endpoints are expected on many BMCs.
		return "info"
	case containsAny("ошибка", "error", "failed"):
		return "warning"
	default:
		return "info"
	}
}
// parseErrorSeverityRank maps a severity to a sort rank; lower sorts first:
// error(0) < warning(1) < everything else(2).
func parseErrorSeverityRank(severity string) int {
	ranks := map[string]int{"error": 0, "warning": 1}
	if rank, ok := ranks[strings.ToLower(strings.TrimSpace(severity))]; ok {
		return rank
	}
	return 2
}
// firmwareEntry is one deduplicated row of the firmware inventory:
// component type, model, and firmware version.
type firmwareEntry struct {
	Component string `json:"component"` // component type, e.g. "PSU", "BIOS", "NIC"
	Model     string `json:"model"`     // model identifier, or "-" when unknown
	Version   string `json:"version"`   // firmware version string
}
// buildFirmwareEntries flattens firmware information from the firmware
// inventory and from the canonical device list into deduplicated
// component/model/version rows. E.g. "PSU0 (AP-CR3000F12BY)" and
// "PSU1 (AP-CR3000F12BY)" with the same version collapse to one entry.
func buildFirmwareEntries(hw *models.HardwareConfig) []firmwareEntry {
	if hw == nil {
		return nil
	}
	// Deduplicate on component|model|version.
	seen := make(map[string]bool)
	var deduplicated []firmwareEntry
	appendEntry := func(component, model, version string) {
		component = strings.TrimSpace(component)
		model = strings.TrimSpace(model)
		version = strings.TrimSpace(version)
		// Drop entries without a component or with a placeholder version.
		if component == "" || !hasUsableFirmwareVersion(version) {
			return
		}
		if model == "" {
			model = "-"
		}
		key := component + "|" + model + "|" + version
		if seen[key] {
			return
		}
		seen[key] = true
		deduplicated = append(deduplicated, firmwareEntry{
			Component: component,
			Model:     model,
			Version:   version,
		})
	}
	// Firmware inventory: split each device name into component + model.
	for _, fw := range hw.Firmware {
		component, model := extractFirmwareComponentAndModel(fw.DeviceName)
		appendEntry(component, model, fw.Version)
	}
	// Device list: model preference is part number, then model, then slot.
	for _, d := range canonicalDevices(hw) {
		version := strings.TrimSpace(d.Firmware)
		if version == "" {
			continue
		}
		model := strings.TrimSpace(d.PartNumber)
		if model == "" {
			model = strings.TrimSpace(d.Model)
		}
		if model == "" {
			model = strings.TrimSpace(d.Slot)
		}
		appendEntry(serialCategory(d.Kind), model, version)
	}
	return deduplicated
}
// serialComponent picks the most descriptive label for a device in the
// serials table: model, then part number, then a normalized PCIe name,
// then device class, then the uppercased kind.
func serialComponent(d models.HardwareDevice) string {
	if model := strings.TrimSpace(d.Model); model != "" {
		return model
	}
	if part := strings.TrimSpace(d.PartNumber); part != "" {
		return part
	}
	if d.Kind == models.DeviceKindPCIe {
		// PartNumber is blank at this point, so the helper effectively
		// falls back to the device class.
		return normalizePCIeSerialComponentName(models.PCIeDevice{
			DeviceClass: d.DeviceClass,
			PartNumber:  d.PartNumber,
		})
	}
	if class := strings.TrimSpace(d.DeviceClass); class != "" {
		return class
	}
	return strings.ToUpper(d.Kind)
}
// serialCategory maps a device-kind constant to its display category;
// anything unrecognized is reported as "PCIe".
func serialCategory(kind string) string {
	categories := map[string]string{
		models.DeviceKindBoard:   "Board",
		models.DeviceKindCPU:     "CPU",
		models.DeviceKindMemory:  "Memory",
		models.DeviceKindStorage: "Storage",
		models.DeviceKindGPU:     "GPU",
		models.DeviceKindNetwork: "Network",
		models.DeviceKindPSU:     "PSU",
	}
	if category, ok := categories[kind]; ok {
		return category
	}
	return "PCIe"
}
// intFromDetails reads an integer out of a details map, tolerating both
// native int and JSON-decoded float64 values; anything else (including a
// missing key or nil map) yields 0.
func intFromDetails(details map[string]any, key string) int {
	switch n := details[key].(type) {
	case int:
		return n
	case float64:
		return int(n)
	}
	return 0
}
// coalesce returns the first value that is non-blank after trimming.
// Note: the original (untrimmed) value is returned, not the trimmed one.
func coalesce(values ...string) string {
	for _, candidate := range values {
		if strings.TrimSpace(candidate) != "" {
			return candidate
		}
	}
	return ""
}
// extractFirmwareComponentAndModel splits a vendor firmware device name
// into a component type and a model string, e.g.
// "PSU0 (AP-CR3000F12BY)" -> ("PSU", "AP-CR3000F12BY"). Names with no
// recognizable structure become (name, "-").
func extractFirmwareComponentAndModel(deviceName string) (component, model string) {
	// parenModel extracts the parenthesised tail, e.g. "(X)" -> "X".
	parenModel := func() (string, bool) {
		if idx := strings.Index(deviceName, "("); idx != -1 {
			return strings.Trim(deviceName[idx:], "()"), true
		}
		return "", false
	}
	// "PSUn (MODEL)" -> PSU with the parenthesised model.
	if strings.HasPrefix(deviceName, "PSU") {
		if m, ok := parenModel(); ok {
			return "PSU", m
		}
		return "PSU", "-"
	}
	// "CPUn Microcode" -> the microcode component has no separate model.
	if strings.HasPrefix(deviceName, "CPU") && strings.Contains(deviceName, "Microcode") {
		return "CPU Microcode", "-"
	}
	// "NIC #SLOT (MODEL)" -> NIC with the parenthesised model.
	if strings.HasPrefix(deviceName, "NIC ") {
		if m, ok := parenModel(); ok {
			return "NIC", m
		}
		return "NIC", "-"
	}
	// GPU/NVSwitch prefer the parenthesised model, else the name remainder.
	prefixedModel := func(prefix string) (string, bool) {
		if !strings.HasPrefix(deviceName, prefix+" ") {
			return "", false
		}
		if m, ok := parenModel(); ok {
			if trimmed := strings.TrimSpace(m); trimmed != "" {
				return trimmed, true
			}
		}
		rest := strings.TrimSpace(strings.TrimPrefix(deviceName, prefix+" "))
		if rest == "" {
			return "-", true
		}
		return rest, true
	}
	if m, ok := prefixedModel("GPU"); ok {
		return "GPU", m
	}
	if m, ok := prefixedModel("NVSwitch"); ok {
		return "NVSwitch", m
	}
	// Drive names keep everything after the type prefix as the model.
	for _, drive := range []string{"HDD", "SSD", "NVMe"} {
		if strings.HasPrefix(deviceName, drive+" ") {
			return drive, strings.TrimPrefix(deviceName, drive+" ")
		}
	}
	// Simple names ("BIOS", "ME", "BKC", ...) carry no separate model.
	return deviceName, "-"
}
// handleGetStatus reports whether a result is loaded, plus source metadata
// and summary counters when one is.
func (s *Server) handleGetStatus(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	if result == nil {
		jsonResponse(w, map[string]interface{}{"loaded": false})
		return
	}
	stats := map[string]int{
		"events":  len(result.Events),
		"sensors": len(result.Sensors),
		"fru":     len(result.FRU),
	}
	jsonResponse(w, map[string]interface{}{
		"loaded":          true,
		"filename":        result.Filename,
		"vendor":          s.GetDetectedVendor(),
		"source_type":     result.SourceType,
		"protocol":        result.Protocol,
		"target_host":     result.TargetHost,
		"source_timezone": result.SourceTimezone,
		"collected_at":    result.CollectedAt,
		"stats":           stats,
	})
}
// handleExportCSV streams the current result as a CSV attachment.
// NOTE(review): result may be nil here (no nil guard, unlike other
// handlers) — presumably exporter.New tolerates nil; confirm.
func (s *Server) handleExportCSV(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	w.Header().Set("Content-Type", "text/csv; charset=utf-8")
	disposition := fmt.Sprintf("attachment; filename=%q", exportFilename(result, "csv"))
	w.Header().Set("Content-Disposition", disposition)
	exporter.New(result).ExportCSV(w)
}
// handleExportJSON exports the current state. When a raw-export package is
// available it is wrapped in a zip bundle (full-fidelity re-import);
// otherwise a plain JSON export of the analysis result is served.
func (s *Server) handleExportJSON(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	rawPkg := s.GetRawExport()
	if rawPkg != nil {
		bundle, err := buildRawExportBundle(rawPkg, result, s.ClientVersionString())
		if err != nil {
			jsonError(w, "Failed to build raw export bundle: "+err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/zip")
		w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", exportFilename(result, "zip")))
		_, _ = w.Write(bundle)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", exportFilename(result, "json")))
	// Headers are already sent at this point; the encode error is dropped.
	_ = exporter.New(result).ExportJSON(w)
}
// handleExportReanimator converts the current result to Reanimator format
// and serves it as a pretty-printed JSON attachment. Requires loaded
// hardware data.
func (s *Server) handleExportReanimator(w http.ResponseWriter, r *http.Request) {
	result := s.GetResult()
	if result == nil || result.Hardware == nil {
		jsonError(w, "No hardware data available for export", http.StatusBadRequest)
		return
	}
	reanimatorData, err := exporter.ConvertToReanimator(result)
	if err != nil {
		// Missing mandatory export fields are the client's problem (400);
		// anything else is an internal error (500).
		statusCode := http.StatusInternalServerError
		if strings.Contains(err.Error(), "required for Reanimator export") {
			statusCode = http.StatusBadRequest
		}
		jsonError(w, fmt.Sprintf("Export failed: %v", err), statusCode)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", exportFilename(result, "reanimator.json")))
	enc := json.NewEncoder(w)
	enc.SetIndent("", " ")
	// Headers are already sent; an encode failure cannot be reported.
	_ = enc.Encode(reanimatorData)
}
// handleConvertReanimatorBatch accepts a multipart upload of log archives and
// JSON snapshots, stages the supported files into a temporary directory and
// starts an asynchronous batch-conversion job (see runConvertJob). On success
// it replies 202 Accepted with the job id and accepted/skipped file counts.
func (s *Server) handleConvertReanimatorBatch(w http.ResponseWriter, r *http.Request) {
// Cap the request body size; ParseMultipartForm surfaces overflow as an error.
r.Body = http.MaxBytesReader(w, r.Body, convertMultipartMaxBytes())
if err := r.ParseMultipartForm(uploadMultipartFormMemoryBytes()); err != nil {
if strings.Contains(strings.ToLower(err.Error()), "too large") {
msg := fmt.Sprintf(
"File too large. Increase LOGPILE_CONVERT_MAX_MB (current limit: %d MB)",
convertMultipartMaxBytes()>>20,
)
jsonError(w, msg, http.StatusBadRequest)
return
}
jsonError(w, "Failed to parse multipart form", http.StatusBadRequest)
return
}
form := r.MultipartForm
if form == nil {
jsonError(w, "No files provided", http.StatusBadRequest)
return
}
// Accept both the "files[]" and plain "files" multipart field names.
files := form.File["files[]"]
if len(files) == 0 {
files = form.File["files"]
}
if len(files) == 0 {
jsonError(w, "No files provided", http.StatusBadRequest)
return
}
// Stage inputs on disk; runConvertJob removes tempDir when it finishes.
tempDir, err := os.MkdirTemp("", "logpile-convert-input-*")
if err != nil {
jsonError(w, "Не удалось создать временную директорию", http.StatusInternalServerError)
return
}
inputFiles := make([]convertInputFile, 0, len(files))
var skipped int
for _, fh := range files {
if fh == nil {
continue
}
// Unsupported file types are counted but otherwise ignored.
if !isSupportedConvertFileName(fh.Filename) {
skipped++
continue
}
// NOTE(review): staging failures below (CreateTemp/Open/Copy) silently
// drop the file without incrementing any counter — presumably rare;
// confirm whether they should be reported as skipped/failed.
tmpFile, err := os.CreateTemp(tempDir, "input-*")
if err != nil {
continue
}
src, err := fh.Open()
if err != nil {
_ = tmpFile.Close()
_ = os.Remove(tmpFile.Name())
continue
}
_, err = io.Copy(tmpFile, src)
_ = src.Close()
_ = tmpFile.Close()
if err != nil {
_ = os.Remove(tmpFile.Name())
continue
}
mimeType := ""
if fh.Header != nil {
mimeType = fh.Header.Get("Content-Type")
}
inputFiles = append(inputFiles, convertInputFile{
Name: fh.Filename,
Path: tmpFile.Name(),
MIMEType: mimeType,
})
}
if len(inputFiles) == 0 {
_ = os.RemoveAll(tempDir)
jsonError(w, "Нет файлов поддерживаемого типа для конвертации", http.StatusBadRequest)
return
}
// Conversion reuses the collect-job machinery with synthetic request fields.
job := s.jobManager.CreateJob(CollectRequest{
Host: "convert.local",
Protocol: "convert",
Port: 0,
Username: "convert",
AuthType: "password",
TLSMode: "insecure",
})
s.markConvertJob(job.ID)
s.jobManager.AppendJobLog(job.ID, fmt.Sprintf("Запущена пакетная конвертация: %d файлов", len(inputFiles)))
if skipped > 0 {
s.jobManager.AppendJobLog(job.ID, fmt.Sprintf("Пропущено неподдерживаемых файлов: %d", skipped))
}
s.jobManager.UpdateJobStatus(job.ID, CollectStatusRunning, 0, "")
// The goroutine owns tempDir and the staged files from here on.
go s.runConvertJob(job.ID, tempDir, inputFiles, skipped, len(files))
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusAccepted)
_ = json.NewEncoder(w).Encode(map[string]any{
"job_id": job.ID,
"status": CollectStatusRunning,
"accepted": len(inputFiles),
"skipped": skipped,
"total_files": len(files),
})
}
// convertInputFile describes one uploaded file staged on disk for batch
// conversion.
type convertInputFile struct {
Name string // original client-side filename
Path string // temp-file path holding the uploaded contents
MIMEType string // Content-Type from the multipart header; may be ""
}
// runConvertJob executes a batch conversion in the background: each staged
// input file is analyzed, converted to the Reanimator format and written into
// a result ZIP together with a plain-text summary entry. Progress, per-file
// logs and the final artifact are published through the job manager. tempDir
// (holding the staged inputs) is always removed when the job finishes.
//
// Fix vs. previous version: progress is now advanced after EVERY file,
// including ones whose staged copy could not be read (that path previously
// skipped the progress update), and the duplicated update snippet is gone.
func (s *Server) runConvertJob(jobID, tempDir string, inputFiles []convertInputFile, skipped, total int) {
	defer os.RemoveAll(tempDir)
	resultFile, err := os.CreateTemp("", "logpile-convert-result-*.zip")
	if err != nil {
		s.jobManager.UpdateJobStatus(jobID, CollectStatusFailed, 100, "не удалось создать zip")
		return
	}
	resultPath := resultFile.Name()
	defer resultFile.Close()
	zw := zip.NewWriter(resultFile)
	failures := make([]string, 0)
	success := 0
	totalProcess := len(inputFiles)
	for i, in := range inputFiles {
		s.jobManager.AppendJobLog(jobID, fmt.Sprintf("Обработка %s", in.Name))
		if err := s.convertFileToZip(zw, in); err != nil {
			failures = append(failures, fmt.Sprintf("%s: %v", in.Name, err))
		} else {
			success++
		}
		// Advance progress after every file, failed or not.
		progress := ((i + 1) * 100) / totalProcess
		s.jobManager.UpdateJobStatus(jobID, CollectStatusRunning, progress, "")
	}
	if success == 0 {
		_ = zw.Close()
		_ = os.Remove(resultPath)
		s.jobManager.UpdateJobStatus(jobID, CollectStatusFailed, 100, "Не удалось конвертировать ни один файл")
		return
	}
	summaryLines := []string{fmt.Sprintf("Конвертировано %d из %d файлов", success, total)}
	if skipped > 0 {
		summaryLines = append(summaryLines, fmt.Sprintf("Пропущено неподдерживаемых: %d", skipped))
	}
	summaryLines = append(summaryLines, failures...)
	// Best-effort summary entry; a failure to create it is not fatal.
	if entry, err := zw.Create("convert-summary.txt"); err == nil {
		_, _ = io.WriteString(entry, strings.Join(summaryLines, "\n"))
	}
	if err := zw.Close(); err != nil {
		_ = os.Remove(resultPath)
		s.jobManager.UpdateJobStatus(jobID, CollectStatusFailed, 100, "Не удалось упаковать результаты")
		return
	}
	s.setConvertArtifact(jobID, ConvertArtifact{
		Path:    resultPath,
		Summary: summaryLines[0],
	})
	s.jobManager.UpdateJobStatus(jobID, CollectStatusSuccess, 100, "")
}

// convertFileToZip converts a single staged input file to Reanimator JSON and
// writes it as a "<name>.reanimator.json" entry of the result archive. It
// returns a descriptive error if any step fails.
func (s *Server) convertFileToZip(zw *zip.Writer, in convertInputFile) error {
	payload, err := os.ReadFile(in.Path)
	if err != nil {
		return err
	}
	result, _, _, err := s.analyzeUploadedFile(in.Name, in.MIMEType, payload)
	if err != nil {
		return err
	}
	if result == nil || result.Hardware == nil {
		return fmt.Errorf("no hardware data")
	}
	reanimatorData, err := exporter.ConvertToReanimator(result)
	if err != nil {
		return err
	}
	entry, err := zw.Create(sanitizeZipPath(in.Name) + ".reanimator.json")
	if err != nil {
		return err
	}
	encoder := json.NewEncoder(entry)
	encoder.SetIndent("", " ")
	return encoder.Encode(reanimatorData)
}
// handleConvertStatus reports the current status of a batch-convert job
// identified by the {id} path parameter.
func (s *Server) handleConvertStatus(w http.ResponseWriter, r *http.Request) {
	jobID := strings.TrimSpace(r.PathValue("id"))
	if !isValidCollectJobID(jobID) {
		jsonError(w, "Invalid convert job id", http.StatusBadRequest)
		return
	}
	if !s.isConvertJob(jobID) {
		jsonError(w, "Convert job not found", http.StatusNotFound)
		return
	}
	if job, found := s.jobManager.GetJob(jobID); found {
		jsonResponse(w, job.toStatusResponse())
		return
	}
	jsonError(w, "Convert job not found", http.StatusNotFound)
}
// handleConvertDownload serves the finished conversion ZIP for a convert job.
// The artifact is one-shot: it is deleted from disk and its registration is
// cleared after this handler runs, regardless of transfer outcome.
func (s *Server) handleConvertDownload(w http.ResponseWriter, r *http.Request) {
	jobID := strings.TrimSpace(r.PathValue("id"))
	if !isValidCollectJobID(jobID) {
		jsonError(w, "Invalid convert job id", http.StatusBadRequest)
		return
	}
	if !s.isConvertJob(jobID) {
		jsonError(w, "Convert job not found", http.StatusNotFound)
		return
	}
	job, ok := s.jobManager.GetJob(jobID)
	if !ok {
		jsonError(w, "Convert job not found", http.StatusNotFound)
		return
	}
	if job.Status != CollectStatusSuccess {
		jsonError(w, "Convert job is not finished yet", http.StatusConflict)
		return
	}
	artifact, ok := s.getConvertArtifact(jobID)
	if !ok || strings.TrimSpace(artifact.Path) == "" {
		jsonError(w, "Convert result not found", http.StatusNotFound)
		return
	}
	file, err := os.Open(artifact.Path)
	if err != nil {
		jsonError(w, "Convert result not found", http.StatusNotFound)
		return
	}
	// Single deferred cleanup: close the file BEFORE removing it. With two
	// separate defers (LIFO) the remove ran while the file was still open,
	// which fails on Windows and leaked the artifact on disk.
	defer func() {
		_ = file.Close()
		_ = os.Remove(artifact.Path)
		s.clearConvertArtifact(jobID)
	}()
	stat, err := file.Stat()
	if err != nil {
		jsonError(w, "Convert result not found", http.StatusNotFound)
		return
	}
	w.Header().Set("Content-Type", "application/zip")
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", "logpile-convert.zip"))
	if artifact.Summary != "" {
		w.Header().Set("X-Convert-Summary", artifact.Summary)
	}
	http.ServeContent(w, r, "logpile-convert.zip", stat.ModTime(), file)
}
// isSupportedConvertFileName reports whether an uploaded filename is
// something the batch converter can process: any *.json file, or an archive
// format recognized by the parser package.
func isSupportedConvertFileName(filename string) bool {
	name := strings.TrimSpace(filename)
	switch {
	case name == "":
		return false
	case strings.HasSuffix(strings.ToLower(name), ".json"):
		return true
	default:
		return parser.IsSupportedArchiveFilename(name)
	}
}
// sanitizeZipPath normalizes an uploaded filename into a safe, slash-separated
// zip entry path: the path is cleaned, a leading separator is stripped,
// parent-directory escapes collapse to the bare base name, and the result is
// never empty.
func sanitizeZipPath(filename string) string {
	cleaned := filepath.Clean(filename)
	if cleaned == "." || cleaned == "/" {
		cleaned = filepath.Base(filename)
	}
	cleaned = strings.TrimPrefix(cleaned, string(filepath.Separator))
	if strings.HasPrefix(cleaned, "..") {
		cleaned = filepath.Base(cleaned)
	}
	if cleaned = filepath.ToSlash(cleaned); cleaned == "" {
		cleaned = filepath.Base(filename)
	}
	return cleaned
}
// handleClear drops all in-memory analysis state (result, detected vendor,
// raw export) and deletes any convert artifacts still on disk.
func (s *Server) handleClear(w http.ResponseWriter, r *http.Request) {
	s.SetResult(nil)
	s.SetDetectedVendor("")
	s.SetRawExport(nil)
	for _, artifact := range s.clearAllConvertArtifacts() {
		if strings.TrimSpace(artifact.Path) == "" {
			continue
		}
		_ = os.Remove(artifact.Path)
	}
	jsonResponse(w, map[string]string{
		"status":  "ok",
		"message": "Data cleared",
	})
}
// handleShutdown acknowledges the request, then shuts the server down and
// exits the process from a background goroutine.
func (s *Server) handleShutdown(w http.ResponseWriter, r *http.Request) {
	jsonResponse(w, map[string]string{
		"status":  "ok",
		"message": "Server shutting down",
	})
	go func() {
		// Brief delay so the HTTP response above can reach the client.
		time.Sleep(100 * time.Millisecond)
		s.Shutdown()
		os.Exit(0)
	}()
}
// handleCollectStart validates a live-collection request, registers a job and
// launches collection in the background, replying 202 Accepted with the job
// descriptor. Unknown JSON fields are rejected.
func (s *Server) handleCollectStart(w http.ResponseWriter, r *http.Request) {
	decoder := json.NewDecoder(r.Body)
	decoder.DisallowUnknownFields()
	var req CollectRequest
	if err := decoder.Decode(&req); err != nil {
		jsonError(w, "Invalid JSON body", http.StatusBadRequest)
		return
	}
	if err := validateCollectRequest(req); err != nil {
		jsonError(w, err.Error(), http.StatusUnprocessableEntity)
		return
	}
	job := s.jobManager.CreateJob(req)
	s.jobManager.AppendJobLog(job.ID, "Клиент: "+s.ClientVersionString())
	s.startCollectionJob(job.ID, req)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusAccepted)
	_ = json.NewEncoder(w).Encode(job.toJobResponse("Collection job accepted"))
}
// pingHost checks basic TCP reachability of host:port. It launches `total`
// concurrent dial attempts, each with a 2-second timeout, and succeeds when
// at least `need` of them connect. On failure the second return value carries
// a human-readable diagnostic message.
func pingHost(host string, port int, total, need int) (bool, string) {
	// net.JoinHostPort brackets IPv6 literals correctly ("[::1]:623"),
	// which the previous fmt.Sprintf("%s:%d", ...) did not.
	addr := net.JoinHostPort(host, strconv.Itoa(port))
	var successes atomic.Int32
	done := make(chan struct{}, total)
	for i := 0; i < total; i++ {
		go func() {
			defer func() { done <- struct{}{} }()
			conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
			if err == nil {
				conn.Close()
				successes.Add(1)
			}
		}()
	}
	// Wait for every attempt; attempts run concurrently so the worst case
	// is roughly one timeout, not total timeouts.
	for i := 0; i < total; i++ {
		<-done
	}
	n := int(successes.Load())
	if n < need {
		return false, fmt.Sprintf("Хост недоступен: только %d из %d попыток подключения к %s прошли успешно (требуется минимум %d)", n, total, addr, need)
	}
	return true, ""
}
// handleCollectProbe checks BMC connectivity without starting a collection:
// it validates the request, verifies TCP reachability, and asks the
// protocol's Prober for host power state within a 20-second budget.
func (s *Server) handleCollectProbe(w http.ResponseWriter, r *http.Request) {
	var req CollectRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		jsonError(w, "Invalid JSON body", http.StatusBadRequest)
		return
	}
	if err := validateCollectRequest(req); err != nil {
		jsonError(w, err.Error(), http.StatusBadRequest)
		return
	}
	connector, registered := s.getCollector(req.Protocol)
	if !registered {
		jsonError(w, "Коннектор для протокола не зарегистрирован", http.StatusBadRequest)
		return
	}
	prober, supported := connector.(collector.Prober)
	if !supported {
		jsonError(w, "Проверка подключения для протокола не поддерживается", http.StatusBadRequest)
		return
	}
	// Cheap TCP-level reachability gate before the protocol-level probe.
	if up, msg := pingHost(req.Host, req.Port, 10, 3); !up {
		jsonError(w, msg, http.StatusBadRequest)
		return
	}
	ctx, cancel := context.WithTimeout(r.Context(), 20*time.Second)
	defer cancel()
	result, err := prober.Probe(ctx, toCollectorRequest(req))
	if err != nil {
		jsonError(w, "Проверка подключения не удалась: "+err.Error(), http.StatusBadRequest)
		return
	}
	resp := CollectProbeResponse{
		Protocol: req.Protocol,
		Message:  "Связь с BMC установлена",
	}
	if result != nil {
		resp.Reachable = result.Reachable
		resp.HostPowerState = strings.TrimSpace(result.HostPowerState)
		resp.HostPoweredOn = result.HostPoweredOn
		resp.PowerControlAvailable = result.PowerControlAvailable
		switch {
		case !result.HostPoweredOn && result.PowerControlAvailable:
			resp.Message = "Связь с BMC установлена, host выключен. Можно включить перед сбором."
		case !result.HostPoweredOn:
			resp.Message = "Связь с BMC установлена, host выключен."
		default:
			resp.Message = "Связь с BMC установлена, host включен."
		}
	}
	jsonResponse(w, resp)
}
// handleCollectStatus returns the status payload for the collection job named
// by the {id} path parameter.
func (s *Server) handleCollectStatus(w http.ResponseWriter, r *http.Request) {
	jobID := strings.TrimSpace(r.PathValue("id"))
	if !isValidCollectJobID(jobID) {
		jsonError(w, "Invalid collect job id", http.StatusBadRequest)
		return
	}
	if job, found := s.jobManager.GetJob(jobID); found {
		jsonResponse(w, job.toStatusResponse())
		return
	}
	jsonError(w, "Collect job not found", http.StatusNotFound)
}
// handleCollectCancel requests cancellation of a running collection job and
// returns the job's resulting status.
func (s *Server) handleCollectCancel(w http.ResponseWriter, r *http.Request) {
	jobID := strings.TrimSpace(r.PathValue("id"))
	if !isValidCollectJobID(jobID) {
		jsonError(w, "Invalid collect job id", http.StatusBadRequest)
		return
	}
	if job, found := s.jobManager.CancelJob(jobID); found {
		jsonResponse(w, job.toStatusResponse())
		return
	}
	jsonError(w, "Collect job not found", http.StatusNotFound)
}
// startCollectionJob attaches a cancel function to the job and launches the
// actual collection in a background goroutine. The goroutine resolves the
// protocol connector, forwards collector progress (status, ETA, debug
// telemetry, module scores, log lines) into the job manager, and on success
// publishes the result, detected vendor and raw export package on the server.
// Terminal job states and context cancellation are re-checked at every step
// so late updates never overwrite a cancelled/finished job.
func (s *Server) startCollectionJob(jobID string, req CollectRequest) {
ctx, cancel := context.WithCancel(context.Background())
// If the job already vanished or is terminal, do not start work at all.
if attached := s.jobManager.AttachJobCancel(jobID, cancel); !attached {
cancel()
return
}
go func() {
connector, ok := s.getCollector(req.Protocol)
if !ok {
s.jobManager.UpdateJobStatus(jobID, CollectStatusFailed, 100, "Коннектор для протокола не зарегистрирован")
s.jobManager.AppendJobLog(jobID, "Сбор завершен с ошибкой")
return
}
// emitProgress mirrors a collector.Progress update into the job manager,
// translating collector-side structs into server-side DTOs. It becomes a
// no-op once the job reaches a terminal state.
emitProgress := func(update collector.Progress) {
if job, ok := s.jobManager.GetJob(jobID); !ok || isTerminalCollectStatus(job.Status) {
return
}
// Default to "running" when the collector omits an explicit status.
status := update.Status
if status == "" {
status = CollectStatusRunning
}
s.jobManager.UpdateJobStatus(jobID, status, update.Progress, "")
if update.CurrentPhase != "" || update.ETASeconds > 0 {
s.jobManager.UpdateJobETA(jobID, update.CurrentPhase, update.ETASeconds)
}
if update.DebugInfo != nil {
debugInfo := &CollectDebugInfo{
AdaptiveThrottled: update.DebugInfo.AdaptiveThrottled,
SnapshotWorkers: update.DebugInfo.SnapshotWorkers,
PrefetchWorkers: update.DebugInfo.PrefetchWorkers,
PrefetchEnabled: update.DebugInfo.PrefetchEnabled,
}
if len(update.DebugInfo.PhaseTelemetry) > 0 {
debugInfo.PhaseTelemetry = make([]CollectPhaseTelemetry, 0, len(update.DebugInfo.PhaseTelemetry))
for _, item := range update.DebugInfo.PhaseTelemetry {
debugInfo.PhaseTelemetry = append(debugInfo.PhaseTelemetry, CollectPhaseTelemetry{
Phase: item.Phase,
Requests: item.Requests,
Errors: item.Errors,
ErrorRate: item.ErrorRate,
AvgMS: item.AvgMS,
P95MS: item.P95MS,
})
}
}
s.jobManager.UpdateJobDebugInfo(jobID, debugInfo)
}
if len(update.ActiveModules) > 0 || len(update.ModuleScores) > 0 {
activeModules := make([]CollectModuleStatus, 0, len(update.ActiveModules))
for _, module := range update.ActiveModules {
activeModules = append(activeModules, CollectModuleStatus{
Name: module.Name,
Score: module.Score,
Active: true,
})
}
moduleScores := make([]CollectModuleStatus, 0, len(update.ModuleScores))
for _, module := range update.ModuleScores {
moduleScores = append(moduleScores, CollectModuleStatus{
Name: module.Name,
Score: module.Score,
Active: module.Active,
Priority: module.Priority,
})
}
s.jobManager.UpdateJobModules(jobID, activeModules, moduleScores)
}
if update.Message != "" {
s.jobManager.AppendJobLog(jobID, update.Message)
}
}
result, err := connector.Collect(ctx, toCollectorRequest(req), emitProgress)
if err != nil {
// Cancellation is reported via CancelJob, not as a failure here.
if ctx.Err() != nil {
return
}
if job, ok := s.jobManager.GetJob(jobID); !ok || isTerminalCollectStatus(job.Status) {
return
}
s.jobManager.UpdateJobStatus(jobID, CollectStatusFailed, 100, err.Error())
s.jobManager.AppendJobLog(jobID, "Сбор завершен с ошибкой")
return
}
if job, ok := s.jobManager.GetJob(jobID); !ok || isTerminalCollectStatus(job.Status) {
return
}
applyCollectSourceMetadata(result, req)
s.jobManager.UpdateJobStatus(jobID, CollectStatusSuccess, 100, "")
s.jobManager.AppendJobLog(jobID, "Сбор завершен")
s.SetResult(result)
s.SetDetectedVendor(req.Protocol)
// Job logs are captured into the raw export package for later bundling.
if job, ok := s.jobManager.GetJob(jobID); ok {
s.SetRawExport(newRawExportFromLiveCollect(result, req, job.Logs))
}
}()
}
// validateCollectRequest checks every required field of a collection request
// and returns a descriptive error for the first violation found, or nil when
// the request is well-formed.
func validateCollectRequest(req CollectRequest) error {
	if strings.TrimSpace(req.Host) == "" {
		return fmt.Errorf("field 'host' is required")
	}
	if req.Protocol != "redfish" && req.Protocol != "ipmi" {
		return fmt.Errorf("field 'protocol' must be one of: redfish, ipmi")
	}
	if req.Port < 1 || req.Port > 65535 {
		return fmt.Errorf("field 'port' must be in range 1..65535")
	}
	if strings.TrimSpace(req.Username) == "" {
		return fmt.Errorf("field 'username' is required")
	}
	switch req.AuthType {
	case "password":
		if strings.TrimSpace(req.Password) == "" {
			return fmt.Errorf("field 'password' is required when auth_type=password")
		}
	case "token":
		if strings.TrimSpace(req.Token) == "" {
			return fmt.Errorf("field 'token' is required when auth_type=token")
		}
	default:
		return fmt.Errorf("field 'auth_type' must be one of: password, token")
	}
	if req.TLSMode != "strict" && req.TLSMode != "insecure" {
		return fmt.Errorf("field 'tls_mode' must be one of: strict, insecure")
	}
	return nil
}
// collectJobIDPattern matches identifiers issued by generateJobID: the
// literal prefix "job_" followed by at least eight characters from
// [a-zA-Z0-9_-] (16 hex digits normally, a decimal timestamp as fallback).
var collectJobIDPattern = regexp.MustCompile(`^job_[a-zA-Z0-9_-]{8,}$`)

// isValidCollectJobID reports whether id is a well-formed collect/convert job
// identifier; used to sanitize the {id} path parameter before any lookup.
func isValidCollectJobID(id string) bool {
return collectJobIDPattern.MatchString(id)
}
// generateJobID returns a job identifier of the form "job_<16 hex chars>"
// backed by 8 bytes of crypto randomness. If the system RNG is unavailable,
// a nanosecond-timestamp suffix is used instead; both forms satisfy
// collectJobIDPattern.
func generateJobID() string {
	var raw [8]byte
	if _, err := rand.Read(raw[:]); err != nil {
		return fmt.Sprintf("job_%d", time.Now().UnixNano())
	}
	return fmt.Sprintf("job_%x", raw[:])
}
// applyArchiveSourceMetadata marks the result as originating from an uploaded
// archive: live-collection fields are cleared and CollectedAt is inferred
// from the archive contents when unset. A nil result is a no-op.
func applyArchiveSourceMetadata(result *models.AnalysisResult) {
	if result == nil {
		return
	}
	result.SourceType = models.SourceTypeArchive
	result.Protocol = ""
	result.TargetHost = ""
	if !result.CollectedAt.IsZero() {
		return
	}
	result.CollectedAt = inferArchiveCollectedAt(result)
}
// inferArchiveCollectedAt derives a collection timestamp for an archive-based
// result. Preference order: newest "reliable" event timestamp, newest event
// of any kind, a date parsed from the filename, then time.Now. Event
// timestamps before year 2000 are discarded as broken-RTC noise.
func inferArchiveCollectedAt(result *models.AnalysisResult) time.Time {
	if result == nil {
		return time.Now().UTC()
	}
	var newestReliable, newestAny time.Time
	for _, event := range result.Events {
		ts := event.Timestamp
		if ts.IsZero() || ts.Year() < 2000 {
			continue
		}
		if newestAny.IsZero() || ts.After(newestAny) {
			newestAny = ts
		}
		if isReliableCollectedAtEvent(event) && (newestReliable.IsZero() || ts.After(newestReliable)) {
			newestReliable = ts
		}
	}
	switch {
	case !newestReliable.IsZero():
		return newestReliable.UTC()
	case !newestAny.IsZero():
		return newestAny.UTC()
	}
	if ts, ok := inferCollectedAtFromFilename(result.Filename); ok {
		return ts.UTC()
	}
	return time.Now().UTC()
}
// isReliableCollectedAtEvent reports whether an event's timestamp reflects a
// real log time. Synthetic fan/memory status states derived from
// component.log are created at parse time and must not drive CollectedAt
// inference.
func isReliableCollectedAtEvent(event models.Event) bool {
	norm := func(v string) string { return strings.ToLower(strings.TrimSpace(v)) }
	src := norm(event.Source)
	stype := norm(event.SensorType)
	switch norm(event.EventType) {
	case "fan status":
		if src == "fan" || stype == "fan" {
			return false
		}
	case "memory status":
		if src == "memory" || stype == "memory" {
			return false
		}
	}
	return true
}
// Filename timestamp patterns used by inferCollectedAtFromFilename:
//   - filenameDateTimeRegex matches "YYYYMMDD-HHMM" or "YYYYMMDD_HHMM[SS]"
//     (the seconds group is optional).
//   - filenameDateRegex matches a plain "YYYY-MM-DD" date.
var (
filenameDateTimeRegex = regexp.MustCompile(`(?i)(\d{8})[-_](\d{4})(\d{2})?`)
filenameDateRegex = regexp.MustCompile(`(?i)(\d{4})-(\d{2})-(\d{2})`)
)
// inferCollectedAtFromFilename attempts to recover a collection timestamp
// from an archive filename via the package-level date patterns. The boolean
// result reports whether a timestamp was found. Dates without a time-of-day
// component resolve to midnight.
func inferCollectedAtFromFilename(name string) (time.Time, bool) {
	base := strings.TrimSpace(filepath.Base(name))
	if base == "" {
		return time.Time{}, false
	}
	if m := filenameDateTimeRegex.FindStringSubmatch(base); len(m) == 4 {
		clock := m[2]
		if strings.TrimSpace(m[3]) == "" {
			// No seconds captured — assume ":00".
			clock += "00"
		} else {
			clock += m[3]
		}
		if ts, err := parser.ParseInDefaultArchiveLocation("20060102 150405", m[1]+" "+clock); err == nil {
			return ts, true
		}
	}
	if m := filenameDateRegex.FindStringSubmatch(base); len(m) == 4 {
		stamp := m[1] + "-" + m[2] + "-" + m[3] + " 00:00:00"
		if ts, err := parser.ParseInDefaultArchiveLocation("2006-01-02 15:04:05", stamp); err == nil {
			return ts, true
		}
	}
	return time.Time{}, false
}
// inferRawExportCollectedAt picks the best-known collection timestamp for a
// raw export package. Preference order: the result's own CollectedAt, the
// package hint, the finish time parsed from the collect logs, the package
// export time, and finally time.Now.
func inferRawExportCollectedAt(result *models.AnalysisResult, pkg *RawExportPackage) time.Time {
	if result != nil && !result.CollectedAt.IsZero() {
		return result.CollectedAt.UTC()
	}
	if pkg == nil {
		return time.Now().UTC()
	}
	if !pkg.CollectedAtHint.IsZero() {
		return pkg.CollectedAtHint.UTC()
	}
	if _, finishedAt, ok := collectLogTimeBounds(pkg.Source.CollectLogs); ok {
		return finishedAt.UTC()
	}
	if !pkg.ExportedAt.IsZero() {
		return pkg.ExportedAt.UTC()
	}
	return time.Now().UTC()
}
// applyCollectSourceMetadata stamps a freshly collected result with live-API
// provenance: source type, protocol, target host, a source timezone pulled
// from raw payloads (when present), a CollectedAt fallback and a synthetic
// "protocol://host" filename. A nil result is a no-op.
func applyCollectSourceMetadata(result *models.AnalysisResult, req CollectRequest) {
	if result == nil {
		return
	}
	result.SourceType = models.SourceTypeAPI
	result.Protocol = req.Protocol
	result.TargetHost = req.Host
	if result.RawPayloads != nil && strings.TrimSpace(result.SourceTimezone) == "" {
		if tz, ok := result.RawPayloads["source_timezone"].(string); ok {
			result.SourceTimezone = strings.TrimSpace(tz)
		}
	}
	if result.CollectedAt.IsZero() {
		result.CollectedAt = time.Now().UTC()
	}
	if strings.TrimSpace(result.Filename) == "" {
		result.Filename = fmt.Sprintf("%s://%s", req.Protocol, req.Host)
	}
}
// toCollectorRequest translates the HTTP-layer CollectRequest into the
// collector package's request struct, copying every field one-to-one.
func toCollectorRequest(req CollectRequest) collector.Request {
	var out collector.Request
	out.Host = req.Host
	out.Protocol = req.Protocol
	out.Port = req.Port
	out.Username = req.Username
	out.AuthType = req.AuthType
	out.Password = req.Password
	out.Token = req.Token
	out.TLSMode = req.TLSMode
	out.PowerOnIfHostOff = req.PowerOnIfHostOff
	out.StopHostAfterCollect = req.StopHostAfterCollect
	out.DebugPayloads = req.DebugPayloads
	return out
}
// looksLikeJSONSnapshot reports whether an uploaded file is probably a JSON
// snapshot: either the extension is .json (any case) or the first non-space
// byte of the payload opens a JSON object or array.
func looksLikeJSONSnapshot(filename string, payload []byte) bool {
	if strings.ToLower(filepath.Ext(filename)) == ".json" {
		return true
	}
	body := bytes.TrimSpace(payload)
	if len(body) == 0 {
		return false
	}
	return body[0] == '{' || body[0] == '['
}
// parseUploadedSnapshot decodes an uploaded JSON snapshot into an
// AnalysisResult, rejecting payloads that carry no recognizable data, and
// backfills source-type, collection-time and filename defaults.
func parseUploadedSnapshot(payload []byte) (*models.AnalysisResult, error) {
	var result models.AnalysisResult
	if err := json.Unmarshal(payload, &result); err != nil {
		return nil, err
	}
	empty := result.Hardware == nil &&
		len(result.Events) == 0 &&
		len(result.Sensors) == 0 &&
		len(result.FRU) == 0
	if empty {
		return nil, fmt.Errorf("unsupported snapshot format")
	}
	if strings.TrimSpace(result.SourceType) == "" {
		// A recorded protocol implies a live-API origin.
		result.SourceType = models.SourceTypeArchive
		if result.Protocol != "" {
			result.SourceType = models.SourceTypeAPI
		}
	}
	if result.CollectedAt.IsZero() {
		result.CollectedAt = time.Now().UTC()
	}
	if strings.TrimSpace(result.Filename) == "" {
		result.Filename = "uploaded_snapshot.json"
	}
	return &result, nil
}
// getCollector returns the connector registered for the given protocol,
// lazily creating the default registry on first use.
// NOTE(review): the lazy write to s.collectors is unsynchronized; presumably
// the registry is initialized before handlers run concurrently — confirm, or
// initialize it in the server constructor.
func (s *Server) getCollector(protocol string) (collector.Connector, bool) {
if s.collectors == nil {
s.collectors = collector.NewDefaultRegistry()
}
return s.collectors.Get(protocol)
}
// jsonResponse writes data as a JSON body with the appropriate content type.
// Encode errors are ignored: headers are already on the wire by then.
func jsonResponse(w http.ResponseWriter, data interface{}) {
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(data)
}
// jsonError writes a JSON error object {"error": message} with the given
// HTTP status code.
func jsonError(w http.ResponseWriter, message string, code int) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	payload := map[string]string{"error": message}
	_ = json.NewEncoder(w).Encode(payload)
}
// isGPUDevice reports whether a PCI device class / model string denotes a
// GPU: either a standard PCI display class name (exact match) or a
// case-insensitive substring match against well-known accelerator models.
func isGPUDevice(deviceClass string) bool {
	switch deviceClass {
	case "VGA", "3D Controller", "Display":
		return true
	}
	// Known GPU model patterns, pre-uppercased for the comparison below.
	patterns := []string{
		"L40", "A100", "A10", "A16", "A30", "H100", "H200", "V100",
		"RTX", "GTX", "QUADRO", "TESLA",
		"INSTINCT", "RADEON",
		"AST2500", "AST2600", // ASPEED BMC VGA
	}
	haystack := strings.ToUpper(deviceClass)
	for _, p := range patterns {
		if strings.Contains(haystack, p) {
			return true
		}
	}
	return false
}
// exportFilename builds a download filename of the form
// "YYYY-MM-DD (MODEL) - SERIAL.ext" from the result's collection date and
// board info, substituting placeholders when data is missing. The extension
// defaults to "json".
func exportFilename(result *models.AnalysisResult, ext string) string {
	date := time.Now().UTC().Format("2006-01-02")
	model, sn := "SERVER MODEL", "SERVER SN"
	if result != nil {
		if !result.CollectedAt.IsZero() {
			date = result.CollectedAt.UTC().Format("2006-01-02")
		}
		if hw := result.Hardware; hw != nil {
			if v := strings.TrimSpace(hw.BoardInfo.ProductName); v != "" {
				model = v
			}
			if v := strings.TrimSpace(hw.BoardInfo.SerialNumber); v != "" {
				sn = v
			}
		}
	}
	if ext = strings.TrimPrefix(strings.TrimSpace(ext), "."); ext == "" {
		ext = "json"
	}
	return fmt.Sprintf("%s (%s) - %s.%s", date, sanitizeFilenamePart(model), sanitizeFilenamePart(sn), ext)
}
// sanitizeFilenamePart makes a value safe for use inside a download filename:
// filesystem-reserved characters become underscores, line breaks and tabs
// become spaces, runs of whitespace collapse to a single space, and an empty
// result is replaced by "-".
func sanitizeFilenamePart(v string) string {
	if v = strings.TrimSpace(v); v == "" {
		return "-"
	}
	r := strings.NewReplacer(
		"/", "_", "\\", "_", ":", "_", "*", "_", "?", "_",
		"\"", "_", "<", "_", ">", "_", "|", "_",
		"\n", " ", "\r", " ", "\t", " ",
	)
	fields := strings.Fields(r.Replace(v))
	if len(fields) == 0 {
		return "-"
	}
	return strings.Join(fields, " ")
}