Redfish snapshot/export overhaul and portable release build

This commit is contained in:
Mikhail Chusavitin
2026-02-04 19:43:51 +03:00
parent c89ee0118f
commit bb48b03677
11 changed files with 1357 additions and 110 deletions

View File

@@ -6,7 +6,7 @@ COMMIT=$(shell git rev-parse --short HEAD 2>/dev/null || echo "none")
LDFLAGS=-ldflags "-X main.version=$(VERSION) -X main.commit=$(COMMIT)"
build:
go build $(LDFLAGS) -o bin/$(BINARY_NAME) ./cmd/logpile
CGO_ENABLED=0 go build $(LDFLAGS) -o bin/$(BINARY_NAME) ./cmd/logpile
run: build
./bin/$(BINARY_NAME)
@@ -19,11 +19,11 @@ test:
# Cross-platform builds
build-all: clean
GOOS=linux GOARCH=amd64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-linux-amd64 ./cmd/logpile
GOOS=linux GOARCH=arm64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-linux-arm64 ./cmd/logpile
GOOS=darwin GOARCH=amd64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-darwin-amd64 ./cmd/logpile
GOOS=darwin GOARCH=arm64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-darwin-arm64 ./cmd/logpile
GOOS=windows GOARCH=amd64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-windows-amd64.exe ./cmd/logpile
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-linux-amd64 ./cmd/logpile
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-linux-arm64 ./cmd/logpile
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-darwin-amd64 ./cmd/logpile
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-darwin-arm64 ./cmd/logpile
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-windows-amd64.exe ./cmd/logpile
dev:
go run ./cmd/logpile

View File

@@ -1,6 +1,7 @@
package main
import (
"bufio"
"flag"
"fmt"
"log"
@@ -21,6 +22,7 @@ var (
)
func main() {
holdOnCrash := flag.Bool("hold-on-crash", runtime.GOOS == "windows", "Wait for Enter on crash to keep console open")
port := flag.Int("port", 8082, "HTTP server port")
file := flag.String("file", "", "Pre-load archive file")
showVersion := flag.Bool("version", false, "Show version")
@@ -54,11 +56,22 @@ func main() {
}()
}
if err := srv.Run(); err != nil {
log.Fatalf("Server error: %v", err)
if err := runServer(srv); err != nil {
log.Printf("FATAL: %v", err)
maybeWaitForCrashInput(*holdOnCrash)
os.Exit(1)
}
}
// runServer invokes srv.Run and converts any panic raised on this goroutine
// during Run into an ordinary error, so main can log it, optionally hold the
// console open, and exit with a non-zero status instead of crashing.
//
// NOTE(review): recover only catches panics on the goroutine that calls Run;
// panics in goroutines the server spawns internally are not converted.
func runServer(srv *server.Server) (runErr error) {
	defer func() {
		if r := recover(); r != nil {
			runErr = fmt.Errorf("panic: %v", r)
		}
	}()
	return srv.Run()
}
// openBrowser opens the default browser with the given URL
func openBrowser(url string) {
var cmd *exec.Cmd
@@ -76,3 +89,23 @@ func openBrowser(url string) {
log.Printf("Failed to open browser: %v", err)
}
}
// maybeWaitForCrashInput blocks until the user presses Enter, so a crash
// message stays visible when the binary was launched by double-click (e.g.
// on Windows, where the console window closes on exit). It is a no-op when
// the feature is disabled or when stdin/stderr are not attached to a
// terminal, so scripted and CI runs are never stalled.
func maybeWaitForCrashInput(enabled bool) {
	if !enabled {
		return
	}
	if !isInteractiveConsole() {
		return
	}
	fmt.Fprintln(os.Stderr, "\nApplication crashed. Press Enter to close...")
	// Best effort: a read error here just means we exit immediately.
	_, _ = bufio.NewReader(os.Stdin).ReadString('\n')
}
// isInteractiveConsole reports whether both stdin and stderr are character
// devices (i.e. attached to a terminal), which is when prompting the user
// for input makes sense. Any Stat failure is treated as non-interactive.
func isInteractiveConsole() bool {
	for _, f := range []*os.File{os.Stdin, os.Stderr} {
		info, err := f.Stat()
		if err != nil {
			return false
		}
		if info.Mode()&os.ModeCharDevice == 0 {
			return false
		}
	}
	return true
}

24
docs/releases/v1.2.1.md Normal file
View File

@@ -0,0 +1,24 @@
# LOGPile v1.2.1
Release date: 2026-02-04
## Highlights
- Redfish collection significantly expanded: dynamic Systems/Chassis/Managers discovery, PSU/GPU/PCIe inventory mapping, improved NVMe and storage parsing (including SimpleStorage and chassis drive fallbacks).
- Added Redfish snapshot support with broad raw Redfish tree capture for future offline analysis.
- Upload flow now accepts JSON snapshots in addition to archives, enabling offline re-open of live Redfish collections.
- Export UX improved:
- Export filenames now follow `YYYY-MM-DD (SERVER MODEL) - SERVER SN.ext` (extension matching the chosen export format: csv, json, or txt).
- TXT export now outputs tabular sections matching web UI views (no raw JSON dump).
- Live API UI improvements: parser/file badges for Redfish sessions and clearer upload format messaging.
- Redfish progress logs are more informative (snapshot stage and active top-level roots).
- Build/distribution hardening:
- Cross-platform builds via `make build-all`.
- `CGO_ENABLED=0` for more portable single-binary distribution.
- Crash hold option to keep console open for debugging (`-hold-on-crash`, enabled by default on Windows).
## Artifacts
- `bin/logpile-linux-amd64`
- `bin/logpile-linux-arm64`
- `bin/logpile-darwin-amd64`
- `bin/logpile-darwin-arm64`
- `bin/logpile-windows-amd64.exe`

File diff suppressed because it is too large Load Diff

View File

@@ -70,11 +70,59 @@ func TestRedfishConnectorCollect(t *testing.T) {
"CapacityGB": 960,
"SerialNumber": "SN123",
})
register("/redfish/v1/Systems/1/PCIeDevices", map[string]interface{}{
"Members": []map[string]string{
{"@odata.id": "/redfish/v1/Systems/1/PCIeDevices/GPU1"},
},
})
register("/redfish/v1/Systems/1/PCIeDevices/GPU1", map[string]interface{}{
"Id": "GPU1",
"Name": "NVIDIA H100",
"Model": "NVIDIA H100 PCIe",
"Manufacturer": "NVIDIA",
"SerialNumber": "GPU-SN-001",
"PCIeFunctions": map[string]interface{}{
"@odata.id": "/redfish/v1/Systems/1/PCIeDevices/GPU1/PCIeFunctions",
},
})
register("/redfish/v1/Systems/1/PCIeDevices/GPU1/PCIeFunctions", map[string]interface{}{
"Members": []map[string]string{
{"@odata.id": "/redfish/v1/Systems/1/PCIeFunctions/GPU1F0"},
},
})
register("/redfish/v1/Systems/1/PCIeFunctions/GPU1F0", map[string]interface{}{
"FunctionId": "0000:65:00.0",
"VendorId": "0x10DE",
"DeviceId": "0x2331",
"ClassCode": "0x030200",
"CurrentLinkWidth": 16,
"CurrentLinkSpeed": "16.0 GT/s",
"MaxLinkWidth": 16,
"MaxLinkSpeed": "16.0 GT/s",
})
register("/redfish/v1/Chassis/1/NetworkAdapters", map[string]interface{}{
"Members": []map[string]string{
{"@odata.id": "/redfish/v1/Chassis/1/NetworkAdapters/1"},
},
})
register("/redfish/v1/Chassis/1/Power", map[string]interface{}{
"PowerSupplies": []map[string]interface{}{
{
"MemberId": "PSU1",
"Name": "PSU Slot 1",
"Model": "PWS-2K01A-1R",
"Manufacturer": "Delta",
"PowerCapacityWatts": 2000,
"PowerInputWatts": 1600,
"LastPowerOutputWatts": 1200,
"LineInputVoltage": 230,
"Status": map[string]interface{}{
"Health": "OK",
"State": "Enabled",
},
},
},
})
register("/redfish/v1/Chassis/1/NetworkAdapters/1", map[string]interface{}{
"Name": "Mellanox",
"Model": "ConnectX-6",
@@ -122,7 +170,33 @@ func TestRedfishConnectorCollect(t *testing.T) {
if len(result.Hardware.NetworkAdapters) != 1 {
t.Fatalf("expected one nic, got %d", len(result.Hardware.NetworkAdapters))
}
if len(result.Hardware.GPUs) != 1 {
t.Fatalf("expected one gpu, got %d", len(result.Hardware.GPUs))
}
if result.Hardware.GPUs[0].BDF != "0000:65:00.0" {
t.Fatalf("unexpected gpu BDF: %q", result.Hardware.GPUs[0].BDF)
}
if len(result.Hardware.PCIeDevices) != 1 {
t.Fatalf("expected one pcie device, got %d", len(result.Hardware.PCIeDevices))
}
if len(result.Hardware.PowerSupply) != 1 {
t.Fatalf("expected one psu, got %d", len(result.Hardware.PowerSupply))
}
if result.Hardware.PowerSupply[0].WattageW != 2000 {
t.Fatalf("unexpected psu wattage: %d", result.Hardware.PowerSupply[0].WattageW)
}
if len(result.Hardware.Firmware) == 0 {
t.Fatalf("expected firmware entries")
}
if result.RawPayloads == nil {
t.Fatalf("expected raw payloads")
}
treeAny, ok := result.RawPayloads["redfish_tree"]
if !ok {
t.Fatalf("expected redfish_tree in raw payloads")
}
tree, ok := treeAny.(map[string]interface{})
if !ok || len(tree) == 0 {
t.Fatalf("expected non-empty redfish_tree, got %#v", treeAny)
}
}

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io"
"text/tabwriter"
"git.mchus.pro/mchus/logpile/internal/models"
)
@@ -125,13 +126,16 @@ func (e *Exporter) ExportTXT(w io.Writer) error {
return nil
}
fmt.Fprintf(w, "File: %s\n", e.result.Filename)
fmt.Fprintf(w, "File:\t%s\n", e.result.Filename)
fmt.Fprintf(w, "Source:\t%s\n", e.result.SourceType)
fmt.Fprintf(w, "Protocol:\t%s\n", e.result.Protocol)
fmt.Fprintf(w, "Target:\t%s\n", e.result.TargetHost)
fmt.Fprintln(w)
// Server model and serial number
if e.result.Hardware != nil && e.result.Hardware.BoardInfo.ProductName != "" {
fmt.Fprintln(w)
fmt.Fprintf(w, "Server Model: %s\n", e.result.Hardware.BoardInfo.ProductName)
fmt.Fprintf(w, "Serial Number: %s\n", e.result.Hardware.BoardInfo.SerialNumber)
fmt.Fprintf(w, "Server Model:\t%s\n", e.result.Hardware.BoardInfo.ProductName)
fmt.Fprintf(w, "Serial Number:\t%s\n", e.result.Hardware.BoardInfo.SerialNumber)
}
fmt.Fprintln(w)
@@ -139,118 +143,172 @@ func (e *Exporter) ExportTXT(w io.Writer) error {
if e.result.Hardware != nil {
hw := e.result.Hardware
// Firmware
// Firmware tab
if len(hw.Firmware) > 0 {
fmt.Fprintln(w, "FIRMWARE VERSIONS")
fmt.Fprintln(w, "-----------------")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Component\tVersion\tBuild Time")
for _, fw := range hw.Firmware {
fmt.Fprintf(w, " %s: %s\n", fw.DeviceName, fw.Version)
fmt.Fprintf(tw, "%s\t%s\t%s\n", fw.DeviceName, fw.Version, fw.BuildTime)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// CPUs
// CPU tab
if len(hw.CPUs) > 0 {
fmt.Fprintln(w, "PROCESSORS")
fmt.Fprintln(w, "----------")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Socket\tModel\tCores\tThreads\tFreq MHz\tTurbo MHz\tTDP W\tPPIN/SN")
for _, cpu := range hw.CPUs {
fmt.Fprintf(w, " Socket %d: %s\n", cpu.Socket, cpu.Model)
fmt.Fprintf(w, " Cores: %d, Threads: %d, Freq: %d MHz (Turbo: %d MHz)\n",
cpu.Cores, cpu.Threads, cpu.FrequencyMHz, cpu.MaxFreqMHz)
fmt.Fprintf(w, " TDP: %dW, L3 Cache: %d KB\n", cpu.TDP, cpu.L3CacheKB)
id := cpu.SerialNumber
if id == "" {
id = cpu.PPIN
}
fmt.Fprintf(tw, "CPU%d\t%s\t%d\t%d\t%d\t%d\t%d\t%s\n",
cpu.Socket, cpu.Model, cpu.Cores, cpu.Threads, cpu.FrequencyMHz, cpu.MaxFreqMHz, cpu.TDP, id)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// Memory
// Memory tab
if len(hw.Memory) > 0 {
fmt.Fprintln(w, "MEMORY")
fmt.Fprintln(w, "------")
totalMB := 0
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Slot\tPresent\tSize MB\tType\tSpeed MHz\tVendor\tModel/PN\tSerial\tStatus")
for _, mem := range hw.Memory {
totalMB += mem.SizeMB
location := mem.Location
if location == "" {
location = mem.Slot
}
fmt.Fprintf(tw, "%s\t%t\t%d\t%s\t%d\t%s\t%s\t%s\t%s\n",
location, mem.Present, mem.SizeMB, mem.Type, mem.CurrentSpeedMHz, mem.Manufacturer, mem.PartNumber, mem.SerialNumber, mem.Status)
}
fmt.Fprintf(w, " Total: %d GB (%d DIMMs)\n", totalMB/1024, len(hw.Memory))
fmt.Fprintf(w, " Type: %s @ %d MHz\n", hw.Memory[0].Type, hw.Memory[0].CurrentSpeedMHz)
fmt.Fprintf(w, " Manufacturer: %s\n", hw.Memory[0].Manufacturer)
_ = tw.Flush()
fmt.Fprintln(w)
}
// Storage
// Power tab
if len(hw.PowerSupply) > 0 {
fmt.Fprintln(w, "POWER SUPPLIES")
fmt.Fprintln(w, "--------------")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Slot\tPresent\tVendor\tModel\tWattage W\tInput W\tOutput W\tInput V\tTemp C\tStatus\tSerial")
for _, psu := range hw.PowerSupply {
fmt.Fprintf(tw, "%s\t%t\t%s\t%s\t%d\t%d\t%d\t%.0f\t%d\t%s\t%s\n",
psu.Slot, psu.Present, psu.Vendor, psu.Model, psu.WattageW, psu.InputPowerW, psu.OutputPowerW, psu.InputVoltage, psu.TemperatureC, psu.Status, psu.SerialNumber)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// Storage tab
if len(hw.Storage) > 0 {
fmt.Fprintln(w, "STORAGE")
fmt.Fprintln(w, "-------")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Slot\tPresent\tType\tInterface\tModel\tSize GB\tVendor\tFirmware\tSerial")
for _, stor := range hw.Storage {
fmt.Fprintf(w, " %s: %s (%d GB) - S/N: %s\n",
stor.Slot, stor.Model, stor.SizeGB, stor.SerialNumber)
fmt.Fprintf(tw, "%s\t%t\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n",
stor.Slot, stor.Present, stor.Type, stor.Interface, stor.Model, stor.SizeGB, stor.Manufacturer, stor.Firmware, stor.SerialNumber)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// PCIe
// GPU tab
if len(hw.GPUs) > 0 {
fmt.Fprintln(w, "GPUS")
fmt.Fprintln(w, "----")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Slot\tModel\tVendor\tBDF\tPCIe\tSerial\tStatus")
for _, gpu := range hw.GPUs {
link := fmt.Sprintf("x%d %s", gpu.CurrentLinkWidth, gpu.CurrentLinkSpeed)
if gpu.MaxLinkWidth > 0 || gpu.MaxLinkSpeed != "" {
link = fmt.Sprintf("%s / x%d %s", link, gpu.MaxLinkWidth, gpu.MaxLinkSpeed)
}
fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
gpu.Slot, gpu.Model, gpu.Manufacturer, gpu.BDF, link, gpu.SerialNumber, gpu.Status)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// Network tab
if len(hw.NetworkAdapters) > 0 {
fmt.Fprintln(w, "NETWORK ADAPTERS")
fmt.Fprintln(w, "----------------")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Slot\tLocation\tModel\tVendor\tPorts\tType\tStatus\tSerial")
for _, nic := range hw.NetworkAdapters {
fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n",
nic.Slot, nic.Location, nic.Model, nic.Vendor, nic.PortCount, nic.PortType, nic.Status, nic.SerialNumber)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// Device inventory tab
if len(hw.PCIeDevices) > 0 {
fmt.Fprintln(w, "PCIE DEVICES")
fmt.Fprintln(w, "------------")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Slot\tBDF\tClass\tVendor\tVID:DID\tLink\tSerial")
for _, pcie := range hw.PCIeDevices {
fmt.Fprintf(w, " %s: %s (x%d %s)\n",
pcie.Slot, pcie.DeviceClass, pcie.LinkWidth, pcie.LinkSpeed)
if pcie.SerialNumber != "" {
fmt.Fprintf(w, " S/N: %s\n", pcie.SerialNumber)
}
if len(pcie.MACAddresses) > 0 {
fmt.Fprintf(w, " MACs: %v\n", pcie.MACAddresses)
}
fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%04x:%04x\tx%d %s / x%d %s\t%s\n",
pcie.Slot, pcie.BDF, pcie.DeviceClass, pcie.Manufacturer, pcie.VendorID, pcie.DeviceID,
pcie.LinkWidth, pcie.LinkSpeed, pcie.MaxLinkWidth, pcie.MaxLinkSpeed, pcie.SerialNumber)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
}
// Sensors summary
// Sensors tab
if len(e.result.Sensors) > 0 {
fmt.Fprintln(w, "SENSOR READINGS")
fmt.Fprintln(w, "---------------")
// Group by type
byType := make(map[string][]models.SensorReading)
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Type\tName\tValue\tUnit\tRaw\tStatus")
for _, s := range e.result.Sensors {
byType[s.Type] = append(byType[s.Type], s)
}
for stype, sensors := range byType {
fmt.Fprintf(w, "\n %s:\n", stype)
for _, s := range sensors {
if s.Value != 0 {
fmt.Fprintf(w, " %s: %.0f %s [%s]\n", s.Name, s.Value, s.Unit, s.Status)
} else if s.RawValue != "" {
fmt.Fprintf(w, " %s: %s [%s]\n", s.Name, s.RawValue, s.Status)
}
}
fmt.Fprintf(tw, "%s\t%s\t%.0f\t%s\t%s\t%s\n", s.Type, s.Name, s.Value, s.Unit, s.RawValue, s.Status)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// FRU summary
// Serials/FRU tab
if len(e.result.FRU) > 0 {
fmt.Fprintln(w, "FRU COMPONENTS")
fmt.Fprintln(w, "--------------")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Description\tManufacturer\tProduct\tSerial\tPart Number")
for _, fru := range e.result.FRU {
name := fru.ProductName
if name == "" {
name = fru.Description
}
fmt.Fprintf(w, " %s\n", name)
if fru.SerialNumber != "" {
fmt.Fprintf(w, " Serial: %s\n", fru.SerialNumber)
}
if fru.Manufacturer != "" {
fmt.Fprintf(w, " Manufacturer: %s\n", fru.Manufacturer)
}
fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\n", fru.Description, fru.Manufacturer, name, fru.SerialNumber, fru.PartNumber)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// Events summary
// Events tab
fmt.Fprintf(w, "EVENTS: %d total\n", len(e.result.Events))
if len(e.result.Events) > 0 {
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Time\tSeverity\tSource\tType\tName\tDescription")
for _, ev := range e.result.Events {
fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\t%s\n",
ev.Timestamp.Format("2006-01-02 15:04:05"), ev.Severity, ev.Source, ev.SensorType, ev.SensorName, ev.Description)
}
_ = tw.Flush()
}
var critical, warning, info int
for _, ev := range e.result.Events {
switch ev.Severity {

View File

@@ -14,6 +14,7 @@ type AnalysisResult struct {
Protocol string `json:"protocol,omitempty"` // redfish | ipmi
TargetHost string `json:"target_host,omitempty"` // BMC host for live collect
CollectedAt time.Time `json:"collected_at,omitempty"` // Collection/upload timestamp
RawPayloads map[string]any `json:"raw_payloads,omitempty"` // Additional source payloads (e.g. Redfish tree)
Events []Event `json:"events"`
FRU []FRUInfo `json:"fru"`
Sensors []SensorReading `json:"sensors"`

View File

@@ -1,13 +1,16 @@
package server
import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"fmt"
"html/template"
"io"
"net/http"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
@@ -55,23 +58,48 @@ func (s *Server) handleUpload(w http.ResponseWriter, r *http.Request) {
}
defer file.Close()
// Parse archive
p := parser.NewBMCParser()
if err := p.ParseFromReader(file, header.Filename); err != nil {
jsonError(w, "Failed to parse archive: "+err.Error(), http.StatusBadRequest)
payload, err := io.ReadAll(file)
if err != nil {
jsonError(w, "Failed to read file", http.StatusBadRequest)
return
}
result := p.Result()
applyArchiveSourceMetadata(result)
var (
result *models.AnalysisResult
vendor string
)
if looksLikeJSONSnapshot(header.Filename, payload) {
snapshotResult, snapshotErr := parseUploadedSnapshot(payload)
if snapshotErr != nil {
jsonError(w, "Failed to parse snapshot: "+snapshotErr.Error(), http.StatusBadRequest)
return
}
result = snapshotResult
vendor = strings.TrimSpace(snapshotResult.Protocol)
if vendor == "" {
vendor = "snapshot"
}
} else {
// Parse archive
p := parser.NewBMCParser()
if err := p.ParseFromReader(bytes.NewReader(payload), header.Filename); err != nil {
jsonError(w, "Failed to parse archive: "+err.Error(), http.StatusBadRequest)
return
}
result = p.Result()
applyArchiveSourceMetadata(result)
vendor = p.DetectedVendor()
}
s.SetResult(result)
s.SetDetectedVendor(p.DetectedVendor())
s.SetDetectedVendor(vendor)
jsonResponse(w, map[string]interface{}{
"status": "ok",
"message": "File uploaded and parsed successfully",
"filename": header.Filename,
"vendor": p.DetectedVendor(),
"vendor": vendor,
"stats": map[string]int{
"events": len(result.Events),
"sensors": len(result.Sensors),
@@ -529,7 +557,7 @@ func (s *Server) handleExportCSV(w http.ResponseWriter, r *http.Request) {
result := s.GetResult()
w.Header().Set("Content-Type", "text/csv; charset=utf-8")
w.Header().Set("Content-Disposition", "attachment; filename=serials.csv")
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", exportFilename(result, "csv")))
exp := exporter.New(result)
exp.ExportCSV(w)
@@ -539,7 +567,7 @@ func (s *Server) handleExportJSON(w http.ResponseWriter, r *http.Request) {
result := s.GetResult()
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Disposition", "attachment; filename=report.json")
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", exportFilename(result, "json")))
exp := exporter.New(result)
exp.ExportJSON(w)
@@ -549,7 +577,7 @@ func (s *Server) handleExportTXT(w http.ResponseWriter, r *http.Request) {
result := s.GetResult()
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("Content-Disposition", "attachment; filename=report.txt")
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", exportFilename(result, "txt")))
exp := exporter.New(result)
exp.ExportTXT(w)
@@ -682,7 +710,7 @@ func (s *Server) startCollectionJob(jobID string, req CollectRequest) {
s.jobManager.UpdateJobStatus(jobID, CollectStatusSuccess, 100, "")
s.jobManager.AppendJobLog(jobID, "Сбор завершен")
s.SetResult(result)
s.SetDetectedVendor("")
s.SetDetectedVendor(req.Protocol)
}()
}
@@ -754,6 +782,9 @@ func applyCollectSourceMetadata(result *models.AnalysisResult, req CollectReques
result.Protocol = req.Protocol
result.TargetHost = req.Host
result.CollectedAt = time.Now().UTC()
if strings.TrimSpace(result.Filename) == "" {
result.Filename = fmt.Sprintf("%s://%s", req.Protocol, req.Host)
}
}
func toCollectorRequest(req CollectRequest) collector.Request {
@@ -769,6 +800,39 @@ func toCollectorRequest(req CollectRequest) collector.Request {
}
}
// looksLikeJSONSnapshot reports whether an uploaded file should be treated
// as a JSON snapshot rather than a log archive. A ".json" extension (any
// case) is decisive; otherwise the payload is sniffed for a leading '{' or
// '[' after trimming whitespace.
func looksLikeJSONSnapshot(filename string, payload []byte) bool {
	if strings.EqualFold(filepath.Ext(filename), ".json") {
		return true
	}
	body := bytes.TrimSpace(payload)
	if len(body) == 0 {
		return false
	}
	switch body[0] {
	case '{', '[':
		return true
	default:
		return false
	}
}
// parseUploadedSnapshot decodes an uploaded JSON snapshot into an
// AnalysisResult and backfills metadata older snapshots may lack
// (source type, collection time, filename).
//
// A payload that decodes but contains no hardware, events, sensors, or FRU
// data is rejected as unsupported so arbitrary JSON uploads do not silently
// produce an empty session.
func parseUploadedSnapshot(payload []byte) (*models.AnalysisResult, error) {
	var result models.AnalysisResult
	if err := json.Unmarshal(payload, &result); err != nil {
		return nil, err
	}
	if result.Hardware == nil && len(result.Events) == 0 && len(result.Sensors) == 0 && len(result.FRU) == 0 {
		return nil, fmt.Errorf("unsupported snapshot format")
	}
	if strings.TrimSpace(result.SourceType) == "" {
		// Trim the protocol before classifying, matching how the upload
		// handler derives the vendor from it — a whitespace-only protocol
		// must not mark the snapshot as a live API collection.
		if strings.TrimSpace(result.Protocol) != "" {
			result.SourceType = models.SourceTypeAPI
		} else {
			result.SourceType = models.SourceTypeArchive
		}
	}
	if result.CollectedAt.IsZero() {
		result.CollectedAt = time.Now().UTC()
	}
	if strings.TrimSpace(result.Filename) == "" {
		result.Filename = "uploaded_snapshot.json"
	}
	return &result, nil
}
func (s *Server) getCollector(protocol string) (collector.Connector, bool) {
if s.collectors == nil {
s.collectors = collector.NewDefaultRegistry()
@@ -808,3 +872,59 @@ func isGPUDevice(deviceClass string) bool {
}
return false
}
// exportFilename builds a download filename of the form
// "YYYY-MM-DD (MODEL) - SERIAL.ext". The date prefers the result's
// collection timestamp (falling back to today, UTC); model and serial come
// from the board info when present, otherwise placeholder text is used.
// An empty extension defaults to "txt".
func exportFilename(result *models.AnalysisResult, ext string) string {
	const (
		placeholderModel  = "SERVER MODEL"
		placeholderSerial = "SERVER SN"
	)
	date := time.Now().UTC().Format("2006-01-02")
	model, sn := placeholderModel, placeholderSerial
	if result != nil {
		if !result.CollectedAt.IsZero() {
			date = result.CollectedAt.UTC().Format("2006-01-02")
		}
		if hw := result.Hardware; hw != nil {
			if v := strings.TrimSpace(hw.BoardInfo.ProductName); v != "" {
				model = v
			}
			if v := strings.TrimSpace(hw.BoardInfo.SerialNumber); v != "" {
				sn = v
			}
		}
	}
	if ext = strings.TrimPrefix(strings.TrimSpace(ext), "."); ext == "" {
		ext = "txt"
	}
	return fmt.Sprintf("%s (%s) - %s.%s",
		date, sanitizeFilenamePart(model), sanitizeFilenamePart(sn), ext)
}
// sanitizeFilenamePart makes v safe to embed in a download filename:
// characters that are illegal in common filesystems are replaced with
// underscores, all whitespace runs (including newlines and tabs) collapse
// to single spaces, and a value that ends up empty becomes "-".
func sanitizeFilenamePart(v string) string {
	const unsafe = `/\:*?"<>|`
	mapped := strings.Map(func(r rune) rune {
		switch r {
		case '\n', '\r', '\t':
			return ' '
		}
		if strings.ContainsRune(unsafe, r) {
			return '_'
		}
		return r
	}, v)
	fields := strings.Fields(mapped)
	if len(fields) == 0 {
		return "-"
	}
	return strings.Join(fields, " ")
}

View File

@@ -154,6 +154,89 @@ func TestCollectStatusNotFoundSmoke(t *testing.T) {
assertJSONError(t, resp, "Collect job not found")
}
// TestUploadRedfishSnapshotJSON exercises the JSON snapshot upload path:
// POSTing a previously exported Redfish snapshot to /api/upload must be
// accepted, with the vendor reported from the snapshot's protocol, and
// /api/status must then surface the snapshot's protocol and filename so the
// UI can restore the session header after an offline re-open.
func TestUploadRedfishSnapshotJSON(t *testing.T) {
	_, ts := newFlowTestServer()
	defer ts.Close()

	// Minimal snapshot containing hardware data (so the server does not
	// reject it as an unsupported format) plus a raw Redfish tree payload.
	snapshot := `{
		"filename": "redfish://bmc01.local",
		"source_type": "api",
		"protocol": "redfish",
		"target_host": "bmc01.local",
		"hardware": {
			"storage": [
				{
					"slot": "Drive1",
					"type": "NVMe",
					"model": "KIOXIA CD8",
					"size_gb": 3840,
					"serial_number": "SN-NVME-1",
					"present": true
				}
			]
		},
		"raw_payloads": {
			"redfish_tree": {
				"/redfish/v1": {"Name": "ServiceRoot"}
			}
		}
	}`

	// Build a multipart form carrying the snapshot under the "archive"
	// field — the same field name the regular archive upload uses.
	reqBody := &bytes.Buffer{}
	writer := multipart.NewWriter(reqBody)
	part, err := writer.CreateFormFile("archive", "snapshot.json")
	if err != nil {
		t.Fatalf("create form file: %v", err)
	}
	if _, err := part.Write([]byte(snapshot)); err != nil {
		t.Fatalf("write snapshot body: %v", err)
	}
	if err := writer.Close(); err != nil {
		t.Fatalf("close multipart writer: %v", err)
	}

	// Upload the snapshot and require a 200 response.
	uploadReq, err := http.NewRequest(http.MethodPost, ts.URL+"/api/upload", reqBody)
	if err != nil {
		t.Fatalf("build upload request: %v", err)
	}
	uploadReq.Header.Set("Content-Type", writer.FormDataContentType())
	uploadResp, err := http.DefaultClient.Do(uploadReq)
	if err != nil {
		t.Fatalf("upload request failed: %v", err)
	}
	defer uploadResp.Body.Close()
	if uploadResp.StatusCode != http.StatusOK {
		t.Fatalf("expected 200 from /api/upload, got %d", uploadResp.StatusCode)
	}

	// For a snapshot upload the reported vendor is the snapshot protocol.
	var uploadPayload map[string]interface{}
	if err := json.NewDecoder(uploadResp.Body).Decode(&uploadPayload); err != nil {
		t.Fatalf("decode upload response: %v", err)
	}
	if uploadPayload["vendor"] != "redfish" {
		t.Fatalf("expected vendor redfish, got %v", uploadPayload["vendor"])
	}

	// The status endpoint must reflect the loaded snapshot's metadata.
	statusResp, err := http.Get(ts.URL + "/api/status")
	if err != nil {
		t.Fatalf("status request failed: %v", err)
	}
	defer statusResp.Body.Close()
	var statusPayload map[string]interface{}
	if err := json.NewDecoder(statusResp.Body).Decode(&statusPayload); err != nil {
		t.Fatalf("decode status response: %v", err)
	}
	if statusPayload["protocol"] != "redfish" {
		t.Fatalf("expected protocol redfish, got %v", statusPayload["protocol"])
	}
	if statusPayload["filename"] != "redfish://bmc01.local" {
		t.Fatalf("expected snapshot filename, got %v", statusPayload["filename"])
	}
}
func buildTarArchive(t *testing.T, name, content string) []byte {
t.Helper()

View File

@@ -426,7 +426,11 @@ async function loadDataFromStatus() {
if (!payload.loaded) {
return;
}
await loadData(payload.vendor || '', payload.filename || '');
const vendor = payload.vendor || payload.protocol || '';
const filename = payload.filename || (payload.protocol && payload.target_host
? `${payload.protocol}://${payload.target_host}`
: '');
await loadData(vendor, filename);
} catch (err) {
console.error('Failed to load data after collect:', err);
}

View File

@@ -21,10 +21,10 @@
<div id="archive-source-content">
<div class="upload-area" id="drop-zone">
<p>Перетащите архив сюда или</p>
<input type="file" id="file-input" accept="application/gzip,application/x-gzip,application/x-tar,application/zip" hidden>
<p>Перетащите архив или JSON snapshot сюда</p>
<input type="file" id="file-input" accept="application/gzip,application/x-gzip,application/x-tar,application/zip,application/json,.json,.tar,.tar.gz,.tgz,.zip" hidden>
<button type="button" onclick="document.getElementById('file-input').click()">Выберите файл</button>
<p class="hint">Поддерживаемые форматы: tar.gz, zip</p>
<p class="hint">Поддерживаемые форматы: tar.gz, zip, json</p>
</div>
<div id="upload-status"></div>
<div id="parsers-info" class="parsers-info"></div>