Redfish snapshot/export overhaul and portable release build

This commit is contained in:
Mikhail Chusavitin
2026-02-04 19:43:51 +03:00
parent c89ee0118f
commit bb48b03677
11 changed files with 1357 additions and 110 deletions

View File

@@ -6,7 +6,7 @@ COMMIT=$(shell git rev-parse --short HEAD 2>/dev/null || echo "none")
LDFLAGS=-ldflags "-X main.version=$(VERSION) -X main.commit=$(COMMIT)"
build:
go build $(LDFLAGS) -o bin/$(BINARY_NAME) ./cmd/logpile
CGO_ENABLED=0 go build $(LDFLAGS) -o bin/$(BINARY_NAME) ./cmd/logpile
run: build
./bin/$(BINARY_NAME)
@@ -19,11 +19,11 @@ test:
# Cross-platform builds
build-all: clean
GOOS=linux GOARCH=amd64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-linux-amd64 ./cmd/logpile
GOOS=linux GOARCH=arm64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-linux-arm64 ./cmd/logpile
GOOS=darwin GOARCH=amd64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-darwin-amd64 ./cmd/logpile
GOOS=darwin GOARCH=arm64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-darwin-arm64 ./cmd/logpile
GOOS=windows GOARCH=amd64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-windows-amd64.exe ./cmd/logpile
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-linux-amd64 ./cmd/logpile
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-linux-arm64 ./cmd/logpile
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-darwin-amd64 ./cmd/logpile
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-darwin-arm64 ./cmd/logpile
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build $(LDFLAGS) -o bin/$(BINARY_NAME)-windows-amd64.exe ./cmd/logpile
dev:
go run ./cmd/logpile

View File

@@ -1,6 +1,7 @@
package main
import (
"bufio"
"flag"
"fmt"
"log"
@@ -21,6 +22,7 @@ var (
)
func main() {
holdOnCrash := flag.Bool("hold-on-crash", runtime.GOOS == "windows", "Wait for Enter on crash to keep console open")
port := flag.Int("port", 8082, "HTTP server port")
file := flag.String("file", "", "Pre-load archive file")
showVersion := flag.Bool("version", false, "Show version")
@@ -54,11 +56,22 @@ func main() {
}()
}
if err := srv.Run(); err != nil {
log.Fatalf("Server error: %v", err)
if err := runServer(srv); err != nil {
log.Printf("FATAL: %v", err)
maybeWaitForCrashInput(*holdOnCrash)
os.Exit(1)
}
}
// runServer executes srv.Run and converts any panic raised while
// serving into an ordinary error, so the caller can log it and decide
// whether to hold the console open.
func runServer(srv *server.Server) (runErr error) {
	defer func() {
		if r := recover(); r != nil {
			runErr = fmt.Errorf("panic: %v", r)
		}
	}()
	runErr = srv.Run()
	return runErr
}
// openBrowser opens the default browser with the given URL
func openBrowser(url string) {
var cmd *exec.Cmd
@@ -76,3 +89,23 @@ func openBrowser(url string) {
log.Printf("Failed to open browser: %v", err)
}
}
// maybeWaitForCrashInput blocks until the user presses Enter, but only
// when the hold flag is enabled and both stdin and stderr are attached
// to an interactive terminal; otherwise it returns immediately.
func maybeWaitForCrashInput(enabled bool) {
	if !enabled {
		return
	}
	if !isInteractiveConsole() {
		return
	}
	fmt.Fprintln(os.Stderr, "\nApplication crashed. Press Enter to close...")
	// Best-effort read; the result is irrelevant, we only wait for Enter.
	_, _ = bufio.NewReader(os.Stdin).ReadString('\n')
}
// isInteractiveConsole reports whether both stdin and stderr are
// character devices, i.e. attached to a real terminal rather than a
// pipe or file redirect. Stat failures count as non-interactive.
func isInteractiveConsole() bool {
	isCharDevice := func(f *os.File) bool {
		info, err := f.Stat()
		if err != nil {
			return false
		}
		return info.Mode()&os.ModeCharDevice != 0
	}
	return isCharDevice(os.Stdin) && isCharDevice(os.Stderr)
}

24
docs/releases/v1.2.1.md Normal file
View File

@@ -0,0 +1,24 @@
# LOGPile v1.2.1
Release date: 2026-02-04
## Highlights
- Redfish collection significantly expanded: dynamic Systems/Chassis/Managers discovery, PSU/GPU/PCIe inventory mapping, improved NVMe and storage parsing (including SimpleStorage and chassis drive fallbacks).
- Added Redfish snapshot support with broad raw Redfish tree capture for future offline analysis.
- Upload flow now accepts JSON snapshots in addition to archives, enabling offline re-open of live Redfish collections.
- Export UX improved:
- Export filenames now follow `YYYY-MM-DD (SERVER MODEL) - SERVER SN`.
- TXT export now outputs tabular sections matching web UI views (no raw JSON dump).
- Live API UI improvements: parser/file badges for Redfish sessions and clearer upload format messaging.
- Redfish progress logs are more informative (snapshot stage and active top-level roots).
- Build/distribution hardening:
- Cross-platform builds via `make build-all`.
- `CGO_ENABLED=0` for more portable single-binary distribution.
- Crash hold option to keep console open for debugging (`-hold-on-crash`, enabled by default on Windows).
## Artifacts
- `bin/logpile-linux-amd64`
- `bin/logpile-linux-arm64`
- `bin/logpile-darwin-amd64`
- `bin/logpile-darwin-arm64`
- `bin/logpile-windows-amd64.exe`

View File

@@ -9,8 +9,11 @@ import (
"net/http"
"net/url"
"path"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"git.mchus.pro/mchus/logpile/internal/models"
@@ -45,40 +48,58 @@ func (c *RedfishConnector) Collect(ctx context.Context, req Request, emit Progre
return nil, fmt.Errorf("redfish service root: %w", err)
}
systemPaths := c.discoverMemberPaths(ctx, client, req, baseURL, "/redfish/v1/Systems", "/redfish/v1/Systems/1")
chassisPaths := c.discoverMemberPaths(ctx, client, req, baseURL, "/redfish/v1/Chassis", "/redfish/v1/Chassis/1")
managerPaths := c.discoverMemberPaths(ctx, client, req, baseURL, "/redfish/v1/Managers", "/redfish/v1/Managers/1")
primarySystem := firstPathOrDefault(systemPaths, "/redfish/v1/Systems/1")
primaryManager := firstPathOrDefault(managerPaths, "/redfish/v1/Managers/1")
if emit != nil {
emit(Progress{Status: "running", Progress: 30, Message: "Redfish: чтение данных системы..."})
}
systemDoc, err := c.getJSON(ctx, client, req, baseURL, "/redfish/v1/Systems/1")
systemDoc, err := c.getJSON(ctx, client, req, baseURL, primarySystem)
if err != nil {
return nil, fmt.Errorf("system info: %w", err)
}
biosDoc, _ := c.getJSON(ctx, client, req, baseURL, "/redfish/v1/Systems/1/Bios")
secureBootDoc, _ := c.getJSON(ctx, client, req, baseURL, "/redfish/v1/Systems/1/SecureBoot")
biosDoc, _ := c.getJSON(ctx, client, req, baseURL, joinPath(primarySystem, "/Bios"))
secureBootDoc, _ := c.getJSON(ctx, client, req, baseURL, joinPath(primarySystem, "/SecureBoot"))
if emit != nil {
emit(Progress{Status: "running", Progress: 55, Message: "Redfish: чтение CPU/RAM/Storage..."})
}
processors, _ := c.getCollectionMembers(ctx, client, req, baseURL, "/redfish/v1/Systems/1/Processors")
memory, _ := c.getCollectionMembers(ctx, client, req, baseURL, "/redfish/v1/Systems/1/Memory")
storageMembers, _ := c.getCollectionMembers(ctx, client, req, baseURL, "/redfish/v1/Systems/1/Storage")
storageDevices := c.collectStorage(ctx, client, req, baseURL, storageMembers)
processors, _ := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(primarySystem, "/Processors"))
memory, _ := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(primarySystem, "/Memory"))
storageDevices := c.collectStorage(ctx, client, req, baseURL, primarySystem)
if emit != nil {
emit(Progress{Status: "running", Progress: 80, Message: "Redfish: чтение сетевых и BMC настроек..."})
}
nics := c.collectNICs(ctx, client, req, baseURL)
managerDoc, _ := c.getJSON(ctx, client, req, baseURL, "/redfish/v1/Managers/1")
networkProtocolDoc, _ := c.getJSON(ctx, client, req, baseURL, "/redfish/v1/Managers/1/NetworkProtocol")
psus := c.collectPSUs(ctx, client, req, baseURL, chassisPaths)
pcieDevices := c.collectPCIeDevices(ctx, client, req, baseURL, systemPaths, chassisPaths)
gpus := c.collectGPUs(ctx, client, req, baseURL, systemPaths, chassisPaths)
nics := c.collectNICs(ctx, client, req, baseURL, chassisPaths)
managerDoc, _ := c.getJSON(ctx, client, req, baseURL, primaryManager)
networkProtocolDoc, _ := c.getJSON(ctx, client, req, baseURL, joinPath(primaryManager, "/NetworkProtocol"))
if emit != nil {
emit(Progress{Status: "running", Progress: 90, Message: "Redfish: сбор расширенного snapshot..."})
}
rawTree := c.collectRawRedfishTree(ctx, client, req, baseURL, emit)
result := &models.AnalysisResult{
Events: make([]models.Event, 0),
FRU: make([]models.FRUInfo, 0),
Sensors: make([]models.SensorReading, 0),
RawPayloads: map[string]any{
"redfish_tree": rawTree,
},
Hardware: &models.HardwareConfig{
BoardInfo: parseBoardInfo(systemDoc),
CPUs: parseCPUs(processors),
Memory: parseMemory(memory),
Storage: storageDevices,
PCIeDevices: pcieDevices,
GPUs: gpus,
PowerSupply: psus,
NetworkAdapters: nics,
Firmware: parseFirmware(systemDoc, biosDoc, managerDoc, secureBootDoc, networkProtocolDoc),
},
@@ -122,13 +143,23 @@ func (c *RedfishConnector) baseURL(req Request) (string, error) {
return fmt.Sprintf("%s://%s:%d", scheme, host, req.Port), nil
}
func (c *RedfishConnector) collectStorage(ctx context.Context, client *http.Client, req Request, baseURL string, storageMembers []map[string]interface{}) []models.Storage {
func (c *RedfishConnector) collectStorage(ctx context.Context, client *http.Client, req Request, baseURL, systemPath string) []models.Storage {
var out []models.Storage
storageMembers, _ := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(systemPath, "/Storage"))
for _, member := range storageMembers {
drives, ok := member["Drives"].([]interface{})
if !ok {
// "Drives" can be embedded refs or a link to a collection.
if driveCollection, ok := member["Drives"].(map[string]interface{}); ok {
if driveCollectionPath := asString(driveCollection["@odata.id"]); driveCollectionPath != "" {
driveDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, driveCollectionPath)
if err == nil {
for _, driveDoc := range driveDocs {
out = append(out, parseDrive(driveDoc))
}
}
continue
}
}
if drives, ok := member["Drives"].([]interface{}); ok {
for _, driveAny := range drives {
driveRef, ok := driveAny.(map[string]interface{})
if !ok {
@@ -144,21 +175,388 @@ func (c *RedfishConnector) collectStorage(ctx context.Context, client *http.Clie
}
out = append(out, parseDrive(driveDoc))
}
continue
}
// Some implementations return drive fields right in storage member object.
if looksLikeDrive(member) {
out = append(out, parseDrive(member))
}
}
// Fallback for platforms that expose disks in SimpleStorage.
simpleStorageMembers, _ := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(systemPath, "/SimpleStorage"))
for _, member := range simpleStorageMembers {
devices, ok := member["Devices"].([]interface{})
if !ok {
continue
}
for _, devAny := range devices {
devDoc, ok := devAny.(map[string]interface{})
if !ok || !looksLikeDrive(devDoc) {
continue
}
out = append(out, parseDrive(devDoc))
}
}
// Fallback for platforms exposing physical drives under Chassis.
chassisPaths := c.discoverMemberPaths(ctx, client, req, baseURL, "/redfish/v1/Chassis", "/redfish/v1/Chassis/1")
for _, chassisPath := range chassisPaths {
driveDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(chassisPath, "/Drives"))
if err != nil {
continue
}
for _, driveDoc := range driveDocs {
if !looksLikeDrive(driveDoc) {
continue
}
out = append(out, parseDrive(driveDoc))
}
}
out = dedupeStorage(out)
return out
}
// collectNICs gathers network adapters from the NetworkAdapters
// collection of every chassis, de-duplicating entries by serial number
// (or slot|model when no serial is reported). Chassis that cannot be
// read are skipped best-effort.
func (c *RedfishConnector) collectNICs(ctx context.Context, client *http.Client, req Request, baseURL string, chassisPaths []string) []models.NetworkAdapter {
	var result []models.NetworkAdapter
	known := make(map[string]struct{})
	for _, chassis := range chassisPaths {
		docs, err := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(chassis, "/NetworkAdapters"))
		if err != nil {
			continue
		}
		for _, doc := range docs {
			adapter := parseNIC(doc)
			dedupeKey := firstNonEmpty(adapter.SerialNumber, adapter.Slot+"|"+adapter.Model)
			if dedupeKey == "" {
				continue
			}
			if _, duplicate := known[dedupeKey]; duplicate {
				continue
			}
			known[dedupeKey] = struct{}{}
			result = append(result, adapter)
		}
	}
	return result
}
// collectPSUs gathers power-supply inventory across all chassis.
// Two Redfish layouts are supported:
//   - the classic Chassis/<id>/Power document with an embedded
//     PowerSupplies array, and
//   - the newer (2022+) Chassis/<id>/PowerSubsystem/PowerSupplies
//     collection.
// Entries are de-duplicated by serial number (or slot|model when no
// serial is reported). idx numbers PSUs globally across all chassis so
// parsePSU can synthesize "PSU<n>" slot names when the BMC reports none.
func (c *RedfishConnector) collectPSUs(ctx context.Context, client *http.Client, req Request, baseURL string, chassisPaths []string) []models.PSU {
	var out []models.PSU
	seen := make(map[string]struct{})
	idx := 1
	for _, chassisPath := range chassisPaths {
		// Most implementations expose PSU info in Chassis/<id>/Power as an embedded array.
		if powerDoc, err := c.getJSON(ctx, client, req, baseURL, joinPath(chassisPath, "/Power")); err == nil {
			if members, ok := powerDoc["PowerSupplies"].([]interface{}); ok && len(members) > 0 {
				for _, item := range members {
					doc, ok := item.(map[string]interface{})
					if !ok {
						continue
					}
					psu := parsePSU(doc, idx)
					idx++
					// Prefer the serial number as the dedupe key; fall back to slot|model.
					key := firstNonEmpty(psu.SerialNumber, psu.Slot+"|"+psu.Model)
					if key == "" {
						continue
					}
					if _, ok := seen[key]; ok {
						continue
					}
					seen[key] = struct{}{}
					out = append(out, psu)
				}
			}
		}
		// Redfish 2022+ may expose PSU collection via PowerSubsystem.
		memberDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(chassisPath, "/PowerSubsystem/PowerSupplies"))
		if err != nil || len(memberDocs) == 0 {
			continue
		}
		for _, doc := range memberDocs {
			psu := parsePSU(doc, idx)
			idx++
			key := firstNonEmpty(psu.SerialNumber, psu.Slot+"|"+psu.Model)
			if key == "" {
				continue
			}
			if _, ok := seen[key]; ok {
				continue
			}
			seen[key] = struct{}{}
			out = append(out, psu)
		}
	}
	return out
}
func (c *RedfishConnector) collectNICs(ctx context.Context, client *http.Client, req Request, baseURL string) []models.NetworkAdapter {
adapterDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, "/redfish/v1/Chassis/1/NetworkAdapters")
if err != nil {
return nil
func (c *RedfishConnector) collectGPUs(ctx context.Context, client *http.Client, req Request, baseURL string, systemPaths, chassisPaths []string) []models.GPU {
collections := make([]string, 0, len(systemPaths)*2+len(chassisPaths))
for _, systemPath := range systemPaths {
collections = append(collections, joinPath(systemPath, "/PCIeDevices"))
collections = append(collections, joinPath(systemPath, "/Accelerators"))
}
for _, chassisPath := range chassisPaths {
collections = append(collections, joinPath(chassisPath, "/PCIeDevices"))
}
nics := make([]models.NetworkAdapter, 0, len(adapterDocs))
for _, doc := range adapterDocs {
nics = append(nics, parseNIC(doc))
var out []models.GPU
seen := make(map[string]struct{})
idx := 1
for _, collectionPath := range collections {
memberDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, collectionPath)
if err != nil || len(memberDocs) == 0 {
continue
}
return nics
for _, doc := range memberDocs {
functionDocs := c.getLinkedPCIeFunctions(ctx, client, req, baseURL, doc)
if !looksLikeGPU(doc, functionDocs) {
continue
}
gpu := parseGPU(doc, functionDocs, idx)
idx++
key := firstNonEmpty(gpu.SerialNumber, gpu.BDF, gpu.Slot+"|"+gpu.Model)
if key == "" {
continue
}
if _, ok := seen[key]; ok {
continue
}
seen[key] = struct{}{}
out = append(out, gpu)
}
}
return out
}
// collectPCIeDevices gathers PCIe inventory from the PCIeDevices
// collections of every system and chassis, then falls back to the
// per-system PCIeFunctions collection for BMCs that expose only
// functions. Entries are de-duplicated across both passes.
func (c *RedfishConnector) collectPCIeDevices(ctx context.Context, client *http.Client, req Request, baseURL string, systemPaths, chassisPaths []string) []models.PCIeDevice {
	// Candidate collection endpoints under both Systems and Chassis.
	collections := make([]string, 0, len(systemPaths)+len(chassisPaths))
	for _, systemPath := range systemPaths {
		collections = append(collections, joinPath(systemPath, "/PCIeDevices"))
	}
	for _, chassisPath := range chassisPaths {
		collections = append(collections, joinPath(chassisPath, "/PCIeDevices"))
	}
	var out []models.PCIeDevice
	seen := make(map[string]struct{})
	for _, collectionPath := range collections {
		memberDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, collectionPath)
		if err != nil || len(memberDocs) == 0 {
			continue
		}
		for _, doc := range memberDocs {
			// Linked PCIeFunction documents fill in IDs and link
			// parameters the device document itself may omit.
			functionDocs := c.getLinkedPCIeFunctions(ctx, client, req, baseURL, doc)
			dev := parsePCIeDevice(doc, functionDocs)
			// Dedupe key preference: serial, then BDF, then slot|class.
			key := firstNonEmpty(dev.SerialNumber, dev.BDF, dev.Slot+"|"+dev.DeviceClass)
			if key == "" {
				continue
			}
			if _, ok := seen[key]; ok {
				continue
			}
			seen[key] = struct{}{}
			out = append(out, dev)
		}
	}
	// Fallback: some BMCs expose only PCIeFunctions collection without PCIeDevices.
	for _, systemPath := range systemPaths {
		functionDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(systemPath, "/PCIeFunctions"))
		if err != nil || len(functionDocs) == 0 {
			continue
		}
		for idx, fn := range functionDocs {
			dev := parsePCIeFunction(fn, idx+1)
			// Key preference differs here (BDF first): bare function
			// documents rarely carry serial numbers.
			key := firstNonEmpty(dev.BDF, dev.SerialNumber, dev.Slot+"|"+dev.DeviceClass)
			if key == "" {
				continue
			}
			if _, ok := seen[key]; ok {
				continue
			}
			seen[key] = struct{}{}
			out = append(out, dev)
		}
	}
	return out
}
// discoverMemberPaths reads a Redfish collection and returns the
// @odata.id of every member. When the collection cannot be fetched or
// yields no usable members, the single fallbackPath is returned instead
// (or nil when fallbackPath is empty).
func (c *RedfishConnector) discoverMemberPaths(ctx context.Context, client *http.Client, req Request, baseURL, collectionPath, fallbackPath string) []string {
	if collection, err := c.getJSON(ctx, client, req, baseURL, collectionPath); err == nil {
		if members, ok := collection["Members"].([]interface{}); ok {
			var discovered []string
			for _, memberAny := range members {
				member, isMap := memberAny.(map[string]interface{})
				if !isMap {
					continue
				}
				if p := asString(member["@odata.id"]); p != "" {
					discovered = append(discovered, p)
				}
			}
			if len(discovered) > 0 {
				return discovered
			}
		}
	}
	if fallbackPath == "" {
		return nil
	}
	return []string{fallbackPath}
}
// collectRawRedfishTree crawls the Redfish resource tree starting at
// /redfish/v1 with a small worker pool and returns every successfully
// fetched JSON document keyed by its path. The crawl is bounded by
// maxDocuments and skips known high-volume subtrees (see
// shouldCrawlPath). Progress is emitted roughly every 40 documents.
//
// Fix vs previous revision: enqueue used a synchronous `jobs <- path`
// send, but the workers are both the producers (they call enqueue for
// every discovered @odata.id) and the only consumers of the bounded
// jobs channel (cap 256). Once the buffer filled, a worker blocking on
// send could never drain the channel — a deadlock on large trees. The
// send is now asynchronous; wg counts jobs (not sends), so wg.Wait()
// cannot return before every pending send has been received and its
// job processed, keeping the close(jobs) below safe.
func (c *RedfishConnector) collectRawRedfishTree(ctx context.Context, client *http.Client, req Request, baseURL string, emit ProgressFn) map[string]interface{} {
	const maxDocuments = 1200
	const workers = 6
	out := make(map[string]interface{}, maxDocuments)
	seen := make(map[string]struct{}, maxDocuments)
	rootCounts := make(map[string]int)
	var mu sync.Mutex
	var processed int32
	jobs := make(chan string, 256)
	var wg sync.WaitGroup
	enqueue := func(path string) {
		path = normalizeRedfishPath(path)
		if !shouldCrawlPath(path) {
			return
		}
		mu.Lock()
		if len(seen) >= maxDocuments {
			mu.Unlock()
			return
		}
		if _, ok := seen[path]; ok {
			mu.Unlock()
			return
		}
		seen[path] = struct{}{}
		wg.Add(1)
		mu.Unlock()
		// Asynchronous send: workers call enqueue while also being the
		// only consumers of jobs, so a blocking send here could deadlock
		// once the channel buffer fills. Goroutine count is bounded by
		// maxDocuments.
		go func() { jobs <- path }()
	}
	enqueue("/redfish/v1")
	for i := 0; i < workers; i++ {
		go func() {
			for current := range jobs {
				doc, err := c.getJSON(ctx, client, req, baseURL, current)
				if err == nil {
					mu.Lock()
					out[current] = doc
					rootCounts[redfishTopRoot(current)]++
					mu.Unlock()
					// Follow every @odata.id reference in the document.
					for _, ref := range extractODataIDs(doc) {
						enqueue(ref)
					}
				}
				n := atomic.AddInt32(&processed, 1)
				if emit != nil && n%40 == 0 {
					// Snapshot the counters under the lock so emit's
					// formatting runs without holding mu.
					mu.Lock()
					countsCopy := make(map[string]int, len(rootCounts))
					for k, v := range rootCounts {
						countsCopy[k] = v
					}
					mu.Unlock()
					roots := topRoots(countsCopy, 2)
					emit(Progress{
						Status:   "running",
						Progress: 92 + int(minInt32(n/200, 6)),
						Message:  fmt.Sprintf("Redfish snapshot: документов=%d, корни=%s", n, strings.Join(roots, ", ")),
					})
				}
				wg.Done()
			}
		}()
	}
	// All enqueued jobs processed; now it is safe to stop the workers.
	wg.Wait()
	close(jobs)
	if emit != nil {
		emit(Progress{
			Status:   "running",
			Progress: 98,
			Message:  fmt.Sprintf("Redfish snapshot: собрано %d документов", len(out)),
		})
	}
	return out
}
// shouldCrawlPath reports whether a Redfish path is worth fetching for
// the raw snapshot. Empty paths and known high-volume subtrees (log
// entries, telemetry, sessions, tasks) are excluded to keep the crawl
// bounded in size and time.
func shouldCrawlPath(path string) bool {
	if len(path) == 0 {
		return false
	}
	skipMarkers := [...]string{
		"/LogServices/",
		"/Entries/",
		"/TelemetryService/",
		"/MetricReports/",
		"/SessionService/Sessions",
		"/TaskService/Tasks",
	}
	for _, marker := range skipMarkers {
		if strings.Contains(path, marker) {
			return false
		}
	}
	return true
}
// getLinkedPCIeFunctions resolves the PCIeFunction documents associated
// with a PCIeDevice document. Two Redfish layouts are handled:
//   - Links.PCIeFunctions as an array of @odata.id references, and
//   - PCIeFunctions as an object whose @odata.id points at a collection.
// Returns nil when neither form is present or nothing resolves.
func (c *RedfishConnector) getLinkedPCIeFunctions(ctx context.Context, client *http.Client, req Request, baseURL string, doc map[string]interface{}) []map[string]interface{} {
	// Newer Redfish payloads often keep function references in Links.PCIeFunctions.
	if links, ok := doc["Links"].(map[string]interface{}); ok {
		if refs, ok := links["PCIeFunctions"].([]interface{}); ok && len(refs) > 0 {
			out := make([]map[string]interface{}, 0, len(refs))
			for _, refAny := range refs {
				ref, ok := refAny.(map[string]interface{})
				if !ok {
					continue
				}
				memberPath := asString(ref["@odata.id"])
				if memberPath == "" {
					continue
				}
				memberDoc, err := c.getJSON(ctx, client, req, baseURL, memberPath)
				if err != nil {
					// Best-effort: skip references that fail to resolve.
					continue
				}
				out = append(out, memberDoc)
			}
			return out
		}
	}
	// Some implementations expose a collection object in PCIeFunctions.@odata.id.
	if pcieFunctions, ok := doc["PCIeFunctions"].(map[string]interface{}); ok {
		if collectionPath := asString(pcieFunctions["@odata.id"]); collectionPath != "" {
			memberDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, collectionPath)
			if err == nil {
				return memberDocs
			}
		}
	}
	return nil
}
func (c *RedfishConnector) getCollectionMembers(ctx context.Context, client *http.Client, req Request, baseURL, collectionPath string) ([]map[string]interface{}, error) {
@@ -300,14 +698,22 @@ func parseMemory(docs []map[string]interface{}) []models.MemoryDIMM {
}
func parseDrive(doc map[string]interface{}) models.Storage {
sizeGB := asInt(doc["CapacityBytes"]) / (1024 * 1024 * 1024)
sizeGB := 0
if capBytes := asInt64(doc["CapacityBytes"]); capBytes > 0 {
sizeGB = int(capBytes / (1024 * 1024 * 1024))
}
if sizeGB == 0 {
sizeGB = asInt(doc["CapacityGB"])
}
if sizeGB == 0 {
sizeGB = asInt(doc["CapacityMiB"]) / 1024
}
storageType := classifyStorageType(doc)
return models.Storage{
Slot: firstNonEmpty(asString(doc["Id"]), asString(doc["Name"])),
Type: firstNonEmpty(asString(doc["MediaType"]), asString(doc["Protocol"])),
Type: storageType,
Model: firstNonEmpty(asString(doc["Model"]), asString(doc["Name"])),
SizeGB: sizeGB,
SerialNumber: asString(doc["SerialNumber"]),
@@ -331,6 +737,257 @@ func parseNIC(doc map[string]interface{}) models.NetworkAdapter {
}
}
// parsePSU maps a Redfish PowerSupply document onto models.PSU.
// idx supplies a synthetic "PSU<n>" slot name when the document carries
// none of MemberId/Id/Name. A PSU whose Status.State is "Absent" or
// "Disabled" is reported as not present.
func parsePSU(doc map[string]interface{}, idx int) models.PSU {
	status := mapStatus(doc["Status"])
	present := true
	if statusMap, ok := doc["Status"].(map[string]interface{}); ok {
		state := asString(statusMap["State"])
		if strings.EqualFold(state, "Absent") || strings.EqualFold(state, "Disabled") {
			present = false
		}
	}
	// Slot name preference: MemberId (Power array), then Id, then Name.
	slot := firstNonEmpty(
		asString(doc["MemberId"]),
		asString(doc["Id"]),
		asString(doc["Name"]),
	)
	if slot == "" {
		slot = fmt.Sprintf("PSU%d", idx)
	}
	return models.PSU{
		Slot:         slot,
		Present:      present,
		Model:        firstNonEmpty(asString(doc["Model"]), asString(doc["Name"])),
		Vendor:       asString(doc["Manufacturer"]),
		WattageW:     asInt(doc["PowerCapacityWatts"]),
		SerialNumber: asString(doc["SerialNumber"]),
		PartNumber:   asString(doc["PartNumber"]),
		Firmware:     asString(doc["FirmwareVersion"]),
		Status:       status,
		InputType:    asString(doc["LineInputVoltageType"]),
		InputPowerW:  asInt(doc["PowerInputWatts"]),
		OutputPowerW: asInt(doc["LastPowerOutputWatts"]),
		InputVoltage: asFloat(doc["LineInputVoltage"]),
	}
}
// parseGPU maps a PCIeDevice/Accelerator document onto models.GPU.
// Fields missing from the device document are filled in from the linked
// PCIeFunction documents; the first function that provides a value
// wins (each field is only set while still zero/empty). idx supplies a
// synthetic "GPU<n>" slot name when the document carries no identifier.
func parseGPU(doc map[string]interface{}, functionDocs []map[string]interface{}, idx int) models.GPU {
	slot := firstNonEmpty(asString(doc["Slot"]), asString(doc["Name"]), asString(doc["Id"]))
	if slot == "" {
		slot = fmt.Sprintf("GPU%d", idx)
	}
	gpu := models.GPU{
		Slot:         slot,
		Location:     firstNonEmpty(asString(doc["Location"]), asString(doc["PhysicalLocation"])),
		Model:        firstNonEmpty(asString(doc["Model"]), asString(doc["Name"])),
		Manufacturer: asString(doc["Manufacturer"]),
		SerialNumber: asString(doc["SerialNumber"]),
		PartNumber:   asString(doc["PartNumber"]),
		Firmware:     asString(doc["FirmwareVersion"]),
		Status:       mapStatus(doc["Status"]),
	}
	if bdf := asString(doc["BDF"]); bdf != "" {
		gpu.BDF = bdf
	}
	// Device-level IDs first; zero checks keep the first non-zero value.
	if gpu.VendorID == 0 {
		gpu.VendorID = asHexOrInt(doc["VendorId"])
	}
	if gpu.DeviceID == 0 {
		gpu.DeviceID = asHexOrInt(doc["DeviceId"])
	}
	// Backfill remaining fields from linked PCIeFunction documents.
	for _, fn := range functionDocs {
		if gpu.BDF == "" {
			gpu.BDF = asString(fn["FunctionId"])
		}
		if gpu.VendorID == 0 {
			gpu.VendorID = asHexOrInt(fn["VendorId"])
		}
		if gpu.DeviceID == 0 {
			gpu.DeviceID = asHexOrInt(fn["DeviceId"])
		}
		if gpu.MaxLinkWidth == 0 {
			gpu.MaxLinkWidth = asInt(fn["MaxLinkWidth"])
		}
		if gpu.CurrentLinkWidth == 0 {
			gpu.CurrentLinkWidth = asInt(fn["CurrentLinkWidth"])
		}
		if gpu.MaxLinkSpeed == "" {
			gpu.MaxLinkSpeed = firstNonEmpty(asString(fn["MaxLinkSpeedGTs"]), asString(fn["MaxLinkSpeed"]))
		}
		if gpu.CurrentLinkSpeed == "" {
			gpu.CurrentLinkSpeed = firstNonEmpty(asString(fn["CurrentLinkSpeedGTs"]), asString(fn["CurrentLinkSpeed"]))
		}
	}
	return gpu
}
// parsePCIeDevice maps a PCIeDevice document onto models.PCIeDevice,
// backfilling IDs and link parameters from the linked PCIeFunction
// documents (first function providing a value wins). DeviceClass
// defaults to "PCIe device" when nothing supplies one.
func parsePCIeDevice(doc map[string]interface{}, functionDocs []map[string]interface{}) models.PCIeDevice {
	dev := models.PCIeDevice{
		Slot:         firstNonEmpty(asString(doc["Slot"]), asString(doc["Name"]), asString(doc["Id"])),
		BDF:          asString(doc["BDF"]),
		DeviceClass:  firstNonEmpty(asString(doc["DeviceType"]), asString(doc["PCIeType"])),
		Manufacturer: asString(doc["Manufacturer"]),
		PartNumber:   asString(doc["PartNumber"]),
		SerialNumber: asString(doc["SerialNumber"]),
		VendorID:     asHexOrInt(doc["VendorId"]),
		DeviceID:     asHexOrInt(doc["DeviceId"]),
	}
	// Backfill still-empty fields from linked PCIeFunction documents.
	for _, fn := range functionDocs {
		if dev.BDF == "" {
			dev.BDF = asString(fn["FunctionId"])
		}
		if dev.DeviceClass == "" {
			dev.DeviceClass = firstNonEmpty(asString(fn["DeviceClass"]), asString(fn["ClassCode"]))
		}
		if dev.VendorID == 0 {
			dev.VendorID = asHexOrInt(fn["VendorId"])
		}
		if dev.DeviceID == 0 {
			dev.DeviceID = asHexOrInt(fn["DeviceId"])
		}
		if dev.LinkWidth == 0 {
			dev.LinkWidth = asInt(fn["CurrentLinkWidth"])
		}
		if dev.MaxLinkWidth == 0 {
			dev.MaxLinkWidth = asInt(fn["MaxLinkWidth"])
		}
		if dev.LinkSpeed == "" {
			dev.LinkSpeed = firstNonEmpty(asString(fn["CurrentLinkSpeedGTs"]), asString(fn["CurrentLinkSpeed"]))
		}
		if dev.MaxLinkSpeed == "" {
			dev.MaxLinkSpeed = firstNonEmpty(asString(fn["MaxLinkSpeedGTs"]), asString(fn["MaxLinkSpeed"]))
		}
	}
	if dev.DeviceClass == "" {
		dev.DeviceClass = "PCIe device"
	}
	return dev
}
// parsePCIeFunction builds a PCIeDevice entry from a bare PCIeFunction
// document — used when the BMC exposes only the PCIeFunctions
// collection without parent PCIeDevices. idx supplies a synthetic
// "PCIeFn<n>" slot name when the document has no Id/Name.
func parsePCIeFunction(doc map[string]interface{}, idx int) models.PCIeDevice {
	slot := firstNonEmpty(asString(doc["Id"]), asString(doc["Name"]))
	if slot == "" {
		slot = fmt.Sprintf("PCIeFn%d", idx)
	}
	return models.PCIeDevice{
		Slot:         slot,
		BDF:          asString(doc["FunctionId"]),
		VendorID:     asHexOrInt(doc["VendorId"]),
		DeviceID:     asHexOrInt(doc["DeviceId"]),
		DeviceClass:  firstNonEmpty(asString(doc["DeviceClass"]), asString(doc["ClassCode"]), "PCIe device"),
		Manufacturer: asString(doc["Manufacturer"]),
		SerialNumber: asString(doc["SerialNumber"]),
		LinkWidth:    asInt(doc["CurrentLinkWidth"]),
		LinkSpeed:    firstNonEmpty(asString(doc["CurrentLinkSpeedGTs"]), asString(doc["CurrentLinkSpeed"])),
		MaxLinkWidth: asInt(doc["MaxLinkWidth"]),
		MaxLinkSpeed: firstNonEmpty(asString(doc["MaxLinkSpeedGTs"]), asString(doc["MaxLinkSpeed"])),
	}
}
// looksLikeGPU heuristically decides whether a PCIe device document
// describes a GPU/accelerator: first by DeviceType keywords, then by
// vendor/model name hints, and finally by PCI class code (03xx display,
// 12xx processing accelerator) from the linked function documents.
func looksLikeGPU(doc map[string]interface{}, functionDocs []map[string]interface{}) bool {
	devType := strings.ToLower(asString(doc["DeviceType"]))
	for _, marker := range []string{"gpu", "graphics", "accelerator"} {
		if strings.Contains(devType, marker) {
			return true
		}
	}
	descriptor := strings.ToLower(asString(doc["Name"]) + " " + asString(doc["Model"]) + " " + asString(doc["Manufacturer"]))
	for _, hint := range []string{"gpu", "nvidia", "tesla", "a100", "h100", "l40", "rtx", "radeon", "instinct"} {
		if strings.Contains(descriptor, hint) {
			return true
		}
	}
	for _, fn := range functionDocs {
		code := strings.ToLower(strings.TrimPrefix(asString(fn["ClassCode"]), "0x"))
		if strings.HasPrefix(code, "03") || strings.HasPrefix(code, "12") {
			return true
		}
	}
	return false
}
func looksLikeDrive(doc map[string]interface{}) bool {
if asString(doc["MediaType"]) != "" {
return true
}
if asString(doc["Protocol"]) != "" && (asInt(doc["CapacityGB"]) > 0 || asInt(doc["CapacityBytes"]) > 0) {
return true
}
if asString(doc["Type"]) != "" && (asString(doc["Model"]) != "" || asInt(doc["CapacityGB"]) > 0 || asInt(doc["CapacityBytes"]) > 0) {
return true
}
return false
}
// classifyStorageType normalizes a drive document to one of "NVMe",
// "SSD", "HDD", the raw protocol, or the document's Type/"Storage"
// fallback. Protocol NVMe wins over MediaType, which wins over
// name/model/description keyword matching.
func classifyStorageType(doc map[string]interface{}) string {
	protocol := strings.ToUpper(asString(doc["Protocol"]))
	if strings.Contains(protocol, "NVME") {
		return "NVMe"
	}
	switch strings.ToUpper(asString(doc["MediaType"])) {
	case "SSD":
		return "SSD"
	case "HDD", "HDDT":
		return "HDD"
	}
	descriptor := strings.ToUpper(asString(doc["Name"]) + " " + asString(doc["Model"]) + " " + asString(doc["Description"]))
	switch {
	case strings.Contains(descriptor, "NVME"):
		return "NVMe"
	case strings.Contains(descriptor, "SSD"):
		return "SSD"
	case strings.Contains(descriptor, "HDD"):
		return "HDD"
	}
	if protocol != "" {
		return protocol
	}
	return firstNonEmpty(asString(doc["Type"]), "Storage")
}
// dedupeStorage removes duplicate drives collected from multiple
// Redfish sources, keyed by serial number (or slot|model when no serial
// is available). Entries with an empty key are dropped. Input order is
// preserved for the first occurrence of each key.
func dedupeStorage(items []models.Storage) []models.Storage {
	if len(items) <= 1 {
		return items
	}
	unique := make([]models.Storage, 0, len(items))
	index := make(map[string]struct{}, len(items))
	for _, drive := range items {
		dedupeKey := firstNonEmpty(drive.SerialNumber, drive.Slot+"|"+drive.Model)
		if dedupeKey == "" {
			continue
		}
		if _, exists := index[dedupeKey]; exists {
			continue
		}
		index[dedupeKey] = struct{}{}
		unique = append(unique, drive)
	}
	return unique
}
func parseFirmware(system, bios, manager, secureBoot, networkProtocol map[string]interface{}) []models.FirmwareInfo {
var out []models.FirmwareInfo
@@ -404,6 +1061,92 @@ func asInt(v interface{}) int {
return 0
}
func asInt64(v interface{}) int64 {
switch value := v.(type) {
case nil:
return 0
case int:
return int64(value)
case int64:
return value
case float64:
return int64(value)
case json.Number:
if i, err := value.Int64(); err == nil {
return i
}
if f, err := value.Float64(); err == nil {
return int64(f)
}
case string:
if value == "" {
return 0
}
if i, err := strconv.ParseInt(value, 10, 64); err == nil {
return i
}
}
return 0
}
func asFloat(v interface{}) float64 {
switch value := v.(type) {
case nil:
return 0
case float64:
return value
case int:
return float64(value)
case int64:
return float64(value)
case json.Number:
if f, err := value.Float64(); err == nil {
return f
}
case string:
if value == "" {
return 0
}
if f, err := strconv.ParseFloat(value, 64); err == nil {
return f
}
}
return 0
}
// asHexOrInt converts a loosely-typed Redfish ID value to int. Numeric
// types are converted directly. Strings are interpreted as follows:
//   - with a "0x"/"0X" prefix: hexadecimal;
//   - without a prefix: decimal first, then hexadecimal as a fallback
//     (so bare hex IDs like "10DE" still resolve).
//
// Fix vs previous revision: the "0x" prefix was stripped before trying
// hex first, so plain decimal strings such as "16" were misread as hex
// (returning 22). Decimal now takes priority for unprefixed strings.
func asHexOrInt(v interface{}) int {
	switch value := v.(type) {
	case nil:
		return 0
	case int:
		return value
	case int64:
		return int(value)
	case float64:
		return int(value)
	case json.Number:
		if i, err := value.Int64(); err == nil {
			return int(i)
		}
		if f, err := value.Float64(); err == nil {
			return int(f)
		}
	case string:
		s := strings.ToLower(strings.TrimSpace(value))
		hadHexPrefix := strings.HasPrefix(s, "0x")
		s = strings.TrimPrefix(s, "0x")
		if s == "" {
			return 0
		}
		if hadHexPrefix {
			if i, err := strconv.ParseInt(s, 16, 64); err == nil {
				return int(i)
			}
			return 0
		}
		if i, err := strconv.Atoi(s); err == nil {
			return i
		}
		// Fallback for bare hex such as "10DE" (fails decimal parsing).
		if i, err := strconv.ParseInt(s, 16, 64); err == nil {
			return int(i)
		}
	}
	return 0
}
func firstNonEmpty(values ...string) string {
for _, v := range values {
if strings.TrimSpace(v) != "" {
@@ -412,3 +1155,110 @@ func firstNonEmpty(values ...string) string {
}
return ""
}
// joinPath concatenates a Redfish base path with a suffix, collapsing
// any trailing slashes on base and inserting exactly one separator.
// An empty suffix returns the trimmed base unchanged.
func joinPath(base, suffix string) string {
	trimmed := strings.TrimRight(base, "/")
	switch {
	case suffix == "":
		return trimmed
	case strings.HasPrefix(suffix, "/"):
		return trimmed + suffix
	default:
		return trimmed + "/" + suffix
	}
}
// firstPathOrDefault returns the first discovered path, or fallback
// when the slice is empty.
func firstPathOrDefault(paths []string, fallback string) string {
	if len(paths) > 0 {
		return paths[0]
	}
	return fallback
}
// normalizeRedfishPath converts a raw @odata.id reference into a
// crawlable absolute path: absolute URLs are reduced to their path
// component, a missing leading slash is added, and anything outside
// the /redfish/ namespace (or unparseable) collapses to "".
func normalizeRedfishPath(raw string) string {
	p := strings.TrimSpace(raw)
	if p == "" {
		return ""
	}
	if strings.HasPrefix(p, "http://") || strings.HasPrefix(p, "https://") {
		parsed, err := url.Parse(p)
		if err != nil {
			return ""
		}
		p = parsed.Path
	}
	if !strings.HasPrefix(p, "/") {
		p = "/" + p
	}
	if strings.HasPrefix(p, "/redfish/") {
		return p
	}
	return ""
}
// extractODataIDs walks an arbitrary decoded JSON value and collects
// every non-empty "@odata.id" reference found in nested maps/arrays.
// The values of "@odata.id" keys themselves are not descended into.
func extractODataIDs(v interface{}) []string {
	var collected []string
	var visit func(interface{})
	visit = func(node interface{}) {
		switch value := node.(type) {
		case map[string]interface{}:
			for key, nested := range value {
				if key == "@odata.id" {
					if ref := asString(nested); ref != "" {
						collected = append(collected, ref)
					}
					continue
				}
				visit(nested)
			}
		case []interface{}:
			for _, nested := range value {
				visit(nested)
			}
		}
	}
	visit(v)
	return collected
}
// redfishTopRoot extracts the top-level resource name from a Redfish
// path, e.g. "/redfish/v1/Systems/1" -> "Systems". Paths shallower
// than /redfish/v1/<root> report "root".
func redfishTopRoot(path string) string {
	segments := strings.Split(strings.TrimPrefix(path, "/"), "/")
	if len(segments) < 3 {
		return "root"
	}
	return segments[2]
}
// topRoots returns up to limit "root(count)" labels sorted by count
// descending, for inclusion in progress messages. An empty map yields
// ["n/a"].
//
// Fix vs previous revision: ties were ordered by random map iteration
// (sort.Slice is also not stable), so progress messages flickered
// between equally-counted roots. Ties now break alphabetically, making
// the output deterministic.
func topRoots(counts map[string]int, limit int) []string {
	if len(counts) == 0 {
		return []string{"n/a"}
	}
	type rootCount struct {
		root  string
		count int
	}
	items := make([]rootCount, 0, len(counts))
	for root, count := range counts {
		items = append(items, rootCount{root: root, count: count})
	}
	sort.Slice(items, func(i, j int) bool {
		if items[i].count != items[j].count {
			return items[i].count > items[j].count
		}
		// Alphabetical tie-break keeps output deterministic.
		return items[i].root < items[j].root
	})
	if len(items) > limit {
		items = items[:limit]
	}
	out := make([]string, 0, len(items))
	for _, item := range items {
		out = append(out, fmt.Sprintf("%s(%d)", item.root, item.count))
	}
	return out
}
// minInt32 returns the smaller of a and b.
func minInt32(a, b int32) int32 {
	if a > b {
		return b
	}
	return a
}

View File

@@ -70,11 +70,59 @@ func TestRedfishConnectorCollect(t *testing.T) {
"CapacityGB": 960,
"SerialNumber": "SN123",
})
register("/redfish/v1/Systems/1/PCIeDevices", map[string]interface{}{
"Members": []map[string]string{
{"@odata.id": "/redfish/v1/Systems/1/PCIeDevices/GPU1"},
},
})
register("/redfish/v1/Systems/1/PCIeDevices/GPU1", map[string]interface{}{
"Id": "GPU1",
"Name": "NVIDIA H100",
"Model": "NVIDIA H100 PCIe",
"Manufacturer": "NVIDIA",
"SerialNumber": "GPU-SN-001",
"PCIeFunctions": map[string]interface{}{
"@odata.id": "/redfish/v1/Systems/1/PCIeDevices/GPU1/PCIeFunctions",
},
})
register("/redfish/v1/Systems/1/PCIeDevices/GPU1/PCIeFunctions", map[string]interface{}{
"Members": []map[string]string{
{"@odata.id": "/redfish/v1/Systems/1/PCIeFunctions/GPU1F0"},
},
})
register("/redfish/v1/Systems/1/PCIeFunctions/GPU1F0", map[string]interface{}{
"FunctionId": "0000:65:00.0",
"VendorId": "0x10DE",
"DeviceId": "0x2331",
"ClassCode": "0x030200",
"CurrentLinkWidth": 16,
"CurrentLinkSpeed": "16.0 GT/s",
"MaxLinkWidth": 16,
"MaxLinkSpeed": "16.0 GT/s",
})
register("/redfish/v1/Chassis/1/NetworkAdapters", map[string]interface{}{
"Members": []map[string]string{
{"@odata.id": "/redfish/v1/Chassis/1/NetworkAdapters/1"},
},
})
register("/redfish/v1/Chassis/1/Power", map[string]interface{}{
"PowerSupplies": []map[string]interface{}{
{
"MemberId": "PSU1",
"Name": "PSU Slot 1",
"Model": "PWS-2K01A-1R",
"Manufacturer": "Delta",
"PowerCapacityWatts": 2000,
"PowerInputWatts": 1600,
"LastPowerOutputWatts": 1200,
"LineInputVoltage": 230,
"Status": map[string]interface{}{
"Health": "OK",
"State": "Enabled",
},
},
},
})
register("/redfish/v1/Chassis/1/NetworkAdapters/1", map[string]interface{}{
"Name": "Mellanox",
"Model": "ConnectX-6",
@@ -122,7 +170,33 @@ func TestRedfishConnectorCollect(t *testing.T) {
if len(result.Hardware.NetworkAdapters) != 1 {
t.Fatalf("expected one nic, got %d", len(result.Hardware.NetworkAdapters))
}
if len(result.Hardware.GPUs) != 1 {
t.Fatalf("expected one gpu, got %d", len(result.Hardware.GPUs))
}
if result.Hardware.GPUs[0].BDF != "0000:65:00.0" {
t.Fatalf("unexpected gpu BDF: %q", result.Hardware.GPUs[0].BDF)
}
if len(result.Hardware.PCIeDevices) != 1 {
t.Fatalf("expected one pcie device, got %d", len(result.Hardware.PCIeDevices))
}
if len(result.Hardware.PowerSupply) != 1 {
t.Fatalf("expected one psu, got %d", len(result.Hardware.PowerSupply))
}
if result.Hardware.PowerSupply[0].WattageW != 2000 {
t.Fatalf("unexpected psu wattage: %d", result.Hardware.PowerSupply[0].WattageW)
}
if len(result.Hardware.Firmware) == 0 {
t.Fatalf("expected firmware entries")
}
if result.RawPayloads == nil {
t.Fatalf("expected raw payloads")
}
treeAny, ok := result.RawPayloads["redfish_tree"]
if !ok {
t.Fatalf("expected redfish_tree in raw payloads")
}
tree, ok := treeAny.(map[string]interface{})
if !ok || len(tree) == 0 {
t.Fatalf("expected non-empty redfish_tree, got %#v", treeAny)
}
}

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io"
"text/tabwriter"
"git.mchus.pro/mchus/logpile/internal/models"
)
@@ -125,13 +126,16 @@ func (e *Exporter) ExportTXT(w io.Writer) error {
return nil
}
fmt.Fprintf(w, "File: %s\n", e.result.Filename)
fmt.Fprintf(w, "File:\t%s\n", e.result.Filename)
fmt.Fprintf(w, "Source:\t%s\n", e.result.SourceType)
fmt.Fprintf(w, "Protocol:\t%s\n", e.result.Protocol)
fmt.Fprintf(w, "Target:\t%s\n", e.result.TargetHost)
fmt.Fprintln(w)
// Server model and serial number
if e.result.Hardware != nil && e.result.Hardware.BoardInfo.ProductName != "" {
fmt.Fprintln(w)
fmt.Fprintf(w, "Server Model: %s\n", e.result.Hardware.BoardInfo.ProductName)
fmt.Fprintf(w, "Serial Number: %s\n", e.result.Hardware.BoardInfo.SerialNumber)
fmt.Fprintf(w, "Server Model:\t%s\n", e.result.Hardware.BoardInfo.ProductName)
fmt.Fprintf(w, "Serial Number:\t%s\n", e.result.Hardware.BoardInfo.SerialNumber)
}
fmt.Fprintln(w)
@@ -139,118 +143,172 @@ func (e *Exporter) ExportTXT(w io.Writer) error {
if e.result.Hardware != nil {
hw := e.result.Hardware
// Firmware
// Firmware tab
if len(hw.Firmware) > 0 {
fmt.Fprintln(w, "FIRMWARE VERSIONS")
fmt.Fprintln(w, "-----------------")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Component\tVersion\tBuild Time")
for _, fw := range hw.Firmware {
fmt.Fprintf(w, " %s: %s\n", fw.DeviceName, fw.Version)
fmt.Fprintf(tw, "%s\t%s\t%s\n", fw.DeviceName, fw.Version, fw.BuildTime)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// CPUs
// CPU tab
if len(hw.CPUs) > 0 {
fmt.Fprintln(w, "PROCESSORS")
fmt.Fprintln(w, "----------")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Socket\tModel\tCores\tThreads\tFreq MHz\tTurbo MHz\tTDP W\tPPIN/SN")
for _, cpu := range hw.CPUs {
fmt.Fprintf(w, " Socket %d: %s\n", cpu.Socket, cpu.Model)
fmt.Fprintf(w, " Cores: %d, Threads: %d, Freq: %d MHz (Turbo: %d MHz)\n",
cpu.Cores, cpu.Threads, cpu.FrequencyMHz, cpu.MaxFreqMHz)
fmt.Fprintf(w, " TDP: %dW, L3 Cache: %d KB\n", cpu.TDP, cpu.L3CacheKB)
id := cpu.SerialNumber
if id == "" {
id = cpu.PPIN
}
fmt.Fprintf(tw, "CPU%d\t%s\t%d\t%d\t%d\t%d\t%d\t%s\n",
cpu.Socket, cpu.Model, cpu.Cores, cpu.Threads, cpu.FrequencyMHz, cpu.MaxFreqMHz, cpu.TDP, id)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// Memory
// Memory tab
if len(hw.Memory) > 0 {
fmt.Fprintln(w, "MEMORY")
fmt.Fprintln(w, "------")
totalMB := 0
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Slot\tPresent\tSize MB\tType\tSpeed MHz\tVendor\tModel/PN\tSerial\tStatus")
for _, mem := range hw.Memory {
totalMB += mem.SizeMB
location := mem.Location
if location == "" {
location = mem.Slot
}
fmt.Fprintf(w, " Total: %d GB (%d DIMMs)\n", totalMB/1024, len(hw.Memory))
fmt.Fprintf(w, " Type: %s @ %d MHz\n", hw.Memory[0].Type, hw.Memory[0].CurrentSpeedMHz)
fmt.Fprintf(w, " Manufacturer: %s\n", hw.Memory[0].Manufacturer)
fmt.Fprintf(tw, "%s\t%t\t%d\t%s\t%d\t%s\t%s\t%s\t%s\n",
location, mem.Present, mem.SizeMB, mem.Type, mem.CurrentSpeedMHz, mem.Manufacturer, mem.PartNumber, mem.SerialNumber, mem.Status)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// Storage
// Power tab
if len(hw.PowerSupply) > 0 {
fmt.Fprintln(w, "POWER SUPPLIES")
fmt.Fprintln(w, "--------------")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Slot\tPresent\tVendor\tModel\tWattage W\tInput W\tOutput W\tInput V\tTemp C\tStatus\tSerial")
for _, psu := range hw.PowerSupply {
fmt.Fprintf(tw, "%s\t%t\t%s\t%s\t%d\t%d\t%d\t%.0f\t%d\t%s\t%s\n",
psu.Slot, psu.Present, psu.Vendor, psu.Model, psu.WattageW, psu.InputPowerW, psu.OutputPowerW, psu.InputVoltage, psu.TemperatureC, psu.Status, psu.SerialNumber)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// Storage tab
if len(hw.Storage) > 0 {
fmt.Fprintln(w, "STORAGE")
fmt.Fprintln(w, "-------")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Slot\tPresent\tType\tInterface\tModel\tSize GB\tVendor\tFirmware\tSerial")
for _, stor := range hw.Storage {
fmt.Fprintf(w, " %s: %s (%d GB) - S/N: %s\n",
stor.Slot, stor.Model, stor.SizeGB, stor.SerialNumber)
fmt.Fprintf(tw, "%s\t%t\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n",
stor.Slot, stor.Present, stor.Type, stor.Interface, stor.Model, stor.SizeGB, stor.Manufacturer, stor.Firmware, stor.SerialNumber)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// PCIe
// GPU tab
if len(hw.GPUs) > 0 {
fmt.Fprintln(w, "GPUS")
fmt.Fprintln(w, "----")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Slot\tModel\tVendor\tBDF\tPCIe\tSerial\tStatus")
for _, gpu := range hw.GPUs {
link := fmt.Sprintf("x%d %s", gpu.CurrentLinkWidth, gpu.CurrentLinkSpeed)
if gpu.MaxLinkWidth > 0 || gpu.MaxLinkSpeed != "" {
link = fmt.Sprintf("%s / x%d %s", link, gpu.MaxLinkWidth, gpu.MaxLinkSpeed)
}
fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
gpu.Slot, gpu.Model, gpu.Manufacturer, gpu.BDF, link, gpu.SerialNumber, gpu.Status)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// Network tab
if len(hw.NetworkAdapters) > 0 {
fmt.Fprintln(w, "NETWORK ADAPTERS")
fmt.Fprintln(w, "----------------")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Slot\tLocation\tModel\tVendor\tPorts\tType\tStatus\tSerial")
for _, nic := range hw.NetworkAdapters {
fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%d\t%s\t%s\t%s\n",
nic.Slot, nic.Location, nic.Model, nic.Vendor, nic.PortCount, nic.PortType, nic.Status, nic.SerialNumber)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// Device inventory tab
if len(hw.PCIeDevices) > 0 {
fmt.Fprintln(w, "PCIE DEVICES")
fmt.Fprintln(w, "------------")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Slot\tBDF\tClass\tVendor\tVID:DID\tLink\tSerial")
for _, pcie := range hw.PCIeDevices {
fmt.Fprintf(w, " %s: %s (x%d %s)\n",
pcie.Slot, pcie.DeviceClass, pcie.LinkWidth, pcie.LinkSpeed)
if pcie.SerialNumber != "" {
fmt.Fprintf(w, " S/N: %s\n", pcie.SerialNumber)
}
if len(pcie.MACAddresses) > 0 {
fmt.Fprintf(w, " MACs: %v\n", pcie.MACAddresses)
}
fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%04x:%04x\tx%d %s / x%d %s\t%s\n",
pcie.Slot, pcie.BDF, pcie.DeviceClass, pcie.Manufacturer, pcie.VendorID, pcie.DeviceID,
pcie.LinkWidth, pcie.LinkSpeed, pcie.MaxLinkWidth, pcie.MaxLinkSpeed, pcie.SerialNumber)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
}
// Sensors summary
// Sensors tab
if len(e.result.Sensors) > 0 {
fmt.Fprintln(w, "SENSOR READINGS")
fmt.Fprintln(w, "---------------")
// Group by type
byType := make(map[string][]models.SensorReading)
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Type\tName\tValue\tUnit\tRaw\tStatus")
for _, s := range e.result.Sensors {
byType[s.Type] = append(byType[s.Type], s)
}
for stype, sensors := range byType {
fmt.Fprintf(w, "\n %s:\n", stype)
for _, s := range sensors {
if s.Value != 0 {
fmt.Fprintf(w, " %s: %.0f %s [%s]\n", s.Name, s.Value, s.Unit, s.Status)
} else if s.RawValue != "" {
fmt.Fprintf(w, " %s: %s [%s]\n", s.Name, s.RawValue, s.Status)
}
}
fmt.Fprintf(tw, "%s\t%s\t%.0f\t%s\t%s\t%s\n", s.Type, s.Name, s.Value, s.Unit, s.RawValue, s.Status)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// FRU summary
// Serials/FRU tab
if len(e.result.FRU) > 0 {
fmt.Fprintln(w, "FRU COMPONENTS")
fmt.Fprintln(w, "--------------")
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Description\tManufacturer\tProduct\tSerial\tPart Number")
for _, fru := range e.result.FRU {
name := fru.ProductName
if name == "" {
name = fru.Description
}
fmt.Fprintf(w, " %s\n", name)
if fru.SerialNumber != "" {
fmt.Fprintf(w, " Serial: %s\n", fru.SerialNumber)
}
if fru.Manufacturer != "" {
fmt.Fprintf(w, " Manufacturer: %s\n", fru.Manufacturer)
}
fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\n", fru.Description, fru.Manufacturer, name, fru.SerialNumber, fru.PartNumber)
}
_ = tw.Flush()
fmt.Fprintln(w)
}
// Events summary
// Events tab
fmt.Fprintf(w, "EVENTS: %d total\n", len(e.result.Events))
if len(e.result.Events) > 0 {
tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
fmt.Fprintln(tw, "Time\tSeverity\tSource\tType\tName\tDescription")
for _, ev := range e.result.Events {
fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\t%s\n",
ev.Timestamp.Format("2006-01-02 15:04:05"), ev.Severity, ev.Source, ev.SensorType, ev.SensorName, ev.Description)
}
_ = tw.Flush()
}
var critical, warning, info int
for _, ev := range e.result.Events {
switch ev.Severity {

View File

@@ -14,6 +14,7 @@ type AnalysisResult struct {
Protocol string `json:"protocol,omitempty"` // redfish | ipmi
TargetHost string `json:"target_host,omitempty"` // BMC host for live collect
CollectedAt time.Time `json:"collected_at,omitempty"` // Collection/upload timestamp
RawPayloads map[string]any `json:"raw_payloads,omitempty"` // Additional source payloads (e.g. Redfish tree)
Events []Event `json:"events"`
FRU []FRUInfo `json:"fru"`
Sensors []SensorReading `json:"sensors"`

View File

@@ -1,13 +1,16 @@
package server
import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"fmt"
"html/template"
"io"
"net/http"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
@@ -55,23 +58,48 @@ func (s *Server) handleUpload(w http.ResponseWriter, r *http.Request) {
}
defer file.Close()
// Parse archive
p := parser.NewBMCParser()
if err := p.ParseFromReader(file, header.Filename); err != nil {
jsonError(w, "Failed to parse archive: "+err.Error(), http.StatusBadRequest)
payload, err := io.ReadAll(file)
if err != nil {
jsonError(w, "Failed to read file", http.StatusBadRequest)
return
}
result := p.Result()
var (
result *models.AnalysisResult
vendor string
)
if looksLikeJSONSnapshot(header.Filename, payload) {
snapshotResult, snapshotErr := parseUploadedSnapshot(payload)
if snapshotErr != nil {
jsonError(w, "Failed to parse snapshot: "+snapshotErr.Error(), http.StatusBadRequest)
return
}
result = snapshotResult
vendor = strings.TrimSpace(snapshotResult.Protocol)
if vendor == "" {
vendor = "snapshot"
}
} else {
// Parse archive
p := parser.NewBMCParser()
if err := p.ParseFromReader(bytes.NewReader(payload), header.Filename); err != nil {
jsonError(w, "Failed to parse archive: "+err.Error(), http.StatusBadRequest)
return
}
result = p.Result()
applyArchiveSourceMetadata(result)
vendor = p.DetectedVendor()
}
s.SetResult(result)
s.SetDetectedVendor(p.DetectedVendor())
s.SetDetectedVendor(vendor)
jsonResponse(w, map[string]interface{}{
"status": "ok",
"message": "File uploaded and parsed successfully",
"filename": header.Filename,
"vendor": p.DetectedVendor(),
"vendor": vendor,
"stats": map[string]int{
"events": len(result.Events),
"sensors": len(result.Sensors),
@@ -529,7 +557,7 @@ func (s *Server) handleExportCSV(w http.ResponseWriter, r *http.Request) {
result := s.GetResult()
w.Header().Set("Content-Type", "text/csv; charset=utf-8")
w.Header().Set("Content-Disposition", "attachment; filename=serials.csv")
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", exportFilename(result, "csv")))
exp := exporter.New(result)
exp.ExportCSV(w)
@@ -539,7 +567,7 @@ func (s *Server) handleExportJSON(w http.ResponseWriter, r *http.Request) {
result := s.GetResult()
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Disposition", "attachment; filename=report.json")
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", exportFilename(result, "json")))
exp := exporter.New(result)
exp.ExportJSON(w)
@@ -549,7 +577,7 @@ func (s *Server) handleExportTXT(w http.ResponseWriter, r *http.Request) {
result := s.GetResult()
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("Content-Disposition", "attachment; filename=report.txt")
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", exportFilename(result, "txt")))
exp := exporter.New(result)
exp.ExportTXT(w)
@@ -682,7 +710,7 @@ func (s *Server) startCollectionJob(jobID string, req CollectRequest) {
s.jobManager.UpdateJobStatus(jobID, CollectStatusSuccess, 100, "")
s.jobManager.AppendJobLog(jobID, "Сбор завершен")
s.SetResult(result)
s.SetDetectedVendor("")
s.SetDetectedVendor(req.Protocol)
}()
}
@@ -754,6 +782,9 @@ func applyCollectSourceMetadata(result *models.AnalysisResult, req CollectReques
result.Protocol = req.Protocol
result.TargetHost = req.Host
result.CollectedAt = time.Now().UTC()
if strings.TrimSpace(result.Filename) == "" {
result.Filename = fmt.Sprintf("%s://%s", req.Protocol, req.Host)
}
}
func toCollectorRequest(req CollectRequest) collector.Request {
@@ -769,6 +800,39 @@ func toCollectorRequest(req CollectRequest) collector.Request {
}
}
// looksLikeJSONSnapshot reports whether an uploaded file should be treated
// as a JSON snapshot rather than a binary archive: either the filename has
// a ".json" extension (case-insensitive) or the payload's first non-space
// byte opens a JSON object or array.
func looksLikeJSONSnapshot(filename string, payload []byte) bool {
	if strings.EqualFold(filepath.Ext(filename), ".json") {
		return true
	}
	body := bytes.TrimSpace(payload)
	if len(body) == 0 {
		return false
	}
	return body[0] == '{' || body[0] == '['
}
// parseUploadedSnapshot decodes a previously exported AnalysisResult JSON
// snapshot and backfills metadata that older exports may omit (source
// type, collection timestamp, filename). It rejects JSON that decodes but
// carries no recognizable data sections.
func parseUploadedSnapshot(payload []byte) (*models.AnalysisResult, error) {
	var result models.AnalysisResult
	if err := json.Unmarshal(payload, &result); err != nil {
		return nil, err
	}
	// A snapshot must contain at least one data section; otherwise the
	// JSON is some unrelated document.
	hasData := result.Hardware != nil ||
		len(result.Events) > 0 ||
		len(result.Sensors) > 0 ||
		len(result.FRU) > 0
	if !hasData {
		return nil, fmt.Errorf("unsupported snapshot format")
	}
	if strings.TrimSpace(result.SourceType) == "" {
		// Live collections record a protocol; archive uploads do not.
		if result.Protocol != "" {
			result.SourceType = models.SourceTypeAPI
		} else {
			result.SourceType = models.SourceTypeArchive
		}
	}
	if result.CollectedAt.IsZero() {
		result.CollectedAt = time.Now().UTC()
	}
	if strings.TrimSpace(result.Filename) == "" {
		result.Filename = "uploaded_snapshot.json"
	}
	return &result, nil
}
func (s *Server) getCollector(protocol string) (collector.Connector, bool) {
if s.collectors == nil {
s.collectors = collector.NewDefaultRegistry()
@@ -808,3 +872,59 @@ func isGPUDevice(deviceClass string) bool {
}
return false
}
// exportFilename builds a download name of the form
// "YYYY-MM-DD (MODEL) - SERIAL.ext" from the current analysis result.
// Missing metadata falls back to placeholders ("SERVER MODEL", "SERVER SN",
// today's UTC date); an empty extension defaults to "txt".
func exportFilename(result *models.AnalysisResult, ext string) string {
	date := time.Now().UTC().Format("2006-01-02")
	model, sn := "SERVER MODEL", "SERVER SN"
	if result != nil {
		if !result.CollectedAt.IsZero() {
			date = result.CollectedAt.UTC().Format("2006-01-02")
		}
		if hw := result.Hardware; hw != nil {
			if v := strings.TrimSpace(hw.BoardInfo.ProductName); v != "" {
				model = v
			}
			if v := strings.TrimSpace(hw.BoardInfo.SerialNumber); v != "" {
				sn = v
			}
		}
	}
	extension := strings.TrimPrefix(strings.TrimSpace(ext), ".")
	if extension == "" {
		extension = "txt"
	}
	return fmt.Sprintf("%s (%s) - %s.%s",
		date, sanitizeFilenamePart(model), sanitizeFilenamePart(sn), extension)
}
// sanitizeFilenamePart makes v safe for use inside a download filename:
// characters illegal on common filesystems become underscores, newlines
// and tabs become spaces, runs of whitespace collapse to single spaces,
// and an empty result is replaced with "-".
func sanitizeFilenamePart(v string) string {
	if strings.TrimSpace(v) == "" {
		return "-"
	}
	var b strings.Builder
	b.Grow(len(v))
	for _, r := range v {
		switch r {
		case '/', '\\', ':', '*', '?', '"', '<', '>', '|':
			b.WriteRune('_')
		case '\n', '\r', '\t':
			b.WriteRune(' ')
		default:
			b.WriteRune(r)
		}
	}
	cleaned := strings.Join(strings.Fields(b.String()), " ")
	if cleaned == "" {
		return "-"
	}
	return cleaned
}

View File

@@ -154,6 +154,89 @@ func TestCollectStatusNotFoundSmoke(t *testing.T) {
assertJSONError(t, resp, "Collect job not found")
}
// TestUploadRedfishSnapshotJSON uploads a JSON snapshot (instead of a
// binary archive) through /api/upload and verifies that the handler
// detects the snapshot's protocol as the vendor, and that /api/status
// afterwards exposes the snapshot's protocol and filename.
func TestUploadRedfishSnapshotJSON(t *testing.T) {
	_, ts := newFlowTestServer()
	defer ts.Close()

	snapshot := `{
		"filename": "redfish://bmc01.local",
		"source_type": "api",
		"protocol": "redfish",
		"target_host": "bmc01.local",
		"hardware": {
			"storage": [
				{
					"slot": "Drive1",
					"type": "NVMe",
					"model": "KIOXIA CD8",
					"size_gb": 3840,
					"serial_number": "SN-NVME-1",
					"present": true
				}
			]
		},
		"raw_payloads": {
			"redfish_tree": {
				"/redfish/v1": {"Name": "ServiceRoot"}
			}
		}
	}`

	// Build the multipart form body the upload endpoint expects.
	var form bytes.Buffer
	mw := multipart.NewWriter(&form)
	filePart, err := mw.CreateFormFile("archive", "snapshot.json")
	if err != nil {
		t.Fatalf("create form file: %v", err)
	}
	if _, err := filePart.Write([]byte(snapshot)); err != nil {
		t.Fatalf("write snapshot body: %v", err)
	}
	if err := mw.Close(); err != nil {
		t.Fatalf("close multipart writer: %v", err)
	}

	req, err := http.NewRequest(http.MethodPost, ts.URL+"/api/upload", &form)
	if err != nil {
		t.Fatalf("build upload request: %v", err)
	}
	req.Header.Set("Content-Type", mw.FormDataContentType())

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatalf("upload request failed: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("expected 200 from /api/upload, got %d", resp.StatusCode)
	}

	var uploaded map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&uploaded); err != nil {
		t.Fatalf("decode upload response: %v", err)
	}
	// The vendor for a snapshot upload is taken from its protocol field.
	if uploaded["vendor"] != "redfish" {
		t.Fatalf("expected vendor redfish, got %v", uploaded["vendor"])
	}

	statusResp, err := http.Get(ts.URL + "/api/status")
	if err != nil {
		t.Fatalf("status request failed: %v", err)
	}
	defer statusResp.Body.Close()

	var state map[string]interface{}
	if err := json.NewDecoder(statusResp.Body).Decode(&state); err != nil {
		t.Fatalf("decode status response: %v", err)
	}
	if state["protocol"] != "redfish" {
		t.Fatalf("expected protocol redfish, got %v", state["protocol"])
	}
	if state["filename"] != "redfish://bmc01.local" {
		t.Fatalf("expected snapshot filename, got %v", state["filename"])
	}
}
func buildTarArchive(t *testing.T, name, content string) []byte {
t.Helper()

View File

@@ -426,7 +426,11 @@ async function loadDataFromStatus() {
if (!payload.loaded) {
return;
}
await loadData(payload.vendor || '', payload.filename || '');
const vendor = payload.vendor || payload.protocol || '';
const filename = payload.filename || (payload.protocol && payload.target_host
? `${payload.protocol}://${payload.target_host}`
: '');
await loadData(vendor, filename);
} catch (err) {
console.error('Failed to load data after collect:', err);
}

View File

@@ -21,10 +21,10 @@
<div id="archive-source-content">
<div class="upload-area" id="drop-zone">
<p>Перетащите архив сюда или</p>
<input type="file" id="file-input" accept="application/gzip,application/x-gzip,application/x-tar,application/zip" hidden>
<p>Перетащите архив или JSON snapshot сюда</p>
<input type="file" id="file-input" accept="application/gzip,application/x-gzip,application/x-tar,application/zip,application/json,.json,.tar,.tar.gz,.tgz,.zip" hidden>
<button type="button" onclick="document.getElementById('file-input').click()">Выберите файл</button>
<p class="hint">Поддерживаемые форматы: tar.gz, zip</p>
<p class="hint">Поддерживаемые форматы: tar.gz, zip, json</p>
</div>
<div id="upload-status"></div>
<div id="parsers-info" class="parsers-info"></div>