Add raw export reanalyze flow for Redfish snapshots

This commit is contained in:
Mikhail Chusavitin
2026-02-24 17:23:26 +03:00
parent 5d9e9d73de
commit 810c4b5ff9
7 changed files with 783 additions and 23 deletions

View File

@@ -542,15 +542,15 @@ func (c *RedfishConnector) collectRawRedfishTree(ctx context.Context, client *ht
enqueue(ref)
}
}
n := atomic.AddInt32(&processed, 1)
if err != nil {
c.debugSnapshotf("worker=%d fetch error path=%s err=%v", workerID, current, err)
if emit != nil {
emit(Progress{
Status: "running",
Progress: 92 + int(minInt32(n/200, 6)),
Message: fmt.Sprintf("Redfish snapshot: ошибка на %s", compactProgressPath(current)),
})
n := atomic.AddInt32(&processed, 1)
if err != nil {
c.debugSnapshotf("worker=%d fetch error path=%s err=%v", workerID, current, err)
if emit != nil && shouldReportSnapshotFetchError(err) {
emit(Progress{
Status: "running",
Progress: 92 + int(minInt32(n/200, 6)),
Message: fmt.Sprintf("Redfish snapshot: ошибка на %s", compactProgressPath(current)),
})
}
}
if emit != nil && n%40 == 0 {
@@ -894,14 +894,20 @@ func parsePSU(doc map[string]interface{}, idx int) models.PSU {
}
func parseGPU(doc map[string]interface{}, functionDocs []map[string]interface{}, idx int) models.GPU {
slot := firstNonEmpty(asString(doc["Slot"]), asString(doc["Name"]), asString(doc["Id"]))
slot := firstNonEmpty(
redfishLocationLabel(doc["Slot"]),
redfishLocationLabel(doc["Location"]),
redfishLocationLabel(doc["PhysicalLocation"]),
asString(doc["Name"]),
asString(doc["Id"]),
)
if slot == "" {
slot = fmt.Sprintf("GPU%d", idx)
}
gpu := models.GPU{
Slot: slot,
Location: firstNonEmpty(asString(doc["Location"]), asString(doc["PhysicalLocation"])),
Location: firstNonEmpty(redfishLocationLabel(doc["Location"]), redfishLocationLabel(doc["PhysicalLocation"])),
Model: firstNonEmpty(asString(doc["Model"]), asString(doc["Name"])),
Manufacturer: asString(doc["Manufacturer"]),
SerialNumber: asString(doc["SerialNumber"]),
@@ -958,7 +964,7 @@ func parseGPU(doc map[string]interface{}, functionDocs []map[string]interface{},
func parsePCIeDevice(doc map[string]interface{}, functionDocs []map[string]interface{}) models.PCIeDevice {
dev := models.PCIeDevice{
Slot: firstNonEmpty(asString(doc["Slot"]), asString(doc["Name"]), asString(doc["Id"])),
Slot: firstNonEmpty(redfishLocationLabel(doc["Slot"]), redfishLocationLabel(doc["Location"]), asString(doc["Name"]), asString(doc["Id"])),
BDF: asString(doc["BDF"]),
DeviceClass: firstNonEmpty(asString(doc["DeviceType"]), asString(doc["PCIeType"])),
Manufacturer: asString(doc["Manufacturer"]),
@@ -1013,7 +1019,7 @@ func parsePCIeDevice(doc map[string]interface{}, functionDocs []map[string]inter
}
func parsePCIeFunction(doc map[string]interface{}, idx int) models.PCIeDevice {
slot := firstNonEmpty(asString(doc["Id"]), asString(doc["Name"]))
slot := firstNonEmpty(redfishLocationLabel(doc["Location"]), asString(doc["Id"]), asString(doc["Name"]))
if slot == "" {
slot = fmt.Sprintf("PCIeFn%d", idx)
}
@@ -1367,6 +1373,9 @@ func normalizeRedfishPath(raw string) string {
if raw == "" {
return ""
}
if i := strings.Index(raw, "#"); i >= 0 {
raw = raw[:i]
}
if strings.HasPrefix(raw, "http://") || strings.HasPrefix(raw, "https://") {
u, err := url.Parse(raw)
@@ -1444,6 +1453,45 @@ func topRoots(counts map[string]int, limit int) []string {
return out
}
// redfishLocationLabel extracts a human-readable slot/location label from a
// Redfish Location-like value. A string is trimmed and returned as-is; a map
// is unwrapped through the common Location/PartLocation nesting and reduced
// to ServiceLabel, "<LocationType> <Ordinal>", LocationType, or
// "Slot <Ordinal>", in that priority order. Every other shape yields "" so
// complex objects never stringify into map[...] garbage in UI/export output.
func redfishLocationLabel(v interface{}) string {
	if text, isText := v.(string); isText {
		return strings.TrimSpace(text)
	}
	obj, isObj := v.(map[string]interface{})
	if !isObj {
		// nil and any other type: deliberately render nothing.
		return ""
	}
	// Unwrap the common shapes first:
	//   Slot.Location.PartLocation.ServiceLabel
	//   Location.PartLocation.ServiceLabel
	//   PartLocation.ServiceLabel
	for _, key := range []string{"Location", "PartLocation"} {
		if nested := redfishLocationLabel(obj[key]); nested != "" {
			return nested
		}
	}
	serviceLabel := asString(obj["ServiceLabel"])
	locationType := asString(obj["LocationType"])
	ordinal := asString(obj["LocationOrdinalValue"])
	switch {
	case serviceLabel != "":
		return serviceLabel
	case locationType != "" && ordinal != "":
		return fmt.Sprintf("%s %s", locationType, ordinal)
	case locationType != "":
		return locationType
	case ordinal != "":
		return "Slot " + ordinal
	default:
		return ""
	}
}
func compactProgressPath(p string) string {
const maxLen = 72
if len(p) <= maxLen {
@@ -1452,6 +1500,20 @@ func compactProgressPath(p string) string {
return "..." + p[len(p)-maxLen+3:]
}
func shouldReportSnapshotFetchError(err error) bool {
if err == nil {
return false
}
msg := err.Error()
if strings.HasPrefix(msg, "status 404 ") ||
strings.HasPrefix(msg, "status 405 ") ||
strings.HasPrefix(msg, "status 410 ") ||
strings.HasPrefix(msg, "status 501 ") {
return false
}
return true
}
func minInt32(a, b int32) int32 {
if a < b {
return a

View File

@@ -0,0 +1,433 @@
package collector
import (
"fmt"
"git.mchus.pro/mchus/logpile/internal/models"
)
// ReplayRedfishFromRawPayloads rebuilds AnalysisResult from saved Redfish raw payloads.
// It expects rawPayloads["redfish_tree"] to contain a map[path]document snapshot.
// emit, when non-nil, receives coarse progress updates (10/30/55/80%).
// It returns an error only when the snapshot data is missing/invalid or when
// the service root / primary system document cannot be found in the tree;
// all other endpoints are replayed best-effort and their errors are ignored.
func ReplayRedfishFromRawPayloads(rawPayloads map[string]any, emit ProgressFn) (*models.AnalysisResult, error) {
	if len(rawPayloads) == 0 {
		return nil, fmt.Errorf("missing raw_payloads")
	}
	treeAny, ok := rawPayloads["redfish_tree"]
	if !ok {
		return nil, fmt.Errorf("raw_payloads.redfish_tree is missing")
	}
	tree, ok := treeAny.(map[string]interface{})
	if !ok || len(tree) == 0 {
		return nil, fmt.Errorf("raw_payloads.redfish_tree has invalid format")
	}
	r := redfishSnapshotReader{tree: tree}
	if emit != nil {
		emit(Progress{Status: "running", Progress: 10, Message: "Redfish snapshot: replay service root..."})
	}
	// The service root must be present: it proves the snapshot is a real tree.
	if _, err := r.getJSON("/redfish/v1"); err != nil {
		return nil, fmt.Errorf("redfish service root: %w", err)
	}
	// Fall back to the conventional ".../1" member when a collection is absent.
	systemPaths := r.discoverMemberPaths("/redfish/v1/Systems", "/redfish/v1/Systems/1")
	chassisPaths := r.discoverMemberPaths("/redfish/v1/Chassis", "/redfish/v1/Chassis/1")
	managerPaths := r.discoverMemberPaths("/redfish/v1/Managers", "/redfish/v1/Managers/1")
	primarySystem := firstPathOrDefault(systemPaths, "/redfish/v1/Systems/1")
	primaryManager := firstPathOrDefault(managerPaths, "/redfish/v1/Managers/1")
	if emit != nil {
		emit(Progress{Status: "running", Progress: 30, Message: "Redfish snapshot: replay system..."})
	}
	systemDoc, err := r.getJSON(primarySystem)
	if err != nil {
		return nil, fmt.Errorf("system info: %w", err)
	}
	// Optional endpoints: errors deliberately ignored, parsers accept nil docs.
	biosDoc, _ := r.getJSON(joinPath(primarySystem, "/Bios"))
	secureBootDoc, _ := r.getJSON(joinPath(primarySystem, "/SecureBoot"))
	if emit != nil {
		emit(Progress{Status: "running", Progress: 55, Message: "Redfish snapshot: replay CPU/RAM/Storage..."})
	}
	processors, _ := r.getCollectionMembers(joinPath(primarySystem, "/Processors"))
	memory, _ := r.getCollectionMembers(joinPath(primarySystem, "/Memory"))
	storageDevices := r.collectStorage(primarySystem)
	if emit != nil {
		emit(Progress{Status: "running", Progress: 80, Message: "Redfish snapshot: replay network/BMC..."})
	}
	psus := r.collectPSUs(chassisPaths)
	pcieDevices := r.collectPCIeDevices(systemPaths, chassisPaths)
	gpus := r.collectGPUs(systemPaths, chassisPaths)
	nics := r.collectNICs(chassisPaths)
	managerDoc, _ := r.getJSON(primaryManager)
	networkProtocolDoc, _ := r.getJSON(joinPath(primaryManager, "/NetworkProtocol"))
	result := &models.AnalysisResult{
		Events:  make([]models.Event, 0),
		FRU:     make([]models.FRUInfo, 0),
		Sensors: make([]models.SensorReading, 0),
		// Carry the tree forward so a replayed result can itself be re-exported.
		RawPayloads: map[string]any{
			"redfish_tree": tree,
		},
		Hardware: &models.HardwareConfig{
			BoardInfo:       parseBoardInfo(systemDoc),
			CPUs:            parseCPUs(processors),
			Memory:          parseMemory(memory),
			Storage:         storageDevices,
			PCIeDevices:     pcieDevices,
			GPUs:            gpus,
			PowerSupply:     psus,
			NetworkAdapters: nics,
			Firmware:        parseFirmware(systemDoc, biosDoc, managerDoc, secureBootDoc, networkProtocolDoc),
		},
	}
	return result, nil
}
// redfishSnapshotReader serves previously captured Redfish documents from an
// in-memory tree keyed by request path (the "redfish_tree" raw payload),
// mimicking the live-collection read API so parsers can be reused unchanged.
type redfishSnapshotReader struct {
	// tree maps normalized Redfish paths to decoded JSON documents.
	tree map[string]interface{}
}
// getJSON resolves requestPath against the captured tree, tolerating
// trailing-slash mismatches between the stored key and the request.
// It returns the first candidate key whose stored value is a JSON object.
func (r redfishSnapshotReader) getJSON(requestPath string) (map[string]interface{}, error) {
	normalized := normalizeRedfishPath(requestPath)
	candidates := []string{normalized}
	if normalized != "/" {
		candidates = append(candidates, stringsTrimTrailingSlash(normalized), normalized+"/")
	}
	for _, key := range candidates {
		stored, found := r.tree[key]
		if !found {
			continue
		}
		if doc, isObject := stored.(map[string]interface{}); isObject {
			return doc, nil
		}
	}
	return nil, fmt.Errorf("snapshot path not found: %s", requestPath)
}
// getCollectionMembers loads a Redfish collection document and dereferences
// each Members[].@odata.id against the snapshot tree. Members that are
// malformed or missing from the snapshot are skipped (best effort); an error
// is returned only when the collection document itself is absent.
func (r redfishSnapshotReader) getCollectionMembers(collectionPath string) ([]map[string]interface{}, error) {
	collection, err := r.getJSON(collectionPath)
	if err != nil {
		return nil, err
	}
	refs, ok := collection["Members"].([]interface{})
	if !ok || len(refs) == 0 {
		// Present-but-empty collection: success with no members.
		return []map[string]interface{}{}, nil
	}
	out := make([]map[string]interface{}, 0, len(refs))
	for _, refAny := range refs {
		ref, ok := refAny.(map[string]interface{})
		if !ok {
			continue
		}
		memberPath := asString(ref["@odata.id"])
		if memberPath == "" {
			continue
		}
		doc, err := r.getJSON(memberPath)
		if err != nil {
			// Member not captured in the snapshot; skip rather than fail.
			continue
		}
		out = append(out, doc)
	}
	return out, nil
}
// discoverMemberPaths returns the @odata.id of every member of the given
// collection. When the collection is missing or yields no usable member
// references, it falls back to the single fallbackPath (or nil if that is
// empty), matching the live collector's convention of probing ".../1".
func (r redfishSnapshotReader) discoverMemberPaths(collectionPath, fallbackPath string) []string {
	collection, err := r.getJSON(collectionPath)
	if err == nil {
		if refs, ok := collection["Members"].([]interface{}); ok && len(refs) > 0 {
			paths := make([]string, 0, len(refs))
			for _, refAny := range refs {
				ref, ok := refAny.(map[string]interface{})
				if !ok {
					continue
				}
				memberPath := asString(ref["@odata.id"])
				if memberPath != "" {
					paths = append(paths, memberPath)
				}
			}
			if len(paths) > 0 {
				return paths
			}
		}
	}
	if fallbackPath != "" {
		return []string{fallbackPath}
	}
	return nil
}
// getLinkedPCIeFunctions resolves the PCIe function documents attached to a
// PCIe device document. It prefers the Links.PCIeFunctions reference array
// (older schema shape) and falls back to the PCIeFunctions sub-collection;
// returns nil when neither shape is present or resolvable.
func (r redfishSnapshotReader) getLinkedPCIeFunctions(doc map[string]interface{}) []map[string]interface{} {
	if links, ok := doc["Links"].(map[string]interface{}); ok {
		if refs, ok := links["PCIeFunctions"].([]interface{}); ok && len(refs) > 0 {
			out := make([]map[string]interface{}, 0, len(refs))
			for _, refAny := range refs {
				ref, ok := refAny.(map[string]interface{})
				if !ok {
					continue
				}
				memberPath := asString(ref["@odata.id"])
				if memberPath == "" {
					continue
				}
				memberDoc, err := r.getJSON(memberPath)
				if err != nil {
					continue
				}
				out = append(out, memberDoc)
			}
			// NOTE: returned even if empty — Links shape wins over the collection.
			return out
		}
	}
	if pcieFunctions, ok := doc["PCIeFunctions"].(map[string]interface{}); ok {
		if collectionPath := asString(pcieFunctions["@odata.id"]); collectionPath != "" {
			memberDocs, err := r.getCollectionMembers(collectionPath)
			if err == nil {
				return memberDocs
			}
		}
	}
	return nil
}
// collectStorage gathers drives from every storage shape found in the
// snapshot: Storage controllers (Drives as a sub-collection object or as an
// inline reference array, or the member itself looking like a drive),
// SimpleStorage Devices, and chassis-level /Drives collections. Results are
// deduplicated via dedupeStorage.
func (r redfishSnapshotReader) collectStorage(systemPath string) []models.Storage {
	var out []models.Storage
	storageMembers, _ := r.getCollectionMembers(joinPath(systemPath, "/Storage"))
	for _, member := range storageMembers {
		// Shape 1: Drives is a {"@odata.id": ...} sub-collection reference.
		if driveCollection, ok := member["Drives"].(map[string]interface{}); ok {
			if driveCollectionPath := asString(driveCollection["@odata.id"]); driveCollectionPath != "" {
				driveDocs, err := r.getCollectionMembers(driveCollectionPath)
				if err == nil {
					for _, driveDoc := range driveDocs {
						out = append(out, parseDrive(driveDoc))
					}
				}
				continue
			}
		}
		// Shape 2: Drives is an inline array of @odata.id references.
		if drives, ok := member["Drives"].([]interface{}); ok {
			for _, driveAny := range drives {
				driveRef, ok := driveAny.(map[string]interface{})
				if !ok {
					continue
				}
				odata := asString(driveRef["@odata.id"])
				if odata == "" {
					continue
				}
				driveDoc, err := r.getJSON(odata)
				if err != nil {
					continue
				}
				out = append(out, parseDrive(driveDoc))
			}
			continue
		}
		// Shape 3: the storage member itself carries drive-like fields.
		if looksLikeDrive(member) {
			out = append(out, parseDrive(member))
		}
	}
	// Legacy SimpleStorage: devices are embedded documents, not references.
	simpleStorageMembers, _ := r.getCollectionMembers(joinPath(systemPath, "/SimpleStorage"))
	for _, member := range simpleStorageMembers {
		devices, ok := member["Devices"].([]interface{})
		if !ok {
			continue
		}
		for _, devAny := range devices {
			devDoc, ok := devAny.(map[string]interface{})
			if !ok || !looksLikeDrive(devDoc) {
				continue
			}
			out = append(out, parseDrive(devDoc))
		}
	}
	// Some BMCs expose drives only under the chassis.
	chassisPaths := r.discoverMemberPaths("/redfish/v1/Chassis", "/redfish/v1/Chassis/1")
	for _, chassisPath := range chassisPaths {
		driveDocs, err := r.getCollectionMembers(joinPath(chassisPath, "/Drives"))
		if err != nil {
			continue
		}
		for _, driveDoc := range driveDocs {
			if !looksLikeDrive(driveDoc) {
				continue
			}
			out = append(out, parseDrive(driveDoc))
		}
	}
	return dedupeStorage(out)
}
// collectNICs gathers network adapters from each chassis's /NetworkAdapters
// collection, deduplicating by serial number or, failing that, "slot|model".
// NOTE(review): when serial, slot and model are all empty the key is "|",
// which is non-empty — such adapters collapse into one entry; confirm that
// is intended (parsePSU/GPU dedupe uses the same pattern).
func (r redfishSnapshotReader) collectNICs(chassisPaths []string) []models.NetworkAdapter {
	var nics []models.NetworkAdapter
	seen := make(map[string]struct{})
	for _, chassisPath := range chassisPaths {
		adapterDocs, err := r.getCollectionMembers(joinPath(chassisPath, "/NetworkAdapters"))
		if err != nil {
			continue
		}
		for _, doc := range adapterDocs {
			nic := parseNIC(doc)
			key := firstNonEmpty(nic.SerialNumber, nic.Slot+"|"+nic.Model)
			if key == "" {
				continue
			}
			if _, ok := seen[key]; ok {
				continue
			}
			seen[key] = struct{}{}
			nics = append(nics, nic)
		}
	}
	return nics
}
// collectPSUs gathers power supplies from both schema generations per
// chassis: the legacy /Power document's embedded PowerSupplies array and the
// newer /PowerSubsystem/PowerSupplies collection. Entries are deduplicated
// by serial number or "slot|model". idx numbers every parsed PSU and is
// passed to parsePSU; it advances even for entries later dropped as
// duplicates, so the numbering may have gaps.
func (r redfishSnapshotReader) collectPSUs(chassisPaths []string) []models.PSU {
	var out []models.PSU
	seen := make(map[string]struct{})
	idx := 1
	for _, chassisPath := range chassisPaths {
		// Legacy schema: PSUs embedded directly in the /Power document.
		if powerDoc, err := r.getJSON(joinPath(chassisPath, "/Power")); err == nil {
			if members, ok := powerDoc["PowerSupplies"].([]interface{}); ok && len(members) > 0 {
				for _, item := range members {
					doc, ok := item.(map[string]interface{})
					if !ok {
						continue
					}
					psu := parsePSU(doc, idx)
					idx++
					key := firstNonEmpty(psu.SerialNumber, psu.Slot+"|"+psu.Model)
					if key == "" {
						continue
					}
					if _, ok := seen[key]; ok {
						continue
					}
					seen[key] = struct{}{}
					out = append(out, psu)
				}
			}
		}
		// Modern schema: PSUs as a PowerSubsystem sub-collection.
		memberDocs, err := r.getCollectionMembers(joinPath(chassisPath, "/PowerSubsystem/PowerSupplies"))
		if err != nil || len(memberDocs) == 0 {
			continue
		}
		for _, doc := range memberDocs {
			psu := parsePSU(doc, idx)
			idx++
			key := firstNonEmpty(psu.SerialNumber, psu.Slot+"|"+psu.Model)
			if key == "" {
				continue
			}
			if _, ok := seen[key]; ok {
				continue
			}
			seen[key] = struct{}{}
			out = append(out, psu)
		}
	}
	return out
}
// collectGPUs scans /PCIeDevices and /Accelerators under every system plus
// /PCIeDevices under every chassis, keeping only members that looksLikeGPU
// accepts (based on the device doc and its linked PCIe functions). Entries
// are deduplicated by serial, BDF, or "slot|model"; idx numbers parsed GPUs
// and advances even for later-deduplicated entries.
func (r redfishSnapshotReader) collectGPUs(systemPaths, chassisPaths []string) []models.GPU {
	collections := make([]string, 0, len(systemPaths)*2+len(chassisPaths))
	for _, systemPath := range systemPaths {
		collections = append(collections, joinPath(systemPath, "/PCIeDevices"))
		collections = append(collections, joinPath(systemPath, "/Accelerators"))
	}
	for _, chassisPath := range chassisPaths {
		collections = append(collections, joinPath(chassisPath, "/PCIeDevices"))
	}
	var out []models.GPU
	seen := make(map[string]struct{})
	idx := 1
	for _, collectionPath := range collections {
		memberDocs, err := r.getCollectionMembers(collectionPath)
		if err != nil || len(memberDocs) == 0 {
			continue
		}
		for _, doc := range memberDocs {
			functionDocs := r.getLinkedPCIeFunctions(doc)
			if !looksLikeGPU(doc, functionDocs) {
				continue
			}
			gpu := parseGPU(doc, functionDocs, idx)
			idx++
			key := firstNonEmpty(gpu.SerialNumber, gpu.BDF, gpu.Slot+"|"+gpu.Model)
			if key == "" {
				continue
			}
			if _, ok := seen[key]; ok {
				continue
			}
			seen[key] = struct{}{}
			out = append(out, gpu)
		}
	}
	return out
}
// collectPCIeDevices gathers PCIe devices from /PCIeDevices under every
// system and chassis, then supplements with each system's flat
// /PCIeFunctions collection (some BMCs expose functions without device
// parents). Entries are deduplicated across both passes by serial, BDF, or
// "slot|class" (BDF first for the functions pass, since it is the most
// stable identifier there).
func (r redfishSnapshotReader) collectPCIeDevices(systemPaths, chassisPaths []string) []models.PCIeDevice {
	collections := make([]string, 0, len(systemPaths)+len(chassisPaths))
	for _, systemPath := range systemPaths {
		collections = append(collections, joinPath(systemPath, "/PCIeDevices"))
	}
	for _, chassisPath := range chassisPaths {
		collections = append(collections, joinPath(chassisPath, "/PCIeDevices"))
	}
	var out []models.PCIeDevice
	seen := make(map[string]struct{})
	for _, collectionPath := range collections {
		memberDocs, err := r.getCollectionMembers(collectionPath)
		if err != nil || len(memberDocs) == 0 {
			continue
		}
		for _, doc := range memberDocs {
			functionDocs := r.getLinkedPCIeFunctions(doc)
			dev := parsePCIeDevice(doc, functionDocs)
			key := firstNonEmpty(dev.SerialNumber, dev.BDF, dev.Slot+"|"+dev.DeviceClass)
			if key == "" {
				continue
			}
			if _, ok := seen[key]; ok {
				continue
			}
			seen[key] = struct{}{}
			out = append(out, dev)
		}
	}
	// Fallback pass: standalone PCIe functions not reachable via a device doc.
	for _, systemPath := range systemPaths {
		functionDocs, err := r.getCollectionMembers(joinPath(systemPath, "/PCIeFunctions"))
		if err != nil || len(functionDocs) == 0 {
			continue
		}
		for idx, fn := range functionDocs {
			dev := parsePCIeFunction(fn, idx+1)
			key := firstNonEmpty(dev.BDF, dev.SerialNumber, dev.Slot+"|"+dev.DeviceClass)
			if key == "" {
				continue
			}
			if _, ok := seen[key]; ok {
				continue
			}
			seen[key] = struct{}{}
			out = append(out, dev)
		}
	}
	return out
}
// stringsTrimTrailingSlash removes trailing '/' characters from s while
// always keeping at least one character, so "/" is returned unchanged.
func stringsTrimTrailingSlash(s string) string {
	end := len(s)
	for end > 1 && s[end-1] == '/' {
		end--
	}
	return s[:end]
}

View File

@@ -200,3 +200,41 @@ func TestRedfishConnectorCollect(t *testing.T) {
t.Fatalf("expected non-empty redfish_tree, got %#v", treeAny)
}
}
// TestParsePCIeDeviceSlot_FromNestedRedfishSlotLocation verifies that the
// nested Slot.Location.PartLocation.ServiceLabel shape is unwrapped into the
// device's Slot field instead of stringifying the map.
func TestParsePCIeDeviceSlot_FromNestedRedfishSlotLocation(t *testing.T) {
	doc := map[string]interface{}{
		"Id": "NIC1",
		"Slot": map[string]interface{}{
			"Lanes": 16,
			"Location": map[string]interface{}{
				"PartLocation": map[string]interface{}{
					"LocationOrdinalValue": 1,
					"LocationType":         "Slot",
					"ServiceLabel":         "PCIe Slot 1 (1)",
				},
			},
			"PCIeType": "Gen5",
			"SlotType": "FullLength",
		},
	}
	got := parsePCIeDevice(doc, nil)
	if got.Slot != "PCIe Slot 1 (1)" {
		t.Fatalf("unexpected slot: %q", got.Slot)
	}
}
// TestParsePCIeDeviceSlot_EmptyMapFallsBackToID verifies that an empty Slot
// object yields "" from the location extractor so the parser falls back to Id.
func TestParsePCIeDeviceSlot_EmptyMapFallsBackToID(t *testing.T) {
	doc := map[string]interface{}{
		"Id":   "NIC42",
		"Slot": map[string]interface{}{},
	}
	got := parsePCIeDevice(doc, nil)
	if got.Slot != "NIC42" {
		t.Fatalf("unexpected slot fallback: %q", got.Slot)
	}
	// NOTE(review): unreachable after the check above ("NIC42" != "map[]");
	// kept to document the regression being guarded against.
	if got.Slot == "map[]" {
		t.Fatalf("slot should not stringify empty map")
	}
}

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"html/template"
@@ -70,15 +71,33 @@ func (s *Server) handleUpload(w http.ResponseWriter, r *http.Request) {
)
if looksLikeJSONSnapshot(header.Filename, payload) {
snapshotResult, snapshotErr := parseUploadedSnapshot(payload)
if snapshotErr != nil {
jsonError(w, "Failed to parse snapshot: "+snapshotErr.Error(), http.StatusBadRequest)
if rawPkg, ok, err := parseRawExportPackage(payload); err != nil {
jsonError(w, "Failed to parse raw export package: "+err.Error(), http.StatusBadRequest)
return
}
result = snapshotResult
vendor = strings.TrimSpace(snapshotResult.Protocol)
if vendor == "" {
vendor = "snapshot"
} else if ok {
replayed, replayVendor, replayErr := s.reanalyzeRawExportPackage(rawPkg)
if replayErr != nil {
jsonError(w, "Failed to reanalyze raw export package: "+replayErr.Error(), http.StatusBadRequest)
return
}
result = replayed
vendor = replayVendor
if strings.TrimSpace(vendor) == "" {
vendor = "snapshot"
}
s.SetRawExport(rawPkg)
} else {
snapshotResult, snapshotErr := parseUploadedSnapshot(payload)
if snapshotErr != nil {
jsonError(w, "Failed to parse snapshot: "+snapshotErr.Error(), http.StatusBadRequest)
return
}
result = snapshotResult
vendor = strings.TrimSpace(snapshotResult.Protocol)
if vendor == "" {
vendor = "snapshot"
}
s.SetRawExport(newRawExportFromUploadedFile(header.Filename, header.Header.Get("Content-Type"), payload, result))
}
} else {
// Parse archive
@@ -90,6 +109,7 @@ func (s *Server) handleUpload(w http.ResponseWriter, r *http.Request) {
result = p.Result()
applyArchiveSourceMetadata(result)
vendor = p.DetectedVendor()
s.SetRawExport(newRawExportFromUploadedFile(header.Filename, header.Header.Get("Content-Type"), payload, result))
}
s.SetResult(result)
@@ -108,6 +128,77 @@ func (s *Server) handleUpload(w http.ResponseWriter, r *http.Request) {
})
}
// reanalyzeRawExportPackage re-runs analysis from a previously exported raw
// package. "file_bytes" sources are base64-decoded and re-parsed through the
// normal upload pipeline; "live_redfish" sources are replayed from the saved
// Redfish tree, with missing result metadata (protocol, source type, target
// host, timestamp, filename) backfilled from the package. Returns the
// result, the detected vendor string, and any error.
func (s *Server) reanalyzeRawExportPackage(pkg *RawExportPackage) (*models.AnalysisResult, string, error) {
	if pkg == nil {
		return nil, "", fmt.Errorf("empty package")
	}
	switch pkg.Source.Kind {
	case "file_bytes":
		if strings.TrimSpace(pkg.Source.Encoding) != "base64" {
			return nil, "", fmt.Errorf("unsupported file_bytes encoding: %s", pkg.Source.Encoding)
		}
		data, err := base64.StdEncoding.DecodeString(pkg.Source.Data)
		if err != nil {
			return nil, "", fmt.Errorf("decode source.data: %w", err)
		}
		return s.parseUploadedPayload(pkg.Source.Filename, data)
	case "live_redfish":
		if !strings.EqualFold(strings.TrimSpace(pkg.Source.Protocol), "redfish") {
			return nil, "", fmt.Errorf("unsupported live protocol: %s", pkg.Source.Protocol)
		}
		// Replay without a progress callback: reanalysis is synchronous.
		result, err := collector.ReplayRedfishFromRawPayloads(pkg.Source.RawPayloads, nil)
		if err != nil {
			return nil, "", err
		}
		if result != nil {
			// Backfill metadata the replay cannot reconstruct on its own.
			if strings.TrimSpace(result.Protocol) == "" {
				result.Protocol = "redfish"
			}
			if strings.TrimSpace(result.SourceType) == "" {
				result.SourceType = models.SourceTypeAPI
			}
			if strings.TrimSpace(result.TargetHost) == "" {
				result.TargetHost = strings.TrimSpace(pkg.Source.TargetHost)
			}
			if result.CollectedAt.IsZero() {
				result.CollectedAt = time.Now().UTC()
			}
			if strings.TrimSpace(result.Filename) == "" {
				target := result.TargetHost
				if target == "" {
					target = "snapshot"
				}
				result.Filename = "redfish://" + target
			}
		}
		return result, "redfish", nil
	default:
		return nil, "", fmt.Errorf("unsupported raw export source kind: %s", pkg.Source.Kind)
	}
}
// parseUploadedPayload parses an in-memory upload the same way handleUpload
// does: JSON snapshots go through the snapshot parser (vendor defaults to
// "snapshot" when the result carries no protocol), anything else through the
// BMC archive parser. Returns the result, the detected vendor, and any error.
func (s *Server) parseUploadedPayload(filename string, payload []byte) (*models.AnalysisResult, string, error) {
	if looksLikeJSONSnapshot(filename, payload) {
		snapshotResult, err := parseUploadedSnapshot(payload)
		if err != nil {
			return nil, "", err
		}
		vendor := strings.TrimSpace(snapshotResult.Protocol)
		if vendor == "" {
			vendor = "snapshot"
		}
		return snapshotResult, vendor, nil
	}
	p := parser.NewBMCParser()
	if err := p.ParseFromReader(bytes.NewReader(payload), filename); err != nil {
		return nil, "", err
	}
	result := p.Result()
	applyArchiveSourceMetadata(result)
	return result, p.DetectedVendor(), nil
}
func (s *Server) handleGetParsers(w http.ResponseWriter, r *http.Request) {
jsonResponse(w, map[string]interface{}{
"parsers": parser.ListParsersInfo(),
@@ -667,8 +758,19 @@ func (s *Server) handleExportJSON(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", exportFilename(result, "json")))
if rawPkg := s.GetRawExport(); rawPkg != nil {
rawPkg.ExportedAt = time.Now().UTC()
rawPkg.Analysis = nil
encoder := json.NewEncoder(w)
encoder.SetIndent("", " ")
if err := encoder.Encode(rawPkg); err != nil {
return
}
return
}
exp := exporter.New(result)
exp.ExportJSON(w)
_ = exp.ExportJSON(w)
}
func (s *Server) handleExportReanimator(w http.ResponseWriter, r *http.Request) {
@@ -702,6 +804,7 @@ func (s *Server) handleExportReanimator(w http.ResponseWriter, r *http.Request)
func (s *Server) handleClear(w http.ResponseWriter, r *http.Request) {
s.SetResult(nil)
s.SetDetectedVendor("")
s.SetRawExport(nil)
jsonResponse(w, map[string]string{
"status": "ok",
"message": "Data cleared",
@@ -827,6 +930,9 @@ func (s *Server) startCollectionJob(jobID string, req CollectRequest) {
s.jobManager.AppendJobLog(jobID, "Сбор завершен")
s.SetResult(result)
s.SetDetectedVendor(req.Protocol)
if job, ok := s.jobManager.GetJob(jobID); ok {
s.SetRawExport(newRawExportFromLiveCollect(result, req, job.Logs))
}
}()
}

View File

@@ -0,0 +1,104 @@
package server
import (
"encoding/base64"
"encoding/json"
"time"
"git.mchus.pro/mchus/logpile/internal/models"
)
const rawExportFormatV1 = "logpile.raw-export.v1"
// RawExportPackage is the on-disk envelope for a raw export: the original
// source material (file bytes or live-collected payloads) plus optional
// analysis output, tagged with the format marker so uploads can recognize it.
type RawExportPackage struct {
	// Format identifies the envelope version; must equal rawExportFormatV1.
	Format     string          `json:"format"`
	ExportedAt time.Time       `json:"exported_at"`
	Source     RawExportSource `json:"source"`
	// Analysis is optional and is stripped before export (see export handler).
	Analysis *models.AnalysisResult `json:"analysis_result,omitempty"`
}
// RawExportSource captures where the analyzed data came from. For Kind
// "file_bytes" the original upload is stored base64-encoded in Data; for
// "live_redfish" the collected raw payloads and collection metadata are
// stored instead.
type RawExportSource struct {
	Kind     string `json:"kind"` // file_bytes | live_redfish | snapshot_json
	Filename string `json:"filename,omitempty"`
	MIMEType string `json:"mime_type,omitempty"`
	Encoding string `json:"encoding,omitempty"` // base64
	// Data holds the original upload bytes (per Encoding) for file sources.
	Data       string `json:"data,omitempty"`
	Protocol   string `json:"protocol,omitempty"`
	TargetHost string `json:"target_host,omitempty"`
	// RawPayloads mirrors AnalysisResult.RawPayloads for live collections
	// (e.g. the "redfish_tree" snapshot used for replay).
	RawPayloads map[string]any      `json:"raw_payloads,omitempty"`
	CollectLogs []string            `json:"collect_logs,omitempty"`
	CollectMeta *CollectRequestMeta `json:"collect_meta,omitempty"`
}
// newRawExportFromUploadedFile wraps an uploaded file into a v1 raw export
// package, storing the payload base64-encoded and copying protocol/target
// metadata from the (possibly nil) analysis result.
func newRawExportFromUploadedFile(filename, mimeType string, payload []byte, result *models.AnalysisResult) *RawExportPackage {
	return &RawExportPackage{
		Format:     rawExportFormatV1,
		ExportedAt: time.Now().UTC(),
		Source: RawExportSource{
			Kind:       "file_bytes",
			Filename:   filename,
			MIMEType:   mimeType,
			Encoding:   "base64",
			Data:       base64.StdEncoding.EncodeToString(payload),
			Protocol:   resultProtocol(result),
			TargetHost: resultTargetHost(result),
		},
	}
}
// newRawExportFromLiveCollect wraps a live collection into a v1 raw export
// package: the result's raw payloads are shallow-copied into the source, the
// job logs are cloned, and the connection parameters (host, protocol, port,
// username, auth/TLS modes — NOTE(review): no password appears here, confirm
// CollectRequestMeta stays credential-free) are preserved for reanalysis.
func newRawExportFromLiveCollect(result *models.AnalysisResult, req CollectRequest, logs []string) *RawExportPackage {
	// Shallow copy so later mutation of result.RawPayloads does not alias.
	rawPayloads := map[string]any{}
	if result != nil && result.RawPayloads != nil {
		for k, v := range result.RawPayloads {
			rawPayloads[k] = v
		}
	}
	meta := CollectRequestMeta{
		Host:     req.Host,
		Protocol: req.Protocol,
		Port:     req.Port,
		Username: req.Username,
		AuthType: req.AuthType,
		TLSMode:  req.TLSMode,
	}
	return &RawExportPackage{
		Format:     rawExportFormatV1,
		ExportedAt: time.Now().UTC(),
		Source: RawExportSource{
			Kind:       "live_redfish",
			Protocol:   req.Protocol,
			TargetHost: req.Host,
			RawPayloads: rawPayloads,
			// Clone logs so the package owns its slice.
			CollectLogs: append([]string(nil), logs...),
			CollectMeta: &meta,
		},
	}
}
// parseRawExportPackage attempts to decode payload as a v1 raw export
// package. Returns (pkg, true, nil) on a format match, (nil, false, nil)
// when the JSON is valid but carries a different format marker (caller falls
// back to plain snapshot parsing), and (nil, false, err) on invalid JSON.
func parseRawExportPackage(payload []byte) (*RawExportPackage, bool, error) {
	var pkg RawExportPackage
	if err := json.Unmarshal(payload, &pkg); err != nil {
		return nil, false, err
	}
	if pkg.Format != rawExportFormatV1 {
		return nil, false, nil
	}
	// Tolerate hand-edited packages that dropped the timestamp.
	if pkg.ExportedAt.IsZero() {
		pkg.ExportedAt = time.Now().UTC()
	}
	return &pkg, true, nil
}
// resultProtocol returns result.Protocol, or "" for a nil result.
func resultProtocol(result *models.AnalysisResult) string {
	if result != nil {
		return result.Protocol
	}
	return ""
}
// resultTargetHost returns result.TargetHost, or "" for a nil result.
func resultTargetHost(result *models.AnalysisResult) string {
	if result != nil {
		return result.TargetHost
	}
	return ""
}

View File

@@ -29,6 +29,7 @@ type Server struct {
mu sync.RWMutex
result *models.AnalysisResult
detectedVendor string
rawExport *RawExportPackage
jobManager *JobManager
collectors *collector.Registry
@@ -107,6 +108,22 @@ func (s *Server) GetResult() *models.AnalysisResult {
return s.result
}
// SetRawExport stores (or clears, with nil) the raw export package for the
// currently loaded result, guarded by the server mutex.
func (s *Server) SetRawExport(pkg *RawExportPackage) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.rawExport = pkg
}
// GetRawExport returns a copy of the stored raw export package, or nil when
// none is loaded. The copy is shallow: top-level fields may be mutated by
// the caller without affecting the stored package, but nested reference
// fields (Source.RawPayloads, Source.CollectLogs, Analysis) are still shared
// — NOTE(review): callers must not mutate those in place.
func (s *Server) GetRawExport() *RawExportPackage {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.rawExport == nil {
		return nil
	}
	cloned := *s.rawExport
	return &cloned
}
// SetDetectedVendor sets the detected vendor name
func (s *Server) SetDetectedVendor(vendor string) {
s.mu.Lock()