refactor: unified ingest pipeline + modular Redfish profile framework

Implement the full architectural plan: unified ingest.Service entry point
for archive and Redfish payloads, modular redfishprofile package with
composable profiles (generic, ami-family, msi, supermicro, dell,
hgx-topology), score-based profile matching with fallback expansion mode,
and profile-driven acquisition/analysis plans.

Move vendor-specific logic out of the common executors and into profile hooks.
GPU chassis lookup strategies and known storage recovery collections
(IntelVROC/HA-RAID/MRVL) now live in ResolvedAnalysisPlan, populated by
profiles at analysis time. Replay helpers read from the plan; no hardcoded
path lists remain in generic code.

Also split redfish_replay.go into domain modules (gpu, storage, inventory,
fru, profiles) and add full fixture/matcher/directive test coverage,
including Dell, AMI, unknown-vendor fallback, and deterministic ordering.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Mikhail Chusavitin
2026-03-18 08:48:58 +03:00
parent d8d3d8c524
commit d650a6ba1c
45 changed files with 5231 additions and 1011 deletions

View File

@@ -21,6 +21,7 @@ import (
"git.mchus.pro/mchus/logpile/internal/collector"
"git.mchus.pro/mchus/logpile/internal/exporter"
"git.mchus.pro/mchus/logpile/internal/ingest"
"git.mchus.pro/mchus/logpile/internal/models"
"git.mchus.pro/mchus/logpile/internal/parser"
chartviewer "reanimator/chart/viewer"
@@ -219,13 +220,12 @@ func (s *Server) analyzeUploadedFile(filename, mimeType string, payload []byte)
return nil, "", nil, fmt.Errorf("unsupported archive format: %s", strings.ToLower(filepath.Ext(filename)))
}
p := parser.NewBMCParser()
if err := p.ParseFromReader(bytes.NewReader(payload), filename); err != nil {
result, vendor, err := s.ingestService().AnalyzeArchivePayload(filename, payload)
if err != nil {
return nil, "", nil, err
}
result := p.Result()
applyArchiveSourceMetadata(result)
return result, p.DetectedVendor(), newRawExportFromUploadedFile(filename, mimeType, payload, result), nil
return result, vendor, newRawExportFromUploadedFile(filename, mimeType, payload, result), nil
}
func uploadMultipartMaxBytes() int64 {
@@ -297,33 +297,18 @@ func (s *Server) reanalyzeRawExportPackage(pkg *RawExportPackage) (*models.Analy
if !strings.EqualFold(strings.TrimSpace(pkg.Source.Protocol), "redfish") {
return nil, "", fmt.Errorf("unsupported live protocol: %s", pkg.Source.Protocol)
}
result, err := collector.ReplayRedfishFromRawPayloads(pkg.Source.RawPayloads, nil)
result, vendor, err := s.ingestService().AnalyzeRedfishRawPayloads(pkg.Source.RawPayloads, ingest.RedfishSourceMetadata{
TargetHost: pkg.Source.TargetHost,
SourceTimezone: pkg.Source.SourceTimezone,
Filename: pkg.Source.Filename,
})
if err != nil {
return nil, "", err
}
if result != nil {
if strings.TrimSpace(result.Protocol) == "" {
result.Protocol = "redfish"
}
if strings.TrimSpace(result.SourceType) == "" {
result.SourceType = models.SourceTypeAPI
}
if strings.TrimSpace(result.TargetHost) == "" {
result.TargetHost = strings.TrimSpace(pkg.Source.TargetHost)
}
if strings.TrimSpace(result.SourceTimezone) == "" {
result.SourceTimezone = strings.TrimSpace(pkg.Source.SourceTimezone)
}
result.CollectedAt = inferRawExportCollectedAt(result, pkg)
if strings.TrimSpace(result.Filename) == "" {
target := result.TargetHost
if target == "" {
target = "snapshot"
}
result.Filename = "redfish://" + target
}
}
return result, "redfish", nil
return result, vendor, nil
default:
return nil, "", fmt.Errorf("unsupported raw export source kind: %s", pkg.Source.Kind)
}
@@ -342,13 +327,12 @@ func (s *Server) parseUploadedPayload(filename string, payload []byte) (*models.
return snapshotResult, vendor, nil
}
p := parser.NewBMCParser()
if err := p.ParseFromReader(bytes.NewReader(payload), filename); err != nil {
result, vendor, err := s.ingestService().AnalyzeArchivePayload(filename, payload)
if err != nil {
return nil, "", err
}
result := p.Result()
applyArchiveSourceMetadata(result)
return result, p.DetectedVendor(), nil
return result, vendor, nil
}
func (s *Server) handleGetParsers(w http.ResponseWriter, r *http.Request) {
@@ -1706,6 +1690,51 @@ func (s *Server) startCollectionJob(jobID string, req CollectRequest) {
status = CollectStatusRunning
}
s.jobManager.UpdateJobStatus(jobID, status, update.Progress, "")
if update.CurrentPhase != "" || update.ETASeconds > 0 {
s.jobManager.UpdateJobETA(jobID, update.CurrentPhase, update.ETASeconds)
}
if update.DebugInfo != nil {
debugInfo := &CollectDebugInfo{
AdaptiveThrottled: update.DebugInfo.AdaptiveThrottled,
SnapshotWorkers: update.DebugInfo.SnapshotWorkers,
PrefetchWorkers: update.DebugInfo.PrefetchWorkers,
PrefetchEnabled: update.DebugInfo.PrefetchEnabled,
}
if len(update.DebugInfo.PhaseTelemetry) > 0 {
debugInfo.PhaseTelemetry = make([]CollectPhaseTelemetry, 0, len(update.DebugInfo.PhaseTelemetry))
for _, item := range update.DebugInfo.PhaseTelemetry {
debugInfo.PhaseTelemetry = append(debugInfo.PhaseTelemetry, CollectPhaseTelemetry{
Phase: item.Phase,
Requests: item.Requests,
Errors: item.Errors,
ErrorRate: item.ErrorRate,
AvgMS: item.AvgMS,
P95MS: item.P95MS,
})
}
}
s.jobManager.UpdateJobDebugInfo(jobID, debugInfo)
}
if len(update.ActiveModules) > 0 || len(update.ModuleScores) > 0 {
activeModules := make([]CollectModuleStatus, 0, len(update.ActiveModules))
for _, module := range update.ActiveModules {
activeModules = append(activeModules, CollectModuleStatus{
Name: module.Name,
Score: module.Score,
Active: true,
})
}
moduleScores := make([]CollectModuleStatus, 0, len(update.ModuleScores))
for _, module := range update.ModuleScores {
moduleScores = append(moduleScores, CollectModuleStatus{
Name: module.Name,
Score: module.Score,
Active: module.Active,
Priority: module.Priority,
})
}
s.jobManager.UpdateJobModules(jobID, activeModules, moduleScores)
}
if update.Message != "" {
s.jobManager.AppendJobLog(jobID, update.Message)
}