misc: sds format support, convert limits, dell dedup, supermicro removal, bible updates
Parser / archive:
- Add .sds extension as tar-format alias (archive.go)
- Add tests for multipart upload size limits (multipart_limits_test.go)
- Remove supermicro crashdump parser (ADL-015)

Dell parser:
- Remove GPU duplicates from PCIeDevices (DCIM_VideoView vs DCIM_PCIDeviceView both list the same GPU; VideoView record is authoritative)

Server:
- Add LOGPILE_CONVERT_MAX_MB env var for independent convert batch size limit
- Improve "file too large" error message with current limit value

Web:
- Add CONVERT_MAX_FILES_PER_BATCH = 1000 cap
- Minor UI copy and CSS fixes

Bible:
- bible-local/06-parsers.md: add pci.ids enrichment rule (enrich model from pciids when name is empty but vendor_id+device_id are present)
- Sync bible submodule and local overview/architecture docs

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -22,6 +22,7 @@ var supportedArchiveExt = map[string]struct{}{
|
||||
".gz": {},
|
||||
".tgz": {},
|
||||
".tar": {},
|
||||
".sds": {},
|
||||
".zip": {},
|
||||
".txt": {},
|
||||
".log": {},
|
||||
@@ -46,7 +47,7 @@ func ExtractArchive(archivePath string) ([]ExtractedFile, error) {
|
||||
switch ext {
|
||||
case ".gz", ".tgz":
|
||||
return extractTarGz(archivePath)
|
||||
case ".tar":
|
||||
case ".tar", ".sds":
|
||||
return extractTar(archivePath)
|
||||
case ".zip":
|
||||
return extractZip(archivePath)
|
||||
@@ -67,7 +68,7 @@ func ExtractArchiveFromReader(r io.Reader, filename string) ([]ExtractedFile, er
|
||||
switch ext {
|
||||
case ".gz", ".tgz":
|
||||
return extractTarGzFromReader(r, filename)
|
||||
case ".tar":
|
||||
case ".tar", ".sds":
|
||||
return extractTarFromReader(r)
|
||||
case ".zip":
|
||||
return extractZipFromReader(r)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -78,6 +79,7 @@ func TestIsSupportedArchiveFilename(t *testing.T) {
|
||||
{name: "dump.tar.gz", want: true},
|
||||
{name: "nvidia-bug-report-1651124000923.log.gz", want: true},
|
||||
{name: "snapshot.zip", want: true},
|
||||
{name: "h3c_20250819.sds", want: true},
|
||||
{name: "report.log", want: true},
|
||||
{name: "xigmanas.txt", want: true},
|
||||
{name: "raw_export.json", want: false},
|
||||
@@ -91,3 +93,34 @@ func TestIsSupportedArchiveFilename(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractArchiveFromReaderSDS(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
tw := tar.NewWriter(&buf)
|
||||
|
||||
payload := []byte("STARTTIME:0\nENDTIME:0\n")
|
||||
if err := tw.WriteHeader(&tar.Header{
|
||||
Name: "bmc/pack.info",
|
||||
Mode: 0o600,
|
||||
Size: int64(len(payload)),
|
||||
}); err != nil {
|
||||
t.Fatalf("write tar header: %v", err)
|
||||
}
|
||||
if _, err := tw.Write(payload); err != nil {
|
||||
t.Fatalf("write tar payload: %v", err)
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
t.Fatalf("close tar writer: %v", err)
|
||||
}
|
||||
|
||||
files, err := ExtractArchiveFromReader(bytes.NewReader(buf.Bytes()), "sample.sds")
|
||||
if err != nil {
|
||||
t.Fatalf("extract sds from reader: %v", err)
|
||||
}
|
||||
if len(files) != 1 {
|
||||
t.Fatalf("expected 1 extracted file, got %d", len(files))
|
||||
}
|
||||
if files[0].Path != "bmc/pack.info" {
|
||||
t.Fatalf("expected bmc/pack.info, got %q", files[0].Path)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ type VendorParser interface {
|
||||
// Name returns human-readable parser name
|
||||
Name() string
|
||||
|
||||
// Vendor returns vendor identifier (e.g., "inspur", "supermicro", "dell")
|
||||
// Vendor returns vendor identifier (e.g., "inspur", "dell", "h3c_g6")
|
||||
Vendor() string
|
||||
|
||||
// Version returns parser version string
|
||||
|
||||
35
internal/parser/vendors/dell/parser.go
vendored
35
internal/parser/vendors/dell/parser.go
vendored
@@ -99,8 +99,8 @@ func (p *Parser) Parse(files []parser.ExtractedFile) (*models.AnalysisResult, er
|
||||
result.Hardware.PowerSupply = dedupePSU(result.Hardware.PowerSupply)
|
||||
result.Hardware.NetworkAdapters = dedupeNetworkAdapters(result.Hardware.NetworkAdapters)
|
||||
result.Hardware.NetworkCards = nicCardsFromAdapters(result.Hardware.NetworkAdapters)
|
||||
result.Hardware.PCIeDevices = dedupePCIe(result.Hardware.PCIeDevices)
|
||||
result.Hardware.GPUs = dedupeGPU(result.Hardware.GPUs)
|
||||
result.Hardware.PCIeDevices = removePCIeOverlappingWithGPUs(dedupePCIe(result.Hardware.PCIeDevices), result.Hardware.GPUs)
|
||||
result.Hardware.CPUs = dedupeCPU(result.Hardware.CPUs)
|
||||
result.Hardware.Memory = dedupeDIMM(result.Hardware.Memory)
|
||||
result.Hardware.Firmware = dedupeFirmware(result.Hardware.Firmware)
|
||||
@@ -1248,6 +1248,39 @@ func nicCardsFromAdapters(items []models.NetworkAdapter) []models.NIC {
|
||||
return out
|
||||
}
|
||||
|
||||
// removePCIeOverlappingWithGPUs drops PCIe entries that duplicate a GPU already
|
||||
// captured from DCIM_VideoView. Dell TSR lists GPUs in both DCIM_VideoView and
|
||||
// DCIM_PCIDeviceView; the VideoView record is authoritative (has serial, firmware,
|
||||
// temperature) so the PCIe duplicate must be removed.
|
||||
func removePCIeOverlappingWithGPUs(pcie []models.PCIeDevice, gpus []models.GPU) []models.PCIeDevice {
|
||||
if len(gpus) == 0 {
|
||||
return pcie
|
||||
}
|
||||
gpuSlots := make(map[string]struct{}, len(gpus))
|
||||
gpuBDFs := make(map[string]struct{}, len(gpus))
|
||||
for _, g := range gpus {
|
||||
if s := strings.ToLower(strings.TrimSpace(g.Slot)); s != "" {
|
||||
gpuSlots[s] = struct{}{}
|
||||
}
|
||||
if b := strings.ToLower(strings.TrimSpace(g.BDF)); b != "" {
|
||||
gpuBDFs[b] = struct{}{}
|
||||
}
|
||||
}
|
||||
out := make([]models.PCIeDevice, 0, len(pcie))
|
||||
for _, p := range pcie {
|
||||
slot := strings.ToLower(strings.TrimSpace(p.Slot))
|
||||
bdf := strings.ToLower(strings.TrimSpace(p.BDF))
|
||||
if _, ok := gpuSlots[slot]; ok && slot != "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := gpuBDFs[bdf]; ok && bdf != "" {
|
||||
continue
|
||||
}
|
||||
out = append(out, p)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func dedupePCIe(items []models.PCIeDevice) []models.PCIeDevice {
|
||||
out := make([]models.PCIeDevice, 0, len(items))
|
||||
seen := make(map[string]int)
|
||||
|
||||
2
internal/parser/vendors/generic/parser.go
vendored
2
internal/parser/vendors/generic/parser.go
vendored
@@ -10,7 +10,7 @@ import (
|
||||
)
|
||||
|
||||
// parserVersion - version of this parser module
|
||||
const parserVersion = "1.0.0"
|
||||
const parserVersion = "1.1"
|
||||
|
||||
func init() {
|
||||
parser.Register(&Parser{})
|
||||
|
||||
4
internal/parser/vendors/h3c/parser.go
vendored
4
internal/parser/vendors/h3c/parser.go
vendored
@@ -20,8 +20,8 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
parserVersionG5 = "1.0.0"
|
||||
parserVersionG6 = "1.0.0"
|
||||
parserVersionG5 = "2.1"
|
||||
parserVersionG6 = "2.1"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
||||
2
internal/parser/vendors/inspur/parser.go
vendored
2
internal/parser/vendors/inspur/parser.go
vendored
@@ -16,7 +16,7 @@ import (
|
||||
|
||||
// parserVersion - version of this parser module
|
||||
// IMPORTANT: Increment this version when making changes to parser logic!
|
||||
const parserVersion = "1.4.0"
|
||||
const parserVersion = "1.5"
|
||||
|
||||
func init() {
|
||||
parser.Register(&Parser{})
|
||||
|
||||
2
internal/parser/vendors/nvidia/parser.go
vendored
2
internal/parser/vendors/nvidia/parser.go
vendored
@@ -14,7 +14,7 @@ import (
|
||||
|
||||
// parserVersion - version of this parser module
|
||||
// IMPORTANT: Increment this version when making changes to parser logic!
|
||||
const parserVersion = "1.3.0"
|
||||
const parserVersion = "1.4"
|
||||
|
||||
func init() {
|
||||
parser.Register(&Parser{})
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
)
|
||||
|
||||
// parserVersion - version of this parser module
|
||||
const parserVersion = "1.1.0"
|
||||
const parserVersion = "1.2"
|
||||
|
||||
var bugReportDateLineRegex = regexp.MustCompile(`(?m)^Date:\s+(.+?)\s*$`)
|
||||
var dateWithTZAbbrevRegex = regexp.MustCompile(`^([A-Za-z]{3}\s+[A-Za-z]{3}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2})\s+([A-Za-z]{2,5})\s+(\d{4})$`)
|
||||
|
||||
261
internal/parser/vendors/supermicro/crashdump.go
vendored
261
internal/parser/vendors/supermicro/crashdump.go
vendored
@@ -1,261 +0,0 @@
|
||||
package supermicro
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.mchus.pro/mchus/logpile/internal/models"
|
||||
)
|
||||
|
||||
// CrashDumpData represents the structure of CDump.txt
// (the JSON document at the root of a Supermicro BMC crashdump; only the
// fields this parser consumes are modelled).
type CrashDumpData struct {
	CrashData struct {
		METADATA   Metadata       `json:"METADATA"`
		PROCESSORS ProcessorsData `json:"PROCESSORS"`
	} `json:"crash_data"`
}

// ProcessorsData contains processor crash data for the two CPU sockets
// present in the dump.
type ProcessorsData struct {
	Version string     `json:"_version"`
	CPU0    Processors `json:"cpu0"`
	CPU1    Processors `json:"cpu1"`
}

// Metadata contains crashdump metadata: per-socket CPU details, firmware
// versions, and collection/trigger information.
type Metadata struct {
	CPU0          CPUMetadata `json:"cpu0"`
	CPU1          CPUMetadata `json:"cpu1"`
	BMCFWVer      string      `json:"bmc_fw_ver"` // BMC firmware version
	BIOSId        string      `json:"bios_id"`    // BIOS identifier
	MEFWVer       string      `json:"me_fw_ver"`  // ME firmware version
	Timestamp     string      `json:"timestamp"`  // collection time; parsed as RFC3339 where possible
	TriggerType   string      `json:"trigger_type"`
	PlatformName  string      `json:"platform_name"`
	CrashdumpVer  string      `json:"crashdump_ver"`
	ResetDetected string      `json:"_reset_detected"` // any value other than ""/"NONE" raises event severity
}

// CPUMetadata contains CPU metadata. All values are strings as emitted by
// the BMC; numeric fields such as core_count are hex-encoded (see
// parseCPUInfo, which parses them with base 16).
type CPUMetadata struct {
	CPUID         string `json:"cpuid"`
	CoreMask      string `json:"core_mask"`
	CHACount      string `json:"cha_count"`
	CoreCount     string `json:"core_count"` // hex string, e.g. "0x38"
	PPIN          string `json:"ppin"`       // "0x0" is treated as "not available"
	UcodePatchVer string `json:"ucode_patch_ver"`
}

// Processors contains processor crash data for a single CPU socket.
type Processors struct {
	MCA MCAData `json:"MCA"`
}

// MCAData contains Machine Check Architecture data, keyed by uncore bank
// name; each value is a free-form JSON object whose "<bank>_status" entry
// holds the bank's hex status register (see parseMCAErrors).
type MCAData struct {
	Uncore map[string]interface{} `json:"uncore"`
}
|
||||
|
||||
// ParseCrashDump parses CDump.txt file.
//
// content is the raw JSON body of CDump.txt; decoded metadata, CPU info,
// and MCA errors are appended to result. An error is returned only when
// the JSON itself cannot be decoded.
// NOTE(review): assumes result.Hardware is already non-nil (the caller in
// Parse initializes it) — confirm before reusing elsewhere.
func ParseCrashDump(content []byte, result *models.AnalysisResult) error {
	var data CrashDumpData
	if err := json.Unmarshal(content, &data); err != nil {
		return fmt.Errorf("failed to parse CDump.txt: %w", err)
	}

	// Initialize Hardware.Firmware slice if nil
	if result.Hardware.Firmware == nil {
		result.Hardware.Firmware = make([]models.FirmwareInfo, 0)
	}

	// Parse metadata (firmware versions + crashdump trigger event)
	parseMetadata(&data.CrashData.METADATA, result)

	// Parse CPU information (socket, core count, PPIN, microcode)
	parseCPUInfo(&data.CrashData.METADATA, result)

	// Parse MCA errors (per-bank machine-check events)
	parseMCAErrors(&data.CrashData, result)

	return nil
}
|
||||
|
||||
// parseMetadata extracts metadata information
|
||||
func parseMetadata(metadata *Metadata, result *models.AnalysisResult) {
|
||||
// Store firmware versions in HardwareConfig.Firmware
|
||||
if metadata.BMCFWVer != "" {
|
||||
result.Hardware.Firmware = append(result.Hardware.Firmware, models.FirmwareInfo{
|
||||
DeviceName: "BMC",
|
||||
Version: metadata.BMCFWVer,
|
||||
})
|
||||
}
|
||||
|
||||
if metadata.BIOSId != "" {
|
||||
result.Hardware.Firmware = append(result.Hardware.Firmware, models.FirmwareInfo{
|
||||
DeviceName: "BIOS",
|
||||
Version: metadata.BIOSId,
|
||||
})
|
||||
}
|
||||
|
||||
if metadata.MEFWVer != "" {
|
||||
result.Hardware.Firmware = append(result.Hardware.Firmware, models.FirmwareInfo{
|
||||
DeviceName: "ME",
|
||||
Version: metadata.MEFWVer,
|
||||
})
|
||||
}
|
||||
|
||||
// Create event for crashdump trigger
|
||||
timestamp := time.Now()
|
||||
if metadata.Timestamp != "" {
|
||||
if t, err := time.Parse(time.RFC3339, metadata.Timestamp); err == nil {
|
||||
timestamp = t
|
||||
}
|
||||
}
|
||||
|
||||
triggerType := metadata.TriggerType
|
||||
if triggerType == "" {
|
||||
triggerType = "Unknown"
|
||||
}
|
||||
|
||||
severity := models.SeverityInfo
|
||||
if metadata.ResetDetected != "" && metadata.ResetDetected != "NONE" {
|
||||
severity = models.SeverityWarning
|
||||
}
|
||||
|
||||
result.Events = append(result.Events, models.Event{
|
||||
Timestamp: timestamp,
|
||||
Source: "Crashdump",
|
||||
EventType: "System Crashdump",
|
||||
Description: fmt.Sprintf("Crashdump collected (%s)", triggerType),
|
||||
Severity: severity,
|
||||
RawData: fmt.Sprintf("Version: %s, Reset: %s", metadata.CrashdumpVer, metadata.ResetDetected),
|
||||
})
|
||||
}
|
||||
|
||||
// parseCPUInfo extracts CPU information
|
||||
func parseCPUInfo(metadata *Metadata, result *models.AnalysisResult) {
|
||||
cpus := []struct {
|
||||
socket int
|
||||
data CPUMetadata
|
||||
}{
|
||||
{0, metadata.CPU0},
|
||||
{1, metadata.CPU1},
|
||||
}
|
||||
|
||||
for _, cpu := range cpus {
|
||||
if cpu.data.CPUID == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse core count
|
||||
coreCount := 0
|
||||
if cpu.data.CoreCount != "" {
|
||||
if count, err := strconv.ParseInt(strings.TrimPrefix(cpu.data.CoreCount, "0x"), 16, 64); err == nil {
|
||||
coreCount = int(count)
|
||||
}
|
||||
}
|
||||
|
||||
cpuModel := models.CPU{
|
||||
Socket: cpu.socket,
|
||||
Model: fmt.Sprintf("Intel CPU (CPUID: %s)", cpu.data.CPUID),
|
||||
Cores: coreCount,
|
||||
}
|
||||
|
||||
// Add PPIN
|
||||
if cpu.data.PPIN != "" && cpu.data.PPIN != "0x0" {
|
||||
cpuModel.PPIN = cpu.data.PPIN
|
||||
}
|
||||
|
||||
result.Hardware.CPUs = append(result.Hardware.CPUs, cpuModel)
|
||||
|
||||
// Add microcode version to firmware list
|
||||
if cpu.data.UcodePatchVer != "" {
|
||||
result.Hardware.Firmware = append(result.Hardware.Firmware, models.FirmwareInfo{
|
||||
DeviceName: fmt.Sprintf("CPU%d Microcode", cpu.socket),
|
||||
Version: cpu.data.UcodePatchVer,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parseMCAErrors extracts Machine Check Architecture errors.
//
// It walks each CPU's "uncore" MCA banks, decodes the bank's hex status
// register, and emits an Event for every bank whose status has the Valid
// bit set. Status bit layout follows the Intel IA32_MCi_STATUS register:
// bit 63 = VAL (valid), bit 61 = UC (uncorrected).
// The parameter type mirrors the anonymous CrashData struct embedded in
// CrashDumpData.
func parseMCAErrors(crashData *struct {
	METADATA   Metadata       `json:"METADATA"`
	PROCESSORS ProcessorsData `json:"PROCESSORS"`
}, result *models.AnalysisResult) {
	// Prefer the crashdump's own timestamp; fall back to "now" when absent
	// or not RFC3339-parseable.
	timestamp := time.Now()
	if crashData.METADATA.Timestamp != "" {
		if t, err := time.Parse(time.RFC3339, crashData.METADATA.Timestamp); err == nil {
			timestamp = t
		}
	}

	// Parse each CPU's MCA data
	cpuProcs := []struct {
		name string
		data Processors
	}{
		{"cpu0", crashData.PROCESSORS.CPU0},
		{"cpu1", crashData.PROCESSORS.CPU1},
	}

	for _, cpu := range cpuProcs {
		if cpu.data.MCA.Uncore == nil {
			continue
		}

		// Check each MCA bank for errors. Non-object entries are skipped.
		for bankName, bankDataRaw := range cpu.data.MCA.Uncore {
			bankData, ok := bankDataRaw.(map[string]interface{})
			if !ok {
				continue
			}

			// Look for status register, keyed "<bank>_status" in lowercase.
			statusKey := strings.ToLower(bankName) + "_status"
			statusRaw, ok := bankData[statusKey]
			if !ok {
				continue
			}

			statusStr, ok := statusRaw.(string)
			if !ok {
				continue
			}

			// Parse status value (hex string, optional "0x" prefix).
			status, err := strconv.ParseUint(strings.TrimPrefix(statusStr, "0x"), 16, 64)
			if err != nil {
				continue
			}

			// Check if MCA error is valid (bit 63 = Valid)
			if status&(1<<63) != 0 {
				// MCA error detected
				severity := models.SeverityWarning
				if status&(1<<61) != 0 { // UC bit = uncorrected error
					severity = models.SeverityCritical
				}

				description := fmt.Sprintf("MCA Error in %s bank %s", cpu.name, bankName)
				if status&(1<<61) != 0 {
					description += " (Uncorrected)"
				} else {
					description += " (Corrected)"
				}

				result.Events = append(result.Events, models.Event{
					Timestamp:   timestamp,
					Source:      "MCA",
					EventType:   "Machine Check",
					Description: description,
					Severity:    severity,
					RawData:     fmt.Sprintf("Status: %s, CPU: %s, Bank: %s", statusStr, cpu.name, bankName),
				})
			}
		}
	}
}
|
||||
98
internal/parser/vendors/supermicro/parser.go
vendored
98
internal/parser/vendors/supermicro/parser.go
vendored
@@ -1,98 +0,0 @@
|
||||
// Package supermicro provides parser for Supermicro BMC crashdump archives
|
||||
// Tested with: Supermicro SYS-821GE-TNHR (Crashdump format)
|
||||
//
|
||||
// IMPORTANT: Increment parserVersion when modifying parser logic!
|
||||
// This helps track which version was used to parse specific logs.
|
||||
package supermicro
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"git.mchus.pro/mchus/logpile/internal/models"
|
||||
"git.mchus.pro/mchus/logpile/internal/parser"
|
||||
)
|
||||
|
||||
// parserVersion - version of this parser module
// IMPORTANT: Increment this version when making changes to parser logic!
// It is recorded alongside parsed results so old analyses can be traced
// back to the parser revision that produced them.
const parserVersion = "1.0.0"

// init registers this parser with the central vendor-parser registry at
// package load time (triggered by the blank import in vendors.go).
func init() {
	parser.Register(&Parser{})
}
|
||||
|
||||
// Parser implements VendorParser for Supermicro servers.
// It is stateless; a single zero-value instance is registered in init().
type Parser struct{}

// Name returns human-readable parser name.
func (p *Parser) Name() string {
	return "SMC Crash Dump Parser"
}

// Vendor returns vendor identifier.
func (p *Parser) Vendor() string {
	return "supermicro"
}

// Version returns parser version.
// IMPORTANT: Update parserVersion constant when modifying parser logic!
func (p *Parser) Version() string {
	return parserVersion
}
|
||||
|
||||
// Detect checks if archive matches Supermicro crashdump format
|
||||
// Returns confidence 0-100
|
||||
func (p *Parser) Detect(files []parser.ExtractedFile) int {
|
||||
confidence := 0
|
||||
|
||||
for _, f := range files {
|
||||
path := strings.ToLower(f.Path)
|
||||
|
||||
// Strong indicator for Supermicro Crashdump format
|
||||
if strings.HasSuffix(path, "cdump.txt") {
|
||||
// Check if it's really Supermicro crashdump format
|
||||
if containsCrashdumpMarkers(f.Content) {
|
||||
confidence += 80
|
||||
}
|
||||
}
|
||||
|
||||
// Cap at 100
|
||||
if confidence >= 100 {
|
||||
return 100
|
||||
}
|
||||
}
|
||||
|
||||
return confidence
|
||||
}
|
||||
|
||||
// containsCrashdumpMarkers checks if content has Supermicro crashdump markers:
// the crash_data/METADATA envelope plus at least one known version field.
func containsCrashdumpMarkers(content []byte) bool {
	s := string(content)
	if !strings.Contains(s, "crash_data") || !strings.Contains(s, "METADATA") {
		return false
	}
	return strings.Contains(s, "bmc_fw_ver") || strings.Contains(s, "crashdump_ver")
}
|
||||
|
||||
// Parse parses Supermicro crashdump archive
|
||||
func (p *Parser) Parse(files []parser.ExtractedFile) (*models.AnalysisResult, error) {
|
||||
result := &models.AnalysisResult{
|
||||
Events: make([]models.Event, 0),
|
||||
FRU: make([]models.FRUInfo, 0),
|
||||
Sensors: make([]models.SensorReading, 0),
|
||||
}
|
||||
|
||||
// Initialize hardware config
|
||||
result.Hardware = &models.HardwareConfig{
|
||||
CPUs: make([]models.CPU, 0),
|
||||
}
|
||||
|
||||
// Parse CDump.txt (JSON crashdump)
|
||||
if f := parser.FindFileByName(files, "CDump.txt"); f != nil {
|
||||
if err := ParseCrashDump(f.Content, result); err != nil {
|
||||
// Log error but continue parsing other files
|
||||
_ = err // Ignore error for now
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
3
internal/parser/vendors/vendors.go
vendored
3
internal/parser/vendors/vendors.go
vendored
@@ -4,18 +4,17 @@ package vendors
|
||||
|
||||
import (
|
||||
// Import vendor modules to trigger their init() registration
|
||||
_ "git.mchus.pro/mchus/logpile/internal/parser/vendors/dell"
|
||||
_ "git.mchus.pro/mchus/logpile/internal/parser/vendors/h3c"
|
||||
_ "git.mchus.pro/mchus/logpile/internal/parser/vendors/inspur"
|
||||
_ "git.mchus.pro/mchus/logpile/internal/parser/vendors/nvidia"
|
||||
_ "git.mchus.pro/mchus/logpile/internal/parser/vendors/nvidia_bug_report"
|
||||
_ "git.mchus.pro/mchus/logpile/internal/parser/vendors/supermicro"
|
||||
_ "git.mchus.pro/mchus/logpile/internal/parser/vendors/unraid"
|
||||
_ "git.mchus.pro/mchus/logpile/internal/parser/vendors/xigmanas"
|
||||
|
||||
// Generic fallback parser (must be last for lowest priority)
|
||||
_ "git.mchus.pro/mchus/logpile/internal/parser/vendors/generic"
|
||||
// Future vendors:
|
||||
// _ "git.mchus.pro/mchus/logpile/internal/parser/vendors/dell"
|
||||
// _ "git.mchus.pro/mchus/logpile/internal/parser/vendors/hpe"
|
||||
// _ "git.mchus.pro/mchus/logpile/internal/parser/vendors/lenovo"
|
||||
)
|
||||
|
||||
2
internal/parser/vendors/xigmanas/parser.go
vendored
2
internal/parser/vendors/xigmanas/parser.go
vendored
@@ -12,7 +12,7 @@ import (
|
||||
)
|
||||
|
||||
// parserVersion - increment when parsing logic changes.
|
||||
const parserVersion = "2.1.0"
|
||||
const parserVersion = "2.2"
|
||||
|
||||
func init() {
|
||||
parser.Register(&Parser{})
|
||||
|
||||
Reference in New Issue
Block a user