Add Unraid diagnostics parser and fix zip upload support

Implements comprehensive parser for Unraid diagnostics archives with support for:
- System information (OS version, BIOS, motherboard)
- CPU details from lscpu (model, cores, threads, frequency)
- Memory information
- Storage devices with SMART data integration
- Temperature sensors from disk array
- System event logs

Parser intelligently merges data from multiple sources:
- SMART files provide detailed disk information (model, S/N, firmware)
- vars.txt provides disk configuration and filesystem types
- Deduplication ensures clean results

Also fixes critical bug where zip archives could not be uploaded via web interface
due to missing extractZipFromReader implementation.

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-02-05 23:54:55 +03:00
parent 7d9135dc63
commit aa22034944
4 changed files with 919 additions and 0 deletions

View File

@@ -47,6 +47,8 @@ func ExtractArchiveFromReader(r io.Reader, filename string) ([]ExtractedFile, er
return extractTarGzFromReader(r, filename) return extractTarGzFromReader(r, filename)
case ".tar": case ".tar":
return extractTarFromReader(r) return extractTarFromReader(r)
case ".zip":
return extractZipFromReader(r)
case ".txt", ".log": case ".txt", ".log":
return extractSingleFileFromReader(r, filename) return extractSingleFileFromReader(r, filename)
default: default:
@@ -219,6 +221,54 @@ func extractZip(archivePath string) ([]ExtractedFile, error) {
return files, nil return files, nil
} }
func extractZipFromReader(r io.Reader) ([]ExtractedFile, error) {
// Read all data into memory
data, err := io.ReadAll(r)
if err != nil {
return nil, fmt.Errorf("read zip data: %w", err)
}
// Create a ReaderAt from the byte slice
readerAt := bytes.NewReader(data)
// Open the zip archive
zipReader, err := zip.NewReader(readerAt, int64(len(data)))
if err != nil {
return nil, fmt.Errorf("open zip: %w", err)
}
var files []ExtractedFile
for _, f := range zipReader.File {
if f.FileInfo().IsDir() {
continue
}
// Skip large files (>10MB)
if f.FileInfo().Size() > 10*1024*1024 {
continue
}
rc, err := f.Open()
if err != nil {
return nil, fmt.Errorf("open file %s: %w", f.Name, err)
}
content, err := io.ReadAll(rc)
rc.Close()
if err != nil {
return nil, fmt.Errorf("read file %s: %w", f.Name, err)
}
files = append(files, ExtractedFile{
Path: f.Name,
Content: content,
})
}
return files, nil
}
func extractSingleFile(path string) ([]ExtractedFile, error) { func extractSingleFile(path string) ([]ExtractedFile, error) {
f, err := os.Open(path) f, err := os.Open(path)
if err != nil { if err != nil {

591
internal/parser/vendors/unraid/parser.go vendored Normal file
View File

@@ -0,0 +1,591 @@
// Package unraid provides parser for Unraid diagnostics archives.
package unraid
import (
"bufio"
"regexp"
"strconv"
"strings"
"time"
"git.mchus.pro/mchus/logpile/internal/models"
"git.mchus.pro/mchus/logpile/internal/parser"
)
// parserVersion - increment when parsing logic changes.
const parserVersion = "1.0.0"
func init() {
parser.Register(&Parser{})
}
// Parser implements VendorParser for Unraid diagnostics.
type Parser struct{}
func (p *Parser) Name() string { return "Unraid Parser" }
func (p *Parser) Vendor() string { return "unraid" }
func (p *Parser) Version() string { return parserVersion }
// Detect checks if files contain typical Unraid markers.
func (p *Parser) Detect(files []parser.ExtractedFile) int {
confidence := 0
hasUnraidVersion := false
hasDiagnosticsDir := false
hasVarsParity := false
for _, f := range files {
path := strings.ToLower(f.Path)
content := string(f.Content)
// Check for unraid version file
if strings.Contains(path, "unraid-") && strings.HasSuffix(path, ".txt") {
hasUnraidVersion = true
confidence += 40
}
// Check for Unraid-specific directories
if strings.Contains(path, "diagnostics-") &&
(strings.Contains(path, "/system/") ||
strings.Contains(path, "/smart/") ||
strings.Contains(path, "/config/")) {
hasDiagnosticsDir = true
if confidence < 60 {
confidence += 20
}
}
// Check file content for Unraid markers
if strings.Contains(content, "Unraid kernel build") {
confidence += 50
}
// Check for vars.txt with disk array info
if strings.Contains(path, "vars.txt") && strings.Contains(content, "[parity]") {
hasVarsParity = true
confidence += 30
}
if confidence >= 100 {
return 100
}
}
// Boost confidence if we see multiple key indicators together
if hasUnraidVersion && (hasDiagnosticsDir || hasVarsParity) {
confidence += 20
}
if confidence > 100 {
return 100
}
return confidence
}
// Parse parses Unraid diagnostics and returns normalized data.
func (p *Parser) Parse(files []parser.ExtractedFile) (*models.AnalysisResult, error) {
result := &models.AnalysisResult{
Events: make([]models.Event, 0),
FRU: make([]models.FRUInfo, 0),
Sensors: make([]models.SensorReading, 0),
Hardware: &models.HardwareConfig{
Firmware: make([]models.FirmwareInfo, 0),
CPUs: make([]models.CPU, 0),
Memory: make([]models.MemoryDIMM, 0),
Storage: make([]models.Storage, 0),
},
}
// Track storage by slot to avoid duplicates
storageBySlot := make(map[string]*models.Storage)
// Parse different file types
for _, f := range files {
path := strings.ToLower(f.Path)
content := string(f.Content)
switch {
case strings.Contains(path, "unraid-") && strings.HasSuffix(path, ".txt"):
parseVersionFile(content, result)
case strings.HasSuffix(path, "/system/lscpu.txt") || strings.HasSuffix(path, "\\system\\lscpu.txt"):
parseLsCPU(content, result)
case strings.HasSuffix(path, "/system/motherboard.txt") || strings.HasSuffix(path, "\\system\\motherboard.txt"):
parseMotherboard(content, result)
case strings.HasSuffix(path, "/system/memory.txt") || strings.HasSuffix(path, "\\system\\memory.txt"):
parseMemory(content, result)
case strings.HasSuffix(path, "/system/vars.txt") || strings.HasSuffix(path, "\\system\\vars.txt"):
parseVarsToMap(content, storageBySlot, result)
case strings.Contains(path, "/smart/") && strings.HasSuffix(path, ".txt"):
parseSMARTFileToMap(content, f.Path, storageBySlot, result)
case strings.HasSuffix(path, "/logs/syslog.txt") || strings.HasSuffix(path, "\\logs\\syslog.txt"):
parseSyslog(content, result)
}
}
// Convert storage map to slice
for _, disk := range storageBySlot {
result.Hardware.Storage = append(result.Hardware.Storage, *disk)
}
return result, nil
}
func parseVersionFile(content string, result *models.AnalysisResult) {
lines := strings.Split(content, "\n")
if len(lines) > 0 {
version := strings.TrimSpace(lines[0])
if version != "" {
result.Hardware.Firmware = append(result.Hardware.Firmware, models.FirmwareInfo{
DeviceName: "Unraid OS",
Version: version,
})
}
}
}
func parseLsCPU(content string, result *models.AnalysisResult) {
// Normalize line endings
content = strings.ReplaceAll(content, "\r\n", "\n")
var cpu models.CPU
cpu.Socket = 0 // Default to socket 0
// Parse CPU model - handle multiple spaces
if m := regexp.MustCompile(`(?m)^Model name:\s+(.+)$`).FindStringSubmatch(content); len(m) == 2 {
cpu.Model = strings.TrimSpace(m[1])
}
// Parse CPU(s) - total thread count
if m := regexp.MustCompile(`(?m)^CPU\(s\):\s+(\d+)$`).FindStringSubmatch(content); len(m) == 2 {
cpu.Threads = parseInt(m[1])
}
// Parse cores per socket
if m := regexp.MustCompile(`(?m)^Core\(s\) per socket:\s+(\d+)$`).FindStringSubmatch(content); len(m) == 2 {
cpu.Cores = parseInt(m[1])
}
// Parse CPU max MHz
if m := regexp.MustCompile(`(?m)^CPU max MHz:\s+([\d.]+)$`).FindStringSubmatch(content); len(m) == 2 {
cpu.FrequencyMHz = int(parseFloat(m[1]))
}
// If no max MHz, try current MHz
if cpu.FrequencyMHz == 0 {
if m := regexp.MustCompile(`(?m)^CPU MHz:\s+([\d.]+)$`).FindStringSubmatch(content); len(m) == 2 {
cpu.FrequencyMHz = int(parseFloat(m[1]))
}
}
// Only add if we got at least the model
if cpu.Model != "" {
result.Hardware.CPUs = append(result.Hardware.CPUs, cpu)
}
}
func parseMotherboard(content string, result *models.AnalysisResult) {
var board models.BoardInfo
// Parse manufacturer from dmidecode output
lines := strings.Split(content, "\n")
inBIOSSection := false
for _, line := range lines {
trimmed := strings.TrimSpace(line)
if strings.Contains(trimmed, "BIOS Information") {
inBIOSSection = true
continue
}
if inBIOSSection {
if strings.HasPrefix(trimmed, "Vendor:") {
parts := strings.SplitN(trimmed, ":", 2)
if len(parts) == 2 {
board.Manufacturer = strings.TrimSpace(parts[1])
}
} else if strings.HasPrefix(trimmed, "Version:") {
parts := strings.SplitN(trimmed, ":", 2)
if len(parts) == 2 {
biosVersion := strings.TrimSpace(parts[1])
result.Hardware.Firmware = append(result.Hardware.Firmware, models.FirmwareInfo{
DeviceName: "System BIOS",
Version: biosVersion,
})
}
} else if strings.HasPrefix(trimmed, "Release Date:") {
// Could extract BIOS date if needed
}
}
}
// Extract product name from first line
if len(lines) > 0 {
firstLine := strings.TrimSpace(lines[0])
if firstLine != "" {
board.ProductName = firstLine
}
}
result.Hardware.BoardInfo = board
}
func parseMemory(content string, result *models.AnalysisResult) {
// Parse memory from free output
// Example: Mem: 50Gi 11Gi 1.4Gi 565Mi 39Gi 39Gi
if m := regexp.MustCompile(`(?m)^Mem:\s+(\d+(?:\.\d+)?)(Ki|Mi|Gi|Ti)`).FindStringSubmatch(content); len(m) >= 3 {
size := parseFloat(m[1])
unit := m[2]
var sizeMB int
switch unit {
case "Ki":
sizeMB = int(size / 1024)
case "Mi":
sizeMB = int(size)
case "Gi":
sizeMB = int(size * 1024)
case "Ti":
sizeMB = int(size * 1024 * 1024)
}
if sizeMB > 0 {
result.Hardware.Memory = append(result.Hardware.Memory, models.MemoryDIMM{
Slot: "system",
Present: true,
SizeMB: sizeMB,
Type: "DRAM",
Status: "ok",
})
}
}
}
func parseVarsToMap(content string, storageBySlot map[string]*models.Storage, result *models.AnalysisResult) {
// Normalize line endings
content = strings.ReplaceAll(content, "\r\n", "\n")
// Parse PHP-style array from vars.txt
// Extract only the first "disks" section to avoid duplicates
disksStart := strings.Index(content, "disks\n(")
if disksStart == -1 {
return
}
// Find the end of this disks array (look for next top-level key or end)
remaining := content[disksStart:]
endPattern := regexp.MustCompile(`(?m)^[a-z_]+\n\(`)
endMatches := endPattern.FindAllStringIndex(remaining, -1)
var disksSection string
if len(endMatches) > 1 {
// Use second match as end (first match is "disks" itself)
disksSection = remaining[:endMatches[1][0]]
} else {
disksSection = remaining
}
// Look for disk entries within this section only
diskRe := regexp.MustCompile(`(?m)^\s+\[(disk\d+|parity|cache\d*)\]\s+=>\s+Array`)
matches := diskRe.FindAllStringSubmatch(disksSection, -1)
seen := make(map[string]bool)
for _, match := range matches {
if len(match) < 2 {
continue
}
diskName := match[1]
// Skip if already processed
if seen[diskName] {
continue
}
seen[diskName] = true
// Find the section for this disk
diskSection := extractDiskSection(disksSection, diskName)
if diskSection == "" {
continue
}
var disk models.Storage
disk.Slot = diskName
// Parse disk properties
if m := regexp.MustCompile(`\[device\]\s*=>\s*(\w+)`).FindStringSubmatch(diskSection); len(m) == 2 {
disk.Interface = "SATA (" + m[1] + ")"
}
if m := regexp.MustCompile(`\[id\]\s*=>\s*([^\n]+)`).FindStringSubmatch(diskSection); len(m) == 2 {
idValue := strings.TrimSpace(m[1])
// Only use if it's not empty or a placeholder
if idValue != "" && !strings.Contains(idValue, "=>") {
disk.Model = idValue
}
}
if m := regexp.MustCompile(`\[size\]\s*=>\s*(\d+)`).FindStringSubmatch(diskSection); len(m) == 2 {
sizeKB := parseInt(m[1])
if sizeKB > 0 {
disk.SizeGB = sizeKB / (1024 * 1024) // Convert KB to GB
}
}
if m := regexp.MustCompile(`\[temp\]\s*=>\s*(\d+)`).FindStringSubmatch(diskSection); len(m) == 2 {
temp := parseInt(m[1])
if temp > 0 {
result.Sensors = append(result.Sensors, models.SensorReading{
Name: diskName + "_temp",
Type: "temperature",
Value: float64(temp),
Unit: "C",
Status: getTempStatus(temp),
RawValue: strconv.Itoa(temp),
})
}
}
if m := regexp.MustCompile(`\[fsType\]\s*=>\s*(\w+)`).FindStringSubmatch(diskSection); len(m) == 2 {
fsType := m[1]
if fsType != "" && fsType != "auto" {
disk.Type = fsType
}
}
disk.Present = true
// Only add/merge disks with meaningful data
if disk.Model != "" && disk.SizeGB > 0 {
// Check if we already have this disk from SMART files
if existing, ok := storageBySlot[diskName]; ok {
// Merge vars.txt data into existing entry, preferring SMART data
if existing.Model == "" && disk.Model != "" {
existing.Model = disk.Model
}
if existing.SizeGB == 0 && disk.SizeGB > 0 {
existing.SizeGB = disk.SizeGB
}
if existing.Type == "" && disk.Type != "" {
existing.Type = disk.Type
}
if existing.Interface == "" && disk.Interface != "" {
existing.Interface = disk.Interface
}
// vars.txt doesn't have serial/firmware, so don't overwrite from SMART
} else {
// New disk not in SMART data
storageBySlot[diskName] = &disk
}
}
}
}
func extractDiskSection(content, diskName string) string {
// Find the start of this disk's array section
startPattern := regexp.MustCompile(`(?m)^\s+\[` + regexp.QuoteMeta(diskName) + `\]\s+=>\s+Array\s*\n\s+\(`)
startIdx := startPattern.FindStringIndex(content)
if startIdx == nil {
return ""
}
// Find the end (next disk or end of disks array)
endPattern := regexp.MustCompile(`(?m)^\s+\)`)
remainingContent := content[startIdx[1]:]
endIdx := endPattern.FindStringIndex(remainingContent)
if endIdx == nil {
return remainingContent
}
return remainingContent[:endIdx[0]]
}
func parseSMARTFileToMap(content, filePath string, storageBySlot map[string]*models.Storage, result *models.AnalysisResult) {
// Extract disk name from filename
// Example: ST4000NM000B-2TF100_WX103EC9-20260205-2333 disk1 (sdi).txt
diskName := ""
if m := regexp.MustCompile(`(disk\d+|parity|cache\d*)`).FindStringSubmatch(filePath); len(m) > 0 {
diskName = m[1]
}
var disk models.Storage
disk.Slot = diskName
// Parse device model
if m := regexp.MustCompile(`(?m)^Device Model:\s+(.+)$`).FindStringSubmatch(content); len(m) == 2 {
disk.Model = strings.TrimSpace(m[1])
}
// Parse serial number
if m := regexp.MustCompile(`(?m)^Serial Number:\s+(.+)$`).FindStringSubmatch(content); len(m) == 2 {
disk.SerialNumber = strings.TrimSpace(m[1])
}
// Parse firmware version
if m := regexp.MustCompile(`(?m)^Firmware Version:\s+(.+)$`).FindStringSubmatch(content); len(m) == 2 {
disk.Firmware = strings.TrimSpace(m[1])
}
// Parse capacity
if m := regexp.MustCompile(`(?m)^User Capacity:\s+([\d,]+)\s+bytes`).FindStringSubmatch(content); len(m) == 2 {
capacityStr := strings.ReplaceAll(m[1], ",", "")
if capacity, err := strconv.ParseInt(capacityStr, 10, 64); err == nil {
disk.SizeGB = int(capacity / 1_000_000_000)
}
}
// Parse rotation rate
if m := regexp.MustCompile(`(?m)^Rotation Rate:\s+(.+)$`).FindStringSubmatch(content); len(m) == 2 {
rateStr := strings.TrimSpace(m[1])
if strings.Contains(strings.ToLower(rateStr), "solid state") {
disk.Type = "ssd"
} else {
disk.Type = "hdd"
}
}
// Parse SATA version for interface
if m := regexp.MustCompile(`(?m)^SATA Version is:\s+(.+?)(?:,|$)`).FindStringSubmatch(content); len(m) == 2 {
disk.Interface = strings.TrimSpace(m[1])
}
// Parse SMART health
if m := regexp.MustCompile(`(?m)^SMART overall-health self-assessment test result:\s+(.+)$`).FindStringSubmatch(content); len(m) == 2 {
health := strings.TrimSpace(m[1])
if !strings.EqualFold(health, "PASSED") {
result.Events = append(result.Events, models.Event{
Timestamp: time.Now(),
Source: "SMART",
EventType: "Disk Health",
Severity: models.SeverityWarning,
Description: "SMART health check failed for " + diskName,
RawData: health,
})
}
}
disk.Present = true
// Only add/merge if we got meaningful data
if disk.Model != "" || disk.SerialNumber != "" {
// Check if we already have this disk from vars.txt
if existing, ok := storageBySlot[diskName]; ok {
// Merge SMART data into existing entry
if existing.Model == "" && disk.Model != "" {
existing.Model = disk.Model
}
if existing.SerialNumber == "" && disk.SerialNumber != "" {
existing.SerialNumber = disk.SerialNumber
}
if existing.Firmware == "" && disk.Firmware != "" {
existing.Firmware = disk.Firmware
}
if existing.SizeGB == 0 && disk.SizeGB > 0 {
existing.SizeGB = disk.SizeGB
}
if existing.Type == "" && disk.Type != "" {
existing.Type = disk.Type
}
if existing.Interface == "" && disk.Interface != "" {
existing.Interface = disk.Interface
}
} else {
// New disk not in vars.txt
storageBySlot[diskName] = &disk
}
}
}
func parseSyslog(content string, result *models.AnalysisResult) {
scanner := bufio.NewScanner(strings.NewReader(content))
lineCount := 0
maxLines := 100 // Limit parsing to avoid too many events
for scanner.Scan() && lineCount < maxLines {
line := scanner.Text()
if strings.TrimSpace(line) == "" {
continue
}
// Parse syslog line
// Example: Feb 5 23:33:01 box3 kernel: Linux version 6.12.54-Unraid
timestamp, message, severity := parseSyslogLine(line)
result.Events = append(result.Events, models.Event{
Timestamp: timestamp,
Source: "syslog",
EventType: "System Log",
Severity: severity,
Description: message,
RawData: line,
})
lineCount++
}
}
func parseSyslogLine(line string) (time.Time, string, models.Severity) {
// Simple syslog parser
// Format: Feb 5 23:33:01 hostname process[pid]: message
timestamp := time.Now()
message := line
severity := models.SeverityInfo
// Try to parse timestamp
syslogRe := regexp.MustCompile(`^(\w{3}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2})\s+\S+\s+(.+)$`)
if m := syslogRe.FindStringSubmatch(line); len(m) == 3 {
timeStr := m[1]
message = m[2]
// Parse timestamp (add current year)
year := time.Now().Year()
if ts, err := time.Parse("Jan 2 15:04:05 2006", timeStr+" "+strconv.Itoa(year)); err == nil {
timestamp = ts
}
}
// Classify severity
lowerMsg := strings.ToLower(message)
switch {
case strings.Contains(lowerMsg, "panic"),
strings.Contains(lowerMsg, "fatal"),
strings.Contains(lowerMsg, "critical"):
severity = models.SeverityCritical
case strings.Contains(lowerMsg, "error"),
strings.Contains(lowerMsg, "warning"),
strings.Contains(lowerMsg, "failed"):
severity = models.SeverityWarning
default:
severity = models.SeverityInfo
}
return timestamp, message, severity
}
func getTempStatus(temp int) string {
switch {
case temp >= 60:
return "critical"
case temp >= 50:
return "warning"
default:
return "ok"
}
}
func parseInt(s string) int {
v, _ := strconv.Atoi(strings.TrimSpace(s))
return v
}
func parseFloat(s string) float64 {
v, _ := strconv.ParseFloat(strings.TrimSpace(s), 64)
return v
}

View File

@@ -0,0 +1,277 @@
package unraid
import (
"testing"
"git.mchus.pro/mchus/logpile/internal/parser"
)
func TestDetect(t *testing.T) {
tests := []struct {
name string
files []parser.ExtractedFile
wantMin int
wantMax int
shouldFind bool
}{
{
name: "typical unraid diagnostics",
files: []parser.ExtractedFile{
{
Path: "box3-diagnostics-20260205-2333/unraid-7.2.0.txt",
Content: []byte("7.2.0\n"),
},
{
Path: "box3-diagnostics-20260205-2333/system/vars.txt",
Content: []byte("[parity] => Array\n[disk1] => Array\n"),
},
},
wantMin: 50,
wantMax: 100,
shouldFind: true,
},
{
name: "unraid with kernel marker",
files: []parser.ExtractedFile{
{
Path: "diagnostics/system/lscpu.txt",
Content: []byte("Unraid kernel build 6.12.54"),
},
},
wantMin: 50,
wantMax: 100,
shouldFind: true,
},
{
name: "not unraid",
files: []parser.ExtractedFile{
{
Path: "some/random/file.txt",
Content: []byte("just some random content"),
},
},
wantMin: 0,
wantMax: 0,
shouldFind: false,
},
}
p := &Parser{}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := p.Detect(tt.files)
if tt.shouldFind && got < tt.wantMin {
t.Errorf("Detect() = %v, want at least %v", got, tt.wantMin)
}
if got > tt.wantMax {
t.Errorf("Detect() = %v, want at most %v", got, tt.wantMax)
}
if !tt.shouldFind && got > 0 {
t.Errorf("Detect() = %v, want 0 (should not detect)", got)
}
})
}
}
func TestParse_Version(t *testing.T) {
files := []parser.ExtractedFile{
{
Path: "unraid-7.2.0.txt",
Content: []byte("7.2.0\n"),
},
}
p := &Parser{}
result, err := p.Parse(files)
if err != nil {
t.Fatalf("Parse() error = %v", err)
}
if len(result.Hardware.Firmware) == 0 {
t.Fatal("expected firmware info")
}
fw := result.Hardware.Firmware[0]
if fw.DeviceName != "Unraid OS" {
t.Errorf("DeviceName = %v, want 'Unraid OS'", fw.DeviceName)
}
if fw.Version != "7.2.0" {
t.Errorf("Version = %v, want '7.2.0'", fw.Version)
}
}
func TestParse_CPU(t *testing.T) {
lscpuContent := `Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
CPU(s): 16
Model name: Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz
Core(s) per socket: 8
Socket(s): 1
CPU max MHz: 3400.0000
`
files := []parser.ExtractedFile{
{
Path: "diagnostics/system/lscpu.txt",
Content: []byte(lscpuContent),
},
}
p := &Parser{}
result, err := p.Parse(files)
if err != nil {
t.Fatalf("Parse() error = %v", err)
}
if len(result.Hardware.CPUs) == 0 {
t.Fatal("expected CPU info")
}
cpu := result.Hardware.CPUs[0]
if cpu.Model != "Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz" {
t.Errorf("Model = %v", cpu.Model)
}
if cpu.Cores != 8 {
t.Errorf("Cores = %v, want 8", cpu.Cores)
}
if cpu.Threads != 16 {
t.Errorf("Threads = %v, want 16", cpu.Threads)
}
if cpu.FrequencyMHz != 3400 {
t.Errorf("FrequencyMHz = %v, want 3400", cpu.FrequencyMHz)
}
}
func TestParse_Memory(t *testing.T) {
memContent := ` total used free shared buff/cache available
Mem: 50Gi 11Gi 1.4Gi 565Mi 39Gi 39Gi
Swap: 0B 0B 0B
Total: 50Gi 11Gi 1.4Gi
`
files := []parser.ExtractedFile{
{
Path: "diagnostics/system/memory.txt",
Content: []byte(memContent),
},
}
p := &Parser{}
result, err := p.Parse(files)
if err != nil {
t.Fatalf("Parse() error = %v", err)
}
if len(result.Hardware.Memory) == 0 {
t.Fatal("expected memory info")
}
mem := result.Hardware.Memory[0]
expectedSizeMB := 50 * 1024 // 50 GiB in MB
if mem.SizeMB != expectedSizeMB {
t.Errorf("SizeMB = %v, want %v", mem.SizeMB, expectedSizeMB)
}
if mem.Type != "DRAM" {
t.Errorf("Type = %v, want 'DRAM'", mem.Type)
}
}
func TestParse_SMART(t *testing.T) {
smartContent := `smartctl 7.5 2025-04-30 r5714 [x86_64-linux-6.12.54-Unraid] (local build)
Copyright (C) 2002-25, Bruce Allen, Christian Franke, www.smartmontools.org
=== START OF INFORMATION SECTION ===
Device Model: ST4000NM000B-2TF100
Serial Number: WX103EC9
LU WWN Device Id: 5 000c50 0ed59db60
Firmware Version: TNA1
User Capacity: 4,000,787,030,016 bytes [4.00 TB]
Sector Size: 512 bytes logical/physical
Rotation Rate: 7200 rpm
Form Factor: 3.5 inches
SATA Version is: SATA 3.3, 6.0 Gb/s (current: 6.0 Gb/s)
=== START OF READ SMART DATA SECTION ===
SMART overall-health self-assessment test result: PASSED
`
files := []parser.ExtractedFile{
{
Path: "diagnostics/smart/ST4000NM000B-2TF100_WX103EC9-20260205-2333 disk1 (sdi).txt",
Content: []byte(smartContent),
},
}
p := &Parser{}
result, err := p.Parse(files)
if err != nil {
t.Fatalf("Parse() error = %v", err)
}
if len(result.Hardware.Storage) == 0 {
t.Fatal("expected storage info")
}
disk := result.Hardware.Storage[0]
if disk.Model != "ST4000NM000B-2TF100" {
t.Errorf("Model = %v, want 'ST4000NM000B-2TF100'", disk.Model)
}
if disk.SerialNumber != "WX103EC9" {
t.Errorf("SerialNumber = %v, want 'WX103EC9'", disk.SerialNumber)
}
if disk.Firmware != "TNA1" {
t.Errorf("Firmware = %v, want 'TNA1'", disk.Firmware)
}
if disk.SizeGB != 4000 {
t.Errorf("SizeGB = %v, want 4000", disk.SizeGB)
}
if disk.Type != "hdd" {
t.Errorf("Type = %v, want 'hdd'", disk.Type)
}
// Check that no health warnings were generated (PASSED health)
healthWarnings := 0
for _, event := range result.Events {
if event.EventType == "Disk Health" && event.Severity == "warning" {
healthWarnings++
}
}
if healthWarnings != 0 {
t.Errorf("Expected no health warnings for PASSED disk, got %v", healthWarnings)
}
}
func TestParser_Metadata(t *testing.T) {
p := &Parser{}
if p.Name() != "Unraid Parser" {
t.Errorf("Name() = %v, want 'Unraid Parser'", p.Name())
}
if p.Vendor() != "unraid" {
t.Errorf("Vendor() = %v, want 'unraid'", p.Vendor())
}
if p.Version() == "" {
t.Error("Version() should not be empty")
}
}

View File

@@ -8,6 +8,7 @@ import (
_ "git.mchus.pro/mchus/logpile/internal/parser/vendors/nvidia" _ "git.mchus.pro/mchus/logpile/internal/parser/vendors/nvidia"
_ "git.mchus.pro/mchus/logpile/internal/parser/vendors/nvidia_bug_report" _ "git.mchus.pro/mchus/logpile/internal/parser/vendors/nvidia_bug_report"
_ "git.mchus.pro/mchus/logpile/internal/parser/vendors/supermicro" _ "git.mchus.pro/mchus/logpile/internal/parser/vendors/supermicro"
_ "git.mchus.pro/mchus/logpile/internal/parser/vendors/unraid"
_ "git.mchus.pro/mchus/logpile/internal/parser/vendors/xigmanas" _ "git.mchus.pro/mchus/logpile/internal/parser/vendors/xigmanas"
// Generic fallback parser (must be last for lowest priority) // Generic fallback parser (must be last for lowest priority)