749 lines
17 KiB
Go
749 lines
17 KiB
Go
package collector
|
|
|
|
import (
|
|
"bee/audit/internal/schema"
|
|
"encoding/json"
|
|
"log/slog"
|
|
"os"
|
|
"os/exec"
|
|
"regexp"
|
|
"sort"
|
|
"strconv"
|
|
"strings"
|
|
)
|
|
|
|
// PCI vendor IDs of the RAID-controller families this collector knows how to
// query with a vendor CLI tool.
const (
	vendorBroadcomLSI = 0x1000 // Broadcom / LSI (storcli, sas2ircu/sas3ircu)
	vendorAdaptec = 0x9005 // Microchip / Adaptec (arcconf)
	vendorHPE = 0x103c // HPE Smart Array (ssacli)
	vendorIntel = 0x8086 // Intel (VROC / VMD software RAID)
)
|
|
|
|
// raidToolQuery runs an external RAID CLI tool and returns its stdout.
// Declared as a package variable so tests can stub out tool invocation.
var raidToolQuery = func(name string, args ...string) ([]byte, error) {
	return exec.Command(name, args...).Output()
}
|
|
|
|
// readMDStat reads the kernel's md (software RAID) status file.
// Declared as a package variable so tests can substitute fixture data.
var readMDStat = func() ([]byte, error) {
	return os.ReadFile("/proc/mdstat")
}
|
|
|
|
// collectRAIDStorage collects physical disks behind RAID controllers that may
|
|
// not be exposed as regular block devices.
|
|
func collectRAIDStorage(pcie []schema.HardwarePCIeDevice) []schema.HardwareStorage {
|
|
vendors := detectRAIDVendors(pcie)
|
|
if len(vendors) == 0 {
|
|
return nil
|
|
}
|
|
|
|
var out []schema.HardwareStorage
|
|
|
|
if vendors[vendorBroadcomLSI] {
|
|
if drives := collectStorcliDrives(); len(drives) > 0 {
|
|
out = append(out, drives...)
|
|
}
|
|
if drives := collectSASIrcuDrives("sas3ircu"); len(drives) > 0 {
|
|
out = append(out, drives...)
|
|
}
|
|
if drives := collectSASIrcuDrives("sas2ircu"); len(drives) > 0 {
|
|
out = append(out, drives...)
|
|
}
|
|
}
|
|
|
|
if vendors[vendorAdaptec] {
|
|
if drives := collectArcconfDrives(); len(drives) > 0 {
|
|
out = append(out, drives...)
|
|
}
|
|
}
|
|
if vendors[vendorHPE] {
|
|
if drives := collectSSACLIDrives(); len(drives) > 0 {
|
|
out = append(out, drives...)
|
|
}
|
|
}
|
|
|
|
if len(out) > 0 {
|
|
slog.Info("raid: collected physical drives", "count", len(out))
|
|
}
|
|
return out
|
|
}
|
|
|
|
func detectRAIDVendors(pcie []schema.HardwarePCIeDevice) map[int]bool {
|
|
out := map[int]bool{}
|
|
for _, dev := range pcie {
|
|
if dev.VendorID == nil {
|
|
continue
|
|
}
|
|
if isLikelyRAIDController(dev) {
|
|
out[*dev.VendorID] = true
|
|
}
|
|
}
|
|
return out
|
|
}
|
|
|
|
func isLikelyRAIDController(dev schema.HardwarePCIeDevice) bool {
|
|
if dev.DeviceClass == nil {
|
|
return false
|
|
}
|
|
c := strings.ToLower(*dev.DeviceClass)
|
|
return strings.Contains(c, "raid") ||
|
|
strings.Contains(c, "sas") ||
|
|
strings.Contains(c, "mass storage") ||
|
|
strings.Contains(c, "serial attached scsi")
|
|
}
|
|
|
|
// collectStorcliDrives queries Broadcom/LSI controllers via storcli64 and
// returns the physical drives across all controllers/enclosures/slots.
// Returns nil when the tool is unavailable.
//
// NOTE(review): parseStorcliDrivesJSON expects a "Drive Information" array in
// "Response Data"; some storcli versions key `show all J` output per-drive
// instead — confirm against the storcli builds deployed in the fleet.
func collectStorcliDrives() []schema.HardwareStorage {
	out, err := raidToolQuery("storcli64", "/call/eall/sall", "show", "all", "J")
	if err != nil {
		slog.Info("raid: storcli unavailable", "err", err)
		return nil
	}
	drives := parseStorcliDrivesJSON(out)
	if len(drives) == 0 {
		slog.Info("raid: storcli returned no drives")
	}
	return drives
}
|
|
|
|
func collectSASIrcuDrives(tool string) []schema.HardwareStorage {
|
|
out, err := raidToolQuery(tool, "list")
|
|
if err != nil {
|
|
slog.Info("raid: "+tool+" unavailable", "err", err)
|
|
return nil
|
|
}
|
|
|
|
var drives []schema.HardwareStorage
|
|
for _, ctlID := range parseSASIrcuControllerIDs(string(out)) {
|
|
raw, err := raidToolQuery(tool, strconv.Itoa(ctlID), "display")
|
|
if err != nil {
|
|
continue
|
|
}
|
|
drives = append(drives, parseSASIrcuDisplay(string(raw))...)
|
|
}
|
|
return drives
|
|
}
|
|
|
|
// parseSASIrcuControllerIDs extracts the unique controller indices from
// `sasXircu list` output, sorted ascending. Lines whose first field is not an
// integer (headers, separators) are ignored.
func parseSASIrcuControllerIDs(raw string) []int {
	unique := make(map[int]bool)
	for _, line := range strings.Split(raw, "\n") {
		fields := strings.Fields(strings.TrimSpace(line))
		if len(fields) == 0 {
			continue
		}
		if id, err := strconv.Atoi(fields[0]); err == nil {
			unique[id] = true
		}
	}
	var ids []int
	for id := range unique {
		ids = append(ids, id)
	}
	sort.Ints(ids)
	return ids
}
|
|
|
|
// parseSASIrcuDisplay parses the output of `sasXircu <id> display` into
// storage records. The output is a sequence of "Device is a <type>" headers,
// each followed by "Key : Value" attribute lines. Only hard disk / SSD / NVMe
// device blocks are converted, and a record is kept only when at least one of
// slot, serial number, or model was parsed.
func parseSASIrcuDisplay(raw string) []schema.HardwareStorage {
	// Pass 1: split the raw text into per-device attribute maps.
	var blocks []map[string]string
	var cur map[string]string
	var currentType string

	for _, line := range strings.Split(raw, "\n") {
		trimmed := strings.TrimSpace(line)
		if strings.HasPrefix(trimmed, "Device is a ") {
			// New device header: flush the previous block, tagging it with
			// its device type under a synthetic key.
			if cur != nil {
				cur["__device_type"] = currentType
				blocks = append(blocks, cur)
			}
			cur = map[string]string{}
			currentType = strings.TrimSpace(strings.TrimPrefix(trimmed, "Device is a "))
			continue
		}
		if cur == nil {
			// Ignore everything before the first device header.
			continue
		}
		if idx := strings.Index(trimmed, ":"); idx > 0 {
			key := strings.TrimSpace(trimmed[:idx])
			val := strings.TrimSpace(trimmed[idx+1:])
			cur[key] = val
		}
	}
	if cur != nil {
		cur["__device_type"] = currentType
		blocks = append(blocks, cur)
	}

	// Pass 2: convert attribute maps to schema records.
	var out []schema.HardwareStorage
	for _, b := range blocks {
		dt := strings.ToLower(b["__device_type"])
		if !strings.Contains(dt, "hard disk") && !strings.Contains(dt, "ssd") && !strings.Contains(dt, "nvme") {
			// Skip non-drive devices (enclosures, expanders, ...).
			continue
		}

		present := true
		status := mapRAIDDriveStatus(b["State"])
		s := schema.HardwareStorage{Present: &present, Status: &status}

		// Slot is rendered as "enclosure:slot"; trim stray separators when
		// only one of the two values is present.
		enclosure := strings.TrimSpace(b["Enclosure #"])
		slot := strings.TrimSpace(b["Slot #"])
		if enclosure != "" || slot != "" {
			v := enclosure + ":" + slot
			v = strings.Trim(v, ":")
			s.Slot = &v
		}

		if v := strings.TrimSpace(b["Model Number"]); v != "" {
			s.Model = &v
		}
		if v := strings.TrimSpace(b["Serial No"]); v != "" {
			s.SerialNumber = &v
		}
		if v := strings.ToUpper(strings.TrimSpace(b["Protocol"])); v != "" {
			s.Interface = &v
		}

		// Prefer the explicit "Drive Type" attribute; fall back to the
		// device-type header text when it is absent.
		media := strings.ToUpper(strings.TrimSpace(b["Drive Type"]))
		if media == "" {
			media = strings.ToUpper(dt)
		}
		intf := ""
		if s.Interface != nil {
			intf = *s.Interface
		}
		devType := inferDriveType(media, intf)
		s.Type = &devType

		// The size value looks like "<MB>/<sectors>". Report at least 1 GB
		// for sub-GB values so the size is not dropped entirely.
		if mb := parseSASIrcuMB(b["Size (in MB)/(in sectors)"]); mb > 0 {
			gb := mb / 1000
			if gb == 0 {
				gb = 1
			}
			s.SizeGB = &gb
		}

		if s.Slot != nil || s.SerialNumber != nil || s.Model != nil {
			out = append(out, s)
		}
	}
	return out
}
|
|
|
|
// parseSASIrcuMB extracts the megabyte count from a sasXircu size value of the
// form "<MB>/<sectors>". Returns 0 when the value is empty or non-numeric.
func parseSASIrcuMB(raw string) int {
	head, _, _ := strings.Cut(strings.TrimSpace(raw), "/")
	mb, err := strconv.Atoi(strings.TrimSpace(head))
	if err != nil {
		return 0
	}
	return mb
}
|
|
|
|
func collectArcconfDrives() []schema.HardwareStorage {
|
|
raw, err := raidToolQuery("arcconf", "getconfig", "1", "pd")
|
|
if err != nil {
|
|
slog.Info("raid: arcconf unavailable", "err", err)
|
|
return nil
|
|
}
|
|
return parseArcconfPhysicalDrives(string(raw))
|
|
}
|
|
|
|
// parseArcconfPhysicalDrives parses `arcconf getconfig <ctrl> pd` output into
// storage records. The output is a sequence of "Device #N" headers, each
// followed by "Key : Value" attribute lines. A record is kept only when at
// least one of slot, serial number, or model was parsed.
func parseArcconfPhysicalDrives(raw string) []schema.HardwareStorage {
	lines := strings.Split(raw, "\n")
	var blocks []map[string]string
	var cur map[string]string

	// Pass 1: split the raw text into per-device attribute maps.
	for _, line := range lines {
		trimmed := strings.TrimSpace(line)
		if strings.HasPrefix(strings.ToLower(trimmed), "device #") {
			// New device header: flush the previous attribute block.
			if cur != nil {
				blocks = append(blocks, cur)
			}
			cur = map[string]string{}
			continue
		}
		if cur == nil {
			// Ignore preamble before the first "Device #" header.
			continue
		}
		if idx := strings.Index(trimmed, ":"); idx > 0 {
			key := strings.TrimSpace(trimmed[:idx])
			val := strings.TrimSpace(trimmed[idx+1:])
			cur[key] = val
		}
	}
	if cur != nil {
		blocks = append(blocks, cur)
	}

	// Pass 2: convert attribute maps to schema records.
	var out []schema.HardwareStorage
	for _, b := range blocks {
		present := true
		status := mapRAIDDriveStatus(b["State"])
		s := schema.HardwareStorage{Present: &present, Status: &status}

		if v := strings.TrimSpace(b["Reported Location"]); v != "" {
			s.Slot = &v
		}
		if v := strings.TrimSpace(b["Model"]); v != "" {
			s.Model = &v
		}
		if v := strings.TrimSpace(b["Serial number"]); v != "" {
			s.SerialNumber = &v
		}
		if gb := parseHumanSizeToGB(b["Total Size"]); gb > 0 {
			s.SizeGB = &gb
		}

		intf := parseArcconfInterface(b["Transfer Speed"])
		if intf != "" {
			s.Interface = &intf
		}
		// arcconf reports SSD-ness as a Yes/No flag rather than a media type;
		// normalize affirmative values to "SSD" for inferDriveType.
		media := strings.ToUpper(strings.TrimSpace(b["SSD"]))
		if media == "YES" || media == "TRUE" {
			media = "SSD"
		}
		devType := inferDriveType(media, intf)
		s.Type = &devType

		if s.Slot != nil || s.SerialNumber != nil || s.Model != nil {
			out = append(out, s)
		}
	}
	return out
}
|
|
|
|
// parseArcconfInterface derives the drive interface (SAS / SATA / NVME) from
// an arcconf "Transfer Speed" value; returns "" when unrecognized.
func parseArcconfInterface(raw string) string {
	u := strings.ToUpper(raw)
	// SAS is checked first; "SATA" does not contain "SAS", so order is safe.
	for _, proto := range []string{"SAS", "SATA", "NVME"} {
		if strings.Contains(u, proto) {
			return proto
		}
	}
	return ""
}
|
|
|
|
// ssacliPhysicalDriveLine matches ssacli "physicaldrive <addr> (<meta>)" lines,
// capturing the drive address and the parenthesized metadata list.
var ssacliPhysicalDriveLine = regexp.MustCompile(`(?i)^physicaldrive\s+(\S+)\s+\(([^)]*)\)$`)
|
|
|
|
func collectSSACLIDrives() []schema.HardwareStorage {
|
|
raw, err := raidToolQuery("ssacli", "ctrl", "all", "show", "config", "detail")
|
|
if err != nil {
|
|
slog.Info("raid: ssacli unavailable", "err", err)
|
|
return nil
|
|
}
|
|
return parseSSACLIPhysicalDrives(string(raw))
|
|
}
|
|
|
|
func parseSSACLIPhysicalDrives(raw string) []schema.HardwareStorage {
|
|
lines := strings.Split(raw, "\n")
|
|
var out []schema.HardwareStorage
|
|
var cur *schema.HardwareStorage
|
|
|
|
flush := func() {
|
|
if cur == nil {
|
|
return
|
|
}
|
|
if cur.Slot != nil || cur.SerialNumber != nil || cur.Model != nil {
|
|
out = append(out, *cur)
|
|
}
|
|
cur = nil
|
|
}
|
|
|
|
for _, line := range lines {
|
|
trimmed := strings.TrimSpace(line)
|
|
if trimmed == "" {
|
|
continue
|
|
}
|
|
if m := ssacliPhysicalDriveLine.FindStringSubmatch(trimmed); len(m) == 3 {
|
|
flush()
|
|
present := true
|
|
status := "UNKNOWN"
|
|
s := schema.HardwareStorage{Present: &present, Status: &status}
|
|
slot := m[1]
|
|
s.Slot = &slot
|
|
|
|
meta := strings.Split(m[2], ",")
|
|
if len(meta) > 0 {
|
|
if gb := parseHumanSizeToGB(strings.TrimSpace(meta[0])); gb > 0 {
|
|
s.SizeGB = &gb
|
|
}
|
|
}
|
|
if len(meta) > 1 {
|
|
intf := parseSSACLIInterface(meta[1])
|
|
if intf != "" {
|
|
s.Interface = &intf
|
|
}
|
|
devType := inferDriveType(strings.ToUpper(meta[1]), intf)
|
|
s.Type = &devType
|
|
}
|
|
if len(meta) > 2 {
|
|
st := mapRAIDDriveStatus(meta[len(meta)-1])
|
|
s.Status = &st
|
|
}
|
|
cur = &s
|
|
continue
|
|
}
|
|
if cur == nil {
|
|
continue
|
|
}
|
|
if idx := strings.Index(trimmed, ":"); idx > 0 {
|
|
key := strings.ToLower(strings.TrimSpace(trimmed[:idx]))
|
|
val := strings.TrimSpace(trimmed[idx+1:])
|
|
switch key {
|
|
case "serial number":
|
|
if val != "" {
|
|
cur.SerialNumber = &val
|
|
}
|
|
case "model":
|
|
if val != "" {
|
|
cur.Model = &val
|
|
}
|
|
case "status":
|
|
st := mapRAIDDriveStatus(val)
|
|
cur.Status = &st
|
|
}
|
|
}
|
|
}
|
|
flush()
|
|
return out
|
|
}
|
|
|
|
// parseSSACLIInterface derives the drive interface (SAS / SATA / NVME) from an
// ssacli metadata field; returns "" when unrecognized.
func parseSSACLIInterface(raw string) string {
	u := strings.ToUpper(raw)
	if strings.Contains(u, "SAS") {
		return "SAS"
	}
	if strings.Contains(u, "SATA") {
		return "SATA"
	}
	if strings.Contains(u, "NVME") {
		return "NVME"
	}
	return ""
}
|
|
|
|
// parseStorcliDrivesJSON decodes storcli JSON output ("Controllers" ->
// "Response Data" -> "Drive Information") and converts each drive entry into
// a storage record. Returns nil on malformed JSON.
//
// NOTE(review): some storcli versions key `show all J` output per-drive
// ("Drive /cX/eY/sZ") rather than via a "Drive Information" array — confirm
// against the storcli builds in use.
func parseStorcliDrivesJSON(raw []byte) []schema.HardwareStorage {
	// Only the fields this collector consumes are modeled; every other key in
	// the storcli payload is ignored by the decoder.
	var doc struct {
		Controllers []struct {
			ResponseData struct {
				DriveInformation []struct {
					EIDSlt string `json:"EID:Slt"`
					State string `json:"State"`
					Size string `json:"Size"`
					Intf string `json:"Intf"`
					Med string `json:"Med"`
					Model string `json:"Model"`
					SN string `json:"SN"`
					Sp string `json:"Sp"`
					Type string `json:"Type"`
				} `json:"Drive Information"`
			} `json:"Response Data"`
		} `json:"Controllers"`
	}
	if err := json.Unmarshal(raw, &doc); err != nil {
		slog.Warn("raid: parse storcli json failed", "err", err)
		return nil
	}

	var drives []schema.HardwareStorage
	for _, ctl := range doc.Controllers {
		for _, d := range ctl.ResponseData.DriveInformation {
			// storcliDriveToStorage returns nil for entries with no
			// identifying data; those are dropped.
			if s := storcliDriveToStorage(d); s != nil {
				drives = append(drives, *s)
			}
		}
	}
	return drives
}
|
|
|
|
// storcliDriveToStorage maps a single storcli "Drive Information" entry to a
// storage record. Returns nil when the entry carries no identifying data
// (no model, serial number, or slot).
//
// The anonymous struct type must match the element type declared inside
// parseStorcliDrivesJSON, which is the only caller.
func storcliDriveToStorage(d struct {
	EIDSlt string `json:"EID:Slt"`
	State string `json:"State"`
	Size string `json:"Size"`
	Intf string `json:"Intf"`
	Med string `json:"Med"`
	Model string `json:"Model"`
	SN string `json:"SN"`
	Sp string `json:"Sp"`
	Type string `json:"Type"`
}) *schema.HardwareStorage {
	present := true
	status := mapRAIDDriveStatus(d.State)
	s := schema.HardwareStorage{
		Present: &present,
		Status: &status,
	}

	// "EID:Slt" is storcli's enclosure:slot address, e.g. "252:0".
	if v := strings.TrimSpace(d.EIDSlt); v != "" {
		s.Slot = &v
	}
	if v := strings.TrimSpace(d.Model); v != "" {
		s.Model = &v
	}
	if v := strings.TrimSpace(d.SN); v != "" {
		s.SerialNumber = &v
	}
	if v := strings.TrimSpace(strings.ToUpper(d.Intf)); v != "" {
		s.Interface = &v
	}

	// Media ("Med": HDD/SSD) plus interface decide the drive type.
	devType := inferDriveType(strings.TrimSpace(strings.ToUpper(d.Med)), strings.TrimSpace(strings.ToUpper(d.Intf)))
	if devType != "" {
		s.Type = &devType
	}

	if gb := parseHumanSizeToGB(d.Size); gb > 0 {
		s.SizeGB = &gb
	}

	// return only meaningful records
	if s.Model == nil && s.SerialNumber == nil && s.Slot == nil {
		return nil
	}
	return &s
}
|
|
|
|
// inferDriveType classifies a drive as SSD / NVMe / HDD / Unknown from its
// media descriptor (med) and interface (intf); both are expected uppercased.
// An explicit SSD media marker wins over the interface.
func inferDriveType(med, intf string) string {
	if strings.Contains(med, "SSD") {
		return "SSD"
	}
	if strings.Contains(intf, "NVME") {
		return "NVMe"
	}
	if strings.Contains(med, "HDD") {
		return "HDD"
	}
	// SAS/SATA without an SSD marker is assumed to be spinning media.
	if strings.Contains(intf, "SAS") || strings.Contains(intf, "SATA") {
		return "HDD"
	}
	return "Unknown"
}
|
|
|
|
// mapRAIDDriveStatus normalizes vendor-specific drive state strings to one of
// OK / WARNING / CRITICAL / UNKNOWN.
//
// It covers both long forms ("Online", "Rebuild", "Failed", "Offline") and
// the abbreviated storcli states ("Onln", "Rbld", "Offln", "UBad"); the
// previous version missed the abbreviations "Offln"/"UBad" and mapped those
// failed drives to UNKNOWN.
func mapRAIDDriveStatus(raw string) string {
	u := strings.ToUpper(strings.TrimSpace(raw))
	switch {
	case strings.Contains(u, "OK"), strings.Contains(u, "OPTIMAL"), strings.Contains(u, "READY"):
		return "OK"
	case strings.Contains(u, "ONLN"), strings.Contains(u, "ONLINE"):
		return "OK"
	case strings.Contains(u, "RBLD"), strings.Contains(u, "REBUILD"):
		return "WARNING"
	case strings.Contains(u, "FAIL"), strings.Contains(u, "OFFLINE"),
		strings.Contains(u, "OFFLN"), strings.Contains(u, "UBAD"):
		return "CRITICAL"
	default:
		return "UNKNOWN"
	}
}
|
|
|
|
// parseHumanSizeToGB converts a human-readable size like "278.875 GB",
// "2 TB", or "500 MB" to whole (decimal) gigabytes. Returns 0 when the value
// is empty, non-numeric, non-positive, or carries an unrecognized unit.
//
// Positive sub-GB values are clamped to 1 GB so they are not silently dropped
// by callers that treat 0 as "no size" (matching the sasXircu size path).
func parseHumanSizeToGB(raw string) int {
	parts := strings.Fields(strings.TrimSpace(raw))
	if len(parts) < 2 {
		return 0
	}
	value, err := strconv.ParseFloat(strings.TrimSpace(parts[0]), 64)
	if err != nil || value <= 0 {
		return 0
	}
	unit := strings.ToUpper(parts[1])
	switch {
	case strings.HasPrefix(unit, "TB"):
		return int(value * 1000)
	case strings.HasPrefix(unit, "GB"):
		gb := int(value)
		if gb == 0 {
			gb = 1 // e.g. "0.5 GB"
		}
		return gb
	case strings.HasPrefix(unit, "MB"):
		gb := int(value / 1000)
		if gb == 0 {
			gb = 1 // e.g. "500 MB": round up instead of reporting no size
		}
		return gb
	default:
		return 0
	}
}
|
|
|
|
func appendUniqueStorage(base, extra []schema.HardwareStorage) []schema.HardwareStorage {
|
|
if len(extra) == 0 {
|
|
return base
|
|
}
|
|
seen := map[string]bool{}
|
|
for _, d := range base {
|
|
seen[storageIdentityKey(d)] = true
|
|
}
|
|
for _, d := range extra {
|
|
key := storageIdentityKey(d)
|
|
if key == "" || seen[key] {
|
|
continue
|
|
}
|
|
base = append(base, d)
|
|
seen[key] = true
|
|
}
|
|
return base
|
|
}
|
|
|
|
func storageIdentityKey(d schema.HardwareStorage) string {
|
|
if d.SerialNumber != nil && strings.TrimSpace(*d.SerialNumber) != "" {
|
|
return "sn:" + strings.ToLower(strings.TrimSpace(*d.SerialNumber))
|
|
}
|
|
if d.Model != nil && d.Slot != nil {
|
|
return "modelslot:" + strings.ToLower(strings.TrimSpace(*d.Model)) + ":" + strings.ToLower(strings.TrimSpace(*d.Slot))
|
|
}
|
|
return ""
|
|
}
|
|
|
|
// mdArray is a single md (Linux software RAID) array parsed from /proc/mdstat.
type mdArray struct {
	// Name is the array device name, e.g. "md0".
	Name string
	// Degraded is true when a status line shows a missing member ("_" in the
	// [UU_] health bitmap).
	Degraded bool
	// Members are the component device names as listed, e.g. "nvme0n1p1".
	Members []string
}
|
|
|
|
func enrichStorageWithVROC(storage []schema.HardwareStorage, pcie []schema.HardwarePCIeDevice) []schema.HardwareStorage {
|
|
if !hasVROCController(pcie) {
|
|
return storage
|
|
}
|
|
|
|
raw, err := readMDStat()
|
|
if err != nil {
|
|
slog.Info("vroc: cannot read /proc/mdstat", "err", err)
|
|
return storage
|
|
}
|
|
arrays := parseMDStatArrays(string(raw))
|
|
if len(arrays) == 0 {
|
|
slog.Info("vroc: no md arrays found")
|
|
return storage
|
|
}
|
|
|
|
serialToArray := map[string]mdArray{}
|
|
for _, arr := range arrays {
|
|
for _, member := range arr.Members {
|
|
serial := queryDeviceSerial("/dev/" + member)
|
|
if serial == "" {
|
|
continue
|
|
}
|
|
serialToArray[strings.ToLower(serial)] = arr
|
|
}
|
|
}
|
|
if len(serialToArray) == 0 {
|
|
return storage
|
|
}
|
|
|
|
updated := 0
|
|
for i := range storage {
|
|
if storage[i].SerialNumber == nil || strings.TrimSpace(*storage[i].SerialNumber) == "" {
|
|
continue
|
|
}
|
|
arr, ok := serialToArray[strings.ToLower(strings.TrimSpace(*storage[i].SerialNumber))]
|
|
if !ok {
|
|
continue
|
|
}
|
|
if storage[i].Telemetry == nil {
|
|
storage[i].Telemetry = map[string]any{}
|
|
}
|
|
storage[i].Telemetry["vroc_array"] = arr.Name
|
|
storage[i].Telemetry["vroc_degraded"] = arr.Degraded
|
|
if arr.Degraded {
|
|
status := "WARNING"
|
|
storage[i].Status = &status
|
|
}
|
|
updated++
|
|
}
|
|
|
|
slog.Info("vroc: enriched storage members", "count", updated)
|
|
return storage
|
|
}
|
|
|
|
func hasVROCController(pcie []schema.HardwarePCIeDevice) bool {
|
|
for _, dev := range pcie {
|
|
if dev.VendorID == nil || *dev.VendorID != vendorIntel {
|
|
continue
|
|
}
|
|
|
|
class := ""
|
|
if dev.DeviceClass != nil {
|
|
class = strings.ToLower(*dev.DeviceClass)
|
|
}
|
|
model := ""
|
|
if dev.Model != nil {
|
|
model = strings.ToLower(*dev.Model)
|
|
}
|
|
|
|
if strings.Contains(class, "raid") ||
|
|
strings.Contains(model, "vroc") ||
|
|
strings.Contains(model, "volume management device") ||
|
|
strings.Contains(model, "vmd") {
|
|
return true
|
|
}
|
|
}
|
|
return false
|
|
}
|
|
|
|
// mdHealthPattern matches the member-health bitmap in /proc/mdstat status
// lines, e.g. "[UU]" (all members up) or "[U_]" (one member missing).
var mdHealthPattern = regexp.MustCompile(`\[[U_]+\]`)
|
|
|
|
// parseMDStatArrays parses /proc/mdstat text into mdArray records. An array
// header line looks like
//
//	md0 : active raid1 sda1[0] sdb1[1]
//
// and its following status lines may carry a member-health bitmap such as
// "[UU]" or "[U_]"; an underscore marks a missing member and flags the array
// as degraded.
func parseMDStatArrays(raw string) []mdArray {
	lines := strings.Split(raw, "\n")
	var arrays []mdArray
	var current *mdArray

	for _, line := range lines {
		trimmed := strings.TrimSpace(line)
		if trimmed == "" {
			continue
		}

		// Header detection: "name : details", excluding continuation lines
		// that start with "[" (the Personalities / unused-devices lines are
		// filtered just below).
		if strings.Contains(line, " : ") && !strings.HasPrefix(strings.TrimLeft(line, " \t"), "[") {
			left := strings.TrimSpace(strings.SplitN(line, " : ", 2)[0])
			if strings.EqualFold(left, "Personalities") || strings.EqualFold(left, "unused devices") {
				continue
			}
			// A new header closes the array currently being built.
			if current != nil {
				arrays = append(arrays, *current)
			}

			name := left
			fields := strings.Fields(strings.SplitN(line, " : ", 2)[1])

			arr := mdArray{Name: name}
			for _, f := range fields {
				// Member fields look like "sda1[0]" (possibly with a suffix
				// such as "(F)"); keep everything before the first '['.
				if i := strings.IndexByte(f, '['); i > 0 {
					member := strings.TrimSpace(f[:i])
					if member != "" {
						arr.Members = append(arr.Members, member)
					}
				}
			}
			current = &arr
			continue
		}

		if current == nil {
			// Status lines before any array header are ignored.
			continue
		}
		// A health bitmap containing '_' marks the current array degraded.
		if m := mdHealthPattern.FindString(trimmed); m != "" && strings.Contains(m, "_") {
			current.Degraded = true
		}
	}
	if current != nil {
		arrays = append(arrays, *current)
	}
	return arrays
}
|
|
|
|
// queryDeviceSerial resolves the serial number of a block device, trying
// `nvme id-ctrl` first and then falling back to `smartctl -i`. Returns ""
// when neither tool yields a usable serial.
func queryDeviceSerial(devPath string) string {
	if out, err := exec.Command("nvme", "id-ctrl", devPath, "-o", "json").Output(); err == nil {
		var ctrl nvmeIDCtrl
		if json.Unmarshal(out, &ctrl) == nil {
			if v := cleanDMIValue(strings.TrimSpace(ctrl.SerialNumber)); v != "" {
				return v
			}
		}
	}
	// Fallback for SATA/SAS devices (and NVMe when the nvme CLI is missing).
	if out, err := exec.Command("smartctl", "-j", "-i", devPath).Output(); err == nil {
		var info smartctlInfo
		if json.Unmarshal(out, &info) == nil {
			if v := cleanDMIValue(strings.TrimSpace(info.SerialNumber)); v != "" {
				return v
			}
		}
	}
	return ""
}
|