2807 lines
80 KiB
Go
2807 lines
80 KiB
Go
package collector
|
||
|
||
import (
|
||
"context"
|
||
"crypto/tls"
|
||
"encoding/json"
|
||
"fmt"
|
||
"io"
|
||
"log"
|
||
"net/http"
|
||
"net/url"
|
||
"os"
|
||
"path"
|
||
"sort"
|
||
"strconv"
|
||
"strings"
|
||
"sync"
|
||
"sync/atomic"
|
||
"time"
|
||
|
||
"git.mchus.pro/mchus/logpile/internal/models"
|
||
"git.mchus.pro/mchus/logpile/internal/parser/vendors/pciids"
|
||
)
|
||
|
||
// RedfishConnector collects hardware inventory from a BMC over the
// Redfish REST API. Debug flags are resolved once at construction time
// from environment variables (see NewRedfishConnector).
type RedfishConnector struct {
	// timeout is the default per-request HTTP timeout.
	timeout time.Duration
	// debug enables verbose redfish-debug logging.
	debug bool
	// debugSnapshot enables snapshot-crawl logging; implied by debug.
	debugSnapshot bool
}
|
||
|
||
func NewRedfishConnector() *RedfishConnector {
|
||
debug := false
|
||
if v := strings.TrimSpace(os.Getenv("LOGPILE_REDFISH_DEBUG")); v != "" && v != "0" && !strings.EqualFold(v, "false") {
|
||
debug = true
|
||
}
|
||
debugSnapshot := false
|
||
if v := strings.TrimSpace(os.Getenv("LOGPILE_REDFISH_SNAPSHOT_DEBUG")); v != "" && v != "0" && !strings.EqualFold(v, "false") {
|
||
debugSnapshot = true
|
||
}
|
||
return &RedfishConnector{
|
||
timeout: 10 * time.Second,
|
||
debug: debug,
|
||
debugSnapshot: debugSnapshot || debug,
|
||
}
|
||
}
|
||
|
||
// Protocol returns the connector's protocol identifier, "redfish".
func (c *RedfishConnector) Protocol() string {
	return "redfish"
}
|
||
|
||
func (c *RedfishConnector) debugf(format string, args ...interface{}) {
|
||
if !c.debug {
|
||
return
|
||
}
|
||
log.Printf("redfish-debug: "+format, args...)
|
||
}
|
||
|
||
func (c *RedfishConnector) debugSnapshotf(format string, args ...interface{}) {
|
||
if !c.debugSnapshot {
|
||
return
|
||
}
|
||
log.Printf("redfish-snapshot-debug: "+format, args...)
|
||
}
|
||
|
||
// Collect performs a live Redfish collection: it verifies the service
// root, discovers Systems/Chassis/Managers member paths, warms up the
// critical endpoints with a dedicated client, crawls the full Redfish
// tree into a raw snapshot, and finally replays that snapshot through
// the same analyzer used for raw imports.
func (c *RedfishConnector) Collect(ctx context.Context, req Request, emit ProgressFn) (*models.AnalysisResult, error) {
	baseURL, err := c.baseURL(req)
	if err != nil {
		return nil, err
	}

	client := c.httpClient(req)

	if emit != nil {
		emit(Progress{Status: "running", Progress: 10, Message: "Redfish: подключение к BMC..."})
	}
	// Probe the service root first so connectivity/auth problems fail fast.
	if _, err := c.getJSON(ctx, client, req, baseURL, "/redfish/v1"); err != nil {
		return nil, fmt.Errorf("redfish service root: %w", err)
	}

	systemPaths := c.discoverMemberPaths(ctx, client, req, baseURL, "/redfish/v1/Systems", "/redfish/v1/Systems/1")
	chassisPaths := c.discoverMemberPaths(ctx, client, req, baseURL, "/redfish/v1/Chassis", "/redfish/v1/Chassis/1")
	managerPaths := c.discoverMemberPaths(ctx, client, req, baseURL, "/redfish/v1/Managers", "/redfish/v1/Managers/1")
	// Warm up the analysis-critical endpoints sequentially with a dedicated
	// (different-timeout) client before the concurrent crawl starts.
	criticalPaths := redfishCriticalEndpoints(systemPaths, chassisPaths, managerPaths)
	criticalClient := c.httpClientWithTimeout(req, redfishCriticalRequestTimeout())
	criticalWarmDocs, criticalWarmErrs := c.collectCriticalRedfishDocsSequential(ctx, criticalClient, req, baseURL, criticalPaths)

	if emit != nil {
		emit(Progress{Status: "running", Progress: 30, Message: "Redfish: чтение структуры Redfish..."})
		emit(Progress{Status: "running", Progress: 55, Message: "Redfish: подготовка snapshot..."})
		emit(Progress{Status: "running", Progress: 80, Message: "Redfish: подготовка расширенного snapshot..."})
		emit(Progress{Status: "running", Progress: 90, Message: "Redfish: сбор расширенного snapshot..."})
	}
	c.debugSnapshotf("snapshot crawl start host=%s port=%d", req.Host, req.Port)
	rawTree, fetchErrors := c.collectRawRedfishTree(ctx, client, req, baseURL, redfishSnapshotPrioritySeeds(systemPaths, chassisPaths, managerPaths), emit)
	c.debugSnapshotf("snapshot crawl done docs=%d", len(rawTree))
	// Merge warm-up docs into the tree; the crawl result wins on conflict.
	for p, doc := range criticalWarmDocs {
		if _, ok := rawTree[p]; !ok {
			rawTree[p] = doc
		}
	}
	fetchErrMap := redfishFetchErrorListToMap(fetchErrors)
	// Record warm-up errors only for paths that were never fetched
	// successfully and have no crawl error recorded yet.
	for p, msg := range criticalWarmErrs {
		if _, ok := rawTree[p]; ok {
			continue
		}
		if _, exists := fetchErrMap[p]; !exists {
			fetchErrMap[p] = msg
		}
	}
	// Last-chance "plan B" retry for critical endpoints still missing.
	if recoveredN := c.recoverCriticalRedfishDocsPlanB(ctx, criticalClient, req, baseURL, criticalPaths, rawTree, fetchErrMap, emit); recoveredN > 0 {
		c.debugSnapshotf("critical plan-b recovered docs=%d", recoveredN)
	}
	// Hide transient fetch errors for endpoints that were eventually recovered into rawTree.
	for p := range fetchErrMap {
		if _, ok := rawTree[p]; ok {
			delete(fetchErrMap, p)
		}
	}
	if emit != nil {
		emit(Progress{Status: "running", Progress: 99, Message: "Redfish: анализ raw snapshot..."})
	}
	rawPayloads := map[string]any{
		"redfish_tree": rawTree,
	}
	if len(fetchErrMap) > 0 {
		rawPayloads["redfish_fetch_errors"] = redfishFetchErrorMapToList(fetchErrMap)
	}
	// Unified tunnel: live collection and raw import go through the same analyzer over redfish_tree.
	return ReplayRedfishFromRawPayloads(rawPayloads, nil)
}
|
||
|
||
// httpClient returns an HTTP client configured with the connector's
// default timeout and the request's TLS mode.
func (c *RedfishConnector) httpClient(req Request) *http.Client {
	return c.httpClientWithTimeout(req, c.timeout)
}
|
||
|
||
func (c *RedfishConnector) httpClientWithTimeout(req Request, timeout time.Duration) *http.Client {
|
||
transport := &http.Transport{}
|
||
if req.TLSMode == "insecure" {
|
||
transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint:gosec
|
||
}
|
||
return &http.Client{
|
||
Transport: transport,
|
||
Timeout: timeout,
|
||
}
|
||
}
|
||
|
||
func (c *RedfishConnector) baseURL(req Request) (string, error) {
|
||
host := strings.TrimSpace(req.Host)
|
||
if host == "" {
|
||
return "", fmt.Errorf("empty host")
|
||
}
|
||
|
||
if strings.HasPrefix(host, "http://") || strings.HasPrefix(host, "https://") {
|
||
u, err := url.Parse(host)
|
||
if err != nil {
|
||
return "", fmt.Errorf("invalid host URL: %w", err)
|
||
}
|
||
u.Path = ""
|
||
u.RawQuery = ""
|
||
u.Fragment = ""
|
||
return strings.TrimRight(u.String(), "/"), nil
|
||
}
|
||
|
||
scheme := "https"
|
||
if req.TLSMode == "insecure" && req.Port == 80 {
|
||
scheme = "http"
|
||
}
|
||
return fmt.Sprintf("%s://%s:%d", scheme, host, req.Port), nil
|
||
}
|
||
|
||
// collectStorage gathers physical drive inventory for one system,
// trying several vendor layouts in turn and deduplicating the result:
//  1. Storage members' Drives (embedded refs or a linked collection),
//  2. enclosures linked from Storage.Links.Enclosures (Supermicro/RAID),
//  3. well-known IntelVROC child collections,
//  4. SimpleStorage devices,
//  5. chassis-level Drives and Supermicro NVMe backplane bay probes.
func (c *RedfishConnector) collectStorage(ctx context.Context, client *http.Client, req Request, baseURL, systemPath string) []models.Storage {
	var out []models.Storage
	storageMembers, _ := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(systemPath, "/Storage"))
	for _, member := range storageMembers {
		// "Drives" can be embedded refs or a link to a collection.
		if driveCollection, ok := member["Drives"].(map[string]interface{}); ok {
			if driveCollectionPath := asString(driveCollection["@odata.id"]); driveCollectionPath != "" {
				driveDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, driveCollectionPath)
				if err == nil {
					for _, driveDoc := range driveDocs {
						out = append(out, parseDrive(driveDoc))
					}
					// Empty Members can still hide direct Disk.Bay children.
					if len(driveDocs) == 0 {
						for _, driveDoc := range c.probeDirectDiskBayChildren(ctx, client, req, baseURL, driveCollectionPath) {
							out = append(out, parseDrive(driveDoc))
						}
					}
				}
				continue
			}
		}
		// Embedded array of drive references.
		if drives, ok := member["Drives"].([]interface{}); ok {
			for _, driveAny := range drives {
				driveRef, ok := driveAny.(map[string]interface{})
				if !ok {
					continue
				}
				odata := asString(driveRef["@odata.id"])
				if odata == "" {
					continue
				}
				driveDoc, err := c.getJSON(ctx, client, req, baseURL, odata)
				if err != nil {
					continue
				}
				out = append(out, parseDrive(driveDoc))
			}
			continue
		}

		// Some implementations return drive fields right in storage member object.
		if looksLikeDrive(member) {
			out = append(out, parseDrive(member))
		}

		// Supermicro/RAID implementations can expose physical disks under chassis enclosures
		// linked from Storage.Links.Enclosures, while Storage.Drives stays empty.
		for _, enclosurePath := range redfishLinkRefs(member, "Links", "Enclosures") {
			driveDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(enclosurePath, "/Drives"))
			if err == nil {
				for _, driveDoc := range driveDocs {
					if looksLikeDrive(driveDoc) {
						out = append(out, parseDrive(driveDoc))
					}
				}
				if len(driveDocs) == 0 {
					for _, driveDoc := range c.probeDirectDiskBayChildren(ctx, client, req, baseURL, joinPath(enclosurePath, "/Drives")) {
						out = append(out, parseDrive(driveDoc))
					}
				}
			}
		}
	}

	// IntelVROC often exposes rich drive inventory via dedicated child collections.
	for _, driveDoc := range c.collectKnownStorageMembers(ctx, client, req, baseURL, systemPath, []string{
		"/Storage/IntelVROC/Drives",
		"/Storage/IntelVROC/Controllers/1/Drives",
	}) {
		if looksLikeDrive(driveDoc) {
			out = append(out, parseDrive(driveDoc))
		}
	}

	// Fallback for platforms that expose disks in SimpleStorage.
	simpleStorageMembers, _ := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(systemPath, "/SimpleStorage"))
	for _, member := range simpleStorageMembers {
		devices, ok := member["Devices"].([]interface{})
		if !ok {
			continue
		}
		for _, devAny := range devices {
			devDoc, ok := devAny.(map[string]interface{})
			if !ok || !looksLikeDrive(devDoc) {
				continue
			}
			out = append(out, parseDrive(devDoc))
		}
	}

	// Fallback for platforms exposing physical drives under Chassis.
	chassisPaths := c.discoverMemberPaths(ctx, client, req, baseURL, "/redfish/v1/Chassis", "/redfish/v1/Chassis/1")
	for _, chassisPath := range chassisPaths {
		driveDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(chassisPath, "/Drives"))
		if err != nil {
			continue
		}
		for _, driveDoc := range driveDocs {
			if !looksLikeDrive(driveDoc) {
				continue
			}
			out = append(out, parseDrive(driveDoc))
		}
	}
	// Supermicro NVMe backplanes serve bays directly even when Drives is empty.
	for _, chassisPath := range chassisPaths {
		if !isSupermicroNVMeBackplanePath(chassisPath) {
			continue
		}
		for _, driveDoc := range c.probeSupermicroNVMeDiskBays(ctx, client, req, baseURL, chassisPath) {
			if !looksLikeDrive(driveDoc) {
				continue
			}
			out = append(out, parseDrive(driveDoc))
		}
	}

	out = dedupeStorage(out)
	return out
}
|
||
|
||
// collectStorageVolumes gathers logical volumes for one system from each
// Storage member's linked Volumes collection, plus a few well-known
// vendor-specific volume collections, and dedupes the combined result.
func (c *RedfishConnector) collectStorageVolumes(ctx context.Context, client *http.Client, req Request, baseURL, systemPath string) []models.StorageVolume {
	var out []models.StorageVolume
	storageMembers, _ := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(systemPath, "/Storage"))
	for _, member := range storageMembers {
		// Prefer the controller Id; fall back to its display name.
		controller := firstNonEmpty(asString(member["Id"]), asString(member["Name"]))
		volumeCollectionPath := redfishLinkedPath(member, "Volumes")
		if volumeCollectionPath == "" {
			continue
		}
		volumeDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, volumeCollectionPath)
		if err != nil {
			continue
		}
		for _, volDoc := range volumeDocs {
			if !looksLikeVolume(volDoc) {
				continue
			}
			out = append(out, parseStorageVolume(volDoc, controller))
		}
	}
	// Vendor collections (Intel VROC, Marvell HW RAID) that may not be
	// linked from the generic Storage members; the controller name is
	// derived from the volume's own path.
	for _, volDoc := range c.collectKnownStorageMembers(ctx, client, req, baseURL, systemPath, []string{
		"/Storage/IntelVROC/Volumes",
		"/Storage/HA-RAID/Volumes",
		"/Storage/MRVL.HA-RAID/Volumes",
	}) {
		if !looksLikeVolume(volDoc) {
			continue
		}
		out = append(out, parseStorageVolume(volDoc, storageControllerFromPath(asString(volDoc["@odata.id"]))))
	}
	return dedupeStorageVolumes(out)
}
|
||
|
||
func (c *RedfishConnector) collectNICs(ctx context.Context, client *http.Client, req Request, baseURL string, chassisPaths []string) []models.NetworkAdapter {
|
||
var nics []models.NetworkAdapter
|
||
seen := make(map[string]struct{})
|
||
for _, chassisPath := range chassisPaths {
|
||
adapterDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(chassisPath, "/NetworkAdapters"))
|
||
if err != nil {
|
||
continue
|
||
}
|
||
for _, doc := range adapterDocs {
|
||
nic := parseNIC(doc)
|
||
for _, pciePath := range networkAdapterPCIeDevicePaths(doc) {
|
||
pcieDoc, err := c.getJSON(ctx, client, req, baseURL, pciePath)
|
||
if err != nil {
|
||
continue
|
||
}
|
||
functionDocs := c.getLinkedPCIeFunctions(ctx, client, req, baseURL, pcieDoc)
|
||
enrichNICFromPCIe(&nic, pcieDoc, functionDocs)
|
||
}
|
||
key := firstNonEmpty(nic.SerialNumber, nic.Slot+"|"+nic.Model)
|
||
if key == "" {
|
||
continue
|
||
}
|
||
if _, ok := seen[key]; ok {
|
||
continue
|
||
}
|
||
seen[key] = struct{}{}
|
||
nics = append(nics, nic)
|
||
}
|
||
}
|
||
return nics
|
||
}
|
||
|
||
// collectPSUs gathers power supplies from every chassis, preferring the
// modern PowerSubsystem/PowerSupplies collection and falling back to the
// legacy embedded PowerSupplies array in Chassis/<id>/Power.
// idx is a running 1-based position threaded through appendPSU and
// passed to parsePSU.
func (c *RedfishConnector) collectPSUs(ctx context.Context, client *http.Client, req Request, baseURL string, chassisPaths []string) []models.PSU {
	var out []models.PSU
	seen := make(map[string]struct{})
	idx := 1
	for _, chassisPath := range chassisPaths {
		// Redfish 2022+/X14+ commonly uses PowerSubsystem as the primary source.
		if memberDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(chassisPath, "/PowerSubsystem/PowerSupplies")); err == nil && len(memberDocs) > 0 {
			for _, doc := range memberDocs {
				idx = appendPSU(&out, seen, parsePSU(doc, idx), idx)
			}
			// PowerSubsystem answered; skip the legacy source for this chassis.
			continue
		}

		// Legacy source: embedded array in Chassis/<id>/Power.
		if powerDoc, err := c.getJSON(ctx, client, req, baseURL, joinPath(chassisPath, "/Power")); err == nil {
			if members, ok := powerDoc["PowerSupplies"].([]interface{}); ok && len(members) > 0 {
				for _, item := range members {
					doc, ok := item.(map[string]interface{})
					if !ok {
						continue
					}
					idx = appendPSU(&out, seen, parsePSU(doc, idx), idx)
				}
			}
		}
	}
	return out
}
|
||
|
||
func appendPSU(out *[]models.PSU, seen map[string]struct{}, psu models.PSU, currentIdx int) int {
|
||
nextIdx := currentIdx + 1
|
||
key := firstNonEmpty(psu.SerialNumber, psu.Slot+"|"+psu.Model)
|
||
if key == "" {
|
||
return nextIdx
|
||
}
|
||
if _, ok := seen[key]; ok {
|
||
return nextIdx
|
||
}
|
||
seen[key] = struct{}{}
|
||
*out = append(*out, psu)
|
||
return len(*out) + 1
|
||
}
|
||
|
||
func (c *RedfishConnector) collectKnownStorageMembers(ctx context.Context, client *http.Client, req Request, baseURL, systemPath string, relativeCollections []string) []map[string]interface{} {
|
||
var out []map[string]interface{}
|
||
for _, rel := range relativeCollections {
|
||
docs, err := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(systemPath, rel))
|
||
if err != nil || len(docs) == 0 {
|
||
continue
|
||
}
|
||
out = append(out, docs...)
|
||
}
|
||
return out
|
||
}
|
||
|
||
func redfishLinkedPath(doc map[string]interface{}, key string) string {
|
||
if v, ok := doc[key].(map[string]interface{}); ok {
|
||
return asString(v["@odata.id"])
|
||
}
|
||
return ""
|
||
}
|
||
|
||
// collectGPUs gathers GPU/accelerator inventory from system- and
// chassis-level PCIe, accelerator and graphics collections. Candidates
// are filtered through looksLikeGPU using their linked PCIe function
// docs, deduplicated, and model-only placeholders are dropped at the end.
func (c *RedfishConnector) collectGPUs(ctx context.Context, client *http.Client, req Request, baseURL string, systemPaths, chassisPaths []string) []models.GPU {
	collections := make([]string, 0, len(systemPaths)*3+len(chassisPaths)*2)
	for _, systemPath := range systemPaths {
		collections = append(collections, joinPath(systemPath, "/PCIeDevices"))
		collections = append(collections, joinPath(systemPath, "/Accelerators"))
		collections = append(collections, joinPath(systemPath, "/GraphicsControllers"))
	}
	for _, chassisPath := range chassisPaths {
		collections = append(collections, joinPath(chassisPath, "/PCIeDevices"))
		collections = append(collections, joinPath(chassisPath, "/Accelerators"))
	}

	var out []models.GPU
	seen := make(map[string]struct{})
	// idx numbers GPU candidates in discovery order; note it advances even
	// for entries later dropped by the dedupe checks below.
	idx := 1
	for _, collectionPath := range collections {
		memberDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, collectionPath)
		if err != nil || len(memberDocs) == 0 {
			continue
		}

		for _, doc := range memberDocs {
			functionDocs := c.getLinkedPCIeFunctions(ctx, client, req, baseURL, doc)
			if !looksLikeGPU(doc, functionDocs) {
				continue
			}

			gpu := parseGPU(doc, functionDocs, idx)
			idx++
			// Drop a generic duplicate of an already-collected richer entry.
			if shouldSkipGenericGPUDuplicate(out, gpu) {
				continue
			}

			key := gpuDedupKey(gpu)
			if key == "" {
				continue
			}
			if _, ok := seen[key]; ok {
				continue
			}
			seen[key] = struct{}{}
			out = append(out, gpu)
		}
	}

	return dropModelOnlyGPUPlaceholders(out)
}
|
||
|
||
// collectPCIeDevices gathers PCIe device inventory from system- and
// chassis-level PCIeDevices collections, then falls back to the bare
// PCIeFunctions collection for BMCs that expose only functions. Entries
// are deduplicated by pcieDeviceDedupKey across all sources.
func (c *RedfishConnector) collectPCIeDevices(ctx context.Context, client *http.Client, req Request, baseURL string, systemPaths, chassisPaths []string) []models.PCIeDevice {
	collections := make([]string, 0, len(systemPaths)+len(chassisPaths))
	for _, systemPath := range systemPaths {
		collections = append(collections, joinPath(systemPath, "/PCIeDevices"))
	}
	for _, chassisPath := range chassisPaths {
		collections = append(collections, joinPath(chassisPath, "/PCIeDevices"))
	}

	var out []models.PCIeDevice
	seen := make(map[string]struct{})
	for _, collectionPath := range collections {
		memberDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, collectionPath)
		if err != nil || len(memberDocs) == 0 {
			continue
		}

		for _, doc := range memberDocs {
			functionDocs := c.getLinkedPCIeFunctions(ctx, client, req, baseURL, doc)
			dev := parsePCIeDevice(doc, functionDocs)
			key := pcieDeviceDedupKey(dev)
			if key == "" {
				continue
			}
			if _, ok := seen[key]; ok {
				continue
			}
			seen[key] = struct{}{}
			out = append(out, dev)
		}
	}

	// Fallback: some BMCs expose only PCIeFunctions collection without PCIeDevices.
	for _, systemPath := range systemPaths {
		functionDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, joinPath(systemPath, "/PCIeFunctions"))
		if err != nil || len(functionDocs) == 0 {
			continue
		}
		for idx, fn := range functionDocs {
			// idx+1 gives a stable 1-based position for the synthetic device.
			dev := parsePCIeFunction(fn, idx+1)
			key := pcieDeviceDedupKey(dev)
			if key == "" {
				continue
			}
			if _, ok := seen[key]; ok {
				continue
			}
			seen[key] = struct{}{}
			out = append(out, dev)
		}
	}

	return out
}
|
||
|
||
func (c *RedfishConnector) discoverMemberPaths(ctx context.Context, client *http.Client, req Request, baseURL, collectionPath, fallbackPath string) []string {
|
||
collection, err := c.getJSON(ctx, client, req, baseURL, collectionPath)
|
||
if err == nil {
|
||
if refs, ok := collection["Members"].([]interface{}); ok && len(refs) > 0 {
|
||
paths := make([]string, 0, len(refs))
|
||
for _, refAny := range refs {
|
||
ref, ok := refAny.(map[string]interface{})
|
||
if !ok {
|
||
continue
|
||
}
|
||
memberPath := asString(ref["@odata.id"])
|
||
if memberPath != "" {
|
||
paths = append(paths, memberPath)
|
||
}
|
||
}
|
||
if len(paths) > 0 {
|
||
return paths
|
||
}
|
||
}
|
||
}
|
||
|
||
if fallbackPath != "" {
|
||
return []string{fallbackPath}
|
||
}
|
||
return nil
|
||
}
|
||
|
||
// collectRawRedfishTree crawls the Redfish tree with a small worker
// pool, starting from the service root plus seedPaths, and returns every
// fetched document keyed by normalized path together with a
// path-sorted list of fetch errors ({"path","error"} maps). The crawl is
// capped at redfishSnapshotMaxDocuments() distinct paths; progress is
// reported through emit when it is non-nil.
func (c *RedfishConnector) collectRawRedfishTree(ctx context.Context, client *http.Client, req Request, baseURL string, seedPaths []string, emit ProgressFn) (map[string]interface{}, []map[string]interface{}) {
	maxDocuments := redfishSnapshotMaxDocuments()
	const workers = 6
	const heartbeatInterval = 5 * time.Second
	crawlStart := time.Now()

	out := make(map[string]interface{}, maxDocuments)
	fetchErrors := make(map[string]string)
	seen := make(map[string]struct{}, maxDocuments)
	// rootCounts: documents per top-level root, used in progress messages.
	rootCounts := make(map[string]int)
	// mu guards out, fetchErrors, seen and rootCounts.
	var mu sync.Mutex
	var processed int32
	// lastPath holds the most recently dequeued path (progress messages).
	var lastPath atomic.Value

	// Workers enqueue newly discovered links into the same queue they consume.
	// The queue capacity must be at least the crawl cap to avoid producer/consumer
	// deadlock when several workers discover many links at once.
	jobs := make(chan string, maxDocuments)
	var wg sync.WaitGroup

	// enqueue registers a path exactly once; wg.Add here is paired with
	// the worker's wg.Done per dequeued job. The seen-size check enforces
	// the crawl cap, which also bounds total sends into the channel.
	enqueue := func(path string) {
		path = normalizeRedfishPath(path)
		if !shouldCrawlPath(path) {
			return
		}
		mu.Lock()
		if len(seen) >= maxDocuments {
			mu.Unlock()
			return
		}
		if _, ok := seen[path]; ok {
			mu.Unlock()
			return
		}
		seen[path] = struct{}{}
		wg.Add(1)
		mu.Unlock()
		jobs <- path
	}

	enqueue("/redfish/v1")
	for _, seed := range seedPaths {
		enqueue(seed)
	}
	c.debugSnapshotf("snapshot queue initialized workers=%d max_documents=%d", workers, maxDocuments)
	stopHeartbeat := make(chan struct{})
	if emit != nil {
		// Heartbeat goroutine: periodic progress with counters, ETA and
		// the busiest top-level roots; exits on stop or ctx cancellation.
		go func() {
			ticker := time.NewTicker(heartbeatInterval)
			defer ticker.Stop()
			for {
				select {
				case <-ticker.C:
					n := atomic.LoadInt32(&processed)
					mu.Lock()
					countsCopy := make(map[string]int, len(rootCounts))
					for k, v := range rootCounts {
						countsCopy[k] = v
					}
					seenN := len(seen)
					outN := len(out)
					mu.Unlock()
					roots := topRoots(countsCopy, 2)
					last := "/redfish/v1"
					if v := lastPath.Load(); v != nil {
						if s, ok := v.(string); ok && s != "" {
							last = s
						}
					}
					eta := formatETA(estimateSnapshotETA(crawlStart, int(n), seenN, len(jobs), workers, client.Timeout))
					emit(Progress{
						Status: "running",
						Progress: 92 + int(minInt32(n/200, 6)),
						Message: fmt.Sprintf("Redfish snapshot: heartbeat документов=%d (ok=%d, seen=%d), ETA≈%s, корни=%s, последний=%s", n, outN, seenN, eta, strings.Join(roots, ", "), compactProgressPath(last)),
					})
				case <-stopHeartbeat:
					return
				case <-ctx.Done():
					return
				}
			}
		}()
	}
	for i := 0; i < workers; i++ {
		go func(workerID int) {
			for current := range jobs {
				lastPath.Store(current)
				c.debugSnapshotf("worker=%d fetch start path=%s queue_len=%d", workerID, current, len(jobs))
				doc, err := c.getJSON(ctx, client, req, baseURL, current)
				if err == nil {
					mu.Lock()
					out[current] = doc
					rootCounts[redfishTopRoot(current)]++
					mu.Unlock()

					// Follow every @odata.id reference in the document.
					for _, ref := range extractODataIDs(doc) {
						enqueue(ref)
					}
				}
				n := atomic.AddInt32(&processed, 1)
				if err != nil {
					// Record only the first error seen for a path.
					mu.Lock()
					if _, ok := fetchErrors[current]; !ok {
						fetchErrors[current] = err.Error()
					}
					mu.Unlock()
					c.debugSnapshotf("worker=%d fetch error path=%s err=%v", workerID, current, err)
					if emit != nil && shouldReportSnapshotFetchError(err) {
						emit(Progress{
							Status: "running",
							Progress: 92 + int(minInt32(n/200, 6)),
							Message: fmt.Sprintf("Redfish snapshot: ошибка на %s", compactProgressPath(current)),
						})
					}
				}
				// Coarse progress update every 40 processed documents.
				if emit != nil && n%40 == 0 {
					mu.Lock()
					countsCopy := make(map[string]int, len(rootCounts))
					for k, v := range rootCounts {
						countsCopy[k] = v
					}
					seenN := len(seen)
					mu.Unlock()
					roots := topRoots(countsCopy, 2)
					last := current
					if v := lastPath.Load(); v != nil {
						if s, ok := v.(string); ok && s != "" {
							last = s
						}
					}
					eta := formatETA(estimateSnapshotETA(crawlStart, int(n), seenN, len(jobs), workers, client.Timeout))
					emit(Progress{
						Status: "running",
						Progress: 92 + int(minInt32(n/200, 6)),
						Message: fmt.Sprintf("Redfish snapshot: документов=%d, ETA≈%s, корни=%s, последний=%s", n, eta, strings.Join(roots, ", "), compactProgressPath(last)),
					})
				}
				if n%20 == 0 || err != nil {
					mu.Lock()
					seenN := len(seen)
					outN := len(out)
					mu.Unlock()
					c.debugSnapshotf("snapshot progress processed=%d stored=%d seen=%d queue_len=%d", n, outN, seenN, len(jobs))
				}

				wg.Done()
			}
		}(i + 1)
	}

	// All enqueued jobs have matching wg.Done calls; after Wait the
	// workers are idle, so out/fetchErrors can be used without mu below.
	wg.Wait()
	close(stopHeartbeat)
	close(jobs)

	// Some Supermicro BMCs expose NVMe disks at direct Disk.Bay endpoints even when the
	// Drives collection returns Members: []. Probe those paths so raw export can be replayed.
	for path := range out {
		if !strings.HasSuffix(normalizeRedfishPath(path), "/Drives") {
			continue
		}
		for _, bayPath := range directDiskBayCandidates(path) {
			doc, err := c.getJSON(ctx, client, req, baseURL, bayPath)
			if err != nil {
				continue
			}
			if !looksLikeDrive(doc) {
				continue
			}
			// NOTE(review): inserting into out while ranging over it is
			// legal in Go; bay paths never end in "/Drives", so a re-visit
			// of a new key is harmless here.
			out[normalizeRedfishPath(bayPath)] = doc
			c.debugSnapshotf("snapshot nvme bay probe hit path=%s", bayPath)
		}
	}
	// Some BMCs under-report collection Members for sensors/PSU subresources but still serve
	// direct numeric child endpoints. Probe common collections to maximize raw snapshot fidelity.
	for path := range out {
		for childPath, doc := range c.probeDirectRedfishCollectionChildren(ctx, client, req, baseURL, path) {
			if _, exists := out[childPath]; exists {
				continue
			}
			out[childPath] = doc
		}
	}

	if emit != nil {
		emit(Progress{
			Status: "running",
			Progress: 98,
			Message: fmt.Sprintf("Redfish snapshot: собрано %d документов", len(out)),
		})
	}

	// Emit errors deterministically (sorted by path) for stable raw exports.
	errorList := make([]map[string]interface{}, 0, len(fetchErrors))
	for p, msg := range fetchErrors {
		errorList = append(errorList, map[string]interface{}{
			"path": p,
			"error": msg,
		})
	}
	sort.Slice(errorList, func(i, j int) bool {
		return asString(errorList[i]["path"]) < asString(errorList[j]["path"])
	})

	return out, errorList
}
|
||
|
||
// probeSupermicroNVMeDiskBays probes direct Disk.Bay endpoints under a
// Supermicro NVMe backplane's Drives path; on these chassis the Drives
// collection itself often reports empty Members.
func (c *RedfishConnector) probeSupermicroNVMeDiskBays(ctx context.Context, client *http.Client, req Request, baseURL, backplanePath string) []map[string]interface{} {
	return c.probeDirectDiskBayChildren(ctx, client, req, baseURL, joinPath(backplanePath, "/Drives"))
}
|
||
|
||
func isSupermicroNVMeBackplanePath(path string) bool {
|
||
path = normalizeRedfishPath(path)
|
||
return strings.Contains(path, "/Chassis/NVMeSSD.") && strings.Contains(path, ".StorageBackplane")
|
||
}
|
||
|
||
// supermicroNVMeDiskBayCandidates returns the candidate direct Disk.Bay
// paths under a backplane's Drives collection.
func supermicroNVMeDiskBayCandidates(backplanePath string) []string {
	return directDiskBayCandidates(joinPath(backplanePath, "/Drives"))
}
|
||
|
||
func (c *RedfishConnector) probeDirectDiskBayChildren(ctx context.Context, client *http.Client, req Request, baseURL, drivesCollectionPath string) []map[string]interface{} {
|
||
var out []map[string]interface{}
|
||
for _, path := range directDiskBayCandidates(drivesCollectionPath) {
|
||
doc, err := c.getJSON(ctx, client, req, baseURL, path)
|
||
if err != nil || !looksLikeDrive(doc) {
|
||
continue
|
||
}
|
||
out = append(out, doc)
|
||
}
|
||
return out
|
||
}
|
||
|
||
func directDiskBayCandidates(drivesCollectionPath string) []string {
|
||
const maxBays = 128
|
||
prefix := normalizeRedfishPath(drivesCollectionPath)
|
||
out := make([]string, 0, maxBays*3)
|
||
for i := 0; i < maxBays; i++ {
|
||
out = append(out, fmt.Sprintf("%s/Disk.Bay.%d", prefix, i))
|
||
out = append(out, fmt.Sprintf("%s/Disk.Bay%d", prefix, i))
|
||
out = append(out, fmt.Sprintf("%s/%d", prefix, i))
|
||
}
|
||
return out
|
||
}
|
||
|
||
// probeDirectRedfishCollectionChildren probes direct numeric children
// (<collection>/<index>) of a recognized collection path, for BMCs that
// under-report collection Members. The index range and miss budget come
// from directNumericProbePlan; unrecognized paths return nil. Found
// documents are returned keyed by normalized path.
func (c *RedfishConnector) probeDirectRedfishCollectionChildren(ctx context.Context, client *http.Client, req Request, baseURL, collectionPath string) map[string]map[string]interface{} {
	normalized := normalizeRedfishPath(collectionPath)
	maxItems, startIndex, missBudget := directNumericProbePlan(normalized)
	if maxItems <= 0 {
		// Not a known numeric collection: nothing to probe.
		return nil
	}
	out := make(map[string]map[string]interface{})
	consecutiveMisses := 0
	// Index range is inclusive: startIndex..maxItems.
	for i := startIndex; i <= maxItems; i++ {
		path := fmt.Sprintf("%s/%d", normalized, i)
		doc, err := c.getJSON(ctx, client, req, baseURL, path)
		if err != nil {
			// Give up after missBudget consecutive fetch failures.
			consecutiveMisses++
			if consecutiveMisses >= missBudget {
				break
			}
			continue
		}
		// Any successful fetch resets the miss streak, even when the
		// payload is filtered out just below.
		consecutiveMisses = 0
		if !looksLikeRedfishResource(doc) {
			continue
		}
		out[normalizeRedfishPath(path)] = doc
	}
	return out
}
|
||
|
||
// probeDirectRedfishCollectionChildrenSlow is the paced variant of
// probeDirectRedfishCollectionChildren used for critical recovery: it
// waits redfishCriticalSlowGap() between requests (honouring ctx
// cancellation) and retries each fetch with backoff.
func (c *RedfishConnector) probeDirectRedfishCollectionChildrenSlow(ctx context.Context, client *http.Client, req Request, baseURL, collectionPath string) map[string]map[string]interface{} {
	normalized := normalizeRedfishPath(collectionPath)
	maxItems, startIndex, missBudget := directNumericProbePlan(normalized)
	if maxItems <= 0 {
		// Not a known numeric collection: nothing to probe.
		return nil
	}
	out := make(map[string]map[string]interface{})
	consecutiveMisses := 0
	for i := startIndex; i <= maxItems; i++ {
		// Pace every request after the first to avoid overloading slow BMCs.
		if len(out) > 0 || i > startIndex {
			select {
			case <-time.After(redfishCriticalSlowGap()):
			case <-ctx.Done():
				// Return whatever was collected so far on cancellation.
				return out
			}
		}
		path := fmt.Sprintf("%s/%d", normalized, i)
		doc, err := c.getJSONWithRetry(ctx, client, req, baseURL, path, redfishCriticalPlanBAttempts(), redfishCriticalRetryBackoff())
		if err != nil {
			// Give up after missBudget consecutive fetch failures.
			consecutiveMisses++
			if consecutiveMisses >= missBudget {
				break
			}
			continue
		}
		consecutiveMisses = 0
		if !looksLikeRedfishResource(doc) {
			continue
		}
		out[normalizeRedfishPath(path)] = doc
	}
	return out
}
|
||
|
||
// directNumericProbePlan returns the probing parameters for a Redfish
// collection path: the highest child index to try (inclusive), the index
// to start from, and how many consecutive failed fetches abort the
// probe. A maxItems of 0 means the path is not a recognized numeric
// collection and must not be probed.
func directNumericProbePlan(collectionPath string) (maxItems, startIndex, missBudget int) {
	// Suffixes are mutually exclusive (each starts with "/"), so the
	// first match is the only match.
	plans := []struct {
		suffix           string
		max, start, miss int
	}{
		{"/Systems", 32, 1, 8},
		{"/Chassis", 64, 1, 12},
		{"/Managers", 16, 1, 6},
		{"/Processors", 32, 1, 12},
		{"/Memory", 512, 1, 48},
		{"/Storage", 128, 1, 24},
		{"/Drives", 256, 0, 24}, // drives are often numbered from 0
		{"/Volumes", 128, 1, 16},
		{"/PCIeDevices", 256, 1, 24},
		{"/PCIeFunctions", 512, 1, 32},
		{"/NetworkAdapters", 128, 1, 20},
		{"/NetworkPorts", 256, 1, 24},
		{"/Ports", 256, 1, 24},
		{"/EthernetInterfaces", 256, 1, 24},
		{"/Certificates", 256, 1, 24},
		{"/Accounts", 128, 1, 16},
		{"/LogServices", 32, 1, 8},
		{"/Sensors", 512, 1, 48},
		{"/Temperatures", 256, 1, 32},
		{"/Fans", 256, 1, 32},
		{"/Voltages", 256, 1, 32},
		{"/PowerSupplies", 64, 1, 16},
	}
	for _, plan := range plans {
		if strings.HasSuffix(collectionPath, plan.suffix) {
			return plan.max, plan.start, plan.miss
		}
	}
	return 0, 0, 0
}
|
||
|
||
func looksLikeRedfishResource(doc map[string]interface{}) bool {
|
||
if len(doc) == 0 {
|
||
return false
|
||
}
|
||
if asString(doc["@odata.id"]) != "" {
|
||
return true
|
||
}
|
||
if asString(doc["Id"]) != "" || asString(doc["Name"]) != "" {
|
||
return true
|
||
}
|
||
if _, ok := doc["Status"]; ok {
|
||
return true
|
||
}
|
||
if _, ok := doc["Reading"]; ok {
|
||
return true
|
||
}
|
||
if _, ok := doc["ReadingCelsius"]; ok {
|
||
return true
|
||
}
|
||
return false
|
||
}
|
||
|
||
func shouldSlowProbeCriticalCollection(p string) bool {
|
||
p = normalizeRedfishPath(p)
|
||
for _, suffix := range []string{
|
||
"/Processors",
|
||
"/Memory",
|
||
"/Storage",
|
||
"/Drives",
|
||
"/Volumes",
|
||
"/PCIeDevices",
|
||
"/PCIeFunctions",
|
||
"/NetworkAdapters",
|
||
"/EthernetInterfaces",
|
||
"/NetworkInterfaces",
|
||
"/Sensors",
|
||
"/Fans",
|
||
"/Temperatures",
|
||
"/Voltages",
|
||
} {
|
||
if strings.HasSuffix(p, suffix) {
|
||
return true
|
||
}
|
||
}
|
||
return false
|
||
}
|
||
|
||
func redfishCriticalEndpoints(systemPaths, chassisPaths, managerPaths []string) []string {
|
||
var out []string
|
||
seen := make(map[string]struct{})
|
||
add := func(p string) {
|
||
p = normalizeRedfishPath(p)
|
||
if p == "" {
|
||
return
|
||
}
|
||
if _, ok := seen[p]; ok {
|
||
return
|
||
}
|
||
seen[p] = struct{}{}
|
||
out = append(out, p)
|
||
}
|
||
for _, p := range systemPaths {
|
||
add(p)
|
||
add(joinPath(p, "/Bios"))
|
||
add(joinPath(p, "/SecureBoot"))
|
||
add(joinPath(p, "/Oem/Public"))
|
||
add(joinPath(p, "/Oem/Public/FRU"))
|
||
add(joinPath(p, "/Oem/Public/ThermalConfig"))
|
||
add(joinPath(p, "/ThermalConfig"))
|
||
add(joinPath(p, "/Processors"))
|
||
add(joinPath(p, "/Storage"))
|
||
add(joinPath(p, "/SimpleStorage"))
|
||
add(joinPath(p, "/PCIeDevices"))
|
||
add(joinPath(p, "/Accelerators"))
|
||
add(joinPath(p, "/GraphicsControllers"))
|
||
add(joinPath(p, "/EthernetInterfaces"))
|
||
add(joinPath(p, "/NetworkInterfaces"))
|
||
}
|
||
for _, p := range chassisPaths {
|
||
add(p)
|
||
add(joinPath(p, "/Oem/Public"))
|
||
add(joinPath(p, "/Oem/Public/FRU"))
|
||
add(joinPath(p, "/Oem/Public/ThermalConfig"))
|
||
add(joinPath(p, "/ThermalConfig"))
|
||
add(joinPath(p, "/Power"))
|
||
add(joinPath(p, "/Thermal"))
|
||
add(joinPath(p, "/Sensors"))
|
||
add(joinPath(p, "/HealthSummary"))
|
||
add(joinPath(p, "/ThresholdSensors"))
|
||
add(joinPath(p, "/DiscreteSensors"))
|
||
add(joinPath(p, "/Boards"))
|
||
add(joinPath(p, "/Backplanes"))
|
||
add(joinPath(p, "/Assembly"))
|
||
add(joinPath(p, "/NetworkAdapters"))
|
||
add(joinPath(p, "/PCIeDevices"))
|
||
add(joinPath(p, "/Accelerators"))
|
||
add(joinPath(p, "/Drives"))
|
||
}
|
||
for _, p := range managerPaths {
|
||
add(p)
|
||
add(joinPath(p, "/NetworkProtocol"))
|
||
}
|
||
add("/redfish/v1/UpdateService")
|
||
add("/redfish/v1/UpdateService/FirmwareInventory")
|
||
return out
|
||
}
|
||
|
||
func redfishFetchErrorListToMap(list []map[string]interface{}) map[string]string {
|
||
out := make(map[string]string, len(list))
|
||
for _, item := range list {
|
||
p := normalizeRedfishPath(asString(item["path"]))
|
||
if p == "" {
|
||
continue
|
||
}
|
||
out[p] = asString(item["error"])
|
||
}
|
||
return out
|
||
}
|
||
|
||
// redfishFetchErrorMapToList converts a path→message error map into a
// deterministic list form ([{path, error}, ...]) sorted by path, suitable
// for JSON persistence. An empty map yields nil.
func redfishFetchErrorMapToList(m map[string]string) []map[string]interface{} {
	if len(m) == 0 {
		return nil
	}
	paths := make([]string, 0, len(m))
	for p := range m {
		paths = append(paths, p)
	}
	sort.Strings(paths)
	out := make([]map[string]interface{}, 0, len(paths))
	for _, p := range paths {
		out = append(out, map[string]interface{}{"path": p, "error": m[p]})
	}
	return out
}
|
||
|
||
func isRetryableRedfishFetchError(err error) bool {
|
||
if err == nil {
|
||
return false
|
||
}
|
||
msg := strings.ToLower(err.Error())
|
||
if strings.Contains(msg, "timeout") || strings.Contains(msg, "deadline exceeded") || strings.Contains(msg, "connection reset") || strings.Contains(msg, "unexpected eof") {
|
||
return true
|
||
}
|
||
if strings.HasPrefix(msg, "status 500 ") || strings.HasPrefix(msg, "status 502 ") || strings.HasPrefix(msg, "status 503 ") || strings.HasPrefix(msg, "status 504 ") {
|
||
return true
|
||
}
|
||
return false
|
||
}
|
||
|
||
func redfishCriticalRequestTimeout() time.Duration {
|
||
if v := strings.TrimSpace(os.Getenv("LOGPILE_REDFISH_CRITICAL_TIMEOUT")); v != "" {
|
||
if d, err := time.ParseDuration(v); err == nil && d > 0 {
|
||
return d
|
||
}
|
||
}
|
||
return 45 * time.Second
|
||
}
|
||
|
||
func redfishCriticalRetryAttempts() int {
|
||
if v := strings.TrimSpace(os.Getenv("LOGPILE_REDFISH_CRITICAL_RETRIES")); v != "" {
|
||
if n, err := strconv.Atoi(v); err == nil && n >= 1 && n <= 10 {
|
||
return n
|
||
}
|
||
}
|
||
return 3
|
||
}
|
||
|
||
func redfishCriticalPlanBAttempts() int {
|
||
if v := strings.TrimSpace(os.Getenv("LOGPILE_REDFISH_CRITICAL_PLANB_RETRIES")); v != "" {
|
||
if n, err := strconv.Atoi(v); err == nil && n >= 1 && n <= 10 {
|
||
return n
|
||
}
|
||
}
|
||
return 3
|
||
}
|
||
|
||
func redfishCriticalRetryBackoff() time.Duration {
|
||
if v := strings.TrimSpace(os.Getenv("LOGPILE_REDFISH_CRITICAL_BACKOFF")); v != "" {
|
||
if d, err := time.ParseDuration(v); err == nil && d >= 0 {
|
||
return d
|
||
}
|
||
}
|
||
return 1500 * time.Millisecond
|
||
}
|
||
|
||
func redfishCriticalCooldown() time.Duration {
|
||
if v := strings.TrimSpace(os.Getenv("LOGPILE_REDFISH_CRITICAL_COOLDOWN")); v != "" {
|
||
if d, err := time.ParseDuration(v); err == nil && d >= 0 {
|
||
return d
|
||
}
|
||
}
|
||
return 4 * time.Second
|
||
}
|
||
|
||
func redfishCriticalSlowGap() time.Duration {
|
||
if v := strings.TrimSpace(os.Getenv("LOGPILE_REDFISH_CRITICAL_SLOW_GAP")); v != "" {
|
||
if d, err := time.ParseDuration(v); err == nil && d >= 0 {
|
||
return d
|
||
}
|
||
}
|
||
return 1200 * time.Millisecond
|
||
}
|
||
|
||
func redfishLinkRefs(doc map[string]interface{}, topKey, nestedKey string) []string {
|
||
top, ok := doc[topKey].(map[string]interface{})
|
||
if !ok {
|
||
return nil
|
||
}
|
||
items, ok := top[nestedKey].([]interface{})
|
||
if !ok {
|
||
return nil
|
||
}
|
||
out := make([]string, 0, len(items))
|
||
for _, itemAny := range items {
|
||
item, ok := itemAny.(map[string]interface{})
|
||
if !ok {
|
||
continue
|
||
}
|
||
if p := asString(item["@odata.id"]); p != "" {
|
||
out = append(out, p)
|
||
}
|
||
}
|
||
return out
|
||
}
|
||
|
||
func pcieDeviceDedupKey(dev models.PCIeDevice) string {
|
||
if bdf := strings.TrimSpace(dev.BDF); looksLikeCanonicalBDF(bdf) {
|
||
return strings.ToLower(bdf)
|
||
}
|
||
if s := strings.TrimSpace(dev.SerialNumber); s != "" {
|
||
return s
|
||
}
|
||
return firstNonEmpty(
|
||
strings.TrimSpace(dev.Slot)+"|"+strings.TrimSpace(dev.PartNumber)+"|"+strings.TrimSpace(dev.DeviceClass),
|
||
strings.TrimSpace(dev.Slot)+"|"+strings.TrimSpace(dev.DeviceClass),
|
||
strings.TrimSpace(dev.PartNumber)+"|"+strings.TrimSpace(dev.DeviceClass),
|
||
strings.TrimSpace(dev.Description)+"|"+strings.TrimSpace(dev.DeviceClass),
|
||
)
|
||
}
|
||
|
||
// looksLikeCanonicalBDF reports whether bdf resembles a PCI
// bus/device/function address, either domain-qualified (0000:65:00.0) or
// short form (65:00.0). Only the separators are checked, not hex validity.
func looksLikeCanonicalBDF(bdf string) bool {
	bdf = strings.ToLower(strings.TrimSpace(bdf))
	if bdf == "" {
		return false
	}
	colons := strings.Count(bdf, ":")
	return (colons == 1 || colons == 2) && strings.Contains(bdf, ".")
}
|
||
|
||
func shouldCrawlPath(path string) bool {
|
||
if path == "" {
|
||
return false
|
||
}
|
||
normalized := normalizeRedfishPath(path)
|
||
if strings.Contains(normalized, "/Memory/") {
|
||
after := strings.SplitN(normalized, "/Memory/", 2)
|
||
if len(after) == 2 && strings.Count(after[1], "/") >= 1 {
|
||
// Keep direct DIMM resources (/Memory/<slot>) but skip nested subresources
|
||
// like /Memory/<slot>/Assembly and /Memory/<slot>/MemoryMetrics.
|
||
return false
|
||
}
|
||
}
|
||
heavyParts := []string{
|
||
"/JsonSchemas",
|
||
"/LogServices/",
|
||
"/Entries/",
|
||
"/TelemetryService/",
|
||
"/MetricReports/",
|
||
"/SessionService/Sessions",
|
||
"/TaskService/Tasks",
|
||
}
|
||
for _, part := range heavyParts {
|
||
if strings.Contains(path, part) {
|
||
return false
|
||
}
|
||
}
|
||
return true
|
||
}
|
||
|
||
func (c *RedfishConnector) getLinkedPCIeFunctions(ctx context.Context, client *http.Client, req Request, baseURL string, doc map[string]interface{}) []map[string]interface{} {
|
||
// Newer Redfish payloads often keep function references in Links.PCIeFunctions.
|
||
if links, ok := doc["Links"].(map[string]interface{}); ok {
|
||
if refs, ok := links["PCIeFunctions"].([]interface{}); ok && len(refs) > 0 {
|
||
out := make([]map[string]interface{}, 0, len(refs))
|
||
for _, refAny := range refs {
|
||
ref, ok := refAny.(map[string]interface{})
|
||
if !ok {
|
||
continue
|
||
}
|
||
memberPath := asString(ref["@odata.id"])
|
||
if memberPath == "" {
|
||
continue
|
||
}
|
||
memberDoc, err := c.getJSON(ctx, client, req, baseURL, memberPath)
|
||
if err != nil {
|
||
continue
|
||
}
|
||
out = append(out, memberDoc)
|
||
}
|
||
return out
|
||
}
|
||
}
|
||
|
||
// Some implementations expose a collection object in PCIeFunctions.@odata.id.
|
||
if pcieFunctions, ok := doc["PCIeFunctions"].(map[string]interface{}); ok {
|
||
if collectionPath := asString(pcieFunctions["@odata.id"]); collectionPath != "" {
|
||
memberDocs, err := c.getCollectionMembers(ctx, client, req, baseURL, collectionPath)
|
||
if err == nil {
|
||
return memberDocs
|
||
}
|
||
}
|
||
}
|
||
|
||
return nil
|
||
}
|
||
|
||
func (c *RedfishConnector) getCollectionMembers(ctx context.Context, client *http.Client, req Request, baseURL, collectionPath string) ([]map[string]interface{}, error) {
|
||
collection, err := c.getJSON(ctx, client, req, baseURL, collectionPath)
|
||
if err != nil {
|
||
return nil, err
|
||
}
|
||
|
||
refs, ok := collection["Members"].([]interface{})
|
||
if !ok || len(refs) == 0 {
|
||
return []map[string]interface{}{}, nil
|
||
}
|
||
|
||
out := make([]map[string]interface{}, 0, len(refs))
|
||
for _, refAny := range refs {
|
||
ref, ok := refAny.(map[string]interface{})
|
||
if !ok {
|
||
continue
|
||
}
|
||
memberPath := asString(ref["@odata.id"])
|
||
if memberPath == "" {
|
||
continue
|
||
}
|
||
memberDoc, err := c.getJSON(ctx, client, req, baseURL, memberPath)
|
||
if err != nil {
|
||
continue
|
||
}
|
||
out = append(out, memberDoc)
|
||
}
|
||
return out, nil
|
||
}
|
||
|
||
// getJSON performs an authenticated GET against the BMC and decodes the JSON
// response into a generic map. requestPath is resolved relative to baseURL's
// path; numbers are preserved as json.Number to avoid float64 precision loss.
// Non-2xx responses become errors carrying up to 512 bytes of the body.
func (c *RedfishConnector) getJSON(ctx context.Context, client *http.Client, req Request, baseURL, requestPath string) (map[string]interface{}, error) {
	start := time.Now()
	rel := requestPath
	if rel == "" {
		rel = "/"
	}
	if !strings.HasPrefix(rel, "/") {
		rel = "/" + rel
	}

	u, err := url.Parse(baseURL)
	if err != nil {
		return nil, err
	}
	// NOTE(review): path.Join also path.Clean()s the result, so a requestPath
	// carrying a query string or percent-escaped segments would be mangled
	// here — Redfish @odata.id values are plain paths, which is presumably
	// why this is safe; confirm before passing anything else through.
	u.Path = path.Join(strings.TrimSuffix(u.Path, "/"), rel)

	httpReq, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, err
	}
	httpReq.Header.Set("Accept", "application/json")

	// Attach credentials per the request's auth scheme. For tokens, both
	// X-Auth-Token (Redfish session style) and Authorization: Bearer are set
	// so either server convention is satisfied.
	switch req.AuthType {
	case "password":
		httpReq.SetBasicAuth(req.Username, req.Password)
	case "token":
		httpReq.Header.Set("X-Auth-Token", req.Token)
		httpReq.Header.Set("Authorization", "Bearer "+req.Token)
	}

	resp, err := client.Do(httpReq)
	if err != nil {
		c.debugf("http get path=%s error=%v dur=%s", requestPath, err, time.Since(start).Round(time.Millisecond))
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		// Include a short body excerpt in the error. The "status NNN " prefix
		// of this message is relied upon by isRetryableRedfishFetchError to
		// classify 5xx responses as retryable — keep the format stable.
		body, _ := io.ReadAll(io.LimitReader(resp.Body, 512))
		err := fmt.Errorf("status %d from %s: %s", resp.StatusCode, requestPath, strings.TrimSpace(string(body)))
		c.debugf("http get path=%s status=%d dur=%s", requestPath, resp.StatusCode, time.Since(start).Round(time.Millisecond))
		return nil, err
	}

	var doc map[string]interface{}
	dec := json.NewDecoder(resp.Body)
	// UseNumber keeps large IDs / 64-bit counters exact instead of
	// silently converting them to float64.
	dec.UseNumber()
	if err := dec.Decode(&doc); err != nil {
		c.debugf("http get path=%s decode_error=%v dur=%s", requestPath, err, time.Since(start).Round(time.Millisecond))
		return nil, err
	}
	c.debugf("http get path=%s status=%d dur=%s", requestPath, resp.StatusCode, time.Since(start).Round(time.Millisecond))

	return doc, nil
}
|
||
|
||
func (c *RedfishConnector) getJSONWithRetry(ctx context.Context, client *http.Client, req Request, baseURL, requestPath string, attempts int, backoff time.Duration) (map[string]interface{}, error) {
|
||
if attempts < 1 {
|
||
attempts = 1
|
||
}
|
||
var lastErr error
|
||
for i := 0; i < attempts; i++ {
|
||
doc, err := c.getJSON(ctx, client, req, baseURL, requestPath)
|
||
if err == nil {
|
||
return doc, nil
|
||
}
|
||
lastErr = err
|
||
if i == attempts-1 || !isRetryableRedfishFetchError(err) {
|
||
break
|
||
}
|
||
if backoff > 0 {
|
||
select {
|
||
case <-time.After(backoff * time.Duration(i+1)):
|
||
case <-ctx.Done():
|
||
return nil, ctx.Err()
|
||
}
|
||
}
|
||
}
|
||
return nil, lastErr
|
||
}
|
||
|
||
func (c *RedfishConnector) collectCriticalRedfishDocsSequential(ctx context.Context, client *http.Client, req Request, baseURL string, paths []string) (map[string]interface{}, map[string]string) {
|
||
docs := make(map[string]interface{})
|
||
errs := make(map[string]string)
|
||
for _, p := range paths {
|
||
doc, err := c.getJSONWithRetry(ctx, client, req, baseURL, p, redfishCriticalRetryAttempts(), redfishCriticalRetryBackoff())
|
||
if err != nil {
|
||
errs[p] = err.Error()
|
||
continue
|
||
}
|
||
docs[p] = doc
|
||
// For critical collections, eagerly fetch members sequentially with the same slow policy.
|
||
if members, ok := c.collectCriticalCollectionMembersSequential(ctx, client, req, baseURL, p, doc); ok {
|
||
for mp, md := range members {
|
||
docs[mp] = md
|
||
}
|
||
}
|
||
}
|
||
return docs, errs
|
||
}
|
||
|
||
func (c *RedfishConnector) collectCriticalCollectionMembersSequential(ctx context.Context, client *http.Client, req Request, baseURL, collectionPath string, collectionDoc map[string]interface{}) (map[string]interface{}, bool) {
|
||
refs, ok := collectionDoc["Members"].([]interface{})
|
||
if !ok || len(refs) == 0 {
|
||
return nil, false
|
||
}
|
||
out := make(map[string]interface{})
|
||
for _, refAny := range refs {
|
||
ref, ok := refAny.(map[string]interface{})
|
||
if !ok {
|
||
continue
|
||
}
|
||
memberPath := normalizeRedfishPath(asString(ref["@odata.id"]))
|
||
if memberPath == "" {
|
||
continue
|
||
}
|
||
doc, err := c.getJSONWithRetry(ctx, client, req, baseURL, memberPath, redfishCriticalRetryAttempts(), redfishCriticalRetryBackoff())
|
||
if err != nil {
|
||
continue
|
||
}
|
||
out[memberPath] = doc
|
||
}
|
||
return out, true
|
||
}
|
||
|
||
// recoverCriticalRedfishDocsPlanB is the "plan-B" recovery pass: it re-fetches
// critical paths that previously failed with a retryable error, after a
// cooldown and with a slow gap between requests. Successes are written into
// rawTree and removed from fetchErrs (both mutated in place); the count of
// newly recovered documents (including collection members and probed
// children) is returned.
func (c *RedfishConnector) recoverCriticalRedfishDocsPlanB(ctx context.Context, client *http.Client, req Request, baseURL string, criticalPaths []string, rawTree map[string]interface{}, fetchErrs map[string]string, emit ProgressFn) int {
	// Select only paths that are still missing from rawTree AND whose recorded
	// error was transient — permanent errors (404s etc.) are not retried.
	var targets []string
	for _, p := range criticalPaths {
		p = normalizeRedfishPath(p)
		if p == "" {
			continue
		}
		if _, ok := rawTree[p]; ok {
			continue
		}
		errMsg, hasErr := fetchErrs[p]
		if !hasErr || !isRetryableRedfishFetchError(fmt.Errorf("%s", errMsg)) {
			continue
		}
		targets = append(targets, p)
	}
	if len(targets) == 0 {
		return 0
	}
	if emit != nil {
		totalETA := redfishCriticalCooldown() + estimatePlanBETA(len(targets))
		emit(Progress{
			Status:   "running",
			Progress: 97,
			Message:  fmt.Sprintf("Redfish: cooldown перед повторным добором критичных endpoint... ETA≈%s", formatETA(totalETA)),
		})
	}
	// Cooldown before retrying, abortable via ctx.
	select {
	case <-time.After(redfishCriticalCooldown()):
	case <-ctx.Done():
		return 0
	}

	recovered := 0
	for i, p := range targets {
		if emit != nil {
			remaining := len(targets) - i
			emit(Progress{
				Status:   "running",
				Progress: 97,
				Message:  fmt.Sprintf("Redfish: plan-B (%d/%d, ETA≈%s) %s", i+1, len(targets), formatETA(estimatePlanBETA(remaining)), compactProgressPath(p)),
			})
		}
		// Slow gap between consecutive requests (skipped before the first).
		if i > 0 {
			select {
			case <-time.After(redfishCriticalSlowGap()):
			case <-ctx.Done():
				return recovered
			}
		}
		doc, err := c.getJSONWithRetry(ctx, client, req, baseURL, p, redfishCriticalPlanBAttempts(), redfishCriticalRetryBackoff())
		if err == nil {
			rawTree[p] = doc
			delete(fetchErrs, p)
			recovered++
			// Pull in collection members that are not already present.
			if members, ok := c.collectCriticalCollectionMembersSequential(ctx, client, req, baseURL, p, doc); ok {
				for mp, md := range members {
					if _, exists := rawTree[mp]; !exists {
						rawTree[mp] = md
						recovered++
					}
				}
			}
			// For critical inventory collections, additionally probe direct
			// children in case the Members list was incomplete.
			if shouldSlowProbeCriticalCollection(p) {
				if children := c.probeDirectRedfishCollectionChildrenSlow(ctx, client, req, baseURL, p); len(children) > 0 {
					for cp, cd := range children {
						if _, exists := rawTree[cp]; exists {
							continue
						}
						rawTree[cp] = cd
						recovered++
					}
				}
			}
			continue
		}
		fetchErrs[p] = err.Error()
		// If collection endpoint times out, still try direct child probing for common numeric paths.
		if shouldSlowProbeCriticalCollection(p) {
			if children := c.probeDirectRedfishCollectionChildrenSlow(ctx, client, req, baseURL, p); len(children) > 0 {
				for cp, cd := range children {
					if _, exists := rawTree[cp]; exists {
						continue
					}
					rawTree[cp] = cd
					recovered++
				}
				// Probing salvaged data, so the path's error is cleared even
				// though the collection document itself stayed unreachable.
				delete(fetchErrs, p)
			}
		}
	}
	return recovered
}
|
||
|
||
func parseBoardInfo(system map[string]interface{}) models.BoardInfo {
|
||
return models.BoardInfo{
|
||
Manufacturer: normalizeRedfishIdentityField(asString(system["Manufacturer"])),
|
||
ProductName: normalizeRedfishIdentityField(firstNonEmpty(
|
||
asString(system["Model"]),
|
||
asString(system["ProductName"]),
|
||
asString(system["Name"]),
|
||
)),
|
||
SerialNumber: normalizeRedfishIdentityField(asString(system["SerialNumber"])),
|
||
PartNumber: normalizeRedfishIdentityField(asString(system["PartNumber"])),
|
||
UUID: normalizeRedfishIdentityField(asString(system["UUID"])),
|
||
}
|
||
}
|
||
|
||
func parseBoardInfoWithFallback(system, chassis, fru map[string]interface{}) models.BoardInfo {
|
||
board := parseBoardInfo(system)
|
||
chassisBoard := parseBoardInfo(chassis)
|
||
fruBoard := parseBoardInfoFromFRUDoc(fru)
|
||
|
||
if board.Manufacturer == "" {
|
||
board.Manufacturer = firstNonEmpty(chassisBoard.Manufacturer, fruBoard.Manufacturer)
|
||
}
|
||
if board.ProductName == "" {
|
||
board.ProductName = firstNonEmpty(chassisBoard.ProductName, fruBoard.ProductName)
|
||
}
|
||
if board.SerialNumber == "" {
|
||
board.SerialNumber = firstNonEmpty(chassisBoard.SerialNumber, fruBoard.SerialNumber)
|
||
}
|
||
if board.PartNumber == "" {
|
||
board.PartNumber = firstNonEmpty(chassisBoard.PartNumber, fruBoard.PartNumber)
|
||
}
|
||
if board.UUID == "" {
|
||
board.UUID = chassisBoard.UUID
|
||
}
|
||
return board
|
||
}
|
||
|
||
func parseBoardInfoFromFRUDoc(doc map[string]interface{}) models.BoardInfo {
|
||
if len(doc) == 0 {
|
||
return models.BoardInfo{}
|
||
}
|
||
return models.BoardInfo{
|
||
Manufacturer: findFirstNormalizedStringByKeys(doc, "Manufacturer", "BoardManufacturer", "Vendor"),
|
||
ProductName: findFirstNormalizedStringByKeys(doc, "ProductName", "BoardName", "PlatformId", "PlatformName", "MachineTypeModel", "Model"),
|
||
SerialNumber: findFirstNormalizedStringByKeys(doc, "SerialNumber", "BoardSerialNumber"),
|
||
PartNumber: findFirstNormalizedStringByKeys(doc, "PartNumber", "BoardPartNumber", "ProductPartNumber"),
|
||
}
|
||
}
|
||
|
||
func findFirstNormalizedStringByKeys(doc map[string]interface{}, keys ...string) string {
|
||
if len(doc) == 0 || len(keys) == 0 {
|
||
return ""
|
||
}
|
||
keySet := make(map[string]struct{}, len(keys))
|
||
for _, key := range keys {
|
||
k := strings.ToLower(strings.TrimSpace(key))
|
||
if k != "" {
|
||
keySet[k] = struct{}{}
|
||
}
|
||
}
|
||
|
||
stack := []any{doc}
|
||
for len(stack) > 0 {
|
||
last := len(stack) - 1
|
||
node := stack[last]
|
||
stack = stack[:last]
|
||
|
||
switch v := node.(type) {
|
||
case map[string]interface{}:
|
||
for k, raw := range v {
|
||
if _, ok := keySet[strings.ToLower(strings.TrimSpace(k))]; ok {
|
||
if s, ok := raw.(string); ok {
|
||
if normalized := normalizeRedfishIdentityField(s); normalized != "" {
|
||
return normalized
|
||
}
|
||
}
|
||
}
|
||
switch nested := raw.(type) {
|
||
case map[string]interface{}, []interface{}:
|
||
stack = append(stack, nested)
|
||
}
|
||
}
|
||
case []interface{}:
|
||
for _, item := range v {
|
||
switch nested := item.(type) {
|
||
case map[string]interface{}, []interface{}:
|
||
stack = append(stack, nested)
|
||
}
|
||
}
|
||
}
|
||
}
|
||
return ""
|
||
}
|
||
|
||
func parseCPUs(docs []map[string]interface{}) []models.CPU {
|
||
cpus := make([]models.CPU, 0, len(docs))
|
||
for idx, doc := range docs {
|
||
cpus = append(cpus, models.CPU{
|
||
Socket: idx,
|
||
Model: firstNonEmpty(asString(doc["Model"]), asString(doc["Name"])),
|
||
Cores: asInt(doc["TotalCores"]),
|
||
Threads: asInt(doc["TotalThreads"]),
|
||
FrequencyMHz: asInt(doc["OperatingSpeedMHz"]),
|
||
MaxFreqMHz: asInt(doc["MaxSpeedMHz"]),
|
||
SerialNumber: asString(doc["SerialNumber"]),
|
||
})
|
||
}
|
||
return cpus
|
||
}
|
||
|
||
func parseMemory(docs []map[string]interface{}) []models.MemoryDIMM {
|
||
out := make([]models.MemoryDIMM, 0, len(docs))
|
||
for _, doc := range docs {
|
||
slot := firstNonEmpty(asString(doc["DeviceLocator"]), asString(doc["Name"]), asString(doc["Id"]))
|
||
present := true
|
||
if strings.EqualFold(asString(doc["Status"]), "Absent") {
|
||
present = false
|
||
}
|
||
if status, ok := doc["Status"].(map[string]interface{}); ok {
|
||
state := asString(status["State"])
|
||
if strings.EqualFold(state, "Absent") || strings.EqualFold(state, "Disabled") {
|
||
present = false
|
||
}
|
||
}
|
||
|
||
out = append(out, models.MemoryDIMM{
|
||
Slot: slot,
|
||
Location: slot,
|
||
Present: present,
|
||
SizeMB: asInt(doc["CapacityMiB"]),
|
||
Type: firstNonEmpty(asString(doc["MemoryDeviceType"]), asString(doc["MemoryType"])),
|
||
MaxSpeedMHz: asInt(doc["MaxSpeedMHz"]),
|
||
CurrentSpeedMHz: asInt(doc["OperatingSpeedMhz"]),
|
||
Manufacturer: asString(doc["Manufacturer"]),
|
||
SerialNumber: asString(doc["SerialNumber"]),
|
||
PartNumber: asString(doc["PartNumber"]),
|
||
Status: mapStatus(doc["Status"]),
|
||
})
|
||
}
|
||
return out
|
||
}
|
||
|
||
func parseDrive(doc map[string]interface{}) models.Storage {
|
||
sizeGB := 0
|
||
if capBytes := asInt64(doc["CapacityBytes"]); capBytes > 0 {
|
||
sizeGB = int(capBytes / (1024 * 1024 * 1024))
|
||
}
|
||
if sizeGB == 0 {
|
||
sizeGB = asInt(doc["CapacityGB"])
|
||
}
|
||
if sizeGB == 0 {
|
||
sizeGB = asInt(doc["CapacityMiB"]) / 1024
|
||
}
|
||
|
||
storageType := classifyStorageType(doc)
|
||
|
||
return models.Storage{
|
||
Slot: firstNonEmpty(asString(doc["Id"]), asString(doc["Name"])),
|
||
Type: storageType,
|
||
Model: firstNonEmpty(asString(doc["Model"]), asString(doc["Name"])),
|
||
SizeGB: sizeGB,
|
||
SerialNumber: asString(doc["SerialNumber"]),
|
||
Manufacturer: asString(doc["Manufacturer"]),
|
||
Firmware: asString(doc["Revision"]),
|
||
Interface: asString(doc["Protocol"]),
|
||
Present: true,
|
||
}
|
||
}
|
||
|
||
func parseStorageVolume(doc map[string]interface{}, controller string) models.StorageVolume {
|
||
sizeGB := 0
|
||
capBytes := asInt64(doc["CapacityBytes"])
|
||
if capBytes > 0 {
|
||
sizeGB = int(capBytes / (1024 * 1024 * 1024))
|
||
}
|
||
if sizeGB == 0 {
|
||
sizeGB = asInt(doc["CapacityGB"])
|
||
}
|
||
raidLevel := firstNonEmpty(asString(doc["RAIDType"]), asString(doc["VolumeType"]))
|
||
if raidLevel == "" {
|
||
if v, ok := doc["Oem"].(map[string]interface{}); ok {
|
||
if smc, ok := v["Supermicro"].(map[string]interface{}); ok {
|
||
raidLevel = firstNonEmpty(raidLevel, asString(smc["RAIDType"]), asString(smc["VolumeType"]))
|
||
}
|
||
}
|
||
}
|
||
return models.StorageVolume{
|
||
ID: asString(doc["Id"]),
|
||
Name: firstNonEmpty(asString(doc["Name"]), asString(doc["Id"])),
|
||
Controller: strings.TrimSpace(controller),
|
||
RAIDLevel: raidLevel,
|
||
SizeGB: sizeGB,
|
||
CapacityBytes: capBytes,
|
||
Status: mapStatus(doc["Status"]),
|
||
Bootable: asBool(doc["Bootable"]),
|
||
Encrypted: asBool(doc["Encrypted"]),
|
||
}
|
||
}
|
||
|
||
func parseNIC(doc map[string]interface{}) models.NetworkAdapter {
|
||
vendorID := asHexOrInt(doc["VendorId"])
|
||
deviceID := asHexOrInt(doc["DeviceId"])
|
||
model := firstNonEmpty(asString(doc["Model"]), asString(doc["Name"]))
|
||
if isMissingOrRawPCIModel(model) {
|
||
if resolved := pciids.DeviceName(vendorID, deviceID); resolved != "" {
|
||
model = resolved
|
||
}
|
||
}
|
||
vendor := asString(doc["Manufacturer"])
|
||
if strings.TrimSpace(vendor) == "" {
|
||
vendor = pciids.VendorName(vendorID)
|
||
}
|
||
location := redfishLocationLabel(doc["Location"])
|
||
var firmware string
|
||
var portCount int
|
||
if controllers, ok := doc["Controllers"].([]interface{}); ok && len(controllers) > 0 {
|
||
if ctrl, ok := controllers[0].(map[string]interface{}); ok {
|
||
location = firstNonEmpty(location, redfishLocationLabel(ctrl["Location"]))
|
||
firmware = asString(ctrl["FirmwarePackageVersion"])
|
||
if caps, ok := ctrl["ControllerCapabilities"].(map[string]interface{}); ok {
|
||
portCount = asInt(caps["NetworkPortCount"])
|
||
}
|
||
}
|
||
}
|
||
|
||
return models.NetworkAdapter{
|
||
Slot: firstNonEmpty(asString(doc["Id"]), asString(doc["Name"])),
|
||
Location: location,
|
||
Present: !strings.EqualFold(mapStatus(doc["Status"]), "Absent"),
|
||
Model: strings.TrimSpace(model),
|
||
Vendor: strings.TrimSpace(vendor),
|
||
VendorID: vendorID,
|
||
DeviceID: deviceID,
|
||
SerialNumber: asString(doc["SerialNumber"]),
|
||
PartNumber: asString(doc["PartNumber"]),
|
||
Firmware: firmware,
|
||
PortCount: portCount,
|
||
Status: mapStatus(doc["Status"]),
|
||
}
|
||
}
|
||
|
||
func networkAdapterPCIeDevicePaths(doc map[string]interface{}) []string {
|
||
var out []string
|
||
if controllers, ok := doc["Controllers"].([]interface{}); ok {
|
||
for _, ctrlAny := range controllers {
|
||
ctrl, ok := ctrlAny.(map[string]interface{})
|
||
if !ok {
|
||
continue
|
||
}
|
||
links, ok := ctrl["Links"].(map[string]interface{})
|
||
if !ok {
|
||
continue
|
||
}
|
||
refs, ok := links["PCIeDevices"].([]interface{})
|
||
if !ok {
|
||
continue
|
||
}
|
||
for _, refAny := range refs {
|
||
ref, ok := refAny.(map[string]interface{})
|
||
if !ok {
|
||
continue
|
||
}
|
||
if p := asString(ref["@odata.id"]); p != "" {
|
||
out = append(out, p)
|
||
}
|
||
}
|
||
}
|
||
}
|
||
return out
|
||
}
|
||
|
||
func enrichNICFromPCIe(nic *models.NetworkAdapter, pcieDoc map[string]interface{}, functionDocs []map[string]interface{}) {
|
||
if nic == nil {
|
||
return
|
||
}
|
||
if nic.VendorID == 0 {
|
||
nic.VendorID = asHexOrInt(pcieDoc["VendorId"])
|
||
}
|
||
if nic.DeviceID == 0 {
|
||
nic.DeviceID = asHexOrInt(pcieDoc["DeviceId"])
|
||
}
|
||
for _, fn := range functionDocs {
|
||
if nic.VendorID == 0 {
|
||
nic.VendorID = asHexOrInt(fn["VendorId"])
|
||
}
|
||
if nic.DeviceID == 0 {
|
||
nic.DeviceID = asHexOrInt(fn["DeviceId"])
|
||
}
|
||
}
|
||
if strings.TrimSpace(nic.Vendor) == "" {
|
||
nic.Vendor = pciids.VendorName(nic.VendorID)
|
||
}
|
||
if isMissingOrRawPCIModel(nic.Model) {
|
||
if resolved := pciids.DeviceName(nic.VendorID, nic.DeviceID); resolved != "" {
|
||
nic.Model = resolved
|
||
}
|
||
}
|
||
}
|
||
|
||
func parsePSU(doc map[string]interface{}, idx int) models.PSU {
|
||
status := mapStatus(doc["Status"])
|
||
present := true
|
||
if statusMap, ok := doc["Status"].(map[string]interface{}); ok {
|
||
state := asString(statusMap["State"])
|
||
if strings.EqualFold(state, "Absent") || strings.EqualFold(state, "Disabled") {
|
||
present = false
|
||
}
|
||
}
|
||
|
||
slot := firstNonEmpty(
|
||
asString(doc["MemberId"]),
|
||
asString(doc["Id"]),
|
||
asString(doc["Name"]),
|
||
)
|
||
if slot == "" {
|
||
slot = fmt.Sprintf("PSU%d", idx)
|
||
}
|
||
|
||
return models.PSU{
|
||
Slot: slot,
|
||
Present: present,
|
||
Model: firstNonEmpty(asString(doc["Model"]), asString(doc["Name"])),
|
||
Vendor: asString(doc["Manufacturer"]),
|
||
WattageW: asInt(doc["PowerCapacityWatts"]),
|
||
SerialNumber: asString(doc["SerialNumber"]),
|
||
PartNumber: asString(doc["PartNumber"]),
|
||
Firmware: asString(doc["FirmwareVersion"]),
|
||
Status: status,
|
||
InputType: asString(doc["LineInputVoltageType"]),
|
||
InputPowerW: asInt(doc["PowerInputWatts"]),
|
||
OutputPowerW: asInt(doc["LastPowerOutputWatts"]),
|
||
InputVoltage: asFloat(doc["LineInputVoltage"]),
|
||
}
|
||
}
|
||
|
||
// parseGPU maps a Redfish PCIe/accelerator document (plus its PCIeFunction
// documents) onto models.GPU. Fields populated from the device document take
// precedence; each function document only fills what is still zero/empty, so
// the fill order below is significant. pci.ids resolves model/vendor names
// when the BMC left them blank or raw.
func parseGPU(doc map[string]interface{}, functionDocs []map[string]interface{}, idx int) models.GPU {
	// Best human-readable slot label; falls back to a synthetic "GPU<idx>".
	slot := firstNonEmpty(
		redfishLocationLabel(doc["Slot"]),
		redfishLocationLabel(doc["Location"]),
		redfishLocationLabel(doc["PhysicalLocation"]),
		asString(doc["Name"]),
		asString(doc["Id"]),
	)
	if slot == "" {
		slot = fmt.Sprintf("GPU%d", idx)
	}

	gpu := models.GPU{
		Slot:         slot,
		Location:     firstNonEmpty(redfishLocationLabel(doc["Location"]), redfishLocationLabel(doc["PhysicalLocation"])),
		Model:        firstNonEmpty(asString(doc["Model"]), asString(doc["Name"])),
		Manufacturer: asString(doc["Manufacturer"]),
		SerialNumber: strings.TrimSpace(asString(doc["SerialNumber"])),
		PartNumber:   asString(doc["PartNumber"]),
		Firmware:     asString(doc["FirmwareVersion"]),
		Status:       mapStatus(doc["Status"]),
	}

	// BDF: explicit field first, then reconstruction from the OEM Public block.
	if bdf := asString(doc["BDF"]); bdf != "" {
		gpu.BDF = bdf
	}
	if gpu.BDF == "" {
		gpu.BDF = buildBDFfromOemPublic(doc)
	}
	if gpu.VendorID == 0 {
		gpu.VendorID = asHexOrInt(doc["VendorId"])
	}
	if gpu.DeviceID == 0 {
		gpu.DeviceID = asHexOrInt(doc["DeviceId"])
	}

	// Fill remaining gaps (BDF, PCI IDs, link width/speed) from the PCIe
	// function documents; earlier documents win because only empty fields
	// are written.
	for _, fn := range functionDocs {
		if gpu.BDF == "" {
			gpu.BDF = asString(fn["FunctionId"])
		}
		if gpu.VendorID == 0 {
			gpu.VendorID = asHexOrInt(fn["VendorId"])
		}
		if gpu.DeviceID == 0 {
			gpu.DeviceID = asHexOrInt(fn["DeviceId"])
		}
		if gpu.MaxLinkWidth == 0 {
			gpu.MaxLinkWidth = asInt(fn["MaxLinkWidth"])
		}
		if gpu.CurrentLinkWidth == 0 {
			gpu.CurrentLinkWidth = asInt(fn["CurrentLinkWidth"])
		}
		if gpu.MaxLinkSpeed == "" {
			gpu.MaxLinkSpeed = firstNonEmpty(asString(fn["MaxLinkSpeedGTs"]), asString(fn["MaxLinkSpeed"]))
		}
		if gpu.CurrentLinkSpeed == "" {
			gpu.CurrentLinkSpeed = firstNonEmpty(asString(fn["CurrentLinkSpeedGTs"]), asString(fn["CurrentLinkSpeed"]))
		}
	}

	// Resolve human-readable names from pci.ids when the BMC reported raw
	// IDs (or nothing) for the model/manufacturer.
	if isMissingOrRawPCIModel(gpu.Model) {
		if resolved := pciids.DeviceName(gpu.VendorID, gpu.DeviceID); resolved != "" {
			gpu.Model = resolved
		}
	}
	if strings.TrimSpace(gpu.Manufacturer) == "" {
		gpu.Manufacturer = pciids.VendorName(gpu.VendorID)
	}

	return gpu
}
|
||
|
||
// parsePCIeDevice builds a models.PCIeDevice from a Redfish PCIeDevice
// document plus its associated PCIeFunction documents. Device-level values
// always win; function documents are consulted only to fill fields that are
// still empty/zero, in the order the functions were fetched.
func parsePCIeDevice(doc map[string]interface{}, functionDocs []map[string]interface{}) models.PCIeDevice {
	dev := models.PCIeDevice{
		// Slot label fallback chain: explicit Slot, then Location, then Name/Id.
		Slot:         firstNonEmpty(redfishLocationLabel(doc["Slot"]), redfishLocationLabel(doc["Location"]), asString(doc["Name"]), asString(doc["Id"])),
		BDF:          asString(doc["BDF"]),
		DeviceClass:  asString(doc["DeviceType"]),
		Manufacturer: asString(doc["Manufacturer"]),
		PartNumber:   asString(doc["PartNumber"]),
		SerialNumber: asString(doc["SerialNumber"]),
		VendorID:     asHexOrInt(doc["VendorId"]),
		DeviceID:     asHexOrInt(doc["DeviceId"]),
	}
	// Some BMCs expose bus/device/function only under Oem.Public.
	if strings.TrimSpace(dev.BDF) == "" {
		dev.BDF = buildBDFfromOemPublic(doc)
	}

	// Backfill missing identity and link attributes from function documents.
	for _, fn := range functionDocs {
		if dev.BDF == "" {
			dev.BDF = asString(fn["FunctionId"])
		}
		if dev.DeviceClass == "" || isGenericPCIeClassLabel(dev.DeviceClass) {
			dev.DeviceClass = firstNonEmpty(asString(fn["DeviceClass"]), asString(fn["ClassCode"]))
		}
		if dev.VendorID == 0 {
			dev.VendorID = asHexOrInt(fn["VendorId"])
		}
		if dev.DeviceID == 0 {
			dev.DeviceID = asHexOrInt(fn["DeviceId"])
		}
		if dev.LinkWidth == 0 {
			dev.LinkWidth = asInt(fn["CurrentLinkWidth"])
		}
		if dev.MaxLinkWidth == 0 {
			dev.MaxLinkWidth = asInt(fn["MaxLinkWidth"])
		}
		if dev.LinkSpeed == "" {
			dev.LinkSpeed = firstNonEmpty(asString(fn["CurrentLinkSpeedGTs"]), asString(fn["CurrentLinkSpeed"]))
		}
		if dev.MaxLinkSpeed == "" {
			dev.MaxLinkSpeed = firstNonEmpty(asString(fn["MaxLinkSpeedGTs"]), asString(fn["MaxLinkSpeed"]))
		}
	}

	if dev.DeviceClass == "" {
		dev.DeviceClass = "PCIe device"
	}
	// Generic class labels are replaced by a resolved name from the PCI IDs DB.
	if isGenericPCIeClassLabel(dev.DeviceClass) {
		if resolved := pciids.DeviceName(dev.VendorID, dev.DeviceID); resolved != "" {
			dev.DeviceClass = resolved
		}
	}
	if isGenericPCIeClassLabel(dev.DeviceClass) {
		// Redfish DeviceType (e.g. MultiFunction/Simulated) is a topology attribute,
		// not a user-facing device name. Prefer model/part labels when class cannot be resolved.
		dev.DeviceClass = firstNonEmpty(asString(doc["Model"]), dev.PartNumber, dev.DeviceClass)
	}
	// Final fallback: resolve manufacturer/part from vendor/device IDs.
	if strings.TrimSpace(dev.Manufacturer) == "" {
		dev.Manufacturer = pciids.VendorName(dev.VendorID)
	}
	if strings.TrimSpace(dev.PartNumber) == "" {
		dev.PartNumber = pciids.DeviceName(dev.VendorID, dev.DeviceID)
	}
	return dev
}
|
||
|
||
func parsePCIeFunction(doc map[string]interface{}, idx int) models.PCIeDevice {
|
||
slot := firstNonEmpty(redfishLocationLabel(doc["Location"]), asString(doc["Id"]), asString(doc["Name"]))
|
||
if slot == "" {
|
||
slot = fmt.Sprintf("PCIeFn%d", idx)
|
||
}
|
||
|
||
dev := models.PCIeDevice{
|
||
Slot: slot,
|
||
BDF: asString(doc["FunctionId"]),
|
||
VendorID: asHexOrInt(doc["VendorId"]),
|
||
DeviceID: asHexOrInt(doc["DeviceId"]),
|
||
DeviceClass: firstNonEmpty(asString(doc["DeviceClass"]), asString(doc["ClassCode"]), "PCIe device"),
|
||
Manufacturer: asString(doc["Manufacturer"]),
|
||
SerialNumber: asString(doc["SerialNumber"]),
|
||
LinkWidth: asInt(doc["CurrentLinkWidth"]),
|
||
LinkSpeed: firstNonEmpty(asString(doc["CurrentLinkSpeedGTs"]), asString(doc["CurrentLinkSpeed"])),
|
||
MaxLinkWidth: asInt(doc["MaxLinkWidth"]),
|
||
MaxLinkSpeed: firstNonEmpty(asString(doc["MaxLinkSpeedGTs"]), asString(doc["MaxLinkSpeed"])),
|
||
}
|
||
if isGenericPCIeClassLabel(dev.DeviceClass) {
|
||
if resolved := pciids.DeviceName(dev.VendorID, dev.DeviceID); resolved != "" {
|
||
dev.DeviceClass = resolved
|
||
}
|
||
}
|
||
if strings.TrimSpace(dev.Manufacturer) == "" {
|
||
dev.Manufacturer = pciids.VendorName(dev.VendorID)
|
||
}
|
||
if strings.TrimSpace(dev.PartNumber) == "" {
|
||
dev.PartNumber = pciids.DeviceName(dev.VendorID, dev.DeviceID)
|
||
}
|
||
return dev
|
||
}
|
||
|
||
// isMissingOrRawPCIModel reports whether a model string is effectively
// absent: empty, a well-known placeholder, or a raw hex device id that
// should be replaced with a resolved name from the PCI IDs database.
func isMissingOrRawPCIModel(model string) bool {
	trimmed := strings.TrimSpace(model)
	lower := strings.ToLower(trimmed)

	switch lower {
	case "", "unknown", "n/a", "na", "none":
		return true
	}
	// "0x1234"-style short hex literals are raw ids, not names.
	if strings.HasPrefix(lower, "0x") && len(lower) <= 6 {
		return true
	}
	// Bare short hex strings like "10de" are raw ids as well.
	if len(trimmed) <= 4 {
		allHex := true
		for _, r := range lower {
			if !strings.ContainsRune("0123456789abcdef", r) {
				allHex = false
				break
			}
		}
		if allHex {
			return true
		}
	}
	return false
}
|
||
|
||
// isGenericPCIeClassLabel reports whether v is a placeholder class label
// (generic category, empty, "unknown", or a raw "0x..." class code) rather
// than a meaningful, user-facing device name.
func isGenericPCIeClassLabel(v string) bool {
	label := strings.ToLower(strings.TrimSpace(v))
	genericLabels := []string{
		"", "pcie device", "display", "display controller", "vga",
		"3d controller", "network", "network controller", "storage",
		"storage controller", "other", "unknown", "singlefunction",
		"multifunction", "simulated",
	}
	for _, g := range genericLabels {
		if label == g {
			return true
		}
	}
	// Raw class codes like "0x030000" are placeholders too.
	return strings.HasPrefix(label, "0x")
}
|
||
|
||
func buildBDFfromOemPublic(doc map[string]interface{}) string {
|
||
if len(doc) == 0 {
|
||
return ""
|
||
}
|
||
oem, ok := doc["Oem"].(map[string]interface{})
|
||
if !ok {
|
||
return ""
|
||
}
|
||
public, ok := oem["Public"].(map[string]interface{})
|
||
if !ok {
|
||
return ""
|
||
}
|
||
|
||
bus := asHexOrInt(public["BusNumber"])
|
||
dev := asHexOrInt(public["DeviceNumber"])
|
||
fn := asHexOrInt(public["FunctionNumber"])
|
||
if bus < 0 || dev < 0 || fn < 0 {
|
||
return ""
|
||
}
|
||
segment := asHexOrInt(public["Segment"])
|
||
if segment < 0 {
|
||
segment = 0
|
||
}
|
||
// Require at least bus + dev numbers to avoid inventing meaningless BDFs.
|
||
if bus == 0 && dev == 0 && fn == 0 {
|
||
return ""
|
||
}
|
||
return fmt.Sprintf("%04x:%02x:%02x.%x", segment, bus, dev, fn)
|
||
}
|
||
|
||
// normalizeRedfishIdentityField trims an identity string (serial number,
// BDF, ...) and collapses well-known "no value" placeholders to "".
func normalizeRedfishIdentityField(v string) string {
	trimmed := strings.TrimSpace(v)
	if trimmed == "" {
		return ""
	}
	for _, placeholder := range []string{"n/a", "na", "none", "null", "unknown", "0"} {
		if strings.EqualFold(trimmed, placeholder) {
			return ""
		}
	}
	return trimmed
}
|
||
|
||
func gpuDedupKey(gpu models.GPU) string {
|
||
if serial := normalizeRedfishIdentityField(gpu.SerialNumber); serial != "" {
|
||
return serial
|
||
}
|
||
if bdf := strings.TrimSpace(gpu.BDF); bdf != "" {
|
||
return bdf
|
||
}
|
||
return firstNonEmpty(strings.TrimSpace(gpu.Slot)+"|"+strings.TrimSpace(gpu.Model), strings.TrimSpace(gpu.Slot))
|
||
}
|
||
|
||
// shouldSkipGenericGPUDuplicate reports whether candidate is a weak,
// identifier-less GPU record that duplicates a GPU already in existing.
// It only ever skips candidates that have no serial number and no BDF and
// whose slot string equals their model string — the shape produced by the
// GraphicsControllers fallback on some BMCs.
func shouldSkipGenericGPUDuplicate(existing []models.GPU, candidate models.GPU) bool {
	if len(existing) == 0 {
		return false
	}
	// Candidates with a stable identity are never treated as duplicates here.
	if normalizeRedfishIdentityField(candidate.SerialNumber) != "" || strings.TrimSpace(candidate.BDF) != "" {
		return false
	}
	slot := strings.TrimSpace(candidate.Slot)
	model := strings.TrimSpace(candidate.Model)
	if slot == "" || model == "" {
		return false
	}

	// Typical GraphicsControllers fallback on some BMCs reports only model/name
	// as slot and lacks stable identifiers. If we already have concrete GPUs of the
	// same model/manufacturer from PCIe inventory, this candidate is a duplicate.
	if !strings.EqualFold(slot, model) {
		return false
	}
	for _, gpu := range existing {
		if !strings.EqualFold(strings.TrimSpace(gpu.Model), model) {
			continue
		}
		existingMfr := strings.TrimSpace(gpu.Manufacturer)
		candidateMfr := strings.TrimSpace(candidate.Manufacturer)
		// Two different non-empty manufacturers means a different product; keep looking.
		if existingMfr != "" && candidateMfr != "" && !strings.EqualFold(existingMfr, candidateMfr) {
			continue
		}
		// Only a "concrete" match (one carrying serial or BDF) confirms the duplicate.
		if normalizeRedfishIdentityField(gpu.SerialNumber) != "" || strings.TrimSpace(gpu.BDF) != "" {
			return true
		}
	}
	return false
}
|
||
|
||
// dropModelOnlyGPUPlaceholders removes GPU entries that carry no serial
// number and no BDF when a "concrete" entry (one with a serial or BDF) for
// the same model already exists. Such placeholders come from fallback
// sources whose slot is just the model name or a synthetic "GPUn" label.
// With fewer than two entries there is nothing to deduplicate.
func dropModelOnlyGPUPlaceholders(items []models.GPU) []models.GPU {
	if len(items) < 2 {
		return items
	}

	// First pass: record models for which at least one concrete entry exists.
	concreteByModel := make(map[string]struct{}, len(items))
	for _, gpu := range items {
		modelKey := strings.ToLower(strings.TrimSpace(gpu.Model))
		if modelKey == "" {
			continue
		}
		if normalizeRedfishIdentityField(gpu.SerialNumber) != "" || strings.TrimSpace(gpu.BDF) != "" {
			concreteByModel[modelKey] = struct{}{}
		}
	}
	// No concrete entries at all: nothing can be safely classified as a placeholder.
	if len(concreteByModel) == 0 {
		return items
	}

	// Second pass: drop identifier-less entries that shadow a concrete entry
	// of the same model and look like a placeholder (slot == model or "GPUn").
	out := make([]models.GPU, 0, len(items))
	for _, gpu := range items {
		modelKey := strings.ToLower(strings.TrimSpace(gpu.Model))
		slot := strings.TrimSpace(gpu.Slot)
		if _, hasConcrete := concreteByModel[modelKey]; hasConcrete &&
			normalizeRedfishIdentityField(gpu.SerialNumber) == "" &&
			strings.TrimSpace(gpu.BDF) == "" &&
			(strings.EqualFold(slot, strings.TrimSpace(gpu.Model)) ||
				strings.HasPrefix(strings.ToUpper(slot), "GPU")) {
			continue
		}
		out = append(out, gpu)
	}
	return out
}
|
||
|
||
// looksLikeGPU heuristically decides whether a Redfish PCIe device document
// (plus its function documents) describes a GPU/accelerator. Checks are
// applied in order: DeviceType keywords, OEM device-class exclusion,
// model/name keyword hints, and finally PCI class codes of the functions.
func looksLikeGPU(doc map[string]interface{}, functionDocs []map[string]interface{}) bool {
	deviceType := strings.ToLower(asString(doc["DeviceType"]))
	if strings.Contains(deviceType, "gpu") || strings.Contains(deviceType, "graphics") || strings.Contains(deviceType, "accelerator") {
		return true
	}
	// Network devices are explicitly excluded before keyword sniffing.
	if strings.Contains(deviceType, "network") {
		return false
	}

	// Some BMCs expose the device class only under Oem.Public.DeviceClass.
	if oem, ok := doc["Oem"].(map[string]interface{}); ok {
		if public, ok := oem["Public"].(map[string]interface{}); ok {
			if dc := strings.ToLower(asString(public["DeviceClass"])); strings.Contains(dc, "network") {
				return false
			}
		}
	}

	// Keyword match over name/model/manufacturer covers BMCs that report
	// GPUs with a generic DeviceType.
	modelText := strings.ToLower(strings.Join([]string{
		asString(doc["Name"]),
		asString(doc["Model"]),
		asString(doc["Manufacturer"]),
	}, " "))
	gpuHints := []string{"gpu", "nvidia", "tesla", "a100", "h100", "l40", "rtx", "radeon", "instinct"}
	for _, hint := range gpuHints {
		if strings.Contains(modelText, hint) {
			return true
		}
	}

	// PCI class code prefixes: 03h (display controller) and 12h (processing
	// accelerator) per the PCI-SIG class code assignments.
	for _, fn := range functionDocs {
		classCode := strings.ToLower(strings.TrimPrefix(asString(fn["ClassCode"]), "0x"))
		if strings.HasPrefix(classCode, "03") || strings.HasPrefix(classCode, "12") {
			return true
		}
	}

	return false
}
|
||
|
||
func looksLikeDrive(doc map[string]interface{}) bool {
|
||
if asString(doc["MediaType"]) != "" {
|
||
return true
|
||
}
|
||
if asString(doc["Protocol"]) != "" && (asInt(doc["CapacityGB"]) > 0 || asInt(doc["CapacityBytes"]) > 0) {
|
||
return true
|
||
}
|
||
if asString(doc["Type"]) != "" && (asString(doc["Model"]) != "" || asInt(doc["CapacityGB"]) > 0 || asInt(doc["CapacityBytes"]) > 0) {
|
||
return true
|
||
}
|
||
return false
|
||
}
|
||
|
||
func classifyStorageType(doc map[string]interface{}) string {
|
||
protocol := strings.ToUpper(asString(doc["Protocol"]))
|
||
if strings.Contains(protocol, "NVME") {
|
||
return "NVMe"
|
||
}
|
||
|
||
media := strings.ToUpper(asString(doc["MediaType"]))
|
||
if media == "SSD" {
|
||
return "SSD"
|
||
}
|
||
if media == "HDD" || media == "HDDT" {
|
||
return "HDD"
|
||
}
|
||
|
||
nameModel := strings.ToUpper(strings.Join([]string{
|
||
asString(doc["Name"]),
|
||
asString(doc["Model"]),
|
||
asString(doc["Description"]),
|
||
}, " "))
|
||
if strings.Contains(nameModel, "NVME") {
|
||
return "NVMe"
|
||
}
|
||
if strings.Contains(nameModel, "SSD") {
|
||
return "SSD"
|
||
}
|
||
if strings.Contains(nameModel, "HDD") {
|
||
return "HDD"
|
||
}
|
||
if protocol != "" {
|
||
return protocol
|
||
}
|
||
return firstNonEmpty(asString(doc["Type"]), "Storage")
|
||
}
|
||
|
||
func looksLikeVolume(doc map[string]interface{}) bool {
|
||
if asString(doc["RAIDType"]) != "" || asString(doc["VolumeType"]) != "" {
|
||
return true
|
||
}
|
||
if strings.Contains(strings.ToLower(asString(doc["@odata.type"])), "volume") && (asInt64(doc["CapacityBytes"]) > 0 || asString(doc["Name"]) != "") {
|
||
return true
|
||
}
|
||
return false
|
||
}
|
||
|
||
func dedupeStorage(items []models.Storage) []models.Storage {
|
||
if len(items) <= 1 {
|
||
return items
|
||
}
|
||
out := make([]models.Storage, 0, len(items))
|
||
seen := make(map[string]struct{}, len(items))
|
||
for _, item := range items {
|
||
key := firstNonEmpty(
|
||
normalizeRedfishIdentityField(item.SerialNumber),
|
||
strings.TrimSpace(item.Slot)+"|"+strings.TrimSpace(item.Model),
|
||
)
|
||
if key == "" {
|
||
continue
|
||
}
|
||
if _, ok := seen[key]; ok {
|
||
continue
|
||
}
|
||
seen[key] = struct{}{}
|
||
out = append(out, item)
|
||
}
|
||
return out
|
||
}
|
||
|
||
func dedupeStorageVolumes(items []models.StorageVolume) []models.StorageVolume {
|
||
seen := make(map[string]struct{}, len(items))
|
||
out := make([]models.StorageVolume, 0, len(items))
|
||
for _, v := range items {
|
||
key := firstNonEmpty(strings.TrimSpace(v.ID), strings.TrimSpace(v.Name), strings.TrimSpace(v.Controller)+"|"+fmt.Sprintf("%d", v.CapacityBytes))
|
||
if key == "" {
|
||
continue
|
||
}
|
||
if _, ok := seen[key]; ok {
|
||
continue
|
||
}
|
||
seen[key] = struct{}{}
|
||
out = append(out, v)
|
||
}
|
||
return out
|
||
}
|
||
|
||
func storageControllerFromPath(path string) string {
|
||
p := normalizeRedfishPath(path)
|
||
parts := strings.Split(p, "/")
|
||
for i := 0; i < len(parts)-1; i++ {
|
||
if parts[i] == "Storage" && i+1 < len(parts) {
|
||
return parts[i+1]
|
||
}
|
||
}
|
||
return ""
|
||
}
|
||
|
||
func parseFirmware(system, bios, manager, secureBoot, networkProtocol map[string]interface{}) []models.FirmwareInfo {
|
||
var out []models.FirmwareInfo
|
||
|
||
appendFW := func(name, version string) {
|
||
version = strings.TrimSpace(version)
|
||
if version == "" {
|
||
return
|
||
}
|
||
out = append(out, models.FirmwareInfo{DeviceName: name, Version: version})
|
||
}
|
||
|
||
appendFW("BIOS", asString(system["BiosVersion"]))
|
||
appendFW("BIOS", asString(bios["Version"]))
|
||
appendFW("BMC", asString(manager["FirmwareVersion"]))
|
||
appendFW("SecureBoot", asString(secureBoot["SecureBootCurrentBoot"]))
|
||
appendFW("NetworkProtocol", asString(networkProtocol["Id"]))
|
||
|
||
return out
|
||
}
|
||
|
||
func mapStatus(statusAny interface{}) string {
|
||
if statusAny == nil {
|
||
return ""
|
||
}
|
||
if statusMap, ok := statusAny.(map[string]interface{}); ok {
|
||
health := asString(statusMap["Health"])
|
||
state := asString(statusMap["State"])
|
||
return firstNonEmpty(health, state)
|
||
}
|
||
return asString(statusAny)
|
||
}
|
||
|
||
func asString(v interface{}) string {
|
||
switch value := v.(type) {
|
||
case nil:
|
||
return ""
|
||
case string:
|
||
return strings.TrimSpace(value)
|
||
case json.Number:
|
||
return value.String()
|
||
default:
|
||
return strings.TrimSpace(fmt.Sprintf("%v", value))
|
||
}
|
||
}
|
||
|
||
func asInt(v interface{}) int {
|
||
switch value := v.(type) {
|
||
case nil:
|
||
return 0
|
||
case int:
|
||
return value
|
||
case int64:
|
||
return int(value)
|
||
case float64:
|
||
return int(value)
|
||
case json.Number:
|
||
if i, err := value.Int64(); err == nil {
|
||
return int(i)
|
||
}
|
||
if f, err := value.Float64(); err == nil {
|
||
return int(f)
|
||
}
|
||
case string:
|
||
if value == "" {
|
||
return 0
|
||
}
|
||
if i, err := strconv.Atoi(value); err == nil {
|
||
return i
|
||
}
|
||
}
|
||
return 0
|
||
}
|
||
|
||
func asInt64(v interface{}) int64 {
|
||
switch value := v.(type) {
|
||
case nil:
|
||
return 0
|
||
case int:
|
||
return int64(value)
|
||
case int64:
|
||
return value
|
||
case float64:
|
||
return int64(value)
|
||
case json.Number:
|
||
if i, err := value.Int64(); err == nil {
|
||
return i
|
||
}
|
||
if f, err := value.Float64(); err == nil {
|
||
return int64(f)
|
||
}
|
||
case string:
|
||
if value == "" {
|
||
return 0
|
||
}
|
||
if i, err := strconv.ParseInt(value, 10, 64); err == nil {
|
||
return i
|
||
}
|
||
}
|
||
return 0
|
||
}
|
||
|
||
// asBool converts a value to bool: native bools pass through, strings are
// compared case-insensitively (after trimming) to "true", and everything
// else is false.
func asBool(v interface{}) bool {
	if b, ok := v.(bool); ok {
		return b
	}
	if s, ok := v.(string); ok {
		return strings.EqualFold(strings.TrimSpace(s), "true")
	}
	return false
}
|
||
|
||
func asFloat(v interface{}) float64 {
|
||
switch value := v.(type) {
|
||
case nil:
|
||
return 0
|
||
case float64:
|
||
return value
|
||
case int:
|
||
return float64(value)
|
||
case int64:
|
||
return float64(value)
|
||
case json.Number:
|
||
if f, err := value.Float64(); err == nil {
|
||
return f
|
||
}
|
||
case string:
|
||
if value == "" {
|
||
return 0
|
||
}
|
||
if f, err := strconv.ParseFloat(value, 64); err == nil {
|
||
return f
|
||
}
|
||
}
|
||
return 0
|
||
}
|
||
|
||
func asHexOrInt(v interface{}) int {
|
||
switch value := v.(type) {
|
||
case nil:
|
||
return 0
|
||
case int:
|
||
return value
|
||
case int64:
|
||
return int(value)
|
||
case float64:
|
||
return int(value)
|
||
case json.Number:
|
||
if i, err := value.Int64(); err == nil {
|
||
return int(i)
|
||
}
|
||
if f, err := value.Float64(); err == nil {
|
||
return int(f)
|
||
}
|
||
case string:
|
||
s := strings.TrimSpace(value)
|
||
s = strings.TrimPrefix(strings.ToLower(s), "0x")
|
||
if s == "" {
|
||
return 0
|
||
}
|
||
if i, err := strconv.ParseInt(s, 16, 64); err == nil {
|
||
return int(i)
|
||
}
|
||
if i, err := strconv.Atoi(s); err == nil {
|
||
return i
|
||
}
|
||
}
|
||
return 0
|
||
}
|
||
|
||
// firstNonEmpty returns the first argument that is non-blank after
// trimming, already trimmed; "" when every argument is blank.
func firstNonEmpty(values ...string) string {
	for _, candidate := range values {
		if trimmed := strings.TrimSpace(candidate); trimmed != "" {
			return trimmed
		}
	}
	return ""
}
|
||
|
||
// joinPath concatenates a base path and suffix with exactly one "/" between
// them; an empty suffix returns the base with trailing slashes removed.
func joinPath(base, suffix string) string {
	trimmedBase := strings.TrimRight(base, "/")
	switch {
	case suffix == "":
		return trimmedBase
	case strings.HasPrefix(suffix, "/"):
		return trimmedBase + suffix
	default:
		return trimmedBase + "/" + suffix
	}
}
|
||
|
||
// firstPathOrDefault returns the first element of paths, or fallback when
// the slice is empty.
func firstPathOrDefault(paths []string, fallback string) string {
	if len(paths) > 0 {
		return paths[0]
	}
	return fallback
}
|
||
|
||
// normalizeRedfishPath canonicalizes a Redfish reference: strips URL
// fragments, reduces absolute http(s) URLs to their path component, ensures
// a leading "/", and rejects anything outside the /redfish/ tree by
// returning "".
func normalizeRedfishPath(raw string) string {
	p := strings.TrimSpace(raw)
	if p == "" {
		return ""
	}
	if hash := strings.IndexByte(p, '#'); hash >= 0 {
		p = p[:hash]
	}

	if strings.HasPrefix(p, "http://") || strings.HasPrefix(p, "https://") {
		parsed, err := url.Parse(p)
		if err != nil {
			return ""
		}
		p = parsed.Path
	}

	if !strings.HasPrefix(p, "/") {
		p = "/" + p
	}
	if !strings.HasPrefix(p, "/redfish/") {
		return ""
	}
	return p
}
|
||
|
||
func extractODataIDs(v interface{}) []string {
|
||
var refs []string
|
||
var walk func(any)
|
||
walk = func(node any) {
|
||
switch typed := node.(type) {
|
||
case map[string]interface{}:
|
||
for k, child := range typed {
|
||
if k == "@odata.id" {
|
||
if ref := asString(child); ref != "" {
|
||
refs = append(refs, ref)
|
||
}
|
||
continue
|
||
}
|
||
walk(child)
|
||
}
|
||
case []interface{}:
|
||
for _, child := range typed {
|
||
walk(child)
|
||
}
|
||
}
|
||
}
|
||
walk(v)
|
||
return refs
|
||
}
|
||
|
||
// redfishTopRoot returns the first collection segment of a Redfish path
// (e.g. "Systems" for /redfish/v1/Systems/1), or "root" for paths too short
// to contain one.
func redfishTopRoot(p string) string {
	segments := strings.Split(strings.TrimPrefix(p, "/"), "/")
	if len(segments) >= 3 {
		return segments[2]
	}
	return "root"
}
|
||
|
||
// topRoots renders the most frequent Redfish top-level roots as
// "name(count)" strings, highest count first, returning at most limit
// entries (or []{"n/a"} for an empty map).
//
// Fixes: items originate from random-order map iteration and the previous
// count-only sort left ties in nondeterministic order, so progress messages
// varied between runs — ties now break alphabetically. A negative limit no
// longer panics on the slice re-slice.
func topRoots(counts map[string]int, limit int) []string {
	if len(counts) == 0 {
		return []string{"n/a"}
	}
	type rootCount struct {
		root  string
		count int
	}
	items := make([]rootCount, 0, len(counts))
	for root, count := range counts {
		items = append(items, rootCount{root: root, count: count})
	}
	sort.Slice(items, func(i, j int) bool {
		if items[i].count != items[j].count {
			return items[i].count > items[j].count
		}
		return items[i].root < items[j].root // deterministic tie-break
	})
	if limit >= 0 && len(items) > limit {
		items = items[:limit]
	}
	out := make([]string, 0, len(items))
	for _, item := range items {
		out = append(out, fmt.Sprintf("%s(%d)", item.root, item.count))
	}
	return out
}
|
||
|
||
func redfishLocationLabel(v interface{}) string {
|
||
switch typed := v.(type) {
|
||
case nil:
|
||
return ""
|
||
case string:
|
||
return strings.TrimSpace(typed)
|
||
case map[string]interface{}:
|
||
// Common shapes:
|
||
// Slot.Location.PartLocation.ServiceLabel
|
||
// Location.PartLocation.ServiceLabel
|
||
// PartLocation.ServiceLabel
|
||
if nested := redfishLocationLabel(typed["Location"]); nested != "" {
|
||
return nested
|
||
}
|
||
if nested := redfishLocationLabel(typed["PartLocation"]); nested != "" {
|
||
return nested
|
||
}
|
||
serviceLabel := asString(typed["ServiceLabel"])
|
||
locationType := asString(typed["LocationType"])
|
||
ordinal := asString(typed["LocationOrdinalValue"])
|
||
if serviceLabel != "" {
|
||
return serviceLabel
|
||
}
|
||
if locationType != "" && ordinal != "" {
|
||
return fmt.Sprintf("%s %s", locationType, ordinal)
|
||
}
|
||
if locationType != "" {
|
||
return locationType
|
||
}
|
||
if ordinal != "" {
|
||
return "Slot " + ordinal
|
||
}
|
||
return ""
|
||
default:
|
||
// Avoid fmt.Sprint(map[]) style garbage for complex objects in UI/export.
|
||
return ""
|
||
}
|
||
}
|
||
|
||
func compactProgressPath(p string) string {
|
||
const maxLen = 72
|
||
if len(p) <= maxLen {
|
||
return p
|
||
}
|
||
return "..." + p[len(p)-maxLen+3:]
|
||
}
|
||
|
||
func redfishSnapshotMaxDocuments() int {
|
||
// Default is intentionally high enough to capture vendor-specific PCIe/GPU trees
|
||
// on modern HGX-class systems while staying within memory budgets of a typical
|
||
// developer workstation.
|
||
const (
|
||
def = 100000
|
||
min = 1200
|
||
max = 500000
|
||
)
|
||
if v := strings.TrimSpace(os.Getenv("LOGPILE_REDFISH_SNAPSHOT_MAX_DOCS")); v != "" {
|
||
if n, err := strconv.Atoi(v); err == nil {
|
||
if n < min {
|
||
return min
|
||
}
|
||
if n > max {
|
||
return max
|
||
}
|
||
return n
|
||
}
|
||
}
|
||
return def
|
||
}
|
||
|
||
func redfishSnapshotPrioritySeeds(systemPaths, chassisPaths, managerPaths []string) []string {
|
||
var out []string
|
||
add := func(p string) {
|
||
if p = normalizeRedfishPath(p); p != "" {
|
||
out = append(out, p)
|
||
}
|
||
}
|
||
|
||
add("/redfish/v1/UpdateService")
|
||
add("/redfish/v1/UpdateService/FirmwareInventory")
|
||
add("/redfish/v1/Fabrics")
|
||
|
||
for _, p := range systemPaths {
|
||
add(p)
|
||
add(joinPath(p, "/Bios"))
|
||
add(joinPath(p, "/SecureBoot"))
|
||
add(joinPath(p, "/Oem/Public"))
|
||
add(joinPath(p, "/Oem/Public/FRU"))
|
||
add(joinPath(p, "/Oem/Public/ThermalConfig"))
|
||
add(joinPath(p, "/ThermalConfig"))
|
||
add(joinPath(p, "/Processors"))
|
||
add(joinPath(p, "/Memory"))
|
||
add(joinPath(p, "/EthernetInterfaces"))
|
||
add(joinPath(p, "/NetworkInterfaces"))
|
||
add(joinPath(p, "/BootOptions"))
|
||
add(joinPath(p, "/Certificates"))
|
||
add(joinPath(p, "/PCIeDevices"))
|
||
add(joinPath(p, "/PCIeFunctions"))
|
||
add(joinPath(p, "/Accelerators"))
|
||
add(joinPath(p, "/GraphicsControllers"))
|
||
add(joinPath(p, "/Storage"))
|
||
add(joinPath(p, "/SimpleStorage"))
|
||
add(joinPath(p, "/Storage/IntelVROC"))
|
||
add(joinPath(p, "/Storage/IntelVROC/Drives"))
|
||
add(joinPath(p, "/Storage/IntelVROC/Volumes"))
|
||
}
|
||
for _, p := range chassisPaths {
|
||
add(p)
|
||
add(joinPath(p, "/Oem/Public"))
|
||
add(joinPath(p, "/Oem/Public/FRU"))
|
||
add(joinPath(p, "/Oem/Public/ThermalConfig"))
|
||
add(joinPath(p, "/ThermalConfig"))
|
||
add(joinPath(p, "/Sensors"))
|
||
add(joinPath(p, "/HealthSummary"))
|
||
add(joinPath(p, "/ThresholdSensors"))
|
||
add(joinPath(p, "/DiscreteSensors"))
|
||
add(joinPath(p, "/Boards"))
|
||
add(joinPath(p, "/Backplanes"))
|
||
add(joinPath(p, "/Assembly"))
|
||
add(joinPath(p, "/Thermal"))
|
||
add(joinPath(p, "/EnvironmentMetrics"))
|
||
add(joinPath(p, "/PCIeDevices"))
|
||
add(joinPath(p, "/PCIeSlots"))
|
||
add(joinPath(p, "/NetworkAdapters"))
|
||
add(joinPath(p, "/Drives"))
|
||
add(joinPath(p, "/Temperatures"))
|
||
add(joinPath(p, "/Fans"))
|
||
add(joinPath(p, "/Voltages"))
|
||
add(joinPath(p, "/PowerSubsystem"))
|
||
add(joinPath(p, "/PowerSubsystem/PowerSupplies"))
|
||
add(joinPath(p, "/PowerSubsystem/Voltages"))
|
||
add(joinPath(p, "/ThermalSubsystem"))
|
||
add(joinPath(p, "/ThermalSubsystem/Fans"))
|
||
add(joinPath(p, "/ThermalSubsystem/Temperatures"))
|
||
add(joinPath(p, "/Power"))
|
||
}
|
||
for _, p := range managerPaths {
|
||
add(p)
|
||
add(joinPath(p, "/EthernetInterfaces"))
|
||
add(joinPath(p, "/NetworkProtocol/HTTPS/Certificates"))
|
||
add(joinPath(p, "/LogServices"))
|
||
add(joinPath(p, "/NetworkProtocol"))
|
||
}
|
||
return out
|
||
}
|
||
|
||
func shouldReportSnapshotFetchError(err error) bool {
|
||
if err == nil {
|
||
return false
|
||
}
|
||
msg := err.Error()
|
||
if strings.HasPrefix(msg, "status 404 ") ||
|
||
strings.HasPrefix(msg, "status 405 ") ||
|
||
strings.HasPrefix(msg, "status 410 ") ||
|
||
strings.HasPrefix(msg, "status 501 ") {
|
||
return false
|
||
}
|
||
return true
|
||
}
|
||
|
||
// minInt32 returns the smaller of a and b.
func minInt32(a, b int32) int32 {
	if b < a {
		return b
	}
	return a
}
|
||
|
||
// maxInt returns the largest of the given ints, or 0 when called with no
// arguments.
func maxInt(values ...int) int {
	if len(values) == 0 {
		return 0
	}
	best := values[0]
	for _, v := range values[1:] {
		if v > best {
			best = v
		}
	}
	return best
}
|
||
|
||
func estimateSnapshotETA(start time.Time, processed, seen, queueLen, workers int, requestTimeout time.Duration) time.Duration {
|
||
remaining := maxInt(seen-processed, queueLen, 0)
|
||
if remaining == 0 {
|
||
return 0
|
||
}
|
||
if workers <= 0 {
|
||
workers = 1
|
||
}
|
||
if requestTimeout <= 0 {
|
||
requestTimeout = 10 * time.Second
|
||
}
|
||
|
||
timeoutBased := time.Duration(float64(requestTimeout) * float64(remaining) / float64(workers))
|
||
if processed <= 0 {
|
||
return timeoutBased
|
||
}
|
||
|
||
elapsed := time.Since(start)
|
||
if elapsed <= 0 {
|
||
return timeoutBased
|
||
}
|
||
rateBased := time.Duration(float64(elapsed) * float64(remaining) / float64(processed))
|
||
if rateBased <= 0 {
|
||
return timeoutBased
|
||
}
|
||
// Blend observed throughput with configured per-request timeout to keep ETA stable
|
||
// and still bounded by timeout assumptions on slower Redfish branches.
|
||
return (rateBased + timeoutBased) / 2
|
||
}
|
||
|
||
func estimatePlanBETA(targets int) time.Duration {
|
||
if targets <= 0 {
|
||
return 0
|
||
}
|
||
attempts := redfishCriticalPlanBAttempts()
|
||
if attempts < 1 {
|
||
attempts = 1
|
||
}
|
||
timeoutPart := time.Duration(attempts) * redfishCriticalRequestTimeout()
|
||
backoffPart := time.Duration(attempts-1) * redfishCriticalRetryBackoff()
|
||
gapPart := redfishCriticalSlowGap()
|
||
perTarget := timeoutPart + backoffPart + gapPart
|
||
return time.Duration(targets) * perTarget
|
||
}
|
||
|
||
func formatETA(d time.Duration) string {
|
||
if d <= 0 {
|
||
return "<1s"
|
||
}
|
||
if d < time.Second {
|
||
return "<1s"
|
||
}
|
||
if d < time.Minute {
|
||
return fmt.Sprintf("%ds", int(d.Round(time.Second).Seconds()))
|
||
}
|
||
totalSec := int(d.Round(time.Second).Seconds())
|
||
hours := totalSec / 3600
|
||
minutes := (totalSec % 3600) / 60
|
||
seconds := totalSec % 60
|
||
if hours > 0 {
|
||
return fmt.Sprintf("%dh%02dm%02ds", hours, minutes, seconds)
|
||
}
|
||
return fmt.Sprintf("%dm%02ds", minutes, seconds)
|
||
}
|