From bf6ecab4f0b3183b8a9a6511599ac97fe8797c07 Mon Sep 17 00:00:00 2001 From: Michael Chus Date: Mon, 13 Apr 2026 10:49:49 +0300 Subject: [PATCH] Add per-precision benchmark phases, weighted TOPS scoring, and ECC tracking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Split steady window into 6 equal slots: fp8/fp16/fp32/fp64/fp4 + combined - Each precision phase runs bee-gpu-burn with --precision filter so PowerCVPct reflects single-kernel stability (not round-robin artifact) - Add fp4 support in bee-gpu-stress.c for Blackwell (cc>=100) via existing CUDA_R_4F_E2M1 guard - Weighted TOPS: fp64×2.0, fp32×1.0, fp16×0.5, fp8×0.25, fp4×0.125 - SyntheticScore = sum of weighted TOPS from per-precision phases - MixedScore = sum from combined phase; MixedEfficiency = Mixed/Synthetic - ComputeScore = SyntheticScore × (1 + MixedEfficiency × 0.3) - ECC volatile counters sampled before/after each phase and overall - DegradationReasons: ecc_uncorrected_errors, ecc_corrected_errors - Report: per-precision stability table with ECC columns, methodology section - Ramp-up history table redesign: GPU indices as columns, runs as rows Co-Authored-By: Claude Sonnet 4.6 --- audit/internal/platform/benchmark.go | 236 +++++++++++++++++++- audit/internal/platform/benchmark_report.go | 72 +++++- audit/internal/platform/benchmark_types.go | 55 ++++- audit/internal/webui/api.go | 2 + audit/internal/webui/pages.go | 135 ++--------- audit/internal/webui/server_test.go | 4 +- audit/internal/webui/tasks_test.go | 2 +- iso/builder/bee-gpu-stress.c | 20 +- iso/overlay/usr/local/bin/bee-gpu-burn | 8 +- 9 files changed, 390 insertions(+), 144 deletions(-) diff --git a/audit/internal/platform/benchmark.go b/audit/internal/platform/benchmark.go index b4fe56a..014a4f9 100644 --- a/audit/internal/platform/benchmark.go +++ b/audit/internal/platform/benchmark.go @@ -73,6 +73,11 @@ var ( benchmarkIterationsPattern = 
regexp.MustCompile(`^([a-z0-9_]+)_iterations=(\d+)$`) ) +// benchmarkPrecisionPhases lists the precision categories run as individual +// steady-state windows before the combined steady pass. Order is from lowest +// to highest power draw so thermal ramp-up is gradual. +var benchmarkPrecisionPhases = []string{"fp8", "fp16", "fp32", "fp64", "fp4"} + func (s *System) RunNvidiaBenchmark(ctx context.Context, baseDir string, opts NvidiaBenchmarkOptions, logFunc func(string)) (string, error) { if ctx == nil { ctx = context.Background() } @@ -225,14 +230,56 @@ func (s *System) RunNvidiaBenchmark(ctx context.Context, baseDir string, opts Nv continue } + // ── Per-precision stability phases ──────────────────────────────────────── + // Run each precision category alone so PowerCVPct reflects genuine GPU + // power stability, not kernel-mix variance. + // Time budget: each phase gets steadySec/(numPhases+1), minimum 60 s. + // SteadySec is split equally across all precision phases + 1 combined slot. + // Skipped phases (unsupported precision) are simply omitted; combined is fixed. 
+ totalSlots := len(benchmarkPrecisionPhases) + 1 + perPhaseSec := spec.SteadySec / totalSlots + if perPhaseSec < 60 { + perPhaseSec = 60 + } + eccBase, _ := queryECCCounters(idx) + for _, prec := range benchmarkPrecisionPhases { + phaseCmd := []string{ + "bee-gpu-burn", + "--seconds", strconv.Itoa(perPhaseSec), + "--size-mb", strconv.Itoa(opts.SizeMB), + "--devices", strconv.Itoa(idx), + "--precision", prec, + } + logFunc(fmt.Sprintf("GPU %d: %s stability phase (%ds)", idx, prec, perPhaseSec)) + phaseLogName := fmt.Sprintf("gpu-%d-steady-%s", idx, prec) + eccBefore, _ := queryECCCounters(idx) + phaseOut, phaseRows, phaseErr := runBenchmarkCommandWithMetrics(ctx, verboseLog, phaseLogName+".log", phaseCmd, nil, []int{idx}, runDir, phaseLogName, logFunc) + eccAfter, _ := queryECCCounters(idx) + if phaseErr != nil || len(phaseRows) == 0 { + continue + } + phase := BenchmarkPrecisionSteadyPhase{ + Precision: prec, + Steady: summarizeBenchmarkTelemetry(phaseRows), + ECC: diffECCCounters(eccBefore, eccAfter), + } + for _, p := range parseBenchmarkBurnLog(string(phaseOut)).Profiles { + if p.Supported { + phase.TeraOpsPerSec += p.TeraOpsPerSec + phase.WeightedTeraOpsPerSec += p.WeightedTeraOpsPerSec + } + } + gpuResult.PrecisionSteady = append(gpuResult.PrecisionSteady, phase) + } + beforeThrottle, _ := queryThrottleCounters(idx) steadyCmd := []string{ "bee-gpu-burn", - "--seconds", strconv.Itoa(spec.SteadySec), + "--seconds", strconv.Itoa(perPhaseSec), "--size-mb", strconv.Itoa(opts.SizeMB), "--devices", strconv.Itoa(idx), } - logFunc(fmt.Sprintf("GPU %d: steady compute (%ds)", idx, spec.SteadySec)) + logFunc(fmt.Sprintf("GPU %d: steady compute (combined, %ds)", idx, perPhaseSec)) // Sample server power via IPMI in parallel with the steady phase. // We collect readings every 5s and average them. 
@@ -293,6 +340,9 @@ func (s *System) RunNvidiaBenchmark(ctx context.Context, baseDir string, opts Nv gpuResult.Steady = summarizeBenchmarkTelemetry(steadyRows) gpuResult.Throttle = diffThrottleCounters(beforeThrottle, afterThrottle) + if eccFinal, err := queryECCCounters(idx); err == nil { + gpuResult.ECC = diffECCCounters(eccBase, eccFinal) + } cooldownRows, err := collectBenchmarkSamples(ctx, spec.CooldownSec, []int{idx}) if err != nil && err != context.Canceled { @@ -811,8 +861,11 @@ func parseBenchmarkBurnLog(raw string) benchmarkBurnParseResult { Iterations: profile.iterations, Notes: profile.notes, } + w := precisionWeight(profile.category) + precision.Weight = w if profile.supported && result.DurationSec > 0 && profile.m > 0 && profile.n > 0 && profile.k > 0 && profile.iterations > 0 { precision.TeraOpsPerSec = (2.0 * float64(profile.m) * float64(profile.n) * float64(profile.k) * float64(profile.iterations)) / float64(result.DurationSec) / 1e12 + precision.WeightedTeraOpsPerSec = precision.TeraOpsPerSec * w } result.Profiles = append(result.Profiles, precision) } @@ -841,6 +894,33 @@ func ensureBenchmarkProfile(profiles map[string]*benchmarkBurnProfile, name stri return profile } +// precisionWeight returns the fp32-equivalence factor for a precision category. +// Each factor represents how much "real" numeric work one operation of that +// type performs relative to fp32 (single precision = 1.0 baseline): +// fp64 = 2.0 — double precision, 2× more bits per operand +// fp32 = 1.0 — single precision baseline +// fp16 = 0.5 — half precision +// fp8 = 0.25 — quarter precision +// fp4 = 0.125 — eighth precision +// Multiplying raw TOPS by the weight gives fp32-equivalent TOPS, enabling +// cross-precision comparison on the same numeric scale. 
+func precisionWeight(category string) float64 { + switch category { + case "fp64": + return 2.0 + case "fp32_tf32": + return 1.0 + case "fp16_bf16": + return 0.5 + case "fp8": + return 0.25 + case "fp4": + return 0.125 + default: + return 1.0 + } +} + func stripBenchmarkPrefix(line string) string { if strings.HasPrefix(line, "[gpu ") { if idx := strings.Index(line, "] "); idx >= 0 { @@ -890,11 +970,39 @@ func summarizeBenchmarkTelemetry(rows []GPUMetricRow) BenchmarkTelemetrySummary func scoreBenchmarkGPUResult(gpu BenchmarkGPUResult) BenchmarkScorecard { score := BenchmarkScorecard{} - for _, precision := range gpu.PrecisionResults { - if precision.Supported { - score.ComputeScore += precision.TeraOpsPerSec + + // SyntheticScore: sum of fp32-equivalent TOPS from per-precision phases. + // Each precision ran alone with full GPU dedicated — peak capability. + for _, p := range gpu.PrecisionSteady { + score.SyntheticScore += p.WeightedTeraOpsPerSec + } + + // MixedScore: sum of fp32-equivalent TOPS from the combined phase. + // All precisions compete simultaneously — closer to real inference workloads. + for _, p := range gpu.PrecisionResults { + if p.Supported { + score.MixedScore += p.WeightedTeraOpsPerSec } } + + // MixedEfficiency = MixedScore / SyntheticScore. + // Measures how well the GPU sustains throughput under concurrent mixed load. + // A healthy GPU scores ~0.8–0.95; severe degradation suggests bandwidth + // contention or scheduler inefficiency. + if score.SyntheticScore > 0 && score.MixedScore > 0 { + score.MixedEfficiency = score.MixedScore / score.SyntheticScore + } + + // ComputeScore = SyntheticScore × (1 + MixedEfficiency × 0.3). + // SyntheticScore is the primary signal; MixedEfficiency adds up to +30% + // bonus for GPUs that handle mixed-precision concurrency well. + // Falls back to MixedScore alone when per-precision data is absent. 
+ switch { + case score.SyntheticScore > 0: + score.ComputeScore = score.SyntheticScore * (1 + score.MixedEfficiency*0.3) + case score.MixedScore > 0: + score.ComputeScore = score.MixedScore + } // PowerSustainScore: measures how close the GPU came to its rated TDP under // a full-spectrum load (dcgmi targeted_power). 100 = exactly at rated TDP. // Penalty applied symmetrically for both under- and over-TDP deviations: @@ -915,7 +1023,19 @@ func scoreBenchmarkGPUResult(gpu BenchmarkGPUResult) BenchmarkScorecard { runtimeUS := math.Max(1, gpu.Steady.DurationSec*1e6) thermalRatio := float64(gpu.Throttle.HWThermalSlowdownUS+gpu.Throttle.SWThermalSlowdownUS) / runtimeUS score.ThermalSustainScore = clampScore(100 - thermalRatio*100) - score.StabilityScore = clampScore(100 - (gpu.Steady.ClockCVPct*4 + gpu.Steady.PowerCVPct*2 + gpu.Steady.ClockDriftPct*2)) + // StabilityScore: prefer per-precision steady phases where each window runs a + // single kernel type so PowerCVPct is a genuine stability signal (not a + // workload-mix artifact). Fall back to combined steady using clock-only metrics + // when per-precision data is absent (older results, short profiles). 
+ if len(gpu.PrecisionSteady) > 0 { + var sum float64 + for _, p := range gpu.PrecisionSteady { + sum += clampScore(100 - (p.Steady.ClockCVPct*4 + p.Steady.PowerCVPct*2 + p.Steady.ClockDriftPct*2)) + } + score.StabilityScore = sum / float64(len(gpu.PrecisionSteady)) + } else { + score.StabilityScore = clampScore(100 - (gpu.Steady.ClockCVPct*4 + gpu.Steady.ClockDriftPct*2)) + } score.CompositeScore = compositeBenchmarkScore(score) if gpu.MultiprocessorCount > 0 && gpu.Steady.AvgGraphicsClockMHz > 0 && score.ComputeScore > 0 { score.TOPSPerSMPerGHz = score.ComputeScore / float64(gpu.MultiprocessorCount) / (gpu.Steady.AvgGraphicsClockMHz / 1000.0) @@ -963,6 +1083,12 @@ func detectBenchmarkDegradationReasons(gpu BenchmarkGPUResult, normalizationStat if normalizationStatus != "full" { reasons = append(reasons, "normalization_partial") } + if gpu.ECC.Uncorrected > 0 { + reasons = append(reasons, "ecc_uncorrected_errors") + } + if gpu.ECC.Corrected > 0 { + reasons = append(reasons, "ecc_corrected_errors") + } return dedupeStrings(reasons) } @@ -1064,6 +1190,36 @@ func diffThrottleCounters(before, after BenchmarkThrottleCounters) BenchmarkThro } } +func queryECCCounters(gpuIndex int) (BenchmarkECCCounters, error) { + out, err := satExecCommand( + "nvidia-smi", + "--id="+strconv.Itoa(gpuIndex), + "--query-gpu=ecc.errors.corrected.volatile.total,ecc.errors.uncorrected.volatile.total", + "--format=csv,noheader,nounits", + ).Output() + if err != nil { + return BenchmarkECCCounters{}, err + } + fields := strings.Split(strings.TrimSpace(string(out)), ",") + if len(fields) < 2 { + return BenchmarkECCCounters{}, fmt.Errorf("unexpected ECC counter columns: %q", strings.TrimSpace(string(out))) + } + corrected, err1 := strconv.ParseUint(strings.TrimSpace(fields[0]), 10, 64) + uncorrected, err2 := strconv.ParseUint(strings.TrimSpace(fields[1]), 10, 64) + if err1 != nil || err2 != nil { + // ECC may be disabled on this GPU — return zero counters silently. 
+ return BenchmarkECCCounters{}, nil + } + return BenchmarkECCCounters{Corrected: corrected, Uncorrected: uncorrected}, nil +} + +func diffECCCounters(before, after BenchmarkECCCounters) BenchmarkECCCounters { + return BenchmarkECCCounters{ + Corrected: saturatingSub(after.Corrected, before.Corrected), + Uncorrected: saturatingSub(after.Uncorrected, before.Uncorrected), + } +} + func queryActiveComputeApps(gpuIndices []int) ([]string, error) { args := []string{ "--query-compute-apps=gpu_uuid,pid,process_name", @@ -1141,6 +1297,10 @@ func buildBenchmarkFindings(result NvidiaBenchmarkResult) []string { findings = append(findings, fmt.Sprintf("GPU %d showed unstable clocks/power over the benchmark window.", gpu.Index)) case "normalization_partial": findings = append(findings, fmt.Sprintf("GPU %d ran without full benchmark normalization.", gpu.Index)) + case "ecc_uncorrected_errors": + findings = append(findings, fmt.Sprintf("GPU %d reported %d uncorrected ECC error(s) — possible hardware fault.", gpu.Index, gpu.ECC.Uncorrected)) + case "ecc_corrected_errors": + findings = append(findings, fmt.Sprintf("GPU %d reported %d corrected ECC error(s) — possible DRAM degradation.", gpu.Index, gpu.ECC.Corrected)) } } if gpu.Backend == "driver-ptx" { @@ -1580,20 +1740,75 @@ func runNvidiaBenchmarkParallel( } } + // ── Per-precision stability phases (parallel) ───────────────────────────── + totalSlots := len(benchmarkPrecisionPhases) + 1 + perPhaseSec := spec.SteadySec / totalSlots + if perPhaseSec < 60 { + perPhaseSec = 60 + } + eccBase := make(map[int]BenchmarkECCCounters, len(selected)) + for _, idx := range selected { + eccBase[idx], _ = queryECCCounters(idx) + } + for _, prec := range benchmarkPrecisionPhases { + phaseCmd := []string{ + "bee-gpu-burn", + "--seconds", strconv.Itoa(perPhaseSec), + "--size-mb", strconv.Itoa(opts.SizeMB), + "--devices", allDevices, + "--precision", prec, + } + logFunc(fmt.Sprintf("GPUs %s: %s stability phase (%ds)", allDevices, prec, 
perPhaseSec)) + phaseLogName := "gpu-all-steady-" + prec + eccBeforePhase := make(map[int]BenchmarkECCCounters, len(selected)) + for _, idx := range selected { + eccBeforePhase[idx], _ = queryECCCounters(idx) + } + phaseOut, phaseRows, phaseErr := runBenchmarkCommandWithMetrics(ctx, verboseLog, phaseLogName+".log", phaseCmd, nil, selected, runDir, phaseLogName, logFunc) + eccAfterPhase := make(map[int]BenchmarkECCCounters, len(selected)) + for _, idx := range selected { + eccAfterPhase[idx], _ = queryECCCounters(idx) + } + if phaseErr != nil || len(phaseRows) == 0 { + continue + } + parseByGPU := parseBenchmarkBurnLogByGPU(string(phaseOut)) + for _, idx := range selected { + perGPU := filterRowsByGPU(phaseRows, idx) + if len(perGPU) == 0 { + continue + } + phase := BenchmarkPrecisionSteadyPhase{ + Precision: prec, + Steady: summarizeBenchmarkTelemetry(perGPU), + ECC: diffECCCounters(eccBeforePhase[idx], eccAfterPhase[idx]), + } + if pr, ok := parseByGPU[idx]; ok { + for _, p := range pr.Profiles { + if p.Supported { + phase.TeraOpsPerSec += p.TeraOpsPerSec + phase.WeightedTeraOpsPerSec += p.WeightedTeraOpsPerSec + } + } + } + gpuResults[idx].PrecisionSteady = append(gpuResults[idx].PrecisionSteady, phase) + } + } + // Snapshot throttle counters before steady. beforeThrottle := make(map[int]BenchmarkThrottleCounters, len(selected)) for _, idx := range selected { beforeThrottle[idx], _ = queryThrottleCounters(idx) } - // Steady: all GPUs simultaneously. + // Steady: all GPUs simultaneously (combined). Fixed at one slot = perPhaseSec. 
steadyCmd := []string{ "bee-gpu-burn", - "--seconds", strconv.Itoa(spec.SteadySec), + "--seconds", strconv.Itoa(perPhaseSec), "--size-mb", strconv.Itoa(opts.SizeMB), "--devices", allDevices, } - logFunc(fmt.Sprintf("GPUs %s: parallel steady compute (%ds)", allDevices, spec.SteadySec)) + logFunc(fmt.Sprintf("GPUs %s: parallel steady compute (combined, %ds)", allDevices, perPhaseSec)) // Sample server power via IPMI in parallel with steady phase. ipmiStopCh := make(chan struct{}) @@ -1649,6 +1864,9 @@ func runNvidiaBenchmarkParallel( writeBenchmarkMetricsFiles(runDir, fmt.Sprintf("gpu-%d-steady", idx), perGPU) gpuResults[idx].Steady = summarizeBenchmarkTelemetry(perGPU) gpuResults[idx].Throttle = diffThrottleCounters(beforeThrottle[idx], afterThrottle[idx]) + if eccFinal, err := queryECCCounters(idx); err == nil { + gpuResults[idx].ECC = diffECCCounters(eccBase[idx], eccFinal) + } if pr, ok := parseResults[idx]; ok { gpuResults[idx].ComputeCapability = pr.ComputeCapability diff --git a/audit/internal/platform/benchmark_report.go b/audit/internal/platform/benchmark_report.go index 1c69463..efcbac9 100644 --- a/audit/internal/platform/benchmark_report.go +++ b/audit/internal/platform/benchmark_report.go @@ -91,10 +91,24 @@ func renderBenchmarkReportWithCharts(result NvidiaBenchmarkResult, charts []benc b.WriteString("\n") } + // ── Scoring methodology ─────────────────────────────────────────────────── + b.WriteString("## Scoring Methodology\n\n") + b.WriteString("**Compute score** is derived from two phases:\n\n") + b.WriteString("- **Synthetic** — each precision type (fp8, fp16, fp32, fp64, fp4) runs alone for a dedicated window. ") + b.WriteString("Measures peak throughput with the full GPU dedicated to one kernel type. 
") + b.WriteString("Each result is normalised to fp32-equivalent TOPS using precision weights: ") + b.WriteString("fp64 ×2.0 · fp32 ×1.0 · fp16 ×0.5 · fp8 ×0.25 · fp4 ×0.125.\n") + b.WriteString("- **Mixed** — all precision types run simultaneously (combined phase). ") + b.WriteString("Reflects real inference workloads where fp8 matrix ops, fp16 attention and fp32 accumulation compete for bandwidth and SM scheduler slots.\n\n") + b.WriteString("**Formula:** `Compute = Synthetic × (1 + MixedEfficiency × 0.3)`\n\n") + b.WriteString("where `MixedEfficiency = Mixed / Synthetic`. A GPU that sustains 90 % throughput under mixed load ") + b.WriteString("receives a +27 % bonus over its synthetic score; one that drops to 60 % receives +18 %.\n\n") + b.WriteString("**Composite score** = `Compute × quality_factor` where quality factors in power sustain, thermal sustain, stability, and interconnect.\n\n") + // ── Scorecard table ─────────────────────────────────────────────────────── b.WriteString("## Scorecard\n\n") - b.WriteString("| GPU | Status | Composite | Compute | TOPS/SM/GHz | Power Sustain | Thermal Sustain | Stability | Interconnect |\n") - b.WriteString("|-----|--------|-----------|---------|-------------|---------------|-----------------|-----------|-------------|\n") + b.WriteString("| GPU | Status | Composite | Compute | Synthetic | Mixed | Mixed Eff. 
| TOPS/SM/GHz | Power Sustain | Thermal Sustain | Stability | Interconnect |\n") + b.WriteString("|-----|--------|-----------|---------|-----------|-------|------------|-------------|---------------|-----------------|-----------|-------------|\n") for _, gpu := range result.GPUs { name := strings.TrimSpace(gpu.Name) if name == "" { @@ -108,11 +122,26 @@ func renderBenchmarkReportWithCharts(result NvidiaBenchmarkResult, charts []benc if gpu.Scores.TOPSPerSMPerGHz > 0 { topsPerSM = fmt.Sprintf("%.3f", gpu.Scores.TOPSPerSMPerGHz) } - fmt.Fprintf(&b, "| GPU %d %s | %s | **%.2f** | %.2f | %s | %.1f | %.1f | %.1f | %s |\n", + synthetic := "-" + if gpu.Scores.SyntheticScore > 0 { + synthetic = fmt.Sprintf("%.2f", gpu.Scores.SyntheticScore) + } + mixed := "-" + if gpu.Scores.MixedScore > 0 { + mixed = fmt.Sprintf("%.2f", gpu.Scores.MixedScore) + } + mixedEff := "-" + if gpu.Scores.MixedEfficiency > 0 { + mixedEff = fmt.Sprintf("%.1f%%", gpu.Scores.MixedEfficiency*100) + } + fmt.Fprintf(&b, "| GPU %d %s | %s | **%.2f** | %.2f | %s | %s | %s | %s | %.1f | %.1f | %.1f | %s |\n", gpu.Index, name, gpu.Status, gpu.Scores.CompositeScore, gpu.Scores.ComputeScore, + synthetic, + mixed, + mixedEff, topsPerSM, gpu.Scores.PowerSustainScore, gpu.Scores.ThermalSustainScore, @@ -162,6 +191,35 @@ func renderBenchmarkReportWithCharts(result NvidiaBenchmarkResult, charts []benc fmt.Fprintf(&b, "| GPU utilisation | %.1f %% | — |\n", gpu.Steady.AvgUsagePct) b.WriteString("\n") + // Per-precision stability phases. 
+ if len(gpu.PrecisionSteady) > 0 { + b.WriteString("**Per-precision stability:**\n\n") + b.WriteString("| Precision | Clock CV | Power CV | Clock Drift | ECC corr | ECC uncorr |\n|-----------|----------|----------|-------------|----------|------------|\n") + for _, p := range gpu.PrecisionSteady { + eccCorr := "—" + eccUncorr := "—" + if !p.ECC.IsZero() { + eccCorr = fmt.Sprintf("%d", p.ECC.Corrected) + eccUncorr = fmt.Sprintf("%d", p.ECC.Uncorrected) + } + fmt.Fprintf(&b, "| %s | %.1f%% | %.1f%% | %.1f%% | %s | %s |\n", + p.Precision, p.Steady.ClockCVPct, p.Steady.PowerCVPct, p.Steady.ClockDriftPct, + eccCorr, eccUncorr) + } + b.WriteString("\n") + } else { + // Legacy: show combined-window variance. + fmt.Fprintf(&b, "**Clock/power variance (combined window):** clock CV %.1f%% · power CV %.1f%% · clock drift %.1f%%\n\n", + gpu.Steady.ClockCVPct, gpu.Steady.PowerCVPct, gpu.Steady.ClockDriftPct) + } + + + // ECC summary + if !gpu.ECC.IsZero() { + fmt.Fprintf(&b, "**ECC errors (total):** corrected=%d uncorrected=%d\n\n", + gpu.ECC.Corrected, gpu.ECC.Uncorrected) + } + // Throttle throttle := formatThrottleLine(gpu.Throttle, gpu.Steady.DurationSec) if throttle != "none" { @@ -171,12 +229,14 @@ func renderBenchmarkReportWithCharts(result NvidiaBenchmarkResult, charts []benc // Precision results if len(gpu.PrecisionResults) > 0 { b.WriteString("**Precision results:**\n\n") - b.WriteString("| Precision | TOPS | Lanes | Iterations |\n|-----------|------|-------|------------|\n") + b.WriteString("| Precision | TOPS (raw) | Weight | TOPS (fp32-eq) | Lanes | Iterations |\n|-----------|------------|--------|----------------|-------|------------|\n") for _, p := range gpu.PrecisionResults { if p.Supported { - fmt.Fprintf(&b, "| %s | %.2f | %d | %d |\n", p.Name, p.TeraOpsPerSec, p.Lanes, p.Iterations) + weightStr := fmt.Sprintf("×%.3g", p.Weight) + fmt.Fprintf(&b, "| %s | %.2f | %s | %.2f | %d | %d |\n", + p.Name, p.TeraOpsPerSec, weightStr, p.WeightedTeraOpsPerSec, p.Lanes, 
p.Iterations) } else { - fmt.Fprintf(&b, "| %s | — (unsupported) | — | — |\n", p.Name) + fmt.Fprintf(&b, "| %s | — (unsupported) | — | — | — | — |\n", p.Name) } } b.WriteString("\n") diff --git a/audit/internal/platform/benchmark_types.go b/audit/internal/platform/benchmark_types.go index f3ddb7a..1500ea2 100644 --- a/audit/internal/platform/benchmark_types.go +++ b/audit/internal/platform/benchmark_types.go @@ -105,11 +105,14 @@ type BenchmarkGPUResult struct { MaxMemoryClockMHz float64 `json:"max_memory_clock_mhz,omitempty"` LockedGraphicsClockMHz float64 `json:"locked_graphics_clock_mhz,omitempty"` LockedMemoryClockMHz float64 `json:"locked_memory_clock_mhz,omitempty"` - Baseline BenchmarkTelemetrySummary `json:"baseline"` - Steady BenchmarkTelemetrySummary `json:"steady"` - Cooldown BenchmarkTelemetrySummary `json:"cooldown"` - Throttle BenchmarkThrottleCounters `json:"throttle_counters"` - PrecisionResults []BenchmarkPrecisionResult `json:"precision_results,omitempty"` + Baseline BenchmarkTelemetrySummary `json:"baseline"` + Steady BenchmarkTelemetrySummary `json:"steady"` + PrecisionSteady []BenchmarkPrecisionSteadyPhase `json:"precision_steady,omitempty"` + Cooldown BenchmarkTelemetrySummary `json:"cooldown"` + Throttle BenchmarkThrottleCounters `json:"throttle_counters"` + // ECC error delta accumulated over the full benchmark (all phases combined). + ECC BenchmarkECCCounters `json:"ecc,omitempty"` + PrecisionResults []BenchmarkPrecisionResult `json:"precision_results,omitempty"` Scores BenchmarkScorecard `json:"scores"` DegradationReasons []string `json:"degradation_reasons,omitempty"` Notes []string `json:"notes,omitempty"` @@ -142,6 +145,18 @@ type BenchmarkThrottleCounters struct { HWPowerBrakeSlowdownUS uint64 `json:"hw_power_brake_slowdown_us"` } +// BenchmarkECCCounters holds ECC error counts sampled at a point in time. +// Corrected = single-bit errors fixed by ECC (DRAM degradation). 
+// Uncorrected = double-bit errors that could not be corrected (serious fault). +// Both are volatile (since last driver reset), not persistent. +type BenchmarkECCCounters struct { + Corrected uint64 `json:"corrected"` + Uncorrected uint64 `json:"uncorrected"` +} + +func (e BenchmarkECCCounters) Total() uint64 { return e.Corrected + e.Uncorrected } +func (e BenchmarkECCCounters) IsZero() bool { return e.Corrected == 0 && e.Uncorrected == 0 } + type BenchmarkPrecisionResult struct { Name string `json:"name"` Category string `json:"category"` @@ -152,19 +167,31 @@ type BenchmarkPrecisionResult struct { K uint64 `json:"k,omitempty"` Iterations uint64 `json:"iterations,omitempty"` TeraOpsPerSec float64 `json:"teraops_per_sec,omitempty"` + // Weight is the fp32-equivalence factor for this precision category. + // fp32 = 1.0 (baseline), fp64 = 2.0, fp16 = 0.5, fp8 = 0.25, fp4 = 0.125. + // WeightedTOPS = TeraOpsPerSec * Weight gives fp32-equivalent throughput. + Weight float64 `json:"weight,omitempty"` + WeightedTeraOpsPerSec float64 `json:"weighted_teraops_per_sec,omitempty"` Notes string `json:"notes,omitempty"` } type BenchmarkScorecard struct { ComputeScore float64 `json:"compute_score"` + // SyntheticScore is the sum of fp32-equivalent TOPS from per-precision + // steady phases (each precision ran alone, full GPU dedicated). + SyntheticScore float64 `json:"synthetic_score,omitempty"` + // MixedScore is the sum of fp32-equivalent TOPS from the combined phase + // (all precisions competing simultaneously — closer to real workloads). + MixedScore float64 `json:"mixed_score,omitempty"` + // MixedEfficiency = MixedScore / SyntheticScore. Measures how well the GPU + // sustains throughput under concurrent mixed-precision load. 
+ MixedEfficiency float64 `json:"mixed_efficiency,omitempty"` PowerSustainScore float64 `json:"power_sustain_score"` ThermalSustainScore float64 `json:"thermal_sustain_score"` StabilityScore float64 `json:"stability_score"` InterconnectScore float64 `json:"interconnect_score"` CompositeScore float64 `json:"composite_score"` // TOPSPerSMPerGHz is compute efficiency independent of clock speed and SM count. - // Comparable across throttle levels and GPU generations. Low value at normal - // clocks indicates silicon degradation. TOPSPerSMPerGHz float64 `json:"tops_per_sm_per_ghz,omitempty"` } @@ -182,6 +209,20 @@ type BenchmarkServerPower struct { Notes []string `json:"notes,omitempty"` } +// BenchmarkPrecisionSteadyPhase holds per-precision-category telemetry collected +// during a dedicated single-precision steady window. Because only one kernel +// type runs at a time the PowerCVPct here is a genuine stability signal. +type BenchmarkPrecisionSteadyPhase struct { + Precision string `json:"precision"` // e.g. "fp8", "fp16", "fp32" + Steady BenchmarkTelemetrySummary `json:"steady"` + TeraOpsPerSec float64 `json:"teraops_per_sec,omitempty"` + WeightedTeraOpsPerSec float64 `json:"weighted_teraops_per_sec,omitempty"` + // ECC errors accumulated during this precision phase only. + // Non-zero corrected = stress-induced DRAM errors for this kernel type. + // Any uncorrected = serious fault triggered by this precision workload. 
+ ECC BenchmarkECCCounters `json:"ecc,omitempty"` +} + type BenchmarkInterconnectResult struct { Status string `json:"status"` Attempted bool `json:"attempted"` diff --git a/audit/internal/webui/api.go b/audit/internal/webui/api.go index 61c1b8c..8497294 100644 --- a/audit/internal/webui/api.go +++ b/audit/internal/webui/api.go @@ -497,6 +497,7 @@ func (h *handler) handleAPISATRun(target string) http.HandlerFunc { GPUIndices []int `json:"gpu_indices"` ExcludeGPUIndices []int `json:"exclude_gpu_indices"` StaggerGPUStart bool `json:"stagger_gpu_start"` + ParallelGPUs bool `json:"parallel_gpus"` Loader string `json:"loader"` Profile string `json:"profile"` DisplayName string `json:"display_name"` @@ -519,6 +520,7 @@ func (h *handler) handleAPISATRun(target string) http.HandlerFunc { GPUIndices: body.GPUIndices, ExcludeGPUIndices: body.ExcludeGPUIndices, StaggerGPUStart: body.StaggerGPUStart, + ParallelGPUs: body.ParallelGPUs, Loader: body.Loader, BurnProfile: body.Profile, DisplayName: body.DisplayName, diff --git a/audit/internal/webui/pages.go b/audit/internal/webui/pages.go index ddd437b..6181fb0 100644 --- a/audit/internal/webui/pages.go +++ b/audit/internal/webui/pages.go @@ -1928,23 +1928,10 @@ func renderSATCard(id, label, runAction, headerActions, body string) string { // ── Benchmark ───────────────────────────────────────────────────────────────── -type benchmarkHistoryColumn struct { - key string - label string - name string - index int - parallel bool -} - -type benchmarkHistoryCell struct { - score float64 - present bool -} - type benchmarkHistoryRun struct { generatedAt time.Time displayTime string - cells map[string]benchmarkHistoryCell + gpuScores map[int]float64 // GPU index → composite score } func renderBenchmark(opts HandlerOptions) string { @@ -2206,17 +2193,17 @@ benchmarkLoadGPUs(); } func renderBenchmarkResultsCard(exportDir string) string { - columns, runs := loadBenchmarkHistory(exportDir) + maxIdx, runs := loadBenchmarkHistory(exportDir) 
return renderBenchmarkResultsCardFromRuns( "Benchmark Results", "Composite score by saved benchmark run and GPU.", "No saved benchmark runs yet.", - columns, + maxIdx, runs, ) } -func renderBenchmarkResultsCardFromRuns(title, description, emptyMessage string, columns []benchmarkHistoryColumn, runs []benchmarkHistoryRun) string { +func renderBenchmarkResultsCardFromRuns(title, description, emptyMessage string, maxGPUIndex int, runs []benchmarkHistoryRun) string { if len(runs) == 0 { return `
` + html.EscapeString(title) + `

` + html.EscapeString(emptyMessage) + `

` } @@ -2226,22 +2213,22 @@ func renderBenchmarkResultsCardFromRuns(title, description, emptyMessage string, b.WriteString(`

` + html.EscapeString(description) + `

`) } b.WriteString(`
`) - b.WriteString(``) - for _, col := range columns { - b.WriteString(``) + b.WriteString(`
TestTime` + html.EscapeString(col.label) + `
`) + for i := 0; i <= maxGPUIndex; i++ { + b.WriteString(``) } b.WriteString(``) for i, run := range runs { b.WriteString(``) b.WriteString(``) b.WriteString(``) - for _, col := range columns { - cell, ok := run.cells[col.key] - if !ok || !cell.present { + for idx := 0; idx <= maxGPUIndex; idx++ { + score, ok := run.gpuScores[idx] + if !ok { b.WriteString(``) continue } - b.WriteString(``) + b.WriteString(``) } b.WriteString(``) } @@ -2249,22 +2236,22 @@ func renderBenchmarkResultsCardFromRuns(title, description, emptyMessage string, return b.String() } -func loadBenchmarkHistory(exportDir string) ([]benchmarkHistoryColumn, []benchmarkHistoryRun) { +func loadBenchmarkHistory(exportDir string) (int, []benchmarkHistoryRun) { baseDir := app.DefaultBenchmarkBaseDir if strings.TrimSpace(exportDir) != "" { baseDir = filepath.Join(exportDir, "bee-benchmark") } paths, err := filepath.Glob(filepath.Join(baseDir, "gpu-benchmark-*", "result.json")) if err != nil || len(paths) == 0 { - return nil, nil + return -1, nil } sort.Strings(paths) return loadBenchmarkHistoryFromPaths(paths) } -func loadBenchmarkHistoryFromPaths(paths []string) ([]benchmarkHistoryColumn, []benchmarkHistoryRun) { - columnByKey := make(map[string]benchmarkHistoryColumn) +func loadBenchmarkHistoryFromPaths(paths []string) (int, []benchmarkHistoryRun) { runs := make([]benchmarkHistoryRun, 0, len(paths)) + maxGPUIndex := -1 for _, path := range paths { raw, err := os.ReadFile(path) if err != nil { @@ -2277,102 +2264,22 @@ func loadBenchmarkHistoryFromPaths(paths []string) ([]benchmarkHistoryColumn, [] run := benchmarkHistoryRun{ generatedAt: result.GeneratedAt, displayTime: result.GeneratedAt.Local().Format("2006-01-02 15:04:05"), - cells: make(map[string]benchmarkHistoryCell), + gpuScores: make(map[int]float64), } - - if result.ParallelGPUs { - // All GPUs ran simultaneously — one column per server, score = avg composite. 
- gpuModelCount := make(map[string]int) - for _, gpu := range result.GPUs { - gpuModelCount[strings.TrimSpace(gpu.Name)]++ - } - scoreSum := make(map[string]float64) - scoreCnt := make(map[string]int) - for _, gpu := range result.GPUs { - key := "parallel|" + strings.TrimSpace(result.ServerModel) + "|" + strings.TrimSpace(gpu.Name) - scoreSum[key] += gpu.Scores.CompositeScore - scoreCnt[key]++ - count := gpuModelCount[strings.TrimSpace(gpu.Name)] - columnByKey[key] = benchmarkHistoryColumn{ - key: key, - label: benchmarkHistoryParallelLabel(result.ServerModel, gpu.Name, count), - name: strings.TrimSpace(gpu.Name), - index: -1, - parallel: true, - } - } - for key, sum := range scoreSum { - run.cells[key] = benchmarkHistoryCell{score: sum / float64(scoreCnt[key]), present: true} - } - } else { - // Each GPU ran independently — one column per GPU index. - for _, gpu := range result.GPUs { - key := "gpu|" + strings.TrimSpace(result.ServerModel) + "|" + strings.TrimSpace(gpu.Name) + "|" + strconv.Itoa(gpu.Index) - columnByKey[key] = benchmarkHistoryColumn{ - key: key, - label: benchmarkHistoryPerGPULabel(gpu.Name, gpu.Index), - name: strings.TrimSpace(gpu.Name), - index: gpu.Index, - parallel: false, - } - run.cells[key] = benchmarkHistoryCell{score: gpu.Scores.CompositeScore, present: true} + for _, gpu := range result.GPUs { + run.gpuScores[gpu.Index] = gpu.Scores.CompositeScore + if gpu.Index > maxGPUIndex { + maxGPUIndex = gpu.Index } } runs = append(runs, run) } - - columns := make([]benchmarkHistoryColumn, 0, len(columnByKey)) - for _, col := range columnByKey { - columns = append(columns, col) - } - // Sequential GPU columns first (sorted by GPU index), then parallel server columns. 
- sort.Slice(columns, func(i, j int) bool { - if columns[i].parallel != columns[j].parallel { - return !columns[i].parallel // sequential first - } - if columns[i].parallel { - li := strings.ToLower(columns[i].label) - lj := strings.ToLower(columns[j].label) - if li != lj { - return li < lj - } - return columns[i].key < columns[j].key - } - // Sequential: sort by GPU index, then name. - if columns[i].index != columns[j].index { - return columns[i].index < columns[j].index - } - return strings.ToLower(columns[i].name) < strings.ToLower(columns[j].name) - }) sort.Slice(runs, func(i, j int) bool { return runs[i].generatedAt.After(runs[j].generatedAt) }) - return columns, runs + return maxGPUIndex, runs } -// benchmarkHistoryPerGPULabel formats a label for a single-GPU column: "GPU #N — ModelName". -func benchmarkHistoryPerGPULabel(gpuName string, index int) string { - gpuName = strings.TrimSpace(gpuName) - if gpuName == "" { - gpuName = "Unknown GPU" - } - return fmt.Sprintf("GPU #%d — %s", index, gpuName) -} - -// benchmarkHistoryParallelLabel formats a label for an all-GPU parallel column: -// "ServerModel — N× ModelName (All GPUs)" or "N× ModelName (All GPUs)" if no server. 
-func benchmarkHistoryParallelLabel(serverModel, gpuName string, count int) string { - serverModel = strings.TrimSpace(serverModel) - gpuName = strings.TrimSpace(gpuName) - if gpuName == "" { - gpuName = "Unknown GPU" - } - gpuPart := fmt.Sprintf("%d× %s (All GPUs)", count, gpuName) - if serverModel == "" { - return gpuPart - } - return fmt.Sprintf("%s — %s", serverModel, gpuPart) -} // ── Burn ────────────────────────────────────────────────────────────────────── diff --git a/audit/internal/webui/server_test.go b/audit/internal/webui/server_test.go index 1dfea7c..e868731 100644 --- a/audit/internal/webui/server_test.go +++ b/audit/internal/webui/server_test.go @@ -693,8 +693,8 @@ func TestBenchmarkPageRendersSavedResultsTable(t *testing.T) { for _, needle := range []string{ `Benchmark Results`, `Composite score by saved benchmark run and GPU.`, - `GPU #0 — NVIDIA H100 PCIe`, - `GPU #1 — NVIDIA H100 PCIe`, + `GPU 0`, + `GPU 1`, `#1`, wantTime, `1176.25`, diff --git a/audit/internal/webui/tasks_test.go b/audit/internal/webui/tasks_test.go index 1af7d82..807d079 100644 --- a/audit/internal/webui/tasks_test.go +++ b/audit/internal/webui/tasks_test.go @@ -422,7 +422,7 @@ func TestWriteTaskReportArtifactsIncludesBenchmarkResultsForTask(t *testing.T) { for _, needle := range []string{ `Benchmark Results`, `Composite score for this benchmark task.`, - `GPU #0 — NVIDIA H100 PCIe`, + `GPU 0`, `1176.25`, } { if !strings.Contains(html, needle) { diff --git a/iso/builder/bee-gpu-stress.c b/iso/builder/bee-gpu-stress.c index bc3ca9d..71d71f7 100644 --- a/iso/builder/bee-gpu-stress.c +++ b/iso/builder/bee-gpu-stress.c @@ -1121,6 +1121,7 @@ static int run_cublaslt_stress(struct cuda_api *cuda, int cc_minor, int seconds, int size_mb, + const char *precision_filter, struct stress_report *report) { struct cublaslt_api cublas; struct prepared_profile prepared[MAX_STRESS_STREAMS * MAX_CUBLAS_PROFILES]; @@ -1159,7 +1160,8 @@ static int run_cublaslt_stress(struct cuda_api *cuda, } for 
(size_t i = 0; i < sizeof(k_profiles) / sizeof(k_profiles[0]); i++) { - if (k_profiles[i].enabled && cc >= k_profiles[i].min_cc) { + if (k_profiles[i].enabled && cc >= k_profiles[i].min_cc && + (precision_filter == NULL || strcmp(k_profiles[i].block_label, precision_filter) == 0)) { planned++; } } @@ -1218,6 +1220,13 @@ static int run_cublaslt_stress(struct cuda_api *cuda, desc->min_cc); continue; } + if (precision_filter != NULL && strcmp(desc->block_label, precision_filter) != 0) { + append_detail(report->details, + sizeof(report->details), + "%s=SKIPPED precision_filter\n", + desc->name); + continue; + } for (int lane = 0; lane < stream_count; lane++) { CUstream stream = streams[lane]; if (prepared_count >= (int)(sizeof(prepared) / sizeof(prepared[0]))) { @@ -1339,6 +1348,7 @@ int main(int argc, char **argv) { int seconds = 5; int size_mb = 64; int device_index = 0; + const char *precision_filter = NULL; /* NULL = all; else block_label to match */ for (int i = 1; i < argc; i++) { if ((strcmp(argv[i], "--seconds") == 0 || strcmp(argv[i], "-t") == 0) && i + 1 < argc) { seconds = atoi(argv[++i]); @@ -1346,8 +1356,12 @@ int main(int argc, char **argv) { size_mb = atoi(argv[++i]); } else if ((strcmp(argv[i], "--device") == 0 || strcmp(argv[i], "-d") == 0) && i + 1 < argc) { device_index = atoi(argv[++i]); + } else if (strcmp(argv[i], "--precision") == 0 && i + 1 < argc) { + precision_filter = argv[++i]; } else { - fprintf(stderr, "usage: %s [--seconds N] [--size-mb N] [--device N]\n", argv[0]); + fprintf(stderr, + "usage: %s [--seconds N] [--size-mb N] [--device N] [--precision fp8|fp16|fp32|fp64|fp4]\n", + argv[0]); return 2; } } @@ -1407,7 +1421,7 @@ int main(int argc, char **argv) { int ok = 0; #if HAVE_CUBLASLT_HEADERS - ok = run_cublaslt_stress(&cuda, dev, name, cc_major, cc_minor, seconds, size_mb, &report); + ok = run_cublaslt_stress(&cuda, dev, name, cc_major, cc_minor, seconds, size_mb, precision_filter, &report); #endif if (!ok) { if 
(!run_ptx_fallback(&cuda, dev, name, cc_major, cc_minor, seconds, size_mb, &report)) { diff --git a/iso/overlay/usr/local/bin/bee-gpu-burn b/iso/overlay/usr/local/bin/bee-gpu-burn index a41be50..d736022 100755 --- a/iso/overlay/usr/local/bin/bee-gpu-burn +++ b/iso/overlay/usr/local/bin/bee-gpu-burn @@ -6,10 +6,11 @@ STAGGER_SECONDS=0 SIZE_MB=0 DEVICES="" EXCLUDE="" +PRECISION="" WORKER="/usr/local/lib/bee/bee-gpu-burn-worker" usage() { - echo "usage: $0 [--seconds N] [--stagger-seconds N] [--size-mb N] [--devices 0,1] [--exclude 2,3]" >&2 + echo "usage: $0 [--seconds N] [--stagger-seconds N] [--size-mb N] [--devices 0,1] [--exclude 2,3] [--precision fp8|fp16|fp32|fp64|fp4]" >&2 exit 2 } @@ -30,6 +31,7 @@ while [ "$#" -gt 0 ]; do --size-mb|-m) [ "$#" -ge 2 ] || usage; SIZE_MB="$2"; shift 2 ;; --devices) [ "$#" -ge 2 ] || usage; DEVICES="$2"; shift 2 ;; --exclude) [ "$#" -ge 2 ] || usage; EXCLUDE="$2"; shift 2 ;; + --precision) [ "$#" -ge 2 ] || usage; PRECISION="$2"; shift 2 ;; *) usage ;; esac done @@ -88,8 +90,10 @@ for id in $(echo "${FINAL}" | tr ',' ' '); do extra_sec=$(( STAGGER_SECONDS * (GPU_COUNT - gpu_pos) )) gpu_seconds=$(( SECONDS + extra_sec )) echo "starting gpu ${id} size=${gpu_size_mb}MB seconds=${gpu_seconds}" + precision_arg="" + [ -n "${PRECISION}" ] && precision_arg="--precision ${PRECISION}" CUDA_VISIBLE_DEVICES="${id}" \ - "${WORKER}" --device 0 --seconds "${gpu_seconds}" --size-mb "${gpu_size_mb}" >"${log}" 2>&1 & + "${WORKER}" --device 0 --seconds "${gpu_seconds}" --size-mb "${gpu_size_mb}" ${precision_arg} >"${log}" 2>&1 & pid=$! WORKERS="${WORKERS} ${pid}:${id}:${log}" if [ "${STAGGER_SECONDS}" -gt 0 ] && [ "${gpu_pos}" -lt "${GPU_COUNT}" ]; then
RunTimeGPU ` + strconv.Itoa(i) + `
#` + strconv.Itoa(i+1) + `` + html.EscapeString(run.displayTime) + `-` + fmt.Sprintf("%.2f", cell.score) + `` + fmt.Sprintf("%.2f", score) + `