Compare commits

6 Commits
v7.17 ... v7.21

Author               SHA1        Message                                         Date
Mikhail Chusavitin   b1a5035edd  Normalize task queue priorities by workflow     2026-04-14 11:13:54 +03:00
Mikhail Chusavitin   8fc986c933  Add benchmark fan duty cycle summary to report  2026-04-14 10:24:02 +03:00
Mikhail Chusavitin   88b5e0edf2  Harden IPMI power probe timeout                 2026-04-14 10:18:23 +03:00
Mikhail Chusavitin   82fe1f6d26  Disable precision fallback and pin cuBLAS 13.1  2026-04-14 10:17:44 +03:00
                     81e7c921f8  Debug during build                              2026-04-14 07:02:37 +03:00
                     0fb8f2777f  Fix combined gpu burn profile capacity for fp4  2026-04-14 00:00:40 +03:00
13 changed files with 399 additions and 34 deletions

.gitignore
View File

@@ -2,3 +2,4 @@
.DS_Store
dist/
iso/out/
build-cache/

View File

@@ -401,6 +401,7 @@ func (s *System) RunNvidiaBenchmark(ctx context.Context, baseDir string, opts Nv
serverLoadedW = serverLoadedWSum / float64(serverLoadedSamples)
}
result.ServerPower = characterizeServerPower(serverIdleW, serverLoadedW, gpuReportedSumW, serverIdleOK && serverLoadedOK)
result.Cooling = summarizeBenchmarkCooling(metricRows)
// Apply server-power penalty when IPMI reports the server delta is much
// lower than GPU-reported sum: GPU power telemetry is over-stated, making
@@ -739,7 +740,7 @@ func collectBenchmarkSamples(ctx context.Context, durationSec int, gpuIndices []
if ctx.Err() != nil {
return rows, ctx.Err()
}
-samples, err := sampleGPUMetrics(gpuIndices)
+samples, err := sampleBenchmarkTelemetry(gpuIndices)
if err == nil {
elapsed := time.Since(start).Seconds()
for i := range samples {
@@ -774,7 +775,7 @@ func runBenchmarkCommandWithMetrics(ctx context.Context, verboseLog, name string
case <-stopCh:
return
case <-ticker.C:
-samples, err := sampleGPUMetrics(gpuIndices)
+samples, err := sampleBenchmarkTelemetry(gpuIndices)
if err != nil {
continue
}
@@ -794,6 +795,37 @@ func runBenchmarkCommandWithMetrics(ctx context.Context, verboseLog, name string
return out, metricRows, err
}
type benchmarkCoolingSample struct {
AvgFanRPM float64
AvgFanDutyCyclePct float64
FanDutyCycleAvailable bool
}
func sampleBenchmarkTelemetry(gpuIndices []int) ([]GPUMetricRow, error) {
samples, err := sampleGPUMetrics(gpuIndices)
if err != nil {
return nil, err
}
fanSample := sampleBenchmarkCoolingSample()
for i := range samples {
samples[i].FanAvgRPM = fanSample.AvgFanRPM
samples[i].FanDutyCyclePct = fanSample.AvgFanDutyCyclePct
samples[i].FanDutyCycleAvailable = fanSample.FanDutyCycleAvailable
}
return samples, nil
}
func sampleBenchmarkCoolingSample() benchmarkCoolingSample {
fans, _ := sampleFanSpeeds()
avgRPM, _, _ := fanRPMStats(fans)
dutyPct, dutyAvailable := sampleFanDutyCyclePct()
return benchmarkCoolingSample{
AvgFanRPM: avgRPM,
AvgFanDutyCyclePct: dutyPct,
FanDutyCycleAvailable: dutyAvailable,
}
}
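sampleFanSpeeds and fanRPMStats are not part of this diff; a minimal sketch of what fanRPMStats might look like, assuming FanReading exposes an RPM field (the field name is an assumption here):

// fanRPMStats returns mean, min, and max RPM across fan readings.
// Sketch only: FanReading.RPM is assumed; the real helper lives elsewhere.
func fanRPMStats(fans []FanReading) (avg, lo, hi float64) {
	if len(fans) == 0 {
		return 0, 0, 0
	}
	lo, hi = fans[0].RPM, fans[0].RPM
	var sum float64
	for _, f := range fans {
		sum += f.RPM
		if f.RPM < lo {
			lo = f.RPM
		}
		if f.RPM > hi {
			hi = f.RPM
		}
	}
	return sum / float64(len(fans)), lo, hi
}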
func annotateBenchmarkMetricRows(rows []GPUMetricRow, stage string, offset float64) []GPUMetricRow {
if len(rows) == 0 {
return nil
@@ -1022,6 +1054,37 @@ func summarizeBenchmarkTelemetry(rows []GPUMetricRow) BenchmarkTelemetrySummary
return summary
}
func summarizeBenchmarkCooling(rows []GPUMetricRow) *BenchmarkCoolingSummary {
if len(rows) == 0 {
return nil
}
var rpmValues []float64
var dutyValues []float64
for _, row := range rows {
if row.FanAvgRPM > 0 {
rpmValues = append(rpmValues, row.FanAvgRPM)
}
if row.FanDutyCycleAvailable {
dutyValues = append(dutyValues, row.FanDutyCyclePct)
}
}
if len(rpmValues) == 0 && len(dutyValues) == 0 {
return nil
}
summary := &BenchmarkCoolingSummary{
Available: true,
AvgFanRPM: benchmarkMean(rpmValues),
}
if len(dutyValues) > 0 {
summary.FanDutyCycleAvailable = true
summary.AvgFanDutyCyclePct = benchmarkMean(dutyValues)
summary.P95FanDutyCyclePct = benchmarkPercentile(dutyValues, 95)
} else {
summary.Notes = append(summary.Notes, "fan duty cycle unavailable on this host; RPM-only fan telemetry was collected")
}
return summary
}
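benchmarkMean and benchmarkPercentile are referenced above but defined elsewhere in the package; plausible sketches, assuming a nearest-rank percentile (the interpolation method is a guess):

// Sketches of the helpers used by summarizeBenchmarkCooling;
// assumes the file imports "math" and "sort".
func benchmarkMean(values []float64) float64 {
	if len(values) == 0 {
		return 0
	}
	var sum float64
	for _, v := range values {
		sum += v
	}
	return sum / float64(len(values))
}

func benchmarkPercentile(values []float64, p float64) float64 {
	if len(values) == 0 {
		return 0
	}
	sorted := append([]float64(nil), values...)
	sort.Float64s(sorted)
	// Nearest-rank method: smallest value covering p percent of samples.
	idx := int(math.Ceil(p/100*float64(len(sorted)))) - 1
	if idx < 0 {
		idx = 0
	}
	if idx >= len(sorted) {
		idx = len(sorted) - 1
	}
	return sorted[idx]
}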
func scoreBenchmarkGPUResult(gpu BenchmarkGPUResult) BenchmarkScorecard {
score := BenchmarkScorecard{}
@@ -1601,7 +1664,10 @@ func maxInt(a, b int) int {
// queryIPMIServerPowerW reads the current server power draw via ipmitool dcmi.
// Returns 0 and an error if IPMI is unavailable or the output cannot be parsed.
func queryIPMIServerPowerW() (float64, error) {
-out, err := satExecCommand("ipmitool", "dcmi", "power", "reading").Output()
+ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+defer cancel()
+cmd := exec.CommandContext(ctx, "ipmitool", "dcmi", "power", "reading")
+out, err := cmd.Output()
if err != nil {
return 0, fmt.Errorf("ipmitool dcmi power reading: %w", err)
}
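The hardening works because exec.CommandContext kills the child process once the ten-second context expires, so a wedged BMC can no longer hang the probe. A self-contained illustration of the same pattern (sleep stands in for a hung ipmitool):

package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// "sleep 5" stands in for an ipmitool call against a dead BMC.
	_, err := exec.CommandContext(ctx, "sleep", "5").Output()
	fmt.Println(err) // prints "signal: killed" after ~1s instead of blocking for 5s
}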
@@ -1620,6 +1686,7 @@ func sampleIPMIPowerSeries(ctx context.Context, durationSec int) (meanW float64,
}
deadline := time.Now().Add(time.Duration(durationSec) * time.Second)
var samples []float64
loop:
for {
if w, err := queryIPMIServerPowerW(); err == nil {
samples = append(samples, w)
@@ -1629,7 +1696,7 @@ func sampleIPMIPowerSeries(ctx context.Context, durationSec int) (meanW float64,
}
select {
case <-ctx.Done():
-break
+break loop
case <-time.After(2 * time.Second):
}
}
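The `break loop` change fixes a classic Go pitfall: inside a select, a bare break exits only the select statement, so the surrounding for loop keeps spinning on an already-canceled context. A minimal illustration:

func pollUntilDone(ctx context.Context) {
loop:
	for {
		select {
		case <-ctx.Done():
			// A bare `break` here would exit only the select; the loop
			// would immediately re-enter this case and spin forever.
			// The label breaks out of the for loop itself.
			break loop
		case <-time.After(2 * time.Second):
			// take the next sample
		}
	}
}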

View File

@@ -290,6 +290,31 @@ func renderBenchmarkReportWithCharts(result NvidiaBenchmarkResult) string {
}
}
// ── Cooling ───────────────────────────────────────────────────────────────
if cooling := result.Cooling; cooling != nil {
b.WriteString("## Cooling\n\n")
if cooling.Available {
b.WriteString("| Metric | Value |\n|--------|-------|\n")
fmt.Fprintf(&b, "| Average fan speed | %.0f RPM |\n", cooling.AvgFanRPM)
if cooling.FanDutyCycleAvailable {
fmt.Fprintf(&b, "| Average fan duty cycle | %.1f%% |\n", cooling.AvgFanDutyCyclePct)
fmt.Fprintf(&b, "| P95 fan duty cycle | %.1f%% |\n", cooling.P95FanDutyCyclePct)
} else {
b.WriteString("| Average fan duty cycle | N/A |\n")
b.WriteString("| P95 fan duty cycle | N/A |\n")
}
b.WriteString("\n")
} else {
b.WriteString("Cooling telemetry unavailable.\n\n")
}
for _, note := range cooling.Notes {
fmt.Fprintf(&b, "- %s\n", note)
}
if len(cooling.Notes) > 0 {
b.WriteString("\n")
}
}
// ── Raw files ─────────────────────────────────────────────────────────────
b.WriteString("## Raw Files\n\n")
b.WriteString("- `result.json`\n- `report.md`\n- `summary.txt`\n- `verbose.log`\n")

View File

@@ -131,6 +131,13 @@ func TestRenderBenchmarkReportIncludesFindingsAndScores(t *testing.T) {
DegradationReasons: []string{"power_capped"},
},
},
Cooling: &BenchmarkCoolingSummary{
Available: true,
AvgFanRPM: 9200,
FanDutyCycleAvailable: true,
AvgFanDutyCyclePct: 47.5,
P95FanDutyCyclePct: 62.0,
},
}
report := renderBenchmarkReport(result)
@@ -140,6 +147,9 @@ func TestRenderBenchmarkReportIncludesFindingsAndScores(t *testing.T) {
"1176.00",
"fp16_tensor",
"700.00",
"Cooling",
"Average fan duty cycle",
"47.5%",
} {
if !strings.Contains(report, needle) {
t.Fatalf("report missing %q\n%s", needle, report)

View File

@@ -25,6 +25,17 @@ type BenchmarkCPULoad struct {
Note string `json:"note,omitempty"`
}
// BenchmarkCoolingSummary captures fan telemetry averaged across the full
// benchmark run.
type BenchmarkCoolingSummary struct {
Available bool `json:"available"`
AvgFanRPM float64 `json:"avg_fan_rpm,omitempty"`
FanDutyCycleAvailable bool `json:"fan_duty_cycle_available,omitempty"`
AvgFanDutyCyclePct float64 `json:"avg_fan_duty_cycle_pct,omitempty"`
P95FanDutyCyclePct float64 `json:"p95_fan_duty_cycle_pct,omitempty"`
Notes []string `json:"notes,omitempty"`
}
const (
NvidiaBenchmarkProfileStandard = "standard"
NvidiaBenchmarkProfileStability = "stability"
@@ -61,6 +72,7 @@ type NvidiaBenchmarkResult struct {
Normalization BenchmarkNormalization `json:"normalization"`
HostConfig *BenchmarkHostConfig `json:"host_config,omitempty"`
CPULoad *BenchmarkCPULoad `json:"cpu_load,omitempty"`
Cooling *BenchmarkCoolingSummary `json:"cooling,omitempty"`
GPUs []BenchmarkGPUResult `json:"gpus"`
Interconnect *BenchmarkInterconnectResult `json:"interconnect,omitempty"`
ServerPower *BenchmarkServerPower `json:"server_power,omitempty"`
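Given the omitempty tags, a populated summary serializes as below (values borrowed from the render test earlier in this diff):

// Illustrative marshaling of the new cooling field.
s := BenchmarkCoolingSummary{
	Available:             true,
	AvgFanRPM:             9200,
	FanDutyCycleAvailable: true,
	AvgFanDutyCyclePct:    47.5,
	P95FanDutyCyclePct:    62.0,
}
out, _ := json.Marshal(s)
fmt.Println(string(out))
// {"available":true,"avg_fan_rpm":9200,"fan_duty_cycle_available":true,"avg_fan_duty_cycle_pct":47.5,"p95_fan_duty_cycle_pct":62}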

View File

@@ -13,15 +13,18 @@ import (
// GPUMetricRow is one telemetry sample from nvidia-smi during a stress test.
type GPUMetricRow struct {
-Stage string `json:"stage,omitempty"`
-ElapsedSec float64 `json:"elapsed_sec"`
-GPUIndex int `json:"index"`
-TempC float64 `json:"temp_c"`
-UsagePct float64 `json:"usage_pct"`
-MemUsagePct float64 `json:"mem_usage_pct"`
-PowerW float64 `json:"power_w"`
-ClockMHz float64 `json:"clock_mhz"`
-MemClockMHz float64 `json:"mem_clock_mhz"`
+Stage string `json:"stage,omitempty"`
+ElapsedSec float64 `json:"elapsed_sec"`
+GPUIndex int `json:"index"`
+TempC float64 `json:"temp_c"`
+UsagePct float64 `json:"usage_pct"`
+MemUsagePct float64 `json:"mem_usage_pct"`
+PowerW float64 `json:"power_w"`
+ClockMHz float64 `json:"clock_mhz"`
+MemClockMHz float64 `json:"mem_clock_mhz"`
+FanAvgRPM float64 `json:"fan_avg_rpm,omitempty"`
+FanDutyCyclePct float64 `json:"fan_duty_cycle_pct,omitempty"`
+FanDutyCycleAvailable bool `json:"fan_duty_cycle_available,omitempty"`
}
// sampleGPUMetrics runs nvidia-smi once and returns current metrics for each GPU.
@@ -142,10 +145,14 @@ func sampleAMDGPUMetrics() ([]GPUMetricRow, error) {
// WriteGPUMetricsCSV writes collected rows as a CSV file.
func WriteGPUMetricsCSV(path string, rows []GPUMetricRow) error {
var b bytes.Buffer
b.WriteString("stage,elapsed_sec,gpu_index,temperature_c,usage_pct,mem_usage_pct,power_w,clock_mhz,mem_clock_mhz\n")
b.WriteString("stage,elapsed_sec,gpu_index,temperature_c,usage_pct,mem_usage_pct,power_w,clock_mhz,mem_clock_mhz,fan_avg_rpm,fan_duty_cycle_pct,fan_duty_cycle_available\n")
for _, r := range rows {
fmt.Fprintf(&b, "%s,%.1f,%d,%.1f,%.1f,%.1f,%.1f,%.0f,%.0f\n",
strconv.Quote(strings.TrimSpace(r.Stage)), r.ElapsedSec, r.GPUIndex, r.TempC, r.UsagePct, r.MemUsagePct, r.PowerW, r.ClockMHz, r.MemClockMHz)
dutyAvail := 0
if r.FanDutyCycleAvailable {
dutyAvail = 1
}
fmt.Fprintf(&b, "%s,%.1f,%d,%.1f,%.1f,%.1f,%.1f,%.0f,%.0f,%.0f,%.1f,%d\n",
strconv.Quote(strings.TrimSpace(r.Stage)), r.ElapsedSec, r.GPUIndex, r.TempC, r.UsagePct, r.MemUsagePct, r.PowerW, r.ClockMHz, r.MemClockMHz, r.FanAvgRPM, r.FanDutyCyclePct, dutyAvail)
}
return os.WriteFile(path, b.Bytes(), 0644)
}
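The three fan columns land at the end of every record; an illustrative call and the line it produces (sample values invented for the example):

row := GPUMetricRow{
	Stage: "gpu_burn", ElapsedSec: 12.5, GPUIndex: 0,
	TempC: 71, UsagePct: 99, MemUsagePct: 84.2, PowerW: 612.3,
	ClockMHz: 1980, MemClockMHz: 2619,
	FanAvgRPM: 9180, FanDutyCyclePct: 47.5, FanDutyCycleAvailable: true,
}
_ = WriteGPUMetricsCSV("/tmp/metrics.csv", []GPUMetricRow{row})
// "gpu_burn",12.5,0,71.0,99.0,84.2,612.3,1980,2619,9180,47.5,1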

View File

@@ -426,6 +426,101 @@ func sampleFanSpeedsViaSensorsJSON() ([]FanReading, error) {
return fans, nil
}
// sampleFanDutyCyclePct reads fan PWM/duty-cycle controls from lm-sensors.
// Returns the average duty cycle across all exposed PWM controls.
func sampleFanDutyCyclePct() (float64, bool) {
out, err := exec.Command("sensors", "-j").Output()
if err != nil || len(out) == 0 {
return 0, false
}
return parseFanDutyCyclePctSensorsJSON(out)
}
func parseFanDutyCyclePctSensorsJSON(raw []byte) (float64, bool) {
var doc map[string]map[string]any
if err := json.Unmarshal(raw, &doc); err != nil {
return 0, false
}
var samples []float64
for _, features := range doc {
for name, feature := range features {
if strings.EqualFold(name, "Adapter") {
continue
}
featureMap, ok := feature.(map[string]any)
if !ok {
continue
}
if duty, ok := firstFanDutyValue(name, featureMap); ok {
samples = append(samples, duty)
}
}
}
if len(samples) == 0 {
return 0, false
}
return benchmarkMean(samples), true
}
func firstFanDutyValue(featureName string, feature map[string]any) (float64, bool) {
featureName = strings.ToLower(strings.TrimSpace(featureName))
if strings.Contains(featureName, "enable") || strings.Contains(featureName, "mode") || strings.Contains(featureName, "alarm") {
return 0, false
}
if strings.Contains(featureName, "pwm") {
for _, key := range []string{"input", "value", "current"} {
if value, ok := feature[key]; ok {
if duty, parsed := parseFanDutyValue(value); parsed {
return duty, true
}
}
}
}
keys := make([]string, 0, len(feature))
for key := range feature {
keys = append(keys, key)
}
sort.Strings(keys)
for _, key := range keys {
lower := strings.ToLower(key)
if !strings.Contains(lower, "pwm") {
continue
}
if strings.Contains(lower, "enable") || strings.Contains(lower, "mode") || strings.Contains(lower, "alarm") {
continue
}
if duty, parsed := parseFanDutyValue(feature[key]); parsed {
return duty, true
}
}
return 0, false
}
func parseFanDutyValue(value any) (float64, bool) {
switch v := value.(type) {
case float64:
return normalizePWMAsDutyPct(v)
case string:
if f, err := strconv.ParseFloat(strings.TrimSpace(v), 64); err == nil {
return normalizePWMAsDutyPct(f)
}
}
return 0, false
}
func normalizePWMAsDutyPct(raw float64) (float64, bool) {
if raw < 0 {
return 0, false
}
if raw <= 100 {
return raw, true
}
if raw <= 255 {
return raw / 255.0 * 100.0, true
}
return 0, false
}
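normalizePWMAsDutyPct has to guess the scale: lm-sensors may expose PWM either as a 0-100 percentage or as a raw 8-bit 0-255 value, and anything at or below 100 is taken to already be a percent. Worked examples of both branches:

// Both branches of the heuristic:
normalizePWMAsDutyPct(64)  // 64.0, true  — ≤100, treated as a percent
normalizePWMAsDutyPct(128) // ≈50.2, true — 8-bit PWM, 128/255*100
normalizePWMAsDutyPct(300) // 0, false    — out of range, rejected
// The parser test below averages 50.2 and 64.0, hence its ~57.1 expectation.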
func firstFanInputValue(feature map[string]any) (float64, bool) {
keys := make([]string, 0, len(feature))
for key := range feature {

View File

@@ -29,6 +29,27 @@ func TestFirstFanInputValue(t *testing.T) {
}
}
func TestParseFanDutyCyclePctSensorsJSON(t *testing.T) {
raw := []byte(`{
"chip0": {
"fan1": {"input": 9000},
"pwm1": {"input": 128},
"pwm1_enable": {"input": 1}
},
"chip1": {
"pwm2": {"input": 64}
}
}`)
got, ok := parseFanDutyCyclePctSensorsJSON(raw)
if !ok {
t.Fatalf("expected duty cycle telemetry to be parsed")
}
if got < 57 || got > 58 {
t.Fatalf("got=%v want ~57.1", got)
}
}
func TestParseDCMIPowerReading(t *testing.T) {
raw := `
Instantaneous power reading: 512 Watts

View File

@@ -36,6 +36,16 @@ var apiListNvidiaGPUStatuses = func(a *app.App) ([]platform.NvidiaGPUStatus, err
return a.ListNvidiaGPUStatuses()
}
const (
taskPriorityBenchmark = 10
taskPriorityBurn = 20
taskPriorityValidateStress = 30
taskPriorityValidate = 40
taskPriorityAudit = 50
taskPriorityInstallToRAM = 60
taskPriorityInstall = 70
)
// ── Job ID counter ────────────────────────────────────────────────────────────
var jobCounter atomic.Uint64
@@ -109,6 +119,30 @@ func shouldSplitHomogeneousNvidiaTarget(target string) bool {
}
}
func defaultTaskPriority(target string, params taskParams) int {
switch strings.TrimSpace(target) {
case "install":
return taskPriorityInstall
case "install-to-ram":
return taskPriorityInstallToRAM
case "audit":
return taskPriorityAudit
case "nvidia-benchmark":
return taskPriorityBenchmark
case "nvidia-stress", "amd-stress", "memory-stress", "sat-stress", "platform-stress", "nvidia-compute":
return taskPriorityBurn
case "nvidia", "nvidia-targeted-stress", "nvidia-targeted-power", "nvidia-pulse",
"nvidia-interconnect", "nvidia-bandwidth", "memory", "storage", "cpu",
"amd", "amd-mem", "amd-bandwidth":
if params.StressMode {
return taskPriorityValidateStress
}
return taskPriorityValidate
default:
return 0
}
}
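The constants put benchmark work at the front of the workflow and installs at the back. Assuming the queue dequeues the lowest Priority value first with FIFO as a tiebreaker (the dispatcher itself is not part of this diff), selection might look like:

// Hypothetical dispatcher sketch; the real ordering logic lives in the
// queue code, which this diff does not touch.
func nextPending(tasks []*Task) *Task {
	var best *Task
	for _, t := range tasks {
		if t.Status != TaskPending {
			continue
		}
		if best == nil || t.Priority < best.Priority ||
			(t.Priority == best.Priority && t.CreatedAt.Before(best.CreatedAt)) {
			best = t
		}
	}
	return best
}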
func expandHomogeneousNvidiaSelections(gpus []platform.NvidiaGPU, include, exclude []int) ([]nvidiaTaskSelection, error) {
if len(gpus) == 0 {
return nil, fmt.Errorf("no NVIDIA GPUs detected")
@@ -458,6 +492,7 @@ func (h *handler) handleAPIAuditRun(w http.ResponseWriter, _ *http.Request) {
ID: newJobID("audit"),
Name: "Audit",
Target: "audit",
Priority: defaultTaskPriority("audit", taskParams{}),
Status: TaskPending,
CreatedAt: time.Now(),
}
@@ -526,7 +561,7 @@ func (h *handler) handleAPISATRun(target string) http.HandlerFunc {
DisplayName: body.DisplayName,
PlatformComponents: body.PlatformComponents,
}
-tasks, err := buildNvidiaTaskSet(target, 0, time.Now(), params, name, h.opts.App, "sat-"+target)
+tasks, err := buildNvidiaTaskSet(target, defaultTaskPriority(target, params), time.Now(), params, name, h.opts.App, "sat-"+target)
if err != nil {
writeError(w, http.StatusBadRequest, err.Error())
return
@@ -613,7 +648,7 @@ func (h *handler) handleAPIBenchmarkNvidiaRun(w http.ResponseWriter, r *http.Req
ID: newJobID("benchmark-nvidia"),
Name: stepName,
Target: "nvidia-benchmark",
-Priority: 15,
+Priority: defaultTaskPriority("nvidia-benchmark", taskParams{}),
Status: TaskPending,
CreatedAt: now,
params: taskParams{
@@ -645,7 +680,7 @@ func (h *handler) handleAPIBenchmarkNvidiaRun(w http.ResponseWriter, r *http.Req
name = fmt.Sprintf("%s · sequential", name)
}
tasks, err := buildNvidiaTaskSet("nvidia-benchmark", 15, time.Now(), taskParams{
params := taskParams{
GPUIndices: body.GPUIndices,
ExcludeGPUIndices: body.ExcludeGPUIndices,
SizeMB: body.SizeMB,
@@ -653,7 +688,8 @@ func (h *handler) handleAPIBenchmarkNvidiaRun(w http.ResponseWriter, r *http.Req
RunNCCL: runNCCL,
ParallelGPUs: parallelGPUs,
DisplayName: body.DisplayName,
-}, name, h.opts.App, "benchmark-nvidia")
+}
+tasks, err := buildNvidiaTaskSet("nvidia-benchmark", defaultTaskPriority("nvidia-benchmark", params), time.Now(), params, name, h.opts.App, "benchmark-nvidia")
if err != nil {
writeError(w, http.StatusBadRequest, err.Error())
return
@@ -1054,7 +1090,7 @@ func (h *handler) handleAPIInstallToRAM(w http.ResponseWriter, r *http.Request)
ID: newJobID("install-to-ram"),
Name: "Install to RAM",
Target: "install-to-ram",
-Priority: 10,
+Priority: defaultTaskPriority("install-to-ram", taskParams{}),
Status: TaskPending,
CreatedAt: time.Now(),
}
@@ -1169,7 +1205,7 @@ func (h *handler) handleAPIInstallRun(w http.ResponseWriter, r *http.Request) {
ID: newJobID("install"),
Name: "Install to Disk",
Target: "install",
-Priority: 20,
+Priority: defaultTaskPriority("install", taskParams{}),
Status: TaskPending,
CreatedAt: time.Now(),
params: taskParams{
@@ -1461,4 +1497,3 @@ func (h *handler) rollbackPendingNetworkChange() error {
}
return nil
}

View File

@@ -39,6 +39,9 @@ func TestHandleAPISATRunDecodesBodyWithoutContentLength(t *testing.T) {
if got := globalQueue.tasks[0].params.BurnProfile; got != "smoke" {
t.Fatalf("burn profile=%q want smoke", got)
}
if got := globalQueue.tasks[0].Priority; got != taskPriorityValidate {
t.Fatalf("priority=%d want %d", got, taskPriorityValidate)
}
}
func TestHandleAPIBenchmarkNvidiaRunQueuesSelectedGPUs(t *testing.T) {
@@ -84,6 +87,9 @@ func TestHandleAPIBenchmarkNvidiaRunQueuesSelectedGPUs(t *testing.T) {
if task.params.RunNCCL {
t.Fatal("RunNCCL should reflect explicit false from request")
}
if task.Priority != taskPriorityBenchmark {
t.Fatalf("priority=%d want %d", task.Priority, taskPriorityBenchmark)
}
}
func TestHandleAPIBenchmarkNvidiaRunSplitsMixedGPUModels(t *testing.T) {
@@ -133,6 +139,12 @@ func TestHandleAPIBenchmarkNvidiaRunSplitsMixedGPUModels(t *testing.T) {
if got := globalQueue.tasks[1].params.GPUIndices; len(got) != 1 || got[0] != 2 {
t.Fatalf("task[1] gpu indices=%v want [2]", got)
}
if got := globalQueue.tasks[0].Priority; got != taskPriorityBenchmark {
t.Fatalf("task[0] priority=%d want %d", got, taskPriorityBenchmark)
}
if got := globalQueue.tasks[1].Priority; got != taskPriorityBenchmark {
t.Fatalf("task[1] priority=%d want %d", got, taskPriorityBenchmark)
}
}
func TestHandleAPISATRunSplitsMixedNvidiaTaskSet(t *testing.T) {
@@ -175,6 +187,39 @@ func TestHandleAPISATRunSplitsMixedNvidiaTaskSet(t *testing.T) {
if got := globalQueue.tasks[1].params.GPUIndices; len(got) != 1 || got[0] != 2 {
t.Fatalf("task[1] gpu indices=%v want [2]", got)
}
if got := globalQueue.tasks[0].Priority; got != taskPriorityValidate {
t.Fatalf("task[0] priority=%d want %d", got, taskPriorityValidate)
}
if got := globalQueue.tasks[1].Priority; got != taskPriorityValidate {
t.Fatalf("task[1] priority=%d want %d", got, taskPriorityValidate)
}
}
func TestDefaultTaskPriorityOrder(t *testing.T) {
got := []int{
defaultTaskPriority("install-to-ram", taskParams{}),
defaultTaskPriority("audit", taskParams{}),
defaultTaskPriority("cpu", taskParams{}),
defaultTaskPriority("cpu", taskParams{StressMode: true}),
defaultTaskPriority("nvidia-stress", taskParams{}),
defaultTaskPriority("nvidia-benchmark", taskParams{}),
}
want := []int{
taskPriorityInstallToRAM,
taskPriorityAudit,
taskPriorityValidate,
taskPriorityValidateStress,
taskPriorityBurn,
taskPriorityBenchmark,
}
for i := range want {
if got[i] != want[i] {
t.Fatalf("priority[%d]=%d want %d", i, got[i], want[i])
}
}
if !(got[0] > got[1] && got[1] > got[2] && got[2] > got[3] && got[3] > got[4] && got[4] > got[5]) {
t.Fatalf("priority order=%v", got)
}
}
func TestPushFanRingsTracksByNameAndCarriesForwardMissingSamples(t *testing.T) {

View File

@@ -6,7 +6,7 @@ NCCL_CUDA_VERSION=13.0
NCCL_SHA256=2e6faafd2c19cffc7738d9283976a3200ea9db9895907f337f0c7e5a25563186
NCCL_TESTS_VERSION=2.13.10
NVCC_VERSION=12.8
-CUBLAS_VERSION=13.0.2.14-1
+CUBLAS_VERSION=13.1.1.3-1
CUDA_USERSPACE_VERSION=13.0.96-1
DCGM_VERSION=4.5.3-1
JOHN_JUMBO_COMMIT=67fcf9fe5a

View File

@@ -33,7 +33,6 @@ typedef void *CUstream;
#define CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR 75
#define CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR 76
#define MAX_STRESS_STREAMS 16
-#define MAX_CUBLAS_PROFILES 5
#define MIN_PROFILE_BUDGET_BYTES ((size_t)4u * 1024u * 1024u)
#define MIN_STREAM_BUDGET_BYTES ((size_t)64u * 1024u * 1024u)
@@ -689,6 +688,8 @@ static const struct profile_desc k_profiles[] = {
#endif
};
#define PROFILE_COUNT ((int)(sizeof(k_profiles) / sizeof(k_profiles[0])))
static int load_cublaslt(struct cublaslt_api *api) {
memset(api, 0, sizeof(*api));
api->lib = dlopen("libcublasLt.so.13", RTLD_NOW | RTLD_LOCAL);
@@ -1124,7 +1125,7 @@ static int run_cublaslt_stress(struct cuda_api *cuda,
const char *precision_filter,
struct stress_report *report) {
struct cublaslt_api cublas;
-struct prepared_profile prepared[MAX_STRESS_STREAMS * MAX_CUBLAS_PROFILES];
+struct prepared_profile prepared[MAX_STRESS_STREAMS * PROFILE_COUNT];
cublasLtHandle_t handle = NULL;
CUcontext ctx = NULL;
CUstream streams[MAX_STRESS_STREAMS] = {0};
@@ -1134,7 +1135,7 @@ static int run_cublaslt_stress(struct cuda_api *cuda,
int active = 0;
int mp_count = 0;
int stream_count = 1;
-int profile_count = (int)(sizeof(k_profiles) / sizeof(k_profiles[0]));
+int profile_count = PROFILE_COUNT;
int prepared_count = 0;
size_t requested_budget = 0;
size_t total_budget = 0;
@@ -1159,6 +1160,7 @@ static int run_cublaslt_stress(struct cuda_api *cuda,
return 0;
}
/* Count profiles matching the filter (for deciding what to run). */
for (size_t i = 0; i < sizeof(k_profiles) / sizeof(k_profiles[0]); i++) {
if (k_profiles[i].enabled && cc >= k_profiles[i].min_cc &&
(precision_filter == NULL || strcmp(k_profiles[i].block_label, precision_filter) == 0)) {
@@ -1172,18 +1174,31 @@ static int run_cublaslt_stress(struct cuda_api *cuda,
return 0;
}
/* Count all profiles active on this GPU regardless of filter.
* Used as the budget divisor so matrix sizes stay consistent whether
* running all precisions together or a single-precision phase. */
int planned_total = 0;
for (size_t i = 0; i < sizeof(k_profiles) / sizeof(k_profiles[0]); i++) {
if (k_profiles[i].enabled && cc >= k_profiles[i].min_cc) {
planned_total++;
}
}
if (planned_total < planned) {
planned_total = planned;
}
requested_budget = (size_t)size_mb * 1024u * 1024u;
-if (requested_budget < (size_t)planned * MIN_PROFILE_BUDGET_BYTES) {
-requested_budget = (size_t)planned * MIN_PROFILE_BUDGET_BYTES;
+if (requested_budget < (size_t)planned_total * MIN_PROFILE_BUDGET_BYTES) {
+requested_budget = (size_t)planned_total * MIN_PROFILE_BUDGET_BYTES;
}
total_budget = clamp_budget_to_free_memory(cuda, requested_budget);
-if (total_budget < (size_t)planned * MIN_PROFILE_BUDGET_BYTES) {
-total_budget = (size_t)planned * MIN_PROFILE_BUDGET_BYTES;
+if (total_budget < (size_t)planned_total * MIN_PROFILE_BUDGET_BYTES) {
+total_budget = (size_t)planned_total * MIN_PROFILE_BUDGET_BYTES;
}
if (query_multiprocessor_count(cuda, dev, &mp_count) &&
cuda->cuStreamCreate &&
cuda->cuStreamDestroy) {
-stream_count = choose_stream_count(mp_count, planned, total_budget, 1);
+stream_count = choose_stream_count(mp_count, planned_total, total_budget, 1);
}
if (stream_count > 1) {
int created = 0;
@@ -1196,7 +1211,7 @@ static int run_cublaslt_stress(struct cuda_api *cuda,
}
}
report->stream_count = stream_count;
-per_profile_budget = total_budget / ((size_t)planned * (size_t)stream_count);
+per_profile_budget = total_budget / ((size_t)planned_total * (size_t)stream_count);
if (per_profile_budget < MIN_PROFILE_BUDGET_BYTES) {
per_profile_budget = MIN_PROFILE_BUDGET_BYTES;
}
@@ -1424,7 +1439,17 @@ int main(int argc, char **argv) {
ok = run_cublaslt_stress(&cuda, dev, name, cc_major, cc_minor, seconds, size_mb, precision_filter, &report);
#endif
if (!ok) {
-if (!run_ptx_fallback(&cuda, dev, name, cc_major, cc_minor, seconds, size_mb, &report)) {
+if (precision_filter != NULL) {
+fprintf(stderr,
+"requested precision path unavailable: precision=%s device=%s cc=%d.%d\n",
+precision_filter,
+name,
+cc_major,
+cc_minor);
+return 1;
+}
+int ptx_mb = size_mb;
+if (!run_ptx_fallback(&cuda, dev, name, cc_major, cc_minor, seconds, ptx_mb, &report)) {
return 1;
}
}

View File

@@ -873,6 +873,22 @@ if [ "$BEE_GPU_VENDOR" = "nvidia" ]; then
CUBLAS_CACHE="${DIST_DIR}/cublas-${CUBLAS_VERSION}+cuda${NCCL_CUDA_VERSION}"
echo "=== bee-gpu-burn FP4 header probe ==="
fp4_type_match="$(grep -Rsnm 1 'CUDA_R_4F_E2M1' "${CUBLAS_CACHE}/include" 2>/dev/null || true)"
fp4_scale_match="$(grep -Rsnm 1 'CUBLASLT_MATMUL_MATRIX_SCALE_VEC16_UE4M3' "${CUBLAS_CACHE}/include" 2>/dev/null || true)"
if [ -n "$fp4_type_match" ]; then
echo "fp4_header_symbol=present"
echo "$fp4_type_match"
else
echo "fp4_header_symbol=missing"
fi
if [ -n "$fp4_scale_match" ]; then
echo "fp4_scale_mode_symbol=present"
echo "$fp4_scale_match"
else
echo "fp4_scale_mode_symbol=missing"
fi
GPU_STRESS_NEED_BUILD=1
if [ -f "$GPU_BURN_WORKER_BIN" ]; then
GPU_STRESS_NEED_BUILD=0
@@ -901,6 +917,12 @@ if [ "$BEE_GPU_VENDOR" = "nvidia" ]; then
else
echo "=== bee-gpu-burn worker up to date, skipping build ==="
fi
echo "=== bee-gpu-burn compiled profile probe ==="
if grep -aq 'fp4_e2m1' "$GPU_BURN_WORKER_BIN"; then
echo "fp4_profile_string=present"
else
echo "fp4_profile_string=missing"
fi
fi
echo "=== preparing staged overlay (${BUILD_VARIANT}) ==="