Compare commits

6 Commits

| Author | SHA1 | Date |
|---|---|---|
| | b1a5035edd | |
| | 8fc986c933 | |
| | 88b5e0edf2 | |
| | 82fe1f6d26 | |
| | 81e7c921f8 | |
| | 0fb8f2777f | |

.gitignore (vendored), 1 addition

@@ -2,3 +2,4 @@
 .DS_Store
 dist/
 iso/out/
+build-cache/

@@ -401,6 +401,7 @@ func (s *System) RunNvidiaBenchmark(ctx context.Context, baseDir string, opts Nv
         serverLoadedW = serverLoadedWSum / float64(serverLoadedSamples)
     }
     result.ServerPower = characterizeServerPower(serverIdleW, serverLoadedW, gpuReportedSumW, serverIdleOK && serverLoadedOK)
+    result.Cooling = summarizeBenchmarkCooling(metricRows)

     // Apply server-power penalty when IPMI reports the server delta is much
     // lower than GPU-reported sum: GPU power telemetry is over-stated, making

@@ -739,7 +740,7 @@ func collectBenchmarkSamples(ctx context.Context, durationSec int, gpuIndices []
     if ctx.Err() != nil {
         return rows, ctx.Err()
     }
-    samples, err := sampleGPUMetrics(gpuIndices)
+    samples, err := sampleBenchmarkTelemetry(gpuIndices)
     if err == nil {
         elapsed := time.Since(start).Seconds()
         for i := range samples {

@@ -774,7 +775,7 @@ func runBenchmarkCommandWithMetrics(ctx context.Context, verboseLog, name string
         case <-stopCh:
             return
         case <-ticker.C:
-            samples, err := sampleGPUMetrics(gpuIndices)
+            samples, err := sampleBenchmarkTelemetry(gpuIndices)
             if err != nil {
                 continue
             }

@@ -794,6 +795,37 @@ func runBenchmarkCommandWithMetrics(ctx context.Context, verboseLog, name string
     return out, metricRows, err
 }

+type benchmarkCoolingSample struct {
+    AvgFanRPM             float64
+    AvgFanDutyCyclePct    float64
+    FanDutyCycleAvailable bool
+}
+
+func sampleBenchmarkTelemetry(gpuIndices []int) ([]GPUMetricRow, error) {
+    samples, err := sampleGPUMetrics(gpuIndices)
+    if err != nil {
+        return nil, err
+    }
+    fanSample := sampleBenchmarkCoolingSample()
+    for i := range samples {
+        samples[i].FanAvgRPM = fanSample.AvgFanRPM
+        samples[i].FanDutyCyclePct = fanSample.AvgFanDutyCyclePct
+        samples[i].FanDutyCycleAvailable = fanSample.FanDutyCycleAvailable
+    }
+    return samples, nil
+}
+
+func sampleBenchmarkCoolingSample() benchmarkCoolingSample {
+    fans, _ := sampleFanSpeeds()
+    avgRPM, _, _ := fanRPMStats(fans)
+    dutyPct, dutyAvailable := sampleFanDutyCyclePct()
+    return benchmarkCoolingSample{
+        AvgFanRPM:             avgRPM,
+        AvgFanDutyCyclePct:    dutyPct,
+        FanDutyCycleAvailable: dutyAvailable,
+    }
+}
+
 func annotateBenchmarkMetricRows(rows []GPUMetricRow, stage string, offset float64) []GPUMetricRow {
     if len(rows) == 0 {
         return nil

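Note: the fan readings added here come from host-level lm-sensors data (sampleFanSpeeds, sampleFanDutyCyclePct), not per-GPU counters, so sampleBenchmarkTelemetry stamps the same AvgFanRPM and duty-cycle values onto every GPU row of a given sample.
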
@@ -1022,6 +1054,37 @@ func summarizeBenchmarkTelemetry(rows []GPUMetricRow) BenchmarkTelemetrySummary
     return summary
 }

+func summarizeBenchmarkCooling(rows []GPUMetricRow) *BenchmarkCoolingSummary {
+    if len(rows) == 0 {
+        return nil
+    }
+    var rpmValues []float64
+    var dutyValues []float64
+    for _, row := range rows {
+        if row.FanAvgRPM > 0 {
+            rpmValues = append(rpmValues, row.FanAvgRPM)
+        }
+        if row.FanDutyCycleAvailable {
+            dutyValues = append(dutyValues, row.FanDutyCyclePct)
+        }
+    }
+    if len(rpmValues) == 0 && len(dutyValues) == 0 {
+        return nil
+    }
+    summary := &BenchmarkCoolingSummary{
+        Available: true,
+        AvgFanRPM: benchmarkMean(rpmValues),
+    }
+    if len(dutyValues) > 0 {
+        summary.FanDutyCycleAvailable = true
+        summary.AvgFanDutyCyclePct = benchmarkMean(dutyValues)
+        summary.P95FanDutyCyclePct = benchmarkPercentile(dutyValues, 95)
+    } else {
+        summary.Notes = append(summary.Notes, "fan duty cycle unavailable on this host; RPM-only fan telemetry was collected")
+    }
+    return summary
+}
+
 func scoreBenchmarkGPUResult(gpu BenchmarkGPUResult) BenchmarkScorecard {
     score := BenchmarkScorecard{}

@@ -1601,7 +1664,10 @@ func maxInt(a, b int) int {
 // queryIPMIServerPowerW reads the current server power draw via ipmitool dcmi.
 // Returns 0 and an error if IPMI is unavailable or the output cannot be parsed.
 func queryIPMIServerPowerW() (float64, error) {
-    out, err := satExecCommand("ipmitool", "dcmi", "power", "reading").Output()
+    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+    defer cancel()
+    cmd := exec.CommandContext(ctx, "ipmitool", "dcmi", "power", "reading")
+    out, err := cmd.Output()
     if err != nil {
         return 0, fmt.Errorf("ipmitool dcmi power reading: %w", err)
     }

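Running the ipmitool query through exec.CommandContext with a 10-second deadline means a hung ipmitool process or unresponsive BMC is killed rather than stalling power sampling; the previous satExecCommand call had no visible timeout.
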
@@ -1620,6 +1686,7 @@ func sampleIPMIPowerSeries(ctx context.Context, durationSec int) (meanW float64,
     }
     deadline := time.Now().Add(time.Duration(durationSec) * time.Second)
     var samples []float64
+loop:
     for {
         if w, err := queryIPMIServerPowerW(); err == nil {
             samples = append(samples, w)

@@ -1629,7 +1696,7 @@ func sampleIPMIPowerSeries(ctx context.Context, durationSec int) (meanW float64,
         }
         select {
         case <-ctx.Done():
-            break
+            break loop
         case <-time.After(2 * time.Second):
         }
     }

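This is the classic select/break pitfall: a bare break inside a select only exits the select statement, so cancellation never actually ended the sampling loop; the new loop: label makes the break leave the surrounding for. A minimal standalone illustration (not project code):

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    func main() {
        // With a bare `break` in the ctx.Done() case, this for loop would spin
        // forever after cancellation; the labelled break exits the loop itself.
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
        defer cancel()

        iterations := 0
    loop:
        for {
            iterations++
            select {
            case <-ctx.Done():
                break loop
            case <-time.After(5 * time.Millisecond):
            }
        }
        fmt.Println("stopped after", iterations, "iterations")
    }
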
@@ -290,6 +290,31 @@ func renderBenchmarkReportWithCharts(result NvidiaBenchmarkResult) string {
         }
     }

+    // ── Cooling ───────────────────────────────────────────────────────────────
+    if cooling := result.Cooling; cooling != nil {
+        b.WriteString("## Cooling\n\n")
+        if cooling.Available {
+            b.WriteString("| Metric | Value |\n|--------|-------|\n")
+            fmt.Fprintf(&b, "| Average fan speed | %.0f RPM |\n", cooling.AvgFanRPM)
+            if cooling.FanDutyCycleAvailable {
+                fmt.Fprintf(&b, "| Average fan duty cycle | %.1f%% |\n", cooling.AvgFanDutyCyclePct)
+                fmt.Fprintf(&b, "| P95 fan duty cycle | %.1f%% |\n", cooling.P95FanDutyCyclePct)
+            } else {
+                b.WriteString("| Average fan duty cycle | N/A |\n")
+                b.WriteString("| P95 fan duty cycle | N/A |\n")
+            }
+            b.WriteString("\n")
+        } else {
+            b.WriteString("Cooling telemetry unavailable.\n\n")
+        }
+        for _, note := range cooling.Notes {
+            fmt.Fprintf(&b, "- %s\n", note)
+        }
+        if len(cooling.Notes) > 0 {
+            b.WriteString("\n")
+        }
+    }
+
     // ── Raw files ─────────────────────────────────────────────────────────────
     b.WriteString("## Raw Files\n\n")
     b.WriteString("- `result.json`\n- `report.md`\n- `summary.txt`\n- `verbose.log`\n")

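Assuming the fixture values used in the report test below (9200 RPM average fan speed, 47.5% average duty cycle, 62.0% P95), the rendered Cooling section would look like:

    ## Cooling

    | Metric | Value |
    |--------|-------|
    | Average fan speed | 9200 RPM |
    | Average fan duty cycle | 47.5% |
    | P95 fan duty cycle | 62.0% |
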
@@ -131,6 +131,13 @@ func TestRenderBenchmarkReportIncludesFindingsAndScores(t *testing.T) {
                 DegradationReasons: []string{"power_capped"},
             },
         },
+        Cooling: &BenchmarkCoolingSummary{
+            Available:             true,
+            AvgFanRPM:             9200,
+            FanDutyCycleAvailable: true,
+            AvgFanDutyCyclePct:    47.5,
+            P95FanDutyCyclePct:    62.0,
+        },
     }

     report := renderBenchmarkReport(result)

@@ -140,6 +147,9 @@ func TestRenderBenchmarkReportIncludesFindingsAndScores(t *testing.T) {
         "1176.00",
         "fp16_tensor",
         "700.00",
+        "Cooling",
+        "Average fan duty cycle",
+        "47.5%",
     } {
         if !strings.Contains(report, needle) {
             t.Fatalf("report missing %q\n%s", needle, report)

@@ -25,6 +25,17 @@ type BenchmarkCPULoad struct {
     Note string `json:"note,omitempty"`
 }

+// BenchmarkCoolingSummary captures fan telemetry averaged across the full
+// benchmark run.
+type BenchmarkCoolingSummary struct {
+    Available             bool     `json:"available"`
+    AvgFanRPM             float64  `json:"avg_fan_rpm,omitempty"`
+    FanDutyCycleAvailable bool     `json:"fan_duty_cycle_available,omitempty"`
+    AvgFanDutyCyclePct    float64  `json:"avg_fan_duty_cycle_pct,omitempty"`
+    P95FanDutyCyclePct    float64  `json:"p95_fan_duty_cycle_pct,omitempty"`
+    Notes                 []string `json:"notes,omitempty"`
+}
+
 const (
     NvidiaBenchmarkProfileStandard  = "standard"
     NvidiaBenchmarkProfileStability = "stability"

@@ -61,6 +72,7 @@ type NvidiaBenchmarkResult struct {
     Normalization BenchmarkNormalization       `json:"normalization"`
     HostConfig    *BenchmarkHostConfig         `json:"host_config,omitempty"`
     CPULoad       *BenchmarkCPULoad            `json:"cpu_load,omitempty"`
+    Cooling       *BenchmarkCoolingSummary     `json:"cooling,omitempty"`
     GPUs          []BenchmarkGPUResult         `json:"gpus"`
     Interconnect  *BenchmarkInterconnectResult `json:"interconnect,omitempty"`
     ServerPower   *BenchmarkServerPower        `json:"server_power,omitempty"`

@@ -13,15 +13,18 @@ import (

 // GPUMetricRow is one telemetry sample from nvidia-smi during a stress test.
 type GPUMetricRow struct {
     Stage       string  `json:"stage,omitempty"`
     ElapsedSec  float64 `json:"elapsed_sec"`
     GPUIndex    int     `json:"index"`
     TempC       float64 `json:"temp_c"`
     UsagePct    float64 `json:"usage_pct"`
     MemUsagePct float64 `json:"mem_usage_pct"`
     PowerW      float64 `json:"power_w"`
     ClockMHz    float64 `json:"clock_mhz"`
     MemClockMHz float64 `json:"mem_clock_mhz"`
+    FanAvgRPM             float64 `json:"fan_avg_rpm,omitempty"`
+    FanDutyCyclePct       float64 `json:"fan_duty_cycle_pct,omitempty"`
+    FanDutyCycleAvailable bool    `json:"fan_duty_cycle_available,omitempty"`
 }

 // sampleGPUMetrics runs nvidia-smi once and returns current metrics for each GPU.

@@ -142,10 +145,14 @@ func sampleAMDGPUMetrics() ([]GPUMetricRow, error) {
 // WriteGPUMetricsCSV writes collected rows as a CSV file.
 func WriteGPUMetricsCSV(path string, rows []GPUMetricRow) error {
     var b bytes.Buffer
-    b.WriteString("stage,elapsed_sec,gpu_index,temperature_c,usage_pct,mem_usage_pct,power_w,clock_mhz,mem_clock_mhz\n")
+    b.WriteString("stage,elapsed_sec,gpu_index,temperature_c,usage_pct,mem_usage_pct,power_w,clock_mhz,mem_clock_mhz,fan_avg_rpm,fan_duty_cycle_pct,fan_duty_cycle_available\n")
     for _, r := range rows {
-        fmt.Fprintf(&b, "%s,%.1f,%d,%.1f,%.1f,%.1f,%.1f,%.0f,%.0f\n",
-            strconv.Quote(strings.TrimSpace(r.Stage)), r.ElapsedSec, r.GPUIndex, r.TempC, r.UsagePct, r.MemUsagePct, r.PowerW, r.ClockMHz, r.MemClockMHz)
+        dutyAvail := 0
+        if r.FanDutyCycleAvailable {
+            dutyAvail = 1
+        }
+        fmt.Fprintf(&b, "%s,%.1f,%d,%.1f,%.1f,%.1f,%.1f,%.0f,%.0f,%.0f,%.1f,%d\n",
+            strconv.Quote(strings.TrimSpace(r.Stage)), r.ElapsedSec, r.GPUIndex, r.TempC, r.UsagePct, r.MemUsagePct, r.PowerW, r.ClockMHz, r.MemClockMHz, r.FanAvgRPM, r.FanDutyCyclePct, dutyAvail)
     }
     return os.WriteFile(path, b.Bytes(), 0644)
 }

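For reference, a standalone sketch of what one data row in the widened CSV looks like; the sample values below are made up, only the format string mirrors the one above:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        // Illustrative values only; the verb list matches WriteGPUMetricsCSV.
        stage, elapsed, gpu := "load", 12.5, 0
        temp, usage, memUsage, power := 63.0, 98.0, 54.0, 312.4
        clk, memClk := 1410.0, 1215.0
        fanRPM, fanDuty, dutyAvail := 9200.0, 47.5, 1
        fmt.Printf("%s,%.1f,%d,%.1f,%.1f,%.1f,%.1f,%.0f,%.0f,%.0f,%.1f,%d\n",
            strconv.Quote(strings.TrimSpace(stage)), elapsed, gpu, temp, usage,
            memUsage, power, clk, memClk, fanRPM, fanDuty, dutyAvail)
        // Output: "load",12.5,0,63.0,98.0,54.0,312.4,1410,1215,9200,47.5,1
    }
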
@@ -426,6 +426,101 @@ func sampleFanSpeedsViaSensorsJSON() ([]FanReading, error) {
     return fans, nil
 }

+// sampleFanDutyCyclePct reads fan PWM/duty-cycle controls from lm-sensors.
+// Returns the average duty cycle across all exposed PWM controls.
+func sampleFanDutyCyclePct() (float64, bool) {
+    out, err := exec.Command("sensors", "-j").Output()
+    if err != nil || len(out) == 0 {
+        return 0, false
+    }
+    return parseFanDutyCyclePctSensorsJSON(out)
+}
+
+func parseFanDutyCyclePctSensorsJSON(raw []byte) (float64, bool) {
+    var doc map[string]map[string]any
+    if err := json.Unmarshal(raw, &doc); err != nil {
+        return 0, false
+    }
+    var samples []float64
+    for _, features := range doc {
+        for name, feature := range features {
+            if strings.EqualFold(name, "Adapter") {
+                continue
+            }
+            featureMap, ok := feature.(map[string]any)
+            if !ok {
+                continue
+            }
+            if duty, ok := firstFanDutyValue(name, featureMap); ok {
+                samples = append(samples, duty)
+            }
+        }
+    }
+    if len(samples) == 0 {
+        return 0, false
+    }
+    return benchmarkMean(samples), true
+}
+
+func firstFanDutyValue(featureName string, feature map[string]any) (float64, bool) {
+    featureName = strings.ToLower(strings.TrimSpace(featureName))
+    if strings.Contains(featureName, "enable") || strings.Contains(featureName, "mode") || strings.Contains(featureName, "alarm") {
+        return 0, false
+    }
+    if strings.Contains(featureName, "pwm") {
+        for _, key := range []string{"input", "value", "current"} {
+            if value, ok := feature[key]; ok {
+                if duty, parsed := parseFanDutyValue(value); parsed {
+                    return duty, true
+                }
+            }
+        }
+    }
+    keys := make([]string, 0, len(feature))
+    for key := range feature {
+        keys = append(keys, key)
+    }
+    sort.Strings(keys)
+    for _, key := range keys {
+        lower := strings.ToLower(key)
+        if !strings.Contains(lower, "pwm") {
+            continue
+        }
+        if strings.Contains(lower, "enable") || strings.Contains(lower, "mode") || strings.Contains(lower, "alarm") {
+            continue
+        }
+        if duty, parsed := parseFanDutyValue(feature[key]); parsed {
+            return duty, true
+        }
+    }
+    return 0, false
+}
+
+func parseFanDutyValue(value any) (float64, bool) {
+    switch v := value.(type) {
+    case float64:
+        return normalizePWMAsDutyPct(v)
+    case string:
+        if f, err := strconv.ParseFloat(strings.TrimSpace(v), 64); err == nil {
+            return normalizePWMAsDutyPct(f)
+        }
+    }
+    return 0, false
+}
+
+func normalizePWMAsDutyPct(raw float64) (float64, bool) {
+    if raw < 0 {
+        return 0, false
+    }
+    if raw <= 100 {
+        return raw, true
+    }
+    if raw <= 255 {
+        return raw / 255.0 * 100.0, true
+    }
+    return 0, false
+}
+
 func firstFanInputValue(feature map[string]any) (float64, bool) {
     keys := make([]string, 0, len(feature))
     for key := range feature {

@@ -29,6 +29,27 @@ func TestFirstFanInputValue(t *testing.T) {
     }
 }

+func TestParseFanDutyCyclePctSensorsJSON(t *testing.T) {
+    raw := []byte(`{
+        "chip0": {
+            "fan1": {"input": 9000},
+            "pwm1": {"input": 128},
+            "pwm1_enable": {"input": 1}
+        },
+        "chip1": {
+            "pwm2": {"input": 64}
+        }
+    }`)
+
+    got, ok := parseFanDutyCyclePctSensorsJSON(raw)
+    if !ok {
+        t.Fatalf("expected duty cycle telemetry to be parsed")
+    }
+    if got < 57 || got > 58 {
+        t.Fatalf("got=%v want ~57.1", got)
+    }
+}
+
 func TestParseDCMIPowerReading(t *testing.T) {
     raw := `
 Instantaneous power reading: 512 Watts

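Why the test expects roughly 57.1: pwm1's raw value 128 is above 100, so normalizePWMAsDutyPct scales it as an 8-bit PWM value to 128/255 * 100, about 50.2%, while pwm2's 64 is at or below 100 and is taken as an already-scaled percentage, 64%; the mean of the two is about 57.1%. fan1 and pwm1_enable contribute nothing because firstFanDutyValue only looks at pwm features and skips enable/mode/alarm entries.
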
@@ -36,6 +36,16 @@ var apiListNvidiaGPUStatuses = func(a *app.App) ([]platform.NvidiaGPUStatus, err
     return a.ListNvidiaGPUStatuses()
 }

+const (
+    taskPriorityBenchmark      = 10
+    taskPriorityBurn           = 20
+    taskPriorityValidateStress = 30
+    taskPriorityValidate       = 40
+    taskPriorityAudit          = 50
+    taskPriorityInstallToRAM   = 60
+    taskPriorityInstall        = 70
+)
+
 // ── Job ID counter ────────────────────────────────────────────────────────────

 var jobCounter atomic.Uint64

@@ -109,6 +119,30 @@ func shouldSplitHomogeneousNvidiaTarget(target string) bool {
     }
 }

+func defaultTaskPriority(target string, params taskParams) int {
+    switch strings.TrimSpace(target) {
+    case "install":
+        return taskPriorityInstall
+    case "install-to-ram":
+        return taskPriorityInstallToRAM
+    case "audit":
+        return taskPriorityAudit
+    case "nvidia-benchmark":
+        return taskPriorityBenchmark
+    case "nvidia-stress", "amd-stress", "memory-stress", "sat-stress", "platform-stress", "nvidia-compute":
+        return taskPriorityBurn
+    case "nvidia", "nvidia-targeted-stress", "nvidia-targeted-power", "nvidia-pulse",
+        "nvidia-interconnect", "nvidia-bandwidth", "memory", "storage", "cpu",
+        "amd", "amd-mem", "amd-bandwidth":
+        if params.StressMode {
+            return taskPriorityValidateStress
+        }
+        return taskPriorityValidate
+    default:
+        return 0
+    }
+}
+
 func expandHomogeneousNvidiaSelections(gpus []platform.NvidiaGPU, include, exclude []int) ([]nvidiaTaskSelection, error) {
     if len(gpus) == 0 {
         return nil, fmt.Errorf("no NVIDIA GPUs detected")

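With this helper in place, the handler call sites below stop hard-coding priority literals (0, 10, 15, 20) and derive the value from the task target instead, so the relative ordering lives in one table of constants.
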
@@ -458,6 +492,7 @@ func (h *handler) handleAPIAuditRun(w http.ResponseWriter, _ *http.Request) {
     ID:        newJobID("audit"),
     Name:      "Audit",
     Target:    "audit",
+    Priority:  defaultTaskPriority("audit", taskParams{}),
     Status:    TaskPending,
     CreatedAt: time.Now(),
 }

@@ -526,7 +561,7 @@ func (h *handler) handleAPISATRun(target string) http.HandlerFunc {
         DisplayName:        body.DisplayName,
         PlatformComponents: body.PlatformComponents,
     }
-    tasks, err := buildNvidiaTaskSet(target, 0, time.Now(), params, name, h.opts.App, "sat-"+target)
+    tasks, err := buildNvidiaTaskSet(target, defaultTaskPriority(target, params), time.Now(), params, name, h.opts.App, "sat-"+target)
     if err != nil {
         writeError(w, http.StatusBadRequest, err.Error())
         return

@@ -613,7 +648,7 @@ func (h *handler) handleAPIBenchmarkNvidiaRun(w http.ResponseWriter, r *http.Req
     ID:        newJobID("benchmark-nvidia"),
     Name:      stepName,
     Target:    "nvidia-benchmark",
-    Priority:  15,
+    Priority:  defaultTaskPriority("nvidia-benchmark", taskParams{}),
     Status:    TaskPending,
     CreatedAt: now,
     params: taskParams{

@@ -645,7 +680,7 @@ func (h *handler) handleAPIBenchmarkNvidiaRun(w http.ResponseWriter, r *http.Req
         name = fmt.Sprintf("%s · sequential", name)
     }

-    tasks, err := buildNvidiaTaskSet("nvidia-benchmark", 15, time.Now(), taskParams{
+    params := taskParams{
         GPUIndices:        body.GPUIndices,
         ExcludeGPUIndices: body.ExcludeGPUIndices,
         SizeMB:            body.SizeMB,

@@ -653,7 +688,8 @@ func (h *handler) handleAPIBenchmarkNvidiaRun(w http.ResponseWriter, r *http.Req
         RunNCCL:           runNCCL,
         ParallelGPUs:      parallelGPUs,
         DisplayName:       body.DisplayName,
-    }, name, h.opts.App, "benchmark-nvidia")
+    }
+    tasks, err := buildNvidiaTaskSet("nvidia-benchmark", defaultTaskPriority("nvidia-benchmark", params), time.Now(), params, name, h.opts.App, "benchmark-nvidia")
     if err != nil {
         writeError(w, http.StatusBadRequest, err.Error())
         return

@@ -1054,7 +1090,7 @@ func (h *handler) handleAPIInstallToRAM(w http.ResponseWriter, r *http.Request)
     ID:        newJobID("install-to-ram"),
     Name:      "Install to RAM",
     Target:    "install-to-ram",
-    Priority:  10,
+    Priority:  defaultTaskPriority("install-to-ram", taskParams{}),
     Status:    TaskPending,
     CreatedAt: time.Now(),
 }

@@ -1169,7 +1205,7 @@ func (h *handler) handleAPIInstallRun(w http.ResponseWriter, r *http.Request) {
     ID:        newJobID("install"),
     Name:      "Install to Disk",
     Target:    "install",
-    Priority:  20,
+    Priority:  defaultTaskPriority("install", taskParams{}),
     Status:    TaskPending,
     CreatedAt: time.Now(),
     params: taskParams{

@@ -1461,4 +1497,3 @@ func (h *handler) rollbackPendingNetworkChange() error {
     }
     return nil
 }
-

@@ -39,6 +39,9 @@ func TestHandleAPISATRunDecodesBodyWithoutContentLength(t *testing.T) {
     if got := globalQueue.tasks[0].params.BurnProfile; got != "smoke" {
         t.Fatalf("burn profile=%q want smoke", got)
     }
+    if got := globalQueue.tasks[0].Priority; got != taskPriorityValidate {
+        t.Fatalf("priority=%d want %d", got, taskPriorityValidate)
+    }
 }

 func TestHandleAPIBenchmarkNvidiaRunQueuesSelectedGPUs(t *testing.T) {

@@ -84,6 +87,9 @@ func TestHandleAPIBenchmarkNvidiaRunQueuesSelectedGPUs(t *testing.T) {
     if task.params.RunNCCL {
         t.Fatal("RunNCCL should reflect explicit false from request")
     }
+    if task.Priority != taskPriorityBenchmark {
+        t.Fatalf("priority=%d want %d", task.Priority, taskPriorityBenchmark)
+    }
 }

 func TestHandleAPIBenchmarkNvidiaRunSplitsMixedGPUModels(t *testing.T) {

@@ -133,6 +139,12 @@ func TestHandleAPIBenchmarkNvidiaRunSplitsMixedGPUModels(t *testing.T) {
     if got := globalQueue.tasks[1].params.GPUIndices; len(got) != 1 || got[0] != 2 {
         t.Fatalf("task[1] gpu indices=%v want [2]", got)
     }
+    if got := globalQueue.tasks[0].Priority; got != taskPriorityBenchmark {
+        t.Fatalf("task[0] priority=%d want %d", got, taskPriorityBenchmark)
+    }
+    if got := globalQueue.tasks[1].Priority; got != taskPriorityBenchmark {
+        t.Fatalf("task[1] priority=%d want %d", got, taskPriorityBenchmark)
+    }
 }

 func TestHandleAPISATRunSplitsMixedNvidiaTaskSet(t *testing.T) {

@@ -175,6 +187,39 @@ func TestHandleAPISATRunSplitsMixedNvidiaTaskSet(t *testing.T) {
     if got := globalQueue.tasks[1].params.GPUIndices; len(got) != 1 || got[0] != 2 {
         t.Fatalf("task[1] gpu indices=%v want [2]", got)
     }
+    if got := globalQueue.tasks[0].Priority; got != taskPriorityValidate {
+        t.Fatalf("task[0] priority=%d want %d", got, taskPriorityValidate)
+    }
+    if got := globalQueue.tasks[1].Priority; got != taskPriorityValidate {
+        t.Fatalf("task[1] priority=%d want %d", got, taskPriorityValidate)
+    }
+}
+
+func TestDefaultTaskPriorityOrder(t *testing.T) {
+    got := []int{
+        defaultTaskPriority("install-to-ram", taskParams{}),
+        defaultTaskPriority("audit", taskParams{}),
+        defaultTaskPriority("cpu", taskParams{}),
+        defaultTaskPriority("cpu", taskParams{StressMode: true}),
+        defaultTaskPriority("nvidia-stress", taskParams{}),
+        defaultTaskPriority("nvidia-benchmark", taskParams{}),
+    }
+    want := []int{
+        taskPriorityInstallToRAM,
+        taskPriorityAudit,
+        taskPriorityValidate,
+        taskPriorityValidateStress,
+        taskPriorityBurn,
+        taskPriorityBenchmark,
+    }
+    for i := range want {
+        if got[i] != want[i] {
+            t.Fatalf("priority[%d]=%d want %d", i, got[i], want[i])
+        }
+    }
+    if !(got[0] > got[1] && got[1] > got[2] && got[2] > got[3] && got[3] > got[4] && got[4] > got[5]) {
+        t.Fatalf("priority order=%v", got)
+    }
 }

 func TestPushFanRingsTracksByNameAndCarriesForwardMissingSamples(t *testing.T) {

@@ -6,7 +6,7 @@ NCCL_CUDA_VERSION=13.0
 NCCL_SHA256=2e6faafd2c19cffc7738d9283976a3200ea9db9895907f337f0c7e5a25563186
 NCCL_TESTS_VERSION=2.13.10
 NVCC_VERSION=12.8
-CUBLAS_VERSION=13.0.2.14-1
+CUBLAS_VERSION=13.1.1.3-1
 CUDA_USERSPACE_VERSION=13.0.96-1
 DCGM_VERSION=4.5.3-1
 JOHN_JUMBO_COMMIT=67fcf9fe5a

@@ -33,7 +33,6 @@ typedef void *CUstream;
 #define CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR 75
 #define CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR 76
 #define MAX_STRESS_STREAMS 16
-#define MAX_CUBLAS_PROFILES 5
 #define MIN_PROFILE_BUDGET_BYTES ((size_t)4u * 1024u * 1024u)
 #define MIN_STREAM_BUDGET_BYTES ((size_t)64u * 1024u * 1024u)

@@ -689,6 +688,8 @@ static const struct profile_desc k_profiles[] = {
 #endif
 };

+#define PROFILE_COUNT ((int)(sizeof(k_profiles) / sizeof(k_profiles[0])))
+
 static int load_cublaslt(struct cublaslt_api *api) {
     memset(api, 0, sizeof(*api));
     api->lib = dlopen("libcublasLt.so.13", RTLD_NOW | RTLD_LOCAL);

@@ -1124,7 +1125,7 @@ static int run_cublaslt_stress(struct cuda_api *cuda,
                                const char *precision_filter,
                                struct stress_report *report) {
     struct cublaslt_api cublas;
-    struct prepared_profile prepared[MAX_STRESS_STREAMS * MAX_CUBLAS_PROFILES];
+    struct prepared_profile prepared[MAX_STRESS_STREAMS * PROFILE_COUNT];
     cublasLtHandle_t handle = NULL;
     CUcontext ctx = NULL;
     CUstream streams[MAX_STRESS_STREAMS] = {0};

@@ -1134,7 +1135,7 @@ static int run_cublaslt_stress(struct cuda_api *cuda,
     int active = 0;
     int mp_count = 0;
     int stream_count = 1;
-    int profile_count = (int)(sizeof(k_profiles) / sizeof(k_profiles[0]));
+    int profile_count = PROFILE_COUNT;
     int prepared_count = 0;
     size_t requested_budget = 0;
     size_t total_budget = 0;

@@ -1159,6 +1160,7 @@ static int run_cublaslt_stress(struct cuda_api *cuda,
         return 0;
     }

+    /* Count profiles matching the filter (for deciding what to run). */
     for (size_t i = 0; i < sizeof(k_profiles) / sizeof(k_profiles[0]); i++) {
         if (k_profiles[i].enabled && cc >= k_profiles[i].min_cc &&
             (precision_filter == NULL || strcmp(k_profiles[i].block_label, precision_filter) == 0)) {

@@ -1172,18 +1174,31 @@ static int run_cublaslt_stress(struct cuda_api *cuda,
         return 0;
     }

+    /* Count all profiles active on this GPU regardless of filter.
+     * Used as the budget divisor so matrix sizes stay consistent whether
+     * running all precisions together or a single-precision phase. */
+    int planned_total = 0;
+    for (size_t i = 0; i < sizeof(k_profiles) / sizeof(k_profiles[0]); i++) {
+        if (k_profiles[i].enabled && cc >= k_profiles[i].min_cc) {
+            planned_total++;
+        }
+    }
+    if (planned_total < planned) {
+        planned_total = planned;
+    }
+
     requested_budget = (size_t)size_mb * 1024u * 1024u;
-    if (requested_budget < (size_t)planned * MIN_PROFILE_BUDGET_BYTES) {
-        requested_budget = (size_t)planned * MIN_PROFILE_BUDGET_BYTES;
+    if (requested_budget < (size_t)planned_total * MIN_PROFILE_BUDGET_BYTES) {
+        requested_budget = (size_t)planned_total * MIN_PROFILE_BUDGET_BYTES;
     }
     total_budget = clamp_budget_to_free_memory(cuda, requested_budget);
-    if (total_budget < (size_t)planned * MIN_PROFILE_BUDGET_BYTES) {
-        total_budget = (size_t)planned * MIN_PROFILE_BUDGET_BYTES;
+    if (total_budget < (size_t)planned_total * MIN_PROFILE_BUDGET_BYTES) {
+        total_budget = (size_t)planned_total * MIN_PROFILE_BUDGET_BYTES;
     }
     if (query_multiprocessor_count(cuda, dev, &mp_count) &&
         cuda->cuStreamCreate &&
         cuda->cuStreamDestroy) {
-        stream_count = choose_stream_count(mp_count, planned, total_budget, 1);
+        stream_count = choose_stream_count(mp_count, planned_total, total_budget, 1);
     }
     if (stream_count > 1) {
         int created = 0;

@@ -1196,7 +1211,7 @@ static int run_cublaslt_stress(struct cuda_api *cuda,
         }
     }
     report->stream_count = stream_count;
-    per_profile_budget = total_budget / ((size_t)planned * (size_t)stream_count);
+    per_profile_budget = total_budget / ((size_t)planned_total * (size_t)stream_count);
     if (per_profile_budget < MIN_PROFILE_BUDGET_BYTES) {
         per_profile_budget = MIN_PROFILE_BUDGET_BYTES;
     }

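Worked example of the divisor change: on a GPU where, say, 5 profiles are enabled but the caller runs a single-precision phase, planned is 1 while planned_total is 5, so per_profile_budget becomes total_budget / (5 * stream_count) instead of total_budget / (1 * stream_count). The per-profile matrix footprint therefore matches the all-precision run, which is what the new comment above describes.
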
@@ -1424,7 +1439,17 @@ int main(int argc, char **argv) {
     ok = run_cublaslt_stress(&cuda, dev, name, cc_major, cc_minor, seconds, size_mb, precision_filter, &report);
 #endif
     if (!ok) {
-        if (!run_ptx_fallback(&cuda, dev, name, cc_major, cc_minor, seconds, size_mb, &report)) {
+        if (precision_filter != NULL) {
+            fprintf(stderr,
+                    "requested precision path unavailable: precision=%s device=%s cc=%d.%d\n",
+                    precision_filter,
+                    name,
+                    cc_major,
+                    cc_minor);
+            return 1;
+        }
+        int ptx_mb = size_mb;
+        if (!run_ptx_fallback(&cuda, dev, name, cc_major, cc_minor, seconds, ptx_mb, &report)) {
             return 1;
         }
     }

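In other words, when a specific precision is requested via precision_filter and the cuBLASLt path cannot provide it, the worker now fails with a clear diagnostic and exit code 1 instead of silently dropping to the generic PTX fallback, which would not exercise the requested precision.
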
@@ -873,6 +873,22 @@ if [ "$BEE_GPU_VENDOR" = "nvidia" ]; then

   CUBLAS_CACHE="${DIST_DIR}/cublas-${CUBLAS_VERSION}+cuda${NCCL_CUDA_VERSION}"

+  echo "=== bee-gpu-burn FP4 header probe ==="
+  fp4_type_match="$(grep -Rsnm 1 'CUDA_R_4F_E2M1' "${CUBLAS_CACHE}/include" 2>/dev/null || true)"
+  fp4_scale_match="$(grep -Rsnm 1 'CUBLASLT_MATMUL_MATRIX_SCALE_VEC16_UE4M3' "${CUBLAS_CACHE}/include" 2>/dev/null || true)"
+  if [ -n "$fp4_type_match" ]; then
+    echo "fp4_header_symbol=present"
+    echo "$fp4_type_match"
+  else
+    echo "fp4_header_symbol=missing"
+  fi
+  if [ -n "$fp4_scale_match" ]; then
+    echo "fp4_scale_mode_symbol=present"
+    echo "$fp4_scale_match"
+  else
+    echo "fp4_scale_mode_symbol=missing"
+  fi
+
   GPU_STRESS_NEED_BUILD=1
   if [ -f "$GPU_BURN_WORKER_BIN" ]; then
     GPU_STRESS_NEED_BUILD=0

@@ -901,6 +917,12 @@ if [ "$BEE_GPU_VENDOR" = "nvidia" ]; then
   else
     echo "=== bee-gpu-burn worker up to date, skipping build ==="
   fi
+  echo "=== bee-gpu-burn compiled profile probe ==="
+  if grep -aq 'fp4_e2m1' "$GPU_BURN_WORKER_BIN"; then
+    echo "fp4_profile_string=present"
+  else
+    echo "fp4_profile_string=missing"
+  fi
 fi

 echo "=== preparing staged overlay (${BUILD_VARIANT}) ==="
