Fix two stale failing tests

- TestHandleAPIBenchmarkPowerFitRampQueuesBenchmarkPowerFitTasks: ramp-up
  mode intentionally creates a single task (the runner handles 1→N internally
  to avoid redundant repetition of earlier ramp steps). Updated the test to
  expect 1 task and verify RampTotal=3 instead of asserting 3 separate tasks.

- TestBenchmarkPageRendersSavedResultsTable: benchmark page used "Performance
  Results" as heading while the test looked for "Perf Results". Aligned the
  page heading with the shorter label used everywhere else (task reports, etc.).

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-04-18 15:07:27 +03:00
parent 51b721aeb3
commit 7d64e5d215
2 changed files with 14 additions and 10 deletions

View File

@@ -178,16 +178,20 @@ func TestHandleAPIBenchmarkPowerFitRampQueuesBenchmarkPowerFitTasks(t *testing.T
 	}
 	globalQueue.mu.Lock()
 	defer globalQueue.mu.Unlock()
-	if len(globalQueue.tasks) != 3 {
-		t.Fatalf("tasks=%d want 3", len(globalQueue.tasks))
+	// Ramp-up mode creates a single task that handles the 1→N GPU ramp internally
+	// (spawning N separate tasks would redundantly repeat all earlier ramp steps).
+	if len(globalQueue.tasks) != 1 {
+		t.Fatalf("tasks=%d want 1 (ramp-up uses single task)", len(globalQueue.tasks))
 	}
-	for i, task := range globalQueue.tasks {
-		if task.Target != "nvidia-bench-power" {
-			t.Fatalf("task[%d] target=%q", i, task.Target)
-		}
-		if task.Priority != taskPriorityBenchmark {
-			t.Fatalf("task[%d] priority=%d want %d", i, task.Priority, taskPriorityBenchmark)
-		}
+	task := globalQueue.tasks[0]
+	if task.Target != "nvidia-bench-power" {
+		t.Fatalf("task target=%q want nvidia-bench-power", task.Target)
+	}
+	if task.Priority != taskPriorityBenchmark {
+		t.Fatalf("task priority=%d want %d", task.Priority, taskPriorityBenchmark)
+	}
+	if task.params.RampTotal != 3 {
+		t.Fatalf("task RampTotal=%d want 3", task.params.RampTotal)
 	}
 }

View File

@@ -2385,7 +2385,7 @@ function benchmarkRefreshResults() {
 func renderBenchmarkResultsCard(exportDir string) string {
 	maxIdx, runs := loadBenchmarkHistory(exportDir)
 	perf := renderBenchmarkResultsCardFromRuns(
-		"Performance Results",
+		"Perf Results",
 		"Composite score by saved benchmark run and GPU.",
 		"No saved performance benchmark runs yet.",
 		maxIdx,