Fix two stale failing tests
- TestHandleAPIBenchmarkPowerFitRampQueuesBenchmarkPowerFitTasks: ramp-up mode intentionally creates a single task (the runner handles the 1→N ramp internally to avoid redundant repetition of earlier ramp steps). Updated the test to expect 1 task and to verify RampTotal=3 instead of asserting 3 separate tasks.

- TestBenchmarkPageRendersSavedResultsTable: the benchmark page used "Performance Results" as its heading while the test looked for "Perf Results". Aligned the page heading with the shorter label used everywhere else (task reports, etc.).

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -178,16 +178,20 @@ func TestHandleAPIBenchmarkPowerFitRampQueuesBenchmarkPowerFitTasks(t *testing.T
|
|||||||
}
|
}
|
||||||
globalQueue.mu.Lock()
|
globalQueue.mu.Lock()
|
||||||
defer globalQueue.mu.Unlock()
|
defer globalQueue.mu.Unlock()
|
||||||
if len(globalQueue.tasks) != 3 {
|
// Ramp-up mode creates a single task that handles the 1→N GPU ramp internally
|
||||||
t.Fatalf("tasks=%d want 3", len(globalQueue.tasks))
|
// (spawning N separate tasks would redundantly repeat all earlier ramp steps).
|
||||||
|
if len(globalQueue.tasks) != 1 {
|
||||||
|
t.Fatalf("tasks=%d want 1 (ramp-up uses single task)", len(globalQueue.tasks))
|
||||||
}
|
}
|
||||||
for i, task := range globalQueue.tasks {
|
task := globalQueue.tasks[0]
|
||||||
if task.Target != "nvidia-bench-power" {
|
if task.Target != "nvidia-bench-power" {
|
||||||
t.Fatalf("task[%d] target=%q", i, task.Target)
|
t.Fatalf("task target=%q want nvidia-bench-power", task.Target)
|
||||||
}
|
}
|
||||||
if task.Priority != taskPriorityBenchmark {
|
if task.Priority != taskPriorityBenchmark {
|
||||||
t.Fatalf("task[%d] priority=%d want %d", i, task.Priority, taskPriorityBenchmark)
|
t.Fatalf("task priority=%d want %d", task.Priority, taskPriorityBenchmark)
|
||||||
}
|
}
|
||||||
|
if task.params.RampTotal != 3 {
|
||||||
|
t.Fatalf("task RampTotal=%d want 3", task.params.RampTotal)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -2385,7 +2385,7 @@ function benchmarkRefreshResults() {
|
|||||||
func renderBenchmarkResultsCard(exportDir string) string {
|
func renderBenchmarkResultsCard(exportDir string) string {
|
||||||
maxIdx, runs := loadBenchmarkHistory(exportDir)
|
maxIdx, runs := loadBenchmarkHistory(exportDir)
|
||||||
perf := renderBenchmarkResultsCardFromRuns(
|
perf := renderBenchmarkResultsCardFromRuns(
|
||||||
"Performance Results",
|
"Perf Results",
|
||||||
"Composite score by saved benchmark run and GPU.",
|
"Composite score by saved benchmark run and GPU.",
|
||||||
"No saved performance benchmark runs yet.",
|
"No saved performance benchmark runs yet.",
|
||||||
maxIdx,
|
maxIdx,
|
||||||
|
|||||||
Reference in New Issue
Block a user