Compare commits

..

11 Commits
v5.9 ... v6.0

Author SHA1 Message Date
Mikhail Chusavitin
d2eadedff2 Default NVIDIA ISO to open modules and add nvidia-legacy 2026-04-06 16:27:13 +03:00
Mikhail Chusavitin
a98c4d7461 Include terminal charts in benchmark report 2026-04-06 12:34:57 +03:00
Mikhail Chusavitin
2354ae367d Normalize task IDs and artifact folder prefixes 2026-04-06 12:26:47 +03:00
Mikhail Chusavitin
0d0e1f55a7 Avoid misleading SAT summaries after task cancellation 2026-04-06 12:24:19 +03:00
Mikhail Chusavitin
35f4c53887 Stabilize NVIDIA GPU device mapping across loaders 2026-04-06 12:22:04 +03:00
Mikhail Chusavitin
981315e6fd Split NVIDIA tasks by homogeneous GPU groups 2026-04-06 11:58:13 +03:00
Mikhail Chusavitin
fc5c100a29 Fix NVIDIA persistence mode and add benchmark results table 2026-04-06 10:47:07 +03:00
6e94216f3b Hide task charts while pending 2026-04-05 22:34:34 +03:00
53455063b9 Stabilize live task detail page 2026-04-05 22:14:52 +03:00
4602f97836 Enforce sequential task orchestration 2026-04-05 22:10:42 +03:00
c65d3ae3b1 Add nomodeset to default GRUB entry — fix black screen on headless servers
Servers with NVIDIA compute GPUs (H100 etc.) have no display output,
so KMS blanks the console. nomodeset disables kernel modesetting and
lets the NVIDIA proprietary driver handle display via Xorg.

KMS variant moved to advanced submenu for cases where it is needed.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-05 21:40:47 +03:00
23 changed files with 1460 additions and 272 deletions

View File

@@ -243,7 +243,7 @@ func (s *System) RunNvidiaBenchmark(ctx context.Context, baseDir string, opts Nv
return "", fmt.Errorf("write result.json: %w", err) return "", fmt.Errorf("write result.json: %w", err)
} }
report := renderBenchmarkReport(result) report := renderBenchmarkReportWithCharts(result, loadBenchmarkReportCharts(runDir, selected))
if err := os.WriteFile(filepath.Join(runDir, "report.txt"), []byte(report), 0644); err != nil { if err := os.WriteFile(filepath.Join(runDir, "report.txt"), []byte(report), 0644); err != nil {
return "", fmt.Errorf("write report.txt: %w", err) return "", fmt.Errorf("write report.txt: %w", err)
} }
@@ -679,7 +679,10 @@ func runBenchmarkInterconnect(ctx context.Context, verboseLog, runDir string, gp
"-g", strconv.Itoa(len(gpuIndices)), "-g", strconv.Itoa(len(gpuIndices)),
"--iters", strconv.Itoa(maxInt(20, spec.NCCLSec/10)), "--iters", strconv.Itoa(maxInt(20, spec.NCCLSec/10)),
} }
env := []string{"CUDA_VISIBLE_DEVICES=" + joinIndexList(gpuIndices)} env := []string{
"CUDA_DEVICE_ORDER=PCI_BUS_ID",
"CUDA_VISIBLE_DEVICES=" + joinIndexList(gpuIndices),
}
logFunc(fmt.Sprintf("NCCL interconnect: gpus=%s", joinIndexList(gpuIndices))) logFunc(fmt.Sprintf("NCCL interconnect: gpus=%s", joinIndexList(gpuIndices)))
out, err := runSATCommandCtx(ctx, verboseLog, "nccl-all-reduce.log", cmd, env, logFunc) out, err := runSATCommandCtx(ctx, verboseLog, "nccl-all-reduce.log", cmd, env, logFunc)
_ = os.WriteFile(filepath.Join(runDir, "nccl-all-reduce.log"), out, 0644) _ = os.WriteFile(filepath.Join(runDir, "nccl-all-reduce.log"), out, 0644)

View File

@@ -2,11 +2,25 @@ package platform
import ( import (
"fmt" "fmt"
"os"
"path/filepath"
"regexp"
"strings" "strings"
"time" "time"
) )
func renderBenchmarkReport(result NvidiaBenchmarkResult) string { func renderBenchmarkReport(result NvidiaBenchmarkResult) string {
return renderBenchmarkReportWithCharts(result, nil)
}
// benchmarkReportChart is one captured terminal chart destined for the
// "Terminal Charts" section of the plain-text benchmark report.
type benchmarkReportChart struct {
// Title is the heading printed above the chart, e.g. "GPU 0 Steady State".
Title string
// Content is the raw chart text; ANSI escape sequences are stripped
// before it is written into the report.
Content string
}
// ansiEscapePattern matches ANSI CSI escape sequences so terminal chart
// output can be embedded in a plain-text report. The final byte accepts any
// letter, not only "m" (SGR color codes): terminal renderers also emit
// erase/cursor sequences such as "\x1b[2K" and "\x1b[1A", which would
// otherwise leak into report.txt. This mirrors the broader ansiEscapeRE
// used elsewhere in the project.
var ansiEscapePattern = regexp.MustCompile(`\x1b\[[0-9;]*[a-zA-Z]`)
func renderBenchmarkReportWithCharts(result NvidiaBenchmarkResult, charts []benchmarkReportChart) string {
var b strings.Builder var b strings.Builder
fmt.Fprintf(&b, "Bee NVIDIA Benchmark Report\n") fmt.Fprintf(&b, "Bee NVIDIA Benchmark Report\n")
fmt.Fprintf(&b, "===========================\n\n") fmt.Fprintf(&b, "===========================\n\n")
@@ -93,6 +107,20 @@ func renderBenchmarkReport(result NvidiaBenchmarkResult) string {
b.WriteString("\n") b.WriteString("\n")
} }
if len(charts) > 0 {
fmt.Fprintf(&b, "Terminal Charts\n")
fmt.Fprintf(&b, "---------------\n")
for _, chart := range charts {
content := strings.TrimSpace(stripANSIEscapeSequences(chart.Content))
if content == "" {
continue
}
fmt.Fprintf(&b, "%s\n", chart.Title)
fmt.Fprintf(&b, "%s\n", strings.Repeat("~", len(chart.Title)))
fmt.Fprintf(&b, "%s\n\n", content)
}
}
fmt.Fprintf(&b, "Methodology\n") fmt.Fprintf(&b, "Methodology\n")
fmt.Fprintf(&b, "-----------\n") fmt.Fprintf(&b, "-----------\n")
fmt.Fprintf(&b, "- Profile %s uses standardized baseline, warmup, steady-state, interconnect, and cooldown phases.\n", result.BenchmarkProfile) fmt.Fprintf(&b, "- Profile %s uses standardized baseline, warmup, steady-state, interconnect, and cooldown phases.\n", result.BenchmarkProfile)
@@ -117,6 +145,36 @@ func renderBenchmarkReport(result NvidiaBenchmarkResult) string {
return b.String() return b.String()
} }
// loadBenchmarkReportCharts collects the per-GPU terminal chart dumps written
// during a benchmark run so they can be embedded in report.txt. For every
// selected GPU it looks for the baseline, steady-state, and cooldown chart
// files in runDir; files that are missing or empty are silently skipped.
func loadBenchmarkReportCharts(runDir string, gpuIndices []int) []benchmarkReportChart {
	phaseNames := []string{"baseline", "steady", "cooldown"}
	phaseLabels := map[string]string{
		"baseline": "Baseline",
		"steady":   "Steady State",
		"cooldown": "Cooldown",
	}
	var charts []benchmarkReportChart
	for _, gpu := range gpuIndices {
		for _, phase := range phaseNames {
			file := filepath.Join(runDir, fmt.Sprintf("gpu-%d-%s-metrics-term.txt", gpu, phase))
			data, err := os.ReadFile(file)
			if err != nil || len(data) == 0 {
				// Best-effort: a missing chart just leaves a gap in the report.
				continue
			}
			charts = append(charts, benchmarkReportChart{
				Title:   fmt.Sprintf("GPU %d %s", gpu, phaseLabels[phase]),
				Content: string(data),
			})
		}
	}
	return charts
}
// stripANSIEscapeSequences removes the ANSI escape sequences matched by
// ansiEscapePattern from raw terminal output so it renders as plain text.
func stripANSIEscapeSequences(raw string) string {
return ansiEscapePattern.ReplaceAllString(raw, "")
}
func renderBenchmarkSummary(result NvidiaBenchmarkResult) string { func renderBenchmarkSummary(result NvidiaBenchmarkResult) string {
var b strings.Builder var b strings.Builder
fmt.Fprintf(&b, "run_at_utc=%s\n", result.GeneratedAt.Format(time.RFC3339)) fmt.Fprintf(&b, "run_at_utc=%s\n", result.GeneratedAt.Format(time.RFC3339))

View File

@@ -145,3 +145,35 @@ func TestRenderBenchmarkReportIncludesFindingsAndScores(t *testing.T) {
} }
} }
} }
func TestRenderBenchmarkReportIncludesTerminalChartsWithoutANSI(t *testing.T) {
t.Parallel()
report := renderBenchmarkReportWithCharts(NvidiaBenchmarkResult{
BenchmarkProfile: NvidiaBenchmarkProfileStandard,
OverallStatus: "OK",
SelectedGPUIndices: []int{0},
Normalization: BenchmarkNormalization{
Status: "full",
},
}, []benchmarkReportChart{
{
Title: "GPU 0 Steady State",
Content: "\x1b[31mGPU 0 chart\x1b[0m\n 42┤───",
},
})
for _, needle := range []string{
"Terminal Charts",
"GPU 0 Steady State",
"GPU 0 chart",
"42┤───",
} {
if !strings.Contains(report, needle) {
t.Fatalf("report missing %q\n%s", needle, report)
}
}
if strings.Contains(report, "\x1b[31m") {
t.Fatalf("report should not contain ANSI escapes\n%s", report)
}
}

View File

@@ -16,12 +16,12 @@ func (s *System) RunNvidiaStressPack(ctx context.Context, baseDir string, opts N
return "", err return "", err
} }
return runAcceptancePackCtx(ctx, baseDir, nvidiaStressArchivePrefix(opts.Loader), []satJob{ return runAcceptancePackCtx(ctx, baseDir, nvidiaStressArchivePrefix(opts.Loader), withNvidiaPersistenceMode(
{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}}, satJob{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}},
{name: "02-nvidia-smi-list.log", cmd: []string{"nvidia-smi", "-L"}}, satJob{name: "02-nvidia-smi-list.log", cmd: []string{"nvidia-smi", "-L"}},
job, job,
{name: "04-nvidia-smi-after.log", cmd: []string{"nvidia-smi", "--query-gpu=index,name,temperature.gpu,power.draw,utilization.gpu,memory.used,memory.total", "--format=csv,noheader,nounits"}}, satJob{name: "04-nvidia-smi-after.log", cmd: []string{"nvidia-smi", "--query-gpu=index,name,temperature.gpu,power.draw,utilization.gpu,memory.used,memory.total", "--format=csv,noheader,nounits"}},
}, logFunc) ), logFunc)
} }
func nvidiaStressArchivePrefix(loader string) string { func nvidiaStressArchivePrefix(loader string) string {

View File

@@ -278,13 +278,13 @@ func (s *System) RunNCCLTests(ctx context.Context, baseDir string, logFunc func(
if gpuCount < 1 { if gpuCount < 1 {
gpuCount = 1 gpuCount = 1
} }
return runAcceptancePackCtx(ctx, baseDir, "nccl-tests", []satJob{ return runAcceptancePackCtx(ctx, baseDir, "nccl-tests", withNvidiaPersistenceMode(
{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}}, satJob{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}},
{name: "02-all-reduce-perf.log", cmd: []string{ satJob{name: "02-all-reduce-perf.log", cmd: []string{
"all_reduce_perf", "-b", "512M", "-e", "4G", "-f", "2", "all_reduce_perf", "-b", "512M", "-e", "4G", "-f", "2",
"-g", strconv.Itoa(gpuCount), "--iters", "20", "-g", strconv.Itoa(gpuCount), "--iters", "20",
}}, }},
}, logFunc) ), logFunc)
} }
func (s *System) RunNvidiaOfficialComputePack(ctx context.Context, baseDir string, durationSec int, gpuIndices []int, logFunc func(string)) (string, error) { func (s *System) RunNvidiaOfficialComputePack(ctx context.Context, baseDir string, durationSec int, gpuIndices []int, logFunc func(string)) (string, error) {
@@ -296,18 +296,18 @@ func (s *System) RunNvidiaOfficialComputePack(ctx context.Context, baseDir strin
if err != nil { if err != nil {
return "", err return "", err
} }
return runAcceptancePackCtx(ctx, baseDir, "gpu-nvidia-compute", []satJob{ return runAcceptancePackCtx(ctx, baseDir, "gpu-nvidia-compute", withNvidiaPersistenceMode(
{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}}, satJob{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}},
{name: "02-dcgmi-version.log", cmd: []string{"dcgmi", "-v"}}, satJob{name: "02-dcgmi-version.log", cmd: []string{"dcgmi", "-v"}},
{ satJob{
name: "03-dcgmproftester.log", name: "03-dcgmproftester.log",
cmd: profCmd, cmd: profCmd,
env: nvidiaVisibleDevicesEnv(selected), env: nvidiaVisibleDevicesEnv(selected),
collectGPU: true, collectGPU: true,
gpuIndices: selected, gpuIndices: selected,
}, },
{name: "04-nvidia-smi-after.log", cmd: []string{"nvidia-smi", "--query-gpu=index,name,temperature.gpu,power.draw,utilization.gpu,memory.used,memory.total", "--format=csv,noheader,nounits"}}, satJob{name: "04-nvidia-smi-after.log", cmd: []string{"nvidia-smi", "--query-gpu=index,name,temperature.gpu,power.draw,utilization.gpu,memory.used,memory.total", "--format=csv,noheader,nounits"}},
}, logFunc) ), logFunc)
} }
func (s *System) RunNvidiaTargetedPowerPack(ctx context.Context, baseDir string, durationSec int, gpuIndices []int, logFunc func(string)) (string, error) { func (s *System) RunNvidiaTargetedPowerPack(ctx context.Context, baseDir string, durationSec int, gpuIndices []int, logFunc func(string)) (string, error) {
@@ -315,16 +315,16 @@ func (s *System) RunNvidiaTargetedPowerPack(ctx context.Context, baseDir string,
if err != nil { if err != nil {
return "", err return "", err
} }
return runAcceptancePackCtx(ctx, baseDir, "gpu-nvidia-targeted-power", []satJob{ return runAcceptancePackCtx(ctx, baseDir, "gpu-nvidia-targeted-power", withNvidiaPersistenceMode(
{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}}, satJob{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}},
{ satJob{
name: "02-dcgmi-targeted-power.log", name: "02-dcgmi-targeted-power.log",
cmd: nvidiaDCGMNamedDiagCommand("targeted_power", normalizeNvidiaBurnDuration(durationSec), selected), cmd: nvidiaDCGMNamedDiagCommand("targeted_power", normalizeNvidiaBurnDuration(durationSec), selected),
collectGPU: true, collectGPU: true,
gpuIndices: selected, gpuIndices: selected,
}, },
{name: "03-nvidia-smi-after.log", cmd: []string{"nvidia-smi", "--query-gpu=index,name,temperature.gpu,power.draw,utilization.gpu,memory.used,memory.total", "--format=csv,noheader,nounits"}}, satJob{name: "03-nvidia-smi-after.log", cmd: []string{"nvidia-smi", "--query-gpu=index,name,temperature.gpu,power.draw,utilization.gpu,memory.used,memory.total", "--format=csv,noheader,nounits"}},
}, logFunc) ), logFunc)
} }
func (s *System) RunNvidiaPulseTestPack(ctx context.Context, baseDir string, durationSec int, gpuIndices []int, logFunc func(string)) (string, error) { func (s *System) RunNvidiaPulseTestPack(ctx context.Context, baseDir string, durationSec int, gpuIndices []int, logFunc func(string)) (string, error) {
@@ -332,16 +332,16 @@ func (s *System) RunNvidiaPulseTestPack(ctx context.Context, baseDir string, dur
if err != nil { if err != nil {
return "", err return "", err
} }
return runAcceptancePackCtx(ctx, baseDir, "gpu-nvidia-pulse", []satJob{ return runAcceptancePackCtx(ctx, baseDir, "gpu-nvidia-pulse", withNvidiaPersistenceMode(
{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}}, satJob{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}},
{ satJob{
name: "02-dcgmi-pulse-test.log", name: "02-dcgmi-pulse-test.log",
cmd: nvidiaDCGMNamedDiagCommand("pulse_test", normalizeNvidiaBurnDuration(durationSec), selected), cmd: nvidiaDCGMNamedDiagCommand("pulse_test", normalizeNvidiaBurnDuration(durationSec), selected),
collectGPU: true, collectGPU: true,
gpuIndices: selected, gpuIndices: selected,
}, },
{name: "03-nvidia-smi-after.log", cmd: []string{"nvidia-smi", "--query-gpu=index,name,temperature.gpu,power.draw,utilization.gpu,memory.used,memory.total", "--format=csv,noheader,nounits"}}, satJob{name: "03-nvidia-smi-after.log", cmd: []string{"nvidia-smi", "--query-gpu=index,name,temperature.gpu,power.draw,utilization.gpu,memory.used,memory.total", "--format=csv,noheader,nounits"}},
}, logFunc) ), logFunc)
} }
func (s *System) RunNvidiaBandwidthPack(ctx context.Context, baseDir string, gpuIndices []int, logFunc func(string)) (string, error) { func (s *System) RunNvidiaBandwidthPack(ctx context.Context, baseDir string, gpuIndices []int, logFunc func(string)) (string, error) {
@@ -349,16 +349,16 @@ func (s *System) RunNvidiaBandwidthPack(ctx context.Context, baseDir string, gpu
if err != nil { if err != nil {
return "", err return "", err
} }
return runAcceptancePackCtx(ctx, baseDir, "gpu-nvidia-bandwidth", []satJob{ return runAcceptancePackCtx(ctx, baseDir, "gpu-nvidia-bandwidth", withNvidiaPersistenceMode(
{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}}, satJob{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}},
{ satJob{
name: "02-dcgmi-nvbandwidth.log", name: "02-dcgmi-nvbandwidth.log",
cmd: nvidiaDCGMNamedDiagCommand("nvbandwidth", 0, selected), cmd: nvidiaDCGMNamedDiagCommand("nvbandwidth", 0, selected),
collectGPU: true, collectGPU: true,
gpuIndices: selected, gpuIndices: selected,
}, },
{name: "03-nvidia-smi-after.log", cmd: []string{"nvidia-smi", "--query-gpu=index,name,temperature.gpu,power.draw,utilization.gpu,memory.used,memory.total", "--format=csv,noheader,nounits"}}, satJob{name: "03-nvidia-smi-after.log", cmd: []string{"nvidia-smi", "--query-gpu=index,name,temperature.gpu,power.draw,utilization.gpu,memory.used,memory.total", "--format=csv,noheader,nounits"}},
}, logFunc) ), logFunc)
} }
func (s *System) RunNvidiaAcceptancePack(baseDir string, logFunc func(string)) (string, error) { func (s *System) RunNvidiaAcceptancePack(baseDir string, logFunc func(string)) (string, error) {
@@ -389,16 +389,16 @@ func (s *System) RunNvidiaTargetedStressValidatePack(ctx context.Context, baseDi
logFunc(fmt.Sprintf("pre-flight: killed stale worker pid=%d name=%s", p.PID, p.Name)) logFunc(fmt.Sprintf("pre-flight: killed stale worker pid=%d name=%s", p.PID, p.Name))
} }
} }
return runAcceptancePackCtx(ctx, baseDir, "gpu-nvidia-targeted-stress", []satJob{ return runAcceptancePackCtx(ctx, baseDir, "gpu-nvidia-targeted-stress", withNvidiaPersistenceMode(
{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}}, satJob{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}},
{ satJob{
name: "02-dcgmi-targeted-stress.log", name: "02-dcgmi-targeted-stress.log",
cmd: nvidiaDCGMNamedDiagCommand("targeted_stress", normalizeNvidiaBurnDuration(durationSec), selected), cmd: nvidiaDCGMNamedDiagCommand("targeted_stress", normalizeNvidiaBurnDuration(durationSec), selected),
collectGPU: true, collectGPU: true,
gpuIndices: selected, gpuIndices: selected,
}, },
{name: "03-nvidia-smi-after.log", cmd: []string{"nvidia-smi", "--query-gpu=index,name,temperature.gpu,power.draw,utilization.gpu,memory.used,memory.total", "--format=csv,noheader,nounits"}}, satJob{name: "03-nvidia-smi-after.log", cmd: []string{"nvidia-smi", "--query-gpu=index,name,temperature.gpu,power.draw,utilization.gpu,memory.used,memory.total", "--format=csv,noheader,nounits"}},
}, logFunc) ), logFunc)
} }
func resolveDCGMGPUIndices(gpuIndices []int) ([]int, error) { func resolveDCGMGPUIndices(gpuIndices []int) ([]int, error) {
@@ -568,14 +568,24 @@ type satStats struct {
Unsupported int Unsupported int
} }
// withNvidiaPersistenceMode prepends a pre-flight job that enables NVIDIA
// persistence mode ("nvidia-smi -pm 1") before the given SAT jobs run.
func withNvidiaPersistenceMode(jobs ...satJob) []satJob {
	preflight := satJob{
		name: "00-nvidia-smi-persistence-mode.log",
		cmd:  []string{"nvidia-smi", "-pm", "1"},
	}
	return append([]satJob{preflight}, jobs...)
}
func nvidiaSATJobs() []satJob { func nvidiaSATJobs() []satJob {
return []satJob{ return withNvidiaPersistenceMode(
{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}}, satJob{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}},
{name: "02-dmidecode-baseboard.log", cmd: []string{"dmidecode", "-t", "baseboard"}}, satJob{name: "02-dmidecode-baseboard.log", cmd: []string{"dmidecode", "-t", "baseboard"}},
{name: "03-dmidecode-system.log", cmd: []string{"dmidecode", "-t", "system"}}, satJob{name: "03-dmidecode-system.log", cmd: []string{"dmidecode", "-t", "system"}},
{name: "04-nvidia-bug-report.log", cmd: []string{"nvidia-bug-report.sh", "--output-file", "{{run_dir}}/nvidia-bug-report.log"}}, satJob{name: "04-nvidia-bug-report.log", cmd: []string{"nvidia-bug-report.sh", "--output-file", "{{run_dir}}/nvidia-bug-report.log"}},
{name: "05-bee-gpu-burn.log", cmd: []string{"bee-gpu-burn", "--seconds", "5", "--size-mb", "64"}}, satJob{name: "05-bee-gpu-burn.log", cmd: []string{"bee-gpu-burn", "--seconds", "5", "--size-mb", "64"}},
} )
} }
func nvidiaDCGMJobs(diagLevel int, gpuIndices []int) []satJob { func nvidiaDCGMJobs(diagLevel int, gpuIndices []int) []satJob {
@@ -590,12 +600,12 @@ func nvidiaDCGMJobs(diagLevel int, gpuIndices []int) []satJob {
} }
diagArgs = append(diagArgs, "-i", strings.Join(ids, ",")) diagArgs = append(diagArgs, "-i", strings.Join(ids, ","))
} }
return []satJob{ return withNvidiaPersistenceMode(
{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}}, satJob{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}},
{name: "02-dmidecode-baseboard.log", cmd: []string{"dmidecode", "-t", "baseboard"}}, satJob{name: "02-dmidecode-baseboard.log", cmd: []string{"dmidecode", "-t", "baseboard"}},
{name: "03-dmidecode-system.log", cmd: []string{"dmidecode", "-t", "system"}}, satJob{name: "03-dmidecode-system.log", cmd: []string{"dmidecode", "-t", "system"}},
{name: "04-dcgmi-diag.log", cmd: diagArgs}, satJob{name: "04-dcgmi-diag.log", cmd: diagArgs},
} )
} }
func nvidiaDCGMNamedDiagCommand(name string, durationSec int, gpuIndices []int) []string { func nvidiaDCGMNamedDiagCommand(name string, durationSec int, gpuIndices []int) []string {
@@ -620,7 +630,10 @@ func nvidiaVisibleDevicesEnv(gpuIndices []int) []string {
if len(gpuIndices) == 0 { if len(gpuIndices) == 0 {
return nil return nil
} }
return []string{"CUDA_VISIBLE_DEVICES=" + joinIndexList(gpuIndices)} return []string{
"CUDA_DEVICE_ORDER=PCI_BUS_ID",
"CUDA_VISIBLE_DEVICES=" + joinIndexList(gpuIndices),
}
} }
func runAcceptancePackCtx(ctx context.Context, baseDir, prefix string, jobs []satJob, logFunc func(string)) (string, error) { func runAcceptancePackCtx(ctx context.Context, baseDir, prefix string, jobs []satJob, logFunc func(string)) (string, error) {
@@ -661,6 +674,9 @@ func runAcceptancePackCtx(ctx context.Context, baseDir, prefix string, jobs []sa
if writeErr := os.WriteFile(filepath.Join(runDir, job.name), out, 0644); writeErr != nil { if writeErr := os.WriteFile(filepath.Join(runDir, job.name), out, 0644); writeErr != nil {
return "", writeErr return "", writeErr
} }
if ctx.Err() != nil {
return "", ctx.Err()
}
status, rc := classifySATResult(job.name, out, err) status, rc := classifySATResult(job.name, out, err)
stats.Add(status) stats.Add(status)
key := strings.TrimSuffix(strings.TrimPrefix(job.name, "0"), ".log") key := strings.TrimSuffix(strings.TrimPrefix(job.name, "0"), ".log")

View File

@@ -1,12 +1,14 @@
package platform package platform
import ( import (
"context"
"errors" "errors"
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"strings" "strings"
"testing" "testing"
"time"
) )
func TestStorageSATCommands(t *testing.T) { func TestStorageSATCommands(t *testing.T) {
@@ -28,13 +30,19 @@ func TestRunNvidiaAcceptancePackIncludesGPUStress(t *testing.T) {
jobs := nvidiaSATJobs() jobs := nvidiaSATJobs()
if len(jobs) != 5 { if len(jobs) != 6 {
t.Fatalf("jobs=%d want 5", len(jobs)) t.Fatalf("jobs=%d want 6", len(jobs))
} }
if got := jobs[4].cmd[0]; got != "bee-gpu-burn" { if got := jobs[0].cmd[0]; got != "nvidia-smi" {
t.Fatalf("preflight command=%q want nvidia-smi", got)
}
if got := strings.Join(jobs[0].cmd, " "); got != "nvidia-smi -pm 1" {
t.Fatalf("preflight=%q want %q", got, "nvidia-smi -pm 1")
}
if got := jobs[5].cmd[0]; got != "bee-gpu-burn" {
t.Fatalf("gpu stress command=%q want bee-gpu-burn", got) t.Fatalf("gpu stress command=%q want bee-gpu-burn", got)
} }
if got := jobs[3].cmd[1]; got != "--output-file" { if got := jobs[4].cmd[1]; got != "--output-file" {
t.Fatalf("bug report flag=%q want --output-file", got) t.Fatalf("bug report flag=%q want --output-file", got)
} }
} }
@@ -82,7 +90,7 @@ func TestAMDStressJobsIncludeBandwidthAndGST(t *testing.T) {
func TestNvidiaSATJobsUseBuiltinBurnDefaults(t *testing.T) { func TestNvidiaSATJobsUseBuiltinBurnDefaults(t *testing.T) {
jobs := nvidiaSATJobs() jobs := nvidiaSATJobs()
got := jobs[4].cmd got := jobs[5].cmd
want := []string{"bee-gpu-burn", "--seconds", "5", "--size-mb", "64"} want := []string{"bee-gpu-burn", "--seconds", "5", "--size-mb", "64"}
if len(got) != len(want) { if len(got) != len(want) {
t.Fatalf("cmd len=%d want %d", len(got), len(want)) t.Fatalf("cmd len=%d want %d", len(got), len(want))
@@ -94,6 +102,19 @@ func TestNvidiaSATJobsUseBuiltinBurnDefaults(t *testing.T) {
} }
} }
// TestNvidiaDCGMJobsEnablePersistenceModeBeforeDiag checks that DCGM job
// lists start with the persistence-mode pre-flight and end with the diag
// command built from the requested level and GPU selection.
func TestNvidiaDCGMJobsEnablePersistenceModeBeforeDiag(t *testing.T) {
	jobs := nvidiaDCGMJobs(3, []int{2, 0})
	if len(jobs) != 5 {
		t.Fatalf("jobs=%d want 5", len(jobs))
	}
	preflight := strings.Join(jobs[0].cmd, " ")
	if preflight != "nvidia-smi -pm 1" {
		t.Fatalf("preflight=%q want %q", preflight, "nvidia-smi -pm 1")
	}
	diag := strings.Join(jobs[4].cmd, " ")
	if diag != "dcgmi diag -r 3 -i 2,0" {
		t.Fatalf("diag=%q want %q", diag, "dcgmi diag -r 3 -i 2,0")
	}
}
func TestBuildNvidiaStressJobUsesSelectedLoaderAndDevices(t *testing.T) { func TestBuildNvidiaStressJobUsesSelectedLoaderAndDevices(t *testing.T) {
t.Parallel() t.Parallel()
@@ -234,11 +255,14 @@ func TestNvidiaDCGMNamedDiagCommandUsesDurationAndSelection(t *testing.T) {
func TestNvidiaVisibleDevicesEnvUsesSelectedGPUs(t *testing.T) { func TestNvidiaVisibleDevicesEnvUsesSelectedGPUs(t *testing.T) {
env := nvidiaVisibleDevicesEnv([]int{0, 2, 4}) env := nvidiaVisibleDevicesEnv([]int{0, 2, 4})
if len(env) != 1 { if len(env) != 2 {
t.Fatalf("env len=%d want 1 (%v)", len(env), env) t.Fatalf("env len=%d want 2 (%v)", len(env), env)
} }
if env[0] != "CUDA_VISIBLE_DEVICES=0,2,4" { if env[0] != "CUDA_DEVICE_ORDER=PCI_BUS_ID" {
t.Fatalf("env[0]=%q want CUDA_VISIBLE_DEVICES=0,2,4", env[0]) t.Fatalf("env[0]=%q want CUDA_DEVICE_ORDER=PCI_BUS_ID", env[0])
}
if env[1] != "CUDA_VISIBLE_DEVICES=0,2,4" {
t.Fatalf("env[1]=%q want CUDA_VISIBLE_DEVICES=0,2,4", env[1])
} }
} }
@@ -331,6 +355,38 @@ func TestClassifySATResult(t *testing.T) {
} }
} }
func TestRunAcceptancePackCtxReturnsContextErrorWithoutArchive(t *testing.T) {
dir := t.TempDir()
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
done := make(chan struct{})
go func() {
time.Sleep(100 * time.Millisecond)
cancel()
close(done)
}()
archive, err := runAcceptancePackCtx(ctx, dir, "cancelled-pack", []satJob{
{name: "01-sleep.log", cmd: []string{"sh", "-c", "sleep 5"}},
}, nil)
<-done
if !errors.Is(err, context.Canceled) {
t.Fatalf("err=%v want context.Canceled", err)
}
if archive != "" {
t.Fatalf("archive=%q want empty", archive)
}
matches, globErr := filepath.Glob(filepath.Join(dir, "cancelled-pack-*.tar.gz"))
if globErr != nil {
t.Fatalf("Glob error: %v", globErr)
}
if len(matches) != 0 {
t.Fatalf("archives=%v want none", matches)
}
}
func TestParseStorageDevicesSkipsUSBDisks(t *testing.T) { func TestParseStorageDevicesSkipsUSBDisks(t *testing.T) {
t.Parallel() t.Parallel()

View File

@@ -11,6 +11,7 @@ import (
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"regexp" "regexp"
"sort"
"strings" "strings"
"sync/atomic" "sync/atomic"
"syscall" "syscall"
@@ -21,13 +22,238 @@ import (
) )
var ansiEscapeRE = regexp.MustCompile(`\x1b\[[0-9;]*[a-zA-Z]|\x1b[()][A-Z0-9]|\x1b[DABC]`) var ansiEscapeRE = regexp.MustCompile(`\x1b\[[0-9;]*[a-zA-Z]|\x1b[()][A-Z0-9]|\x1b[DABC]`)
// apiListNvidiaGPUs fetches the NVIDIA GPU inventory from the app layer,
// returning an error when no app is wired in.
// NOTE(review): the package-level var indirection presumably exists so tests
// can stub GPU discovery — confirm against the handler tests.
var apiListNvidiaGPUs = func(a *app.App) ([]platform.NvidiaGPU, error) {
if a == nil {
return nil, fmt.Errorf("app not configured")
}
return a.ListNvidiaGPUs()
}
// ── Job ID counter ──────────────────────────────────────────────────────────── // ── Job ID counter ────────────────────────────────────────────────────────────
var jobCounter atomic.Uint64 var jobCounter atomic.Uint64
func newJobID(prefix string) string { func newJobID(_ string) string {
return fmt.Sprintf("%s-%d", prefix, jobCounter.Add(1)) start := int((jobCounter.Add(1) - 1) % 1000)
globalQueue.mu.Lock()
defer globalQueue.mu.Unlock()
for offset := 0; offset < 1000; offset++ {
n := (start + offset) % 1000
id := fmt.Sprintf("TASK-%03d", n)
if !taskIDInUseLocked(id) {
return id
}
}
return fmt.Sprintf("TASK-%03d", start)
}
// taskIDInUseLocked reports whether any task currently in the queue already
// carries the given ID. Callers must hold globalQueue.mu.
func taskIDInUseLocked(id string) bool {
	for _, task := range globalQueue.tasks {
		if task == nil {
			continue
		}
		if task.ID == id {
			return true
		}
	}
	return false
}
// taskRunResponse is the JSON payload returned by task-run endpoints.
// The singular TaskID/JobID fields carry the first entry of TaskIDs/JobIDs
// (see writeTaskRunResponse) — presumably for clients that predate task
// splitting and expect a single ID; confirm against API consumers.
type taskRunResponse struct {
TaskID string `json:"task_id,omitempty"`
JobID string `json:"job_id,omitempty"`
TaskIDs []string `json:"task_ids,omitempty"`
JobIDs []string `json:"job_ids,omitempty"`
TaskCount int `json:"task_count,omitempty"`
}
// nvidiaTaskSelection is one GPU grouping produced when an NVIDIA task is
// fanned out into per-model sub-tasks.
type nvidiaTaskSelection struct {
// GPUIndices lists the GPU indices this sub-task targets.
GPUIndices []int
// Label is the human-readable selection description appended to the
// task's display name.
Label string
}
// writeTaskRunResponse serialises the enqueued tasks into the task-run JSON
// payload. Nil tasks and tasks with blank IDs are dropped; the first
// surviving ID is also exposed via the singular TaskID/JobID fields.
func writeTaskRunResponse(w http.ResponseWriter, tasks []*Task) {
	if len(tasks) == 0 {
		writeJSON(w, taskRunResponse{})
		return
	}
	var ids []string
	for _, task := range tasks {
		if task == nil {
			continue
		}
		if strings.TrimSpace(task.ID) == "" {
			continue
		}
		ids = append(ids, task.ID)
	}
	resp := taskRunResponse{TaskCount: len(ids)}
	if len(ids) > 0 {
		resp.TaskID = ids[0]
		resp.JobID = ids[0]
		resp.TaskIDs = ids
		resp.JobIDs = ids
	}
	writeJSON(w, resp)
}
// shouldSplitHomogeneousNvidiaTarget reports whether the given task target is
// an NVIDIA workload that gets fanned out into one sub-task per homogeneous
// GPU model group. The target is compared after trimming whitespace.
func shouldSplitHomogeneousNvidiaTarget(target string) bool {
	trimmed := strings.TrimSpace(target)
	splitTargets := []string{
		"nvidia",
		"nvidia-targeted-stress",
		"nvidia-benchmark",
		"nvidia-compute",
		"nvidia-targeted-power",
		"nvidia-pulse",
		"nvidia-interconnect",
		"nvidia-bandwidth",
		"nvidia-stress",
	}
	for _, candidate := range splitTargets {
		if trimmed == candidate {
			return true
		}
	}
	return false
}
// expandHomogeneousNvidiaSelections turns the detected GPU inventory plus
// optional include/exclude index filters into per-model task selections.
// GPUs sharing a model name form one multi-GPU group; models with a single
// GPU become individual selections appended after the groups. Unknown
// include indices are ignored, duplicates are dropped, and exclusion is
// applied after inclusion. Errors are returned when no GPUs are detected or
// the filters leave nothing selected.
func expandHomogeneousNvidiaSelections(gpus []platform.NvidiaGPU, include, exclude []int) ([]nvidiaTaskSelection, error) {
if len(gpus) == 0 {
return nil, fmt.Errorf("no NVIDIA GPUs detected")
}
// Index the inventory so filters can be validated against real GPUs.
indexed := make(map[int]platform.NvidiaGPU, len(gpus))
allIndices := make([]int, 0, len(gpus))
for _, gpu := range gpus {
indexed[gpu.Index] = gpu
allIndices = append(allIndices, gpu.Index)
}
sort.Ints(allIndices)
selected := allIndices
// Inclusion filter: keep only known, non-duplicate indices, sorted.
if len(include) > 0 {
selected = make([]int, 0, len(include))
seen := make(map[int]struct{}, len(include))
for _, idx := range include {
if _, ok := indexed[idx]; !ok {
continue
}
if _, dup := seen[idx]; dup {
continue
}
seen[idx] = struct{}{}
selected = append(selected, idx)
}
sort.Ints(selected)
}
// Exclusion filter: drop excluded indices in place (reuses the backing array).
if len(exclude) > 0 {
skip := make(map[int]struct{}, len(exclude))
for _, idx := range exclude {
skip[idx] = struct{}{}
}
filtered := selected[:0]
for _, idx := range selected {
if _, ok := skip[idx]; ok {
continue
}
filtered = append(filtered, idx)
}
selected = filtered
}
if len(selected) == 0 {
return nil, fmt.Errorf("no NVIDIA GPUs selected")
}
// Group the surviving GPUs by (trimmed) model name; nameless GPUs get a
// synthetic "GPU N" model so they never collapse into one group.
modelGroups := make(map[string][]platform.NvidiaGPU)
modelOrder := make([]string, 0)
for _, idx := range selected {
gpu := indexed[idx]
model := strings.TrimSpace(gpu.Name)
if model == "" {
model = fmt.Sprintf("GPU %d", gpu.Index)
}
if _, ok := modelGroups[model]; !ok {
modelOrder = append(modelOrder, model)
}
modelGroups[model] = append(modelGroups[model], gpu)
}
// Order models by the lowest GPU index in each group (selected is sorted,
// so element 0 of each group is its minimum); empty groups fall back to
// name order.
sort.Slice(modelOrder, func(i, j int) bool {
left := modelGroups[modelOrder[i]]
right := modelGroups[modelOrder[j]]
if len(left) == 0 || len(right) == 0 {
return modelOrder[i] < modelOrder[j]
}
return left[0].Index < right[0].Index
})
// Multi-GPU groups come first; single-GPU models trail as individual tasks.
var groups []nvidiaTaskSelection
var singles []nvidiaTaskSelection
for _, model := range modelOrder {
group := modelGroups[model]
sort.Slice(group, func(i, j int) bool { return group[i].Index < group[j].Index })
indices := make([]int, 0, len(group))
for _, gpu := range group {
indices = append(indices, gpu.Index)
}
if len(indices) >= 2 {
groups = append(groups, nvidiaTaskSelection{
GPUIndices: indices,
Label: fmt.Sprintf("%s; GPUs %s", model, joinTaskIndices(indices)),
})
continue
}
gpu := group[0]
singles = append(singles, nvidiaTaskSelection{
GPUIndices: []int{gpu.Index},
Label: fmt.Sprintf("GPU %d — %s", gpu.Index, model),
})
}
return append(groups, singles...), nil
}
// joinTaskIndices renders GPU indices as a comma-separated list, e.g. "0,2,4".
// An empty or nil slice yields "".
func joinTaskIndices(indices []int) string {
	var b strings.Builder
	for i, idx := range indices {
		if i > 0 {
			b.WriteString(",")
		}
		fmt.Fprintf(&b, "%d", idx)
	}
	return b.String()
}
// formatSplitTaskName combines a base task name with a GPU-selection label
// as "base (label)". Both parts are whitespace-trimmed first; when either is
// empty, the other is returned on its own.
func formatSplitTaskName(baseName, selectionLabel string) string {
	name := strings.TrimSpace(baseName)
	label := strings.TrimSpace(selectionLabel)
	switch {
	case name == "":
		return label
	case label == "":
		return name
	default:
		return fmt.Sprintf("%s (%s)", name, label)
	}
}
// buildNvidiaTaskSet creates the pending Task(s) for a target. Targets not
// in the homogeneous-split set produce a single task with the params as
// given. Split targets query the GPU inventory, expand it into per-model
// selections, and produce one task per selection: each copy pins GPUIndices
// to the selection, clears ExcludeGPUIndices (already applied during
// expansion), and gets a display name of the form "base (selection label)".
func buildNvidiaTaskSet(target string, priority int, createdAt time.Time, params taskParams, baseName string, appRef *app.App, idPrefix string) ([]*Task, error) {
if !shouldSplitHomogeneousNvidiaTarget(target) {
// Non-split target: one task, params passed through untouched.
t := &Task{
ID: newJobID(idPrefix),
Name: baseName,
Target: target,
Priority: priority,
Status: TaskPending,
CreatedAt: createdAt,
params: params,
}
return []*Task{t}, nil
}
gpus, err := apiListNvidiaGPUs(appRef)
if err != nil {
return nil, err
}
selections, err := expandHomogeneousNvidiaSelections(gpus, params.GPUIndices, params.ExcludeGPUIndices)
if err != nil {
return nil, err
}
tasks := make([]*Task, 0, len(selections))
for _, selection := range selections {
// Copy params per task so sub-tasks do not share the GPU index slice.
taskParamsCopy := params
taskParamsCopy.GPUIndices = append([]int(nil), selection.GPUIndices...)
taskParamsCopy.ExcludeGPUIndices = nil
displayName := formatSplitTaskName(baseName, selection.Label)
taskParamsCopy.DisplayName = displayName
tasks = append(tasks, &Task{
ID: newJobID(idPrefix),
Name: displayName,
Target: target,
Priority: priority,
Status: TaskPending,
CreatedAt: createdAt,
params: taskParamsCopy,
})
}
return tasks, nil
} }
// ── SSE helpers ─────────────────────────────────────────────────────────────── // ── SSE helpers ───────────────────────────────────────────────────────────────
@@ -207,28 +433,28 @@ func (h *handler) handleAPISATRun(target string) http.HandlerFunc {
} }
name := taskDisplayName(target, body.Profile, body.Loader) name := taskDisplayName(target, body.Profile, body.Loader)
t := &Task{
ID: newJobID("sat-" + target),
Name: name,
Target: target,
Status: TaskPending,
CreatedAt: time.Now(),
params: taskParams{
Duration: body.Duration,
DiagLevel: body.DiagLevel,
GPUIndices: body.GPUIndices,
ExcludeGPUIndices: body.ExcludeGPUIndices,
Loader: body.Loader,
BurnProfile: body.Profile,
DisplayName: body.DisplayName,
PlatformComponents: body.PlatformComponents,
},
}
if strings.TrimSpace(body.DisplayName) != "" { if strings.TrimSpace(body.DisplayName) != "" {
t.Name = body.DisplayName name = body.DisplayName
} }
globalQueue.enqueue(t) params := taskParams{
writeJSON(w, map[string]string{"task_id": t.ID, "job_id": t.ID}) Duration: body.Duration,
DiagLevel: body.DiagLevel,
GPUIndices: body.GPUIndices,
ExcludeGPUIndices: body.ExcludeGPUIndices,
Loader: body.Loader,
BurnProfile: body.Profile,
DisplayName: body.DisplayName,
PlatformComponents: body.PlatformComponents,
}
tasks, err := buildNvidiaTaskSet(target, 0, time.Now(), params, name, h.opts.App, "sat-"+target)
if err != nil {
writeError(w, http.StatusBadRequest, err.Error())
return
}
for _, t := range tasks {
globalQueue.enqueue(t)
}
writeTaskRunResponse(w, tasks)
} }
} }
@@ -257,27 +483,26 @@ func (h *handler) handleAPIBenchmarkNvidiaRun(w http.ResponseWriter, r *http.Req
if body.RunNCCL != nil { if body.RunNCCL != nil {
runNCCL = *body.RunNCCL runNCCL = *body.RunNCCL
} }
t := &Task{ name := taskDisplayName("nvidia-benchmark", "", "")
ID: newJobID("benchmark-nvidia"),
Name: taskDisplayName("nvidia-benchmark", "", ""),
Target: "nvidia-benchmark",
Priority: 15,
Status: TaskPending,
CreatedAt: time.Now(),
params: taskParams{
GPUIndices: body.GPUIndices,
ExcludeGPUIndices: body.ExcludeGPUIndices,
SizeMB: body.SizeMB,
BenchmarkProfile: body.Profile,
RunNCCL: runNCCL,
DisplayName: body.DisplayName,
},
}
if strings.TrimSpace(body.DisplayName) != "" { if strings.TrimSpace(body.DisplayName) != "" {
t.Name = body.DisplayName name = body.DisplayName
} }
globalQueue.enqueue(t) tasks, err := buildNvidiaTaskSet("nvidia-benchmark", 15, time.Now(), taskParams{
writeJSON(w, map[string]string{"task_id": t.ID, "job_id": t.ID}) GPUIndices: body.GPUIndices,
ExcludeGPUIndices: body.ExcludeGPUIndices,
SizeMB: body.SizeMB,
BenchmarkProfile: body.Profile,
RunNCCL: runNCCL,
DisplayName: body.DisplayName,
}, name, h.opts.App, "benchmark-nvidia")
if err != nil {
writeError(w, http.StatusBadRequest, err.Error())
return
}
for _, t := range tasks {
globalQueue.enqueue(t)
}
writeTaskRunResponse(w, tasks)
} }
func (h *handler) handleAPISATStream(w http.ResponseWriter, r *http.Request) { func (h *handler) handleAPISATStream(w http.ResponseWriter, r *http.Request) {

View File

@@ -1,6 +1,7 @@
package webui package webui
import ( import (
"encoding/json"
"net/http/httptest" "net/http/httptest"
"strings" "strings"
"testing" "testing"
@@ -74,6 +75,14 @@ func TestHandleAPIBenchmarkNvidiaRunQueuesSelectedGPUs(t *testing.T) {
globalQueue.tasks = originalTasks globalQueue.tasks = originalTasks
globalQueue.mu.Unlock() globalQueue.mu.Unlock()
}) })
prevList := apiListNvidiaGPUs
apiListNvidiaGPUs = func(_ *app.App) ([]platform.NvidiaGPU, error) {
return []platform.NvidiaGPU{
{Index: 1, Name: "NVIDIA H100 PCIe"},
{Index: 3, Name: "NVIDIA H100 PCIe"},
}, nil
}
t.Cleanup(func() { apiListNvidiaGPUs = prevList })
h := &handler{opts: HandlerOptions{App: &app.App{}}} h := &handler{opts: HandlerOptions{App: &app.App{}}}
req := httptest.NewRequest("POST", "/api/benchmark/nvidia/run", strings.NewReader(`{"profile":"standard","gpu_indices":[1,3],"run_nccl":false}`)) req := httptest.NewRequest("POST", "/api/benchmark/nvidia/run", strings.NewReader(`{"profile":"standard","gpu_indices":[1,3],"run_nccl":false}`))
@@ -101,6 +110,97 @@ func TestHandleAPIBenchmarkNvidiaRunQueuesSelectedGPUs(t *testing.T) {
} }
} }
// TestHandleAPIBenchmarkNvidiaRunSplitsMixedGPUModels verifies that a
// benchmark request spanning two GPU models is split into one queued task per
// homogeneous model group: [0,1] (H100) and [2] (H200).
func TestHandleAPIBenchmarkNvidiaRunSplitsMixedGPUModels(t *testing.T) {
	globalQueue.mu.Lock()
	savedTasks := globalQueue.tasks
	globalQueue.tasks = nil
	globalQueue.mu.Unlock()
	t.Cleanup(func() {
		globalQueue.mu.Lock()
		globalQueue.tasks = savedTasks
		globalQueue.mu.Unlock()
	})
	savedList := apiListNvidiaGPUs
	t.Cleanup(func() { apiListNvidiaGPUs = savedList })
	apiListNvidiaGPUs = func(_ *app.App) ([]platform.NvidiaGPU, error) {
		return []platform.NvidiaGPU{
			{Index: 0, Name: "NVIDIA H100 PCIe"},
			{Index: 1, Name: "NVIDIA H100 PCIe"},
			{Index: 2, Name: "NVIDIA H200 NVL"},
		}, nil
	}
	h := &handler{opts: HandlerOptions{App: &app.App{}}}
	rec := httptest.NewRecorder()
	req := httptest.NewRequest("POST", "/api/benchmark/nvidia/run", strings.NewReader(`{"profile":"standard","gpu_indices":[0,1,2],"run_nccl":false}`))
	h.handleAPIBenchmarkNvidiaRun(rec, req)
	if rec.Code != 200 {
		t.Fatalf("status=%d body=%s", rec.Code, rec.Body.String())
	}
	var resp taskRunResponse
	if err := json.Unmarshal(rec.Body.Bytes(), &resp); err != nil {
		t.Fatalf("decode response: %v", err)
	}
	if len(resp.TaskIDs) != 2 {
		t.Fatalf("task_ids=%v want 2 items", resp.TaskIDs)
	}
	globalQueue.mu.Lock()
	defer globalQueue.mu.Unlock()
	if len(globalQueue.tasks) != 2 {
		t.Fatalf("tasks=%d want 2", len(globalQueue.tasks))
	}
	if got := globalQueue.tasks[0].params.GPUIndices; len(got) != 2 || got[0] != 0 || got[1] != 1 {
		t.Fatalf("task[0] gpu indices=%v want [0 1]", got)
	}
	if got := globalQueue.tasks[1].params.GPUIndices; len(got) != 1 || got[0] != 2 {
		t.Fatalf("task[1] gpu indices=%v want [2]", got)
	}
}
// TestHandleAPISATRunSplitsMixedNvidiaTaskSet verifies that a SAT run request
// against a mixed NVIDIA fleet is split into one queued task per homogeneous
// GPU-model group: [0,1] (H100) and [2] (H200).
func TestHandleAPISATRunSplitsMixedNvidiaTaskSet(t *testing.T) {
	globalQueue.mu.Lock()
	savedTasks := globalQueue.tasks
	globalQueue.tasks = nil
	globalQueue.mu.Unlock()
	t.Cleanup(func() {
		globalQueue.mu.Lock()
		globalQueue.tasks = savedTasks
		globalQueue.mu.Unlock()
	})
	savedList := apiListNvidiaGPUs
	t.Cleanup(func() { apiListNvidiaGPUs = savedList })
	apiListNvidiaGPUs = func(_ *app.App) ([]platform.NvidiaGPU, error) {
		return []platform.NvidiaGPU{
			{Index: 0, Name: "NVIDIA H100 PCIe"},
			{Index: 1, Name: "NVIDIA H100 PCIe"},
			{Index: 2, Name: "NVIDIA H200 NVL"},
		}, nil
	}
	h := &handler{opts: HandlerOptions{App: &app.App{}}}
	rec := httptest.NewRecorder()
	req := httptest.NewRequest("POST", "/api/sat/nvidia-targeted-power/run", strings.NewReader(`{"profile":"acceptance","gpu_indices":[0,1,2]}`))
	h.handleAPISATRun("nvidia-targeted-power").ServeHTTP(rec, req)
	if rec.Code != 200 {
		t.Fatalf("status=%d body=%s", rec.Code, rec.Body.String())
	}
	globalQueue.mu.Lock()
	defer globalQueue.mu.Unlock()
	if len(globalQueue.tasks) != 2 {
		t.Fatalf("tasks=%d want 2", len(globalQueue.tasks))
	}
	if got := globalQueue.tasks[0].params.GPUIndices; len(got) != 2 || got[0] != 0 || got[1] != 1 {
		t.Fatalf("task[0] gpu indices=%v want [0 1]", got)
	}
	if got := globalQueue.tasks[1].params.GPUIndices; len(got) != 1 || got[0] != 2 {
		t.Fatalf("task[1] gpu indices=%v want [2]", got)
	}
}
func TestPushFanRingsTracksByNameAndCarriesForwardMissingSamples(t *testing.T) { func TestPushFanRingsTracksByNameAndCarriesForwardMissingSamples(t *testing.T) {
h := &handler{} h := &handler{}
h.pushFanRings([]platform.FanReading{ h.pushFanRings([]platform.FanReading{

View File

@@ -8,9 +8,12 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"sort" "sort"
"strconv"
"strings" "strings"
"time"
"bee/audit/internal/app" "bee/audit/internal/app"
"bee/audit/internal/platform"
"bee/audit/internal/schema" "bee/audit/internal/schema"
) )
@@ -161,7 +164,7 @@ func renderPage(page string, opts HandlerOptions) string {
case "benchmark": case "benchmark":
pageID = "benchmark" pageID = "benchmark"
title = "Benchmark" title = "Benchmark"
body = renderBenchmark() body = renderBenchmark(opts)
case "tasks": case "tasks":
pageID = "tasks" pageID = "tasks"
title = "Tasks" title = "Tasks"
@@ -1068,17 +1071,23 @@ func renderValidate(opts HandlerOptions) string {
`</div> `</div>
<div style="height:1px;background:var(--border);margin:16px 0"></div> <div style="height:1px;background:var(--border);margin:16px 0"></div>
<div class="grid3"> <div class="grid3">
` + renderSATCard("nvidia", "NVIDIA GPU", "runNvidiaValidateSet('nvidia')", "", renderValidateCardBody( ` + renderSATCard("nvidia-selection", "NVIDIA GPU Selection", "", "", renderValidateCardBody(
inv.NVIDIA, inv.NVIDIA,
`Runs NVIDIA diagnostics and board inventory checks.`, `Select which NVIDIA GPUs to include in Validate. The same selection is used by both NVIDIA GPU cards below and by Validate one by one.`,
`<code>nvidia-smi</code>, <code>dmidecode</code>, <code>dcgmi diag</code>`, `<code>nvidia-smi --query-gpu=index,name,memory.total</code>`,
`Runs one GPU at a time. Diag level is taken from Validate Profile.`, `<div id="sat-gpu-list"><p style="color:var(--muted);font-size:13px">Loading NVIDIA GPUs…</p></div><div style="display:flex;gap:8px;flex-wrap:wrap;margin-top:8px"><button type="button" class="btn btn-sm btn-secondary" onclick="satSelectAllGPUs()">Select all</button><button type="button" class="btn btn-sm btn-secondary" onclick="satSelectNoGPUs()">Clear</button></div><div id="sat-gpu-selection-note" style="font-size:12px;color:var(--muted);margin-top:8px"></div>`,
)) + )) +
renderSATCard("nvidia", "NVIDIA GPU", "runNvidiaValidateSet('nvidia')", "", renderValidateCardBody(
inv.NVIDIA,
`Runs NVIDIA diagnostics and board inventory checks.`,
`<code>nvidia-smi</code>, <code>dmidecode</code>, <code>dcgmi diag</code>`,
`Runs one GPU at a time on the selected NVIDIA GPUs. Diag level is taken from Validate Profile.`,
)) +
renderSATCard("nvidia-targeted-stress", "NVIDIA GPU Targeted Stress", "runNvidiaValidateSet('nvidia-targeted-stress')", "", renderValidateCardBody( renderSATCard("nvidia-targeted-stress", "NVIDIA GPU Targeted Stress", "runNvidiaValidateSet('nvidia-targeted-stress')", "", renderValidateCardBody(
inv.NVIDIA, inv.NVIDIA,
`Runs a controlled NVIDIA DCGM load in Validate to check stability under moderate stress.`, `Runs a controlled NVIDIA DCGM load in Validate to check stability under moderate stress.`,
`<code>dcgmi diag targeted_stress</code>`, `<code>dcgmi diag targeted_stress</code>`,
`Runs one GPU at a time with the fixed DCGM targeted stress recipe.`, `Runs one GPU at a time on the selected NVIDIA GPUs with the fixed DCGM targeted stress recipe.`,
)) + )) +
`</div> `</div>
<div class="grid3" style="margin-top:16px"> <div class="grid3" style="margin-top:16px">
@@ -1100,6 +1109,8 @@ func renderValidate(opts HandlerOptions) string {
.validate-card-body { padding:0; } .validate-card-body { padding:0; }
.validate-card-section { padding:12px 16px 0; } .validate-card-section { padding:12px 16px 0; }
.validate-card-section:last-child { padding-bottom:16px; } .validate-card-section:last-child { padding-bottom:16px; }
.sat-gpu-row { display:flex; align-items:flex-start; gap:8px; padding:6px 0; cursor:pointer; font-size:13px; }
.sat-gpu-row input[type=checkbox] { width:16px; height:16px; margin-top:2px; flex-shrink:0; }
@media(max-width:900px){ .validate-profile-body { grid-template-columns:1fr; } } @media(max-width:900px){ .validate-profile-body { grid-template-columns:1fr; } }
</style> </style>
<script> <script>
@@ -1128,6 +1139,59 @@ function loadSatNvidiaGPUs() {
} }
return satNvidiaGPUsPromise; return satNvidiaGPUsPromise;
} }
// Returns the indices of all enabled, checked NVIDIA GPU checkboxes as a
// numerically-sorted array of integers (non-numeric values are dropped).
function satSelectedGPUIndices() {
  const indices = [];
  document.querySelectorAll('.sat-nvidia-checkbox').forEach(function(box) {
    if (!box.checked || box.disabled) return;
    const idx = parseInt(box.value, 10);
    if (!Number.isNaN(idx)) indices.push(idx);
  });
  indices.sort(function(a, b) { return a - b; });
  return indices;
}
// Refreshes the GPU-selection note under the checkbox list: either a prompt
// to pick at least one GPU, or a summary of the current selection.
function satUpdateGPUSelectionNote() {
  const noteEl = document.getElementById('sat-gpu-selection-note');
  if (!noteEl) return;
  const indices = satSelectedGPUIndices();
  noteEl.textContent = indices.length
    ? 'Selected NVIDIA GPUs: ' + indices.join(', ') + '.'
    : 'Select at least one NVIDIA GPU to enable NVIDIA validate tasks.';
}
// Renders the NVIDIA GPU checkbox list into #sat-gpu-list and refreshes the
// selection note. Shows a muted placeholder when no GPUs were detected.
// Fix: gpu.name was interpolated into innerHTML unescaped; names come from
// the local API (nvidia-smi), but any markup characters in them would have
// been parsed as HTML, so they are now entity-escaped.
function satRenderGPUList(gpus) {
  const root = document.getElementById('sat-gpu-list');
  if (!root) return;
  if (!gpus || !gpus.length) {
    root.innerHTML = '<p style="color:var(--muted);font-size:13px">No NVIDIA GPUs detected.</p>';
    satUpdateGPUSelectionNote();
    return;
  }
  // Minimal HTML entity escaping for text interpolated into innerHTML.
  const esc = function(s) {
    return String(s).replace(/&/g, '&amp;').replace(/</g, '&lt;')
      .replace(/>/g, '&gt;').replace(/"/g, '&quot;');
  };
  root.innerHTML = gpus.map(function(gpu) {
    const mem = gpu.memory_mb > 0 ? ' · ' + gpu.memory_mb + ' MiB' : '';
    return '<label class="sat-gpu-row">'
      + '<input class="sat-nvidia-checkbox" type="checkbox" value="' + gpu.index + '" checked onchange="satUpdateGPUSelectionNote()">'
      + '<span><strong>GPU ' + gpu.index + '</strong> — ' + esc(gpu.name) + mem + '</span>'
      + '</label>';
  }).join('');
  satUpdateGPUSelectionNote();
}
// Checks every NVIDIA GPU checkbox and refreshes the selection note.
function satSelectAllGPUs() {
  Array.from(document.querySelectorAll('.sat-nvidia-checkbox')).forEach(function(box) {
    box.checked = true;
  });
  satUpdateGPUSelectionNote();
}
// Unchecks every NVIDIA GPU checkbox and refreshes the selection note.
function satSelectNoGPUs() {
  Array.from(document.querySelectorAll('.sat-nvidia-checkbox')).forEach(function(box) {
    box.checked = false;
  });
  satUpdateGPUSelectionNote();
}
// Loads the NVIDIA GPU inventory and renders the checkbox list; on failure
// the error message replaces the list and the note is refreshed.
function satLoadGPUs() {
  loadSatNvidiaGPUs()
    .then(satRenderGPUList)
    .catch(function(err) {
      const root = document.getElementById('sat-gpu-list');
      if (root) {
        root.innerHTML = '<p style="color:var(--crit-fg);font-size:13px">Error: ' + err.message + '</p>';
      }
      satUpdateGPUSelectionNote();
    });
}
function satGPUDisplayName(gpu) { function satGPUDisplayName(gpu) {
const idx = (gpu && Number.isFinite(Number(gpu.index))) ? Number(gpu.index) : 0; const idx = (gpu && Number.isFinite(Number(gpu.index))) ? Number(gpu.index) : 0;
const name = gpu && gpu.name ? gpu.name : ('GPU ' + idx); const name = gpu && gpu.name ? gpu.name : ('GPU ' + idx);
@@ -1149,6 +1213,36 @@ function enqueueSATTarget(target, overrides) {
return fetch('/api/sat/'+target+'/run', {method:'POST',headers:{'Content-Type':'application/json'},body:JSON.stringify(satRequestBody(target, overrides))}) return fetch('/api/sat/'+target+'/run', {method:'POST',headers:{'Content-Type':'application/json'},body:JSON.stringify(satRequestBody(target, overrides))})
.then(r => r.json()); .then(r => r.json());
} }
// Streams one task's log into the SAT terminal via SSE. Resolves (never
// rejects) with {ok, error} when the task finishes or the stream drops, so
// callers can chain tasks sequentially. Any previous stream is closed first.
function streamSATTask(taskId, title, resetTerminal) {
  if (satES) { satES.close(); satES = null; }
  document.getElementById('sat-output').style.display = 'block';
  document.getElementById('sat-title').textContent = '— ' + title;
  const terminal = document.getElementById('sat-terminal');
  if (resetTerminal) terminal.textContent = '';
  terminal.textContent += 'Task ' + taskId + ' queued. Streaming log...\n';
  const append = function(text) {
    terminal.textContent += text;
    terminal.scrollTop = terminal.scrollHeight;
  };
  return new Promise(function(resolve) {
    satES = new EventSource('/api/tasks/' + taskId + '/stream');
    satES.onmessage = function(e) { append(e.data + '\n'); };
    satES.addEventListener('done', function(e) {
      satES.close();
      satES = null;
      append((e.data ? '\nERROR: ' + e.data : '\nCompleted.') + '\n');
      resolve({ok: !e.data, error: e.data || ''});
    });
    satES.onerror = function() {
      if (satES) { satES.close(); satES = null; }
      append('\nERROR: stream disconnected.\n');
      resolve({ok: false, error: 'stream disconnected'});
    };
  });
}
function selectedAMDValidateTargets() { function selectedAMDValidateTargets() {
const targets = []; const targets = [];
const gpu = document.getElementById('sat-amd-target'); const gpu = document.getElementById('sat-amd-target');
@@ -1163,24 +1257,23 @@ function runSAT(target) {
return runSATWithOverrides(target, null); return runSATWithOverrides(target, null);
} }
function runSATWithOverrides(target, overrides) { function runSATWithOverrides(target, overrides) {
if (satES) { satES.close(); satES = null; } const title = (overrides && overrides.display_name) || target;
document.getElementById('sat-output').style.display='block';
document.getElementById('sat-title').textContent = '— ' + target;
const term = document.getElementById('sat-terminal'); const term = document.getElementById('sat-terminal');
term.textContent = 'Enqueuing ' + target + ' test...\n'; document.getElementById('sat-output').style.display='block';
document.getElementById('sat-title').textContent = '— ' + title;
term.textContent = 'Enqueuing ' + title + ' test...\n';
return enqueueSATTarget(target, overrides) return enqueueSATTarget(target, overrides)
.then(d => { .then(d => streamSATTask(d.task_id, title, false));
term.textContent += 'Task ' + d.task_id + ' queued. Streaming log...\n';
satES = new EventSource('/api/tasks/'+d.task_id+'/stream');
satES.onmessage = e => { term.textContent += e.data+'\n'; term.scrollTop=term.scrollHeight; };
satES.addEventListener('done', e => { satES.close(); satES=null; term.textContent += (e.data ? '\nERROR: '+e.data : '\nCompleted.')+'\n'; });
});
} }
function expandSATTarget(target) { function expandSATTarget(target) {
if (target !== 'nvidia' && target !== 'nvidia-targeted-stress') { if (target !== 'nvidia' && target !== 'nvidia-targeted-stress') {
return Promise.resolve([{target: target}]); return Promise.resolve([{target: target}]);
} }
return loadSatNvidiaGPUs().then(gpus => gpus.map(gpu => ({ const selected = satSelectedGPUIndices();
if (!selected.length) {
return Promise.reject(new Error('Select at least one NVIDIA GPU.'));
}
return loadSatNvidiaGPUs().then(gpus => gpus.filter(gpu => selected.indexOf(Number(gpu.index)) >= 0).map(gpu => ({
target: target, target: target,
overrides: { overrides: {
gpu_indices: [Number(gpu.index)], gpu_indices: [Number(gpu.index)],
@@ -1191,65 +1284,61 @@ function expandSATTarget(target) {
} }
function runNvidiaValidateSet(target) { function runNvidiaValidateSet(target) {
return loadSatNvidiaGPUs().then(gpus => { return loadSatNvidiaGPUs().then(gpus => {
if (!gpus.length) return; const selected = satSelectedGPUIndices();
if (gpus.length === 1) { const picked = gpus.filter(gpu => selected.indexOf(Number(gpu.index)) >= 0);
const gpu = gpus[0]; if (!picked.length) {
throw new Error('Select at least one NVIDIA GPU.');
}
if (picked.length === 1) {
const gpu = picked[0];
return runSATWithOverrides(target, { return runSATWithOverrides(target, {
gpu_indices: [Number(gpu.index)], gpu_indices: [Number(gpu.index)],
display_name: (satLabels()[target] || ('Validate ' + target)) + ' (' + satGPUDisplayName(gpu) + ')' display_name: (satLabels()[target] || ('Validate ' + target)) + ' (' + satGPUDisplayName(gpu) + ')'
}); });
} }
if (satES) { satES.close(); satES = null; }
document.getElementById('sat-output').style.display='block'; document.getElementById('sat-output').style.display='block';
document.getElementById('sat-title').textContent = '— ' + target; document.getElementById('sat-title').textContent = '— ' + target;
const term = document.getElementById('sat-terminal'); const term = document.getElementById('sat-terminal');
term.textContent = 'Enqueuing ' + target + ' tests one GPU at a time...\n'; term.textContent = 'Running ' + target + ' one GPU at a time...\n';
const labelBase = satLabels()[target] || ('Validate ' + target); const labelBase = satLabels()[target] || ('Validate ' + target);
const enqueueNext = (idx) => { const runNext = (idx) => {
if (idx >= gpus.length) return; if (idx >= picked.length) return Promise.resolve();
const gpu = gpus[idx]; const gpu = picked[idx];
const gpuLabel = satGPUDisplayName(gpu); const gpuLabel = satGPUDisplayName(gpu);
enqueueSATTarget(target, { term.textContent += '\n[' + (idx + 1) + '/' + picked.length + '] ' + gpuLabel + '\n';
return enqueueSATTarget(target, {
gpu_indices: [Number(gpu.index)], gpu_indices: [Number(gpu.index)],
display_name: labelBase + ' (' + gpuLabel + ')' display_name: labelBase + ' (' + gpuLabel + ')'
}).then(d => { }).then(d => {
term.textContent += 'Task ' + d.task_id + ' queued for ' + gpuLabel + '.\n'; return streamSATTask(d.task_id, labelBase + ' (' + gpuLabel + ')', false);
if (idx === gpus.length - 1) { }).then(function() {
satES = new EventSource('/api/tasks/' + d.task_id + '/stream'); return runNext(idx + 1);
satES.onmessage = e => { term.textContent += e.data+'\n'; term.scrollTop=term.scrollHeight; };
satES.addEventListener('done', e => { satES.close(); satES=null; term.textContent += (e.data ? '\nERROR: '+e.data : '\nCompleted.')+'\n'; });
}
enqueueNext(idx + 1);
}); });
}; };
enqueueNext(0); return runNext(0);
}); });
} }
function runAMDValidateSet() { function runAMDValidateSet() {
const targets = selectedAMDValidateTargets(); const targets = selectedAMDValidateTargets();
if (!targets.length) return; if (!targets.length) return;
if (targets.length === 1) return runSAT(targets[0]); if (targets.length === 1) return runSAT(targets[0]);
if (satES) { satES.close(); satES = null; }
document.getElementById('sat-output').style.display='block'; document.getElementById('sat-output').style.display='block';
document.getElementById('sat-title').textContent = '— amd'; document.getElementById('sat-title').textContent = '— amd';
const term = document.getElementById('sat-terminal'); const term = document.getElementById('sat-terminal');
term.textContent = 'Enqueuing AMD validate set...\n'; term.textContent = 'Running AMD validate set one by one...\n';
const labels = satLabels(); const labels = satLabels();
const enqueueNext = (idx) => { const runNext = (idx) => {
if (idx >= targets.length) return; if (idx >= targets.length) return Promise.resolve();
const target = targets[idx]; const target = targets[idx];
enqueueSATTarget(target) term.textContent += '\n[' + (idx + 1) + '/' + targets.length + '] ' + labels[target] + '\n';
return enqueueSATTarget(target)
.then(d => { .then(d => {
term.textContent += 'Task ' + d.task_id + ' queued for ' + labels[target] + '.\n'; return streamSATTask(d.task_id, labels[target], false);
if (idx === targets.length - 1) { }).then(function() {
satES = new EventSource('/api/tasks/'+d.task_id+'/stream'); return runNext(idx + 1);
satES.onmessage = e => { term.textContent += e.data+'\n'; term.scrollTop=term.scrollHeight; };
satES.addEventListener('done', e => { satES.close(); satES=null; term.textContent += (e.data ? '\nERROR: '+e.data : '\nCompleted.')+'\n'; });
}
enqueueNext(idx + 1);
}); });
}; };
enqueueNext(0); return runNext(0);
} }
function runAllSAT() { function runAllSAT() {
const cycles = Math.max(1, parseInt(document.getElementById('sat-cycles').value)||1); const cycles = Math.max(1, parseInt(document.getElementById('sat-cycles').value)||1);
@@ -1271,17 +1360,17 @@ function runAllSAT() {
status.textContent = 'No tasks selected.'; status.textContent = 'No tasks selected.';
return; return;
} }
const enqueueNext = (idx) => { const runNext = (idx) => {
if (idx >= expanded.length) { status.textContent = 'Enqueued ' + total + ' tasks.'; return; } if (idx >= expanded.length) { status.textContent = 'Completed ' + total + ' task(s).'; return Promise.resolve(); }
const item = expanded[idx]; const item = expanded[idx];
enqueueSATTarget(item.target, item.overrides) status.textContent = 'Running ' + (idx + 1) + '/' + total + '...';
return enqueueSATTarget(item.target, item.overrides)
.then(() => { .then(() => {
enqueued++; enqueued++;
status.textContent = 'Enqueued ' + enqueued + '/' + total + '...'; return runNext(idx + 1);
enqueueNext(idx + 1);
}); });
}; };
enqueueNext(0); return runNext(0);
}).catch(err => { }).catch(err => {
status.textContent = 'Error: ' + err.message; status.textContent = 'Error: ' + err.message;
}); });
@@ -1294,6 +1383,7 @@ fetch('/api/gpu/presence').then(r=>r.json()).then(gp => {
if (!gp.amd) disableSATCard('amd', 'No AMD GPU detected'); if (!gp.amd) disableSATCard('amd', 'No AMD GPU detected');
if (!gp.amd) disableSATAMDOptions('No AMD GPU detected'); if (!gp.amd) disableSATAMDOptions('No AMD GPU detected');
}); });
satLoadGPUs();
function disableSATAMDOptions(reason) { function disableSATAMDOptions(reason) {
['sat-amd-target','sat-amd-mem-target','sat-amd-bandwidth-target'].forEach(function(id) { ['sat-amd-target','sat-amd-mem-target','sat-amd-bandwidth-target'].forEach(function(id) {
const cb = document.getElementById(id); const cb = document.getElementById(id);
@@ -1482,7 +1572,25 @@ func renderSATCard(id, label, runAction, headerActions, body string) string {
// ── Benchmark ───────────────────────────────────────────────────────────────── // ── Benchmark ─────────────────────────────────────────────────────────────────
func renderBenchmark() string { type benchmarkHistoryColumn struct {
key string
label string
name string
index int
}
type benchmarkHistoryCell struct {
score float64
present bool
}
type benchmarkHistoryRun struct {
generatedAt time.Time
displayTime string
cells map[string]benchmarkHistoryCell
}
func renderBenchmark(opts HandlerOptions) string {
return `<p style="color:var(--muted);font-size:13px;margin-bottom:16px">Benchmark runs generate a human-readable TXT report and machine-readable result bundle. Tasks continue in the background — view progress in <a href="/tasks">Tasks</a>.</p> return `<p style="color:var(--muted);font-size:13px;margin-bottom:16px">Benchmark runs generate a human-readable TXT report and machine-readable result bundle. Tasks continue in the background — view progress in <a href="/tasks">Tasks</a>.</p>
<div class="grid2"> <div class="grid2">
@@ -1531,6 +1639,8 @@ func renderBenchmark() string {
</div> </div>
</div> </div>
` + renderBenchmarkResultsCard(opts.ExportDir) + `
<div id="benchmark-output" style="display:none;margin-top:16px" class="card"> <div id="benchmark-output" style="display:none;margin-top:16px" class="card">
<div class="card-head">Benchmark Output <span id="benchmark-title"></span></div> <div class="card-head">Benchmark Output <span id="benchmark-title"></span></div>
<div class="card-body"><div id="benchmark-terminal" class="terminal"></div></div> <div class="card-body"><div id="benchmark-terminal" class="terminal"></div></div>
@@ -1546,6 +1656,12 @@ func renderBenchmark() string {
<script> <script>
let benchmarkES = null; let benchmarkES = null;
function benchmarkTaskIDs(payload) {
if (payload && Array.isArray(payload.task_ids) && payload.task_ids.length) return payload.task_ids;
if (payload && payload.task_id) return [payload.task_id];
return [];
}
function benchmarkSelectedGPUIndices() { function benchmarkSelectedGPUIndices() {
return Array.from(document.querySelectorAll('.benchmark-gpu-checkbox')) return Array.from(document.querySelectorAll('.benchmark-gpu-checkbox'))
.filter(function(el) { return el.checked && !el.disabled; }) .filter(function(el) { return el.checked && !el.disabled; })
@@ -1645,17 +1761,37 @@ function runNvidiaBenchmark() {
return payload; return payload;
}); });
}).then(function(d) { }).then(function(d) {
status.textContent = 'Task ' + d.task_id + ' queued.'; const taskIds = benchmarkTaskIDs(d);
term.textContent += 'Task ' + d.task_id + ' queued. Streaming log...\n'; if (!taskIds.length) throw new Error('No benchmark task was queued.');
benchmarkES = new EventSource('/api/tasks/' + d.task_id + '/stream'); status.textContent = taskIds.length === 1 ? ('Task ' + taskIds[0] + ' queued.') : ('Queued ' + taskIds.length + ' tasks.');
benchmarkES.onmessage = function(e) { term.textContent += e.data + '\n'; term.scrollTop = term.scrollHeight; }; const streamNext = function(idx, failures) {
benchmarkES.addEventListener('done', function(e) { if (idx >= taskIds.length) {
benchmarkES.close(); status.textContent = failures ? 'Completed with failures.' : 'Completed.';
benchmarkES = null; return;
term.textContent += (e.data ? '\nERROR: ' + e.data : '\nCompleted.') + '\n'; }
term.scrollTop = term.scrollHeight; const taskId = taskIds[idx];
status.textContent = e.data ? 'Failed.' : 'Completed.'; term.textContent += '\n[' + (idx + 1) + '/' + taskIds.length + '] Task ' + taskId + ' queued. Streaming log...\n';
}); benchmarkES = new EventSource('/api/tasks/' + taskId + '/stream');
benchmarkES.onmessage = function(e) { term.textContent += e.data + '\n'; term.scrollTop = term.scrollHeight; };
benchmarkES.addEventListener('done', function(e) {
benchmarkES.close();
benchmarkES = null;
if (e.data) failures += 1;
term.textContent += (e.data ? '\nERROR: ' + e.data : '\nCompleted.') + '\n';
term.scrollTop = term.scrollHeight;
streamNext(idx + 1, failures);
});
benchmarkES.onerror = function() {
if (benchmarkES) {
benchmarkES.close();
benchmarkES = null;
}
term.textContent += '\nERROR: stream disconnected.\n';
term.scrollTop = term.scrollHeight;
streamNext(idx + 1, failures + 1);
};
};
streamNext(0, 0);
}).catch(function(err) { }).catch(function(err) {
status.textContent = 'Error.'; status.textContent = 'Error.';
term.textContent += 'ERROR: ' + err.message + '\n'; term.textContent += 'ERROR: ' + err.message + '\n';
@@ -1667,6 +1803,129 @@ benchmarkLoadGPUs();
</script>` </script>`
} }
// renderBenchmarkResultsCard renders the saved-benchmark-history card for the
// Benchmark page, sourcing its rows from result.json files under exportDir.
func renderBenchmarkResultsCard(exportDir string) string {
	const (
		title        = "Benchmark Results"
		description  = "Composite score by saved benchmark run and GPU."
		emptyMessage = "No saved benchmark runs yet."
	)
	columns, runs := loadBenchmarkHistory(exportDir)
	return renderBenchmarkResultsCardFromRuns(title, description, emptyMessage, columns, runs)
}
// renderBenchmarkResultsCardFromRuns renders a card holding a table of
// benchmark runs (rows) against GPU columns. With no runs it renders the
// card containing only emptyMessage. Title, description, time, and column
// labels are HTML-escaped; missing cells render as a muted dash.
func renderBenchmarkResultsCardFromRuns(title, description, emptyMessage string, columns []benchmarkHistoryColumn, runs []benchmarkHistoryRun) string {
	if len(runs) == 0 {
		return `<div class="card"><div class="card-head">` + html.EscapeString(title) + `</div><div class="card-body"><p style="color:var(--muted);font-size:13px">` + html.EscapeString(emptyMessage) + `</p></div></div>`
	}
	var sb strings.Builder
	sb.WriteString(`<div class="card"><div class="card-head">` + html.EscapeString(title) + `</div><div class="card-body">`)
	if strings.TrimSpace(description) != "" {
		sb.WriteString(`<p style="color:var(--muted);font-size:13px;margin-bottom:12px">` + html.EscapeString(description) + `</p>`)
	}
	sb.WriteString(`<div style="overflow-x:auto">`)
	sb.WriteString(`<table><thead><tr><th>Test</th><th>Time</th>`)
	for _, column := range columns {
		sb.WriteString(`<th>` + html.EscapeString(column.label) + `</th>`)
	}
	sb.WriteString(`</tr></thead><tbody>`)
	for rowIdx, run := range runs {
		sb.WriteString(`<tr>`)
		sb.WriteString(`<td>#` + strconv.Itoa(rowIdx+1) + `</td>`)
		sb.WriteString(`<td>` + html.EscapeString(run.displayTime) + `</td>`)
		for _, column := range columns {
			if cell, ok := run.cells[column.key]; ok && cell.present {
				sb.WriteString(`<td>` + fmt.Sprintf("%.2f", cell.score) + `</td>`)
			} else {
				sb.WriteString(`<td style="color:var(--muted)">-</td>`)
			}
		}
		sb.WriteString(`</tr>`)
	}
	sb.WriteString(`</tbody></table></div></div></div>`)
	return sb.String()
}
// loadBenchmarkHistory locates saved benchmark result.json files (under the
// export dir when set, otherwise the default benchmark base dir) and parses
// them into history columns and runs. Returns nil, nil when none are found.
func loadBenchmarkHistory(exportDir string) ([]benchmarkHistoryColumn, []benchmarkHistoryRun) {
	baseDir := app.DefaultBenchmarkBaseDir
	if strings.TrimSpace(exportDir) != "" {
		baseDir = filepath.Join(exportDir, "bee-benchmark")
	}
	pattern := filepath.Join(baseDir, "gpu-benchmark-*", "result.json")
	paths, err := filepath.Glob(pattern)
	if err != nil || len(paths) == 0 {
		return nil, nil
	}
	sort.Strings(paths)
	return loadBenchmarkHistoryFromPaths(paths)
}
// loadBenchmarkHistoryFromPaths parses each result.json path into a history
// run and accumulates the set of GPU columns seen across all runs. Unreadable
// or unparsable files are silently skipped (best-effort history). Columns are
// ordered by lowercased GPU name, then index, then key; runs newest-first.
func loadBenchmarkHistoryFromPaths(paths []string) ([]benchmarkHistoryColumn, []benchmarkHistoryRun) {
	seenColumns := make(map[string]benchmarkHistoryColumn)
	history := make([]benchmarkHistoryRun, 0, len(paths))
	for _, resultPath := range paths {
		data, readErr := os.ReadFile(resultPath)
		if readErr != nil {
			continue
		}
		var parsed platform.NvidiaBenchmarkResult
		if json.Unmarshal(data, &parsed) != nil {
			continue
		}
		entry := benchmarkHistoryRun{
			generatedAt: parsed.GeneratedAt,
			displayTime: parsed.GeneratedAt.Local().Format("2006-01-02 15:04:05"),
			cells:       make(map[string]benchmarkHistoryCell),
		}
		for _, gpu := range parsed.GPUs {
			key := benchmarkHistoryColumnKey(gpu.Name, gpu.Index)
			seenColumns[key] = benchmarkHistoryColumn{
				key:   key,
				label: benchmarkHistoryColumnLabel(gpu.Name, gpu.Index),
				name:  strings.TrimSpace(gpu.Name),
				index: gpu.Index,
			}
			entry.cells[key] = benchmarkHistoryCell{score: gpu.Scores.CompositeScore, present: true}
		}
		history = append(history, entry)
	}
	columns := make([]benchmarkHistoryColumn, 0, len(seenColumns))
	for _, column := range seenColumns {
		columns = append(columns, column)
	}
	sort.Slice(columns, func(i, j int) bool {
		left, right := columns[i], columns[j]
		leftName := strings.ToLower(strings.TrimSpace(left.name))
		rightName := strings.ToLower(strings.TrimSpace(right.name))
		switch {
		case leftName != rightName:
			return leftName < rightName
		case left.index != right.index:
			return left.index < right.index
		default:
			return left.key < right.key
		}
	})
	sort.Slice(history, func(i, j int) bool {
		return history[i].generatedAt.After(history[j].generatedAt)
	})
	return columns, history
}
// benchmarkHistoryColumnKey builds the stable map key identifying a GPU
// column: the trimmed GPU name and its index joined by a '|' separator.
func benchmarkHistoryColumnKey(name string, index int) string {
	parts := []string{strings.TrimSpace(name), strconv.Itoa(index)}
	return strings.Join(parts, "|")
}
// benchmarkHistoryColumnLabel renders the human-readable heading for a GPU
// column: "name / GPU N", or just "GPU N" when the name is blank.
func benchmarkHistoryColumnLabel(name string, index int) string {
	trimmed := strings.TrimSpace(name)
	if trimmed == "" {
		return fmt.Sprintf("GPU %d", index)
	}
	return fmt.Sprintf("%s / GPU %d", trimmed, index)
}
// ── Burn ────────────────────────────────────────────────────────────────────── // ── Burn ──────────────────────────────────────────────────────────────────────
func renderBurn() string { func renderBurn() string {
@@ -1786,6 +2045,12 @@ func renderBurn() string {
<script> <script>
let biES = null; let biES = null;
function burnTaskIDs(payload) {
if (payload && Array.isArray(payload.task_ids) && payload.task_ids.length) return payload.task_ids;
if (payload && payload.task_id) return [payload.task_id];
return [];
}
function burnProfile() { function burnProfile() {
const selected = document.querySelector('input[name="burn-profile"]:checked'); const selected = document.querySelector('input[name="burn-profile"]:checked');
return selected ? selected.value : 'smoke'; return selected ? selected.value : 'smoke';
@@ -1886,6 +2151,52 @@ function streamTask(taskId, label) {
term.scrollTop = term.scrollHeight; term.scrollTop = term.scrollHeight;
}); });
} }
function streamBurnTask(taskId, label, resetTerminal) {
return streamBurnTaskSet([taskId], label, resetTerminal);
}
function streamBurnTaskSet(taskIds, label, resetTerminal) {
if (biES) { biES.close(); biES = null; }
document.getElementById('bi-output').style.display = 'block';
document.getElementById('bi-title').textContent = '— ' + label + ' [' + burnProfile() + ']';
const term = document.getElementById('bi-terminal');
if (resetTerminal) {
term.textContent = '';
}
if (!Array.isArray(taskIds) || !taskIds.length) {
term.textContent += 'ERROR: no tasks queued.\n';
return Promise.resolve({ok:false, error:'no tasks queued'});
}
const streamNext = function(idx, failures) {
if (idx >= taskIds.length) {
return Promise.resolve({ok: failures === 0, error: failures ? (failures + ' task(s) failed') : ''});
}
const taskId = taskIds[idx];
term.textContent += '[' + (idx + 1) + '/' + taskIds.length + '] Task ' + taskId + ' queued. Streaming...\n';
return new Promise(function(resolve) {
biES = new EventSource('/api/tasks/' + taskId + '/stream');
biES.onmessage = function(e) { term.textContent += e.data + '\n'; term.scrollTop = term.scrollHeight; };
biES.addEventListener('done', function(e) {
biES.close();
biES = null;
term.textContent += (e.data ? '\nERROR: ' + e.data : '\nCompleted.') + '\n';
term.scrollTop = term.scrollHeight;
resolve(failures + (e.data ? 1 : 0));
});
biES.onerror = function() {
if (biES) {
biES.close();
biES = null;
}
term.textContent += '\nERROR: stream disconnected.\n';
term.scrollTop = term.scrollHeight;
resolve(failures + 1);
};
}).then(function(nextFailures) {
return streamNext(idx + 1, nextFailures);
});
};
return streamNext(0, 0);
}
function runBurnTaskSet(tasks, statusElId) { function runBurnTaskSet(tasks, statusElId) {
const enabled = tasks.filter(function(t) { const enabled = tasks.filter(function(t) {
@@ -1898,19 +2209,33 @@ function runBurnTaskSet(tasks, statusElId) {
if (status) status.textContent = 'No tasks selected.'; if (status) status.textContent = 'No tasks selected.';
return; return;
} }
enabled.forEach(function(t) { const term = document.getElementById('bi-terminal');
enqueueBurnTask(t.target, t.label, t.extra, !!t.nvidia) document.getElementById('bi-output').style.display = 'block';
document.getElementById('bi-title').textContent = '— Burn one by one [' + burnProfile() + ']';
term.textContent = '';
const runNext = function(idx) {
if (idx >= enabled.length) {
if (status) status.textContent = 'Completed ' + enabled.length + ' task(s).';
return Promise.resolve();
}
const t = enabled[idx];
term.textContent += '\n[' + (idx + 1) + '/' + enabled.length + '] ' + t.label + '\n';
if (status) status.textContent = 'Running ' + (idx + 1) + '/' + enabled.length + '...';
return enqueueBurnTask(t.target, t.label, t.extra, !!t.nvidia)
.then(function(d) { .then(function(d) {
if (status) status.textContent = enabled.length + ' task(s) queued.'; return streamBurnTaskSet(burnTaskIDs(d), t.label, false);
streamTask(d.task_id, t.label); })
.then(function() {
return runNext(idx + 1);
}) })
.catch(function(err) { .catch(function(err) {
if (status) status.textContent = 'Error: ' + err.message; if (status) status.textContent = 'Error: ' + err.message;
const term = document.getElementById('bi-terminal');
document.getElementById('bi-output').style.display = 'block'; document.getElementById('bi-output').style.display = 'block';
term.textContent += 'ERROR: ' + err.message + '\n'; term.textContent += 'ERROR: ' + err.message + '\n';
return Promise.reject(err);
}); });
}); };
return runNext(0);
} }
function runPlatformStress() { function runPlatformStress() {

View File

@@ -1,6 +1,7 @@
package webui package webui
import ( import (
"encoding/json"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"os" "os"
@@ -601,8 +602,8 @@ func TestToolsPageRendersRestartGPUDriversButton(t *testing.T) {
if !strings.Contains(body, `Restart GPU Drivers`) { if !strings.Contains(body, `Restart GPU Drivers`) {
t.Fatalf("tools page missing restart gpu drivers button: %s", body) t.Fatalf("tools page missing restart gpu drivers button: %s", body)
} }
if !strings.Contains(body, `svcAction('bee-nvidia', 'restart')`) { if !strings.Contains(body, `restartGPUDrivers()`) {
t.Fatalf("tools page missing bee-nvidia restart action: %s", body) t.Fatalf("tools page missing restartGPUDrivers action: %s", body)
} }
if !strings.Contains(body, `id="boot-source-text"`) { if !strings.Contains(body, `id="boot-source-text"`) {
t.Fatalf("tools page missing boot source field: %s", body) t.Fatalf("tools page missing boot source field: %s", body)
@@ -636,6 +637,66 @@ func TestBenchmarkPageRendersGPUSelectionControls(t *testing.T) {
} }
} }
func TestBenchmarkPageRendersSavedResultsTable(t *testing.T) {
dir := t.TempDir()
exportDir := filepath.Join(dir, "export")
runDir := filepath.Join(exportDir, "bee-benchmark", "gpu-benchmark-20260406-120000")
if err := os.MkdirAll(runDir, 0755); err != nil {
t.Fatal(err)
}
result := platform.NvidiaBenchmarkResult{
GeneratedAt: time.Date(2026, time.April, 6, 12, 0, 0, 0, time.UTC),
BenchmarkProfile: "standard",
OverallStatus: "OK",
GPUs: []platform.BenchmarkGPUResult{
{
Index: 0,
Name: "NVIDIA H100 PCIe",
Scores: platform.BenchmarkScorecard{
CompositeScore: 1176.25,
},
},
{
Index: 1,
Name: "NVIDIA H100 PCIe",
Scores: platform.BenchmarkScorecard{
CompositeScore: 1168.50,
},
},
},
}
raw, err := json.Marshal(result)
if err != nil {
t.Fatal(err)
}
if err := os.WriteFile(filepath.Join(runDir, "result.json"), raw, 0644); err != nil {
t.Fatal(err)
}
handler := NewHandler(HandlerOptions{ExportDir: exportDir})
rec := httptest.NewRecorder()
handler.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/benchmark", nil))
if rec.Code != http.StatusOK {
t.Fatalf("status=%d", rec.Code)
}
body := rec.Body.String()
wantTime := result.GeneratedAt.Local().Format("2006-01-02 15:04:05")
for _, needle := range []string{
`Benchmark Results`,
`Composite score by saved benchmark run and GPU.`,
`NVIDIA H100 PCIe / GPU 0`,
`NVIDIA H100 PCIe / GPU 1`,
`#1`,
wantTime,
`1176.25`,
`1168.50`,
} {
if !strings.Contains(body, needle) {
t.Fatalf("benchmark page missing %q: %s", needle, body)
}
}
}
func TestValidatePageRendersNvidiaTargetedStressCard(t *testing.T) { func TestValidatePageRendersNvidiaTargetedStressCard(t *testing.T) {
handler := NewHandler(HandlerOptions{}) handler := NewHandler(HandlerOptions{})
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
@@ -649,6 +710,8 @@ func TestValidatePageRendersNvidiaTargetedStressCard(t *testing.T) {
`nvidia-targeted-stress`, `nvidia-targeted-stress`,
`controlled NVIDIA DCGM load`, `controlled NVIDIA DCGM load`,
`<code>dcgmi diag targeted_stress</code>`, `<code>dcgmi diag targeted_stress</code>`,
`NVIDIA GPU Selection`,
`id="sat-gpu-list"`,
} { } {
if !strings.Contains(body, needle) { if !strings.Contains(body, needle) {
t.Fatalf("validate page missing %q: %s", needle, body) t.Fatalf("validate page missing %q: %s", needle, body)

View File

@@ -97,32 +97,73 @@ func renderTaskDetailPage(opts HandlerOptions, task Task) string {
body.WriteString(`</div></div>`) body.WriteString(`</div></div>`)
} }
if task.Status == TaskRunning || task.Status == TaskPending { if task.Status == TaskRunning {
body.WriteString(`<div class="card"><div class="card-head">Live Charts</div><div class="card-body">`) body.WriteString(`<div class="card"><div class="card-head">Live Charts</div><div class="card-body">`)
body.WriteString(`<div id="task-live-charts" style="display:flex;flex-direction:column;gap:16px;color:var(--muted);font-size:13px">Loading charts...</div>`) body.WriteString(`<div id="task-live-charts" style="display:flex;flex-direction:column;gap:16px;color:var(--muted);font-size:13px">Loading charts...</div>`)
body.WriteString(`</div></div>`) body.WriteString(`</div></div>`)
}
if task.Status == TaskRunning || task.Status == TaskPending {
body.WriteString(`<div class="card"><div class="card-head">Live Logs</div><div class="card-body">`) body.WriteString(`<div class="card"><div class="card-head">Live Logs</div><div class="card-body">`)
body.WriteString(`<div id="task-live-log" class="terminal" style="max-height:none;white-space:pre-wrap">Connecting...</div>`) body.WriteString(`<div id="task-live-log" class="terminal" style="max-height:none;white-space:pre-wrap">Connecting...</div>`)
body.WriteString(`</div></div>`) body.WriteString(`</div></div>`)
body.WriteString(`<script> body.WriteString(`<script>
function cancelTaskDetail(id) { function cancelTaskDetail(id) {
fetch('/api/tasks/' + id + '/cancel', {method:'POST'}).then(function(){ window.location.reload(); }); fetch('/api/tasks/' + id + '/cancel', {method:'POST'}).then(function(){
var term = document.getElementById('task-live-log');
if (term) {
term.textContent += '\nCancel requested.\n';
term.scrollTop = term.scrollHeight;
}
});
}
function renderTaskLiveCharts(taskId, charts) {
const host = document.getElementById('task-live-charts');
if (!host) return;
if (!Array.isArray(charts) || charts.length === 0) {
host.innerHTML = 'Waiting for metric samples...';
return;
}
const seen = {};
charts.forEach(function(chart) {
seen[chart.file] = true;
let img = host.querySelector('img[data-chart-file="' + chart.file + '"]');
if (img) {
const card = img.closest('.card');
if (card) {
const title = card.querySelector('.card-head');
if (title) title.textContent = chart.title;
}
return;
}
const card = document.createElement('div');
card.className = 'card';
card.style.margin = '0';
card.innerHTML = '<div class="card-head"></div><div class="card-body" style="padding:12px"></div>';
card.querySelector('.card-head').textContent = chart.title;
const body = card.querySelector('.card-body');
img = document.createElement('img');
img.setAttribute('data-task-chart', '1');
img.setAttribute('data-chart-file', chart.file);
img.setAttribute('data-base-src', '/api/tasks/' + taskId + '/chart/' + chart.file);
img.src = '/api/tasks/' + taskId + '/chart/' + chart.file + '?t=' + Date.now();
img.style.width = '100%';
img.style.display = 'block';
img.style.borderRadius = '6px';
img.alt = chart.title;
body.appendChild(img);
host.appendChild(card);
});
Array.from(host.querySelectorAll('img[data-task-chart="1"]')).forEach(function(img) {
const file = img.getAttribute('data-chart-file') || '';
if (seen[file]) return;
const card = img.closest('.card');
if (card) card.remove();
});
} }
function loadTaskLiveCharts(taskId) { function loadTaskLiveCharts(taskId) {
fetch('/api/tasks/' + taskId + '/charts').then(function(r){ return r.json(); }).then(function(charts){ fetch('/api/tasks/' + taskId + '/charts').then(function(r){ return r.json(); }).then(function(charts){
const host = document.getElementById('task-live-charts'); renderTaskLiveCharts(taskId, charts);
if (!host) return;
if (!Array.isArray(charts) || charts.length === 0) {
host.innerHTML = 'Waiting for metric samples...';
return;
}
host.innerHTML = charts.map(function(chart) {
return '<div class="card" style="margin:0">' +
'<div class="card-head">' + chart.title + '</div>' +
'<div class="card-body" style="padding:12px">' +
'<img data-task-chart="1" data-base-src="/api/tasks/' + taskId + '/chart/' + chart.file + '" src="/api/tasks/' + taskId + '/chart/' + chart.file + '?t=' + Date.now() + '" style="width:100%;display:block;border-radius:6px" alt="' + chart.title + '">' +
'</div></div>';
}).join('');
}).catch(function(){ }).catch(function(){
const host = document.getElementById('task-live-charts'); const host = document.getElementById('task-live-charts');
if (host) host.innerHTML = 'Task charts are unavailable.'; if (host) host.innerHTML = 'Task charts are unavailable.';
@@ -138,12 +179,31 @@ function refreshTaskLiveCharts() {
var _taskDetailES = new EventSource('/api/tasks/` + html.EscapeString(task.ID) + `/stream'); var _taskDetailES = new EventSource('/api/tasks/` + html.EscapeString(task.ID) + `/stream');
var _taskDetailTerm = document.getElementById('task-live-log'); var _taskDetailTerm = document.getElementById('task-live-log');
var _taskChartTimer = null; var _taskChartTimer = null;
var _taskChartsFrozen = false;
_taskDetailES.onopen = function(){ _taskDetailTerm.textContent = ''; }; _taskDetailES.onopen = function(){ _taskDetailTerm.textContent = ''; };
_taskDetailES.onmessage = function(e){ _taskDetailTerm.textContent += e.data + "\n"; _taskDetailTerm.scrollTop = _taskDetailTerm.scrollHeight; }; _taskDetailES.onmessage = function(e){ _taskDetailTerm.textContent += e.data + "\n"; _taskDetailTerm.scrollTop = _taskDetailTerm.scrollHeight; };
_taskDetailES.addEventListener('done', function(){ if (_taskChartTimer) clearInterval(_taskChartTimer); _taskDetailES.close(); setTimeout(function(){ window.location.reload(); }, 1000); }); _taskDetailES.addEventListener('done', function(e){
_taskDetailES.onerror = function(){ if (_taskChartTimer) clearInterval(_taskChartTimer); _taskDetailES.close(); }; if (_taskChartTimer) clearInterval(_taskChartTimer);
_taskDetailES.close();
_taskDetailES = null;
_taskChartsFrozen = true;
_taskDetailTerm.textContent += (e.data ? '\nTask finished with error.\n' : '\nTask finished.\n');
_taskDetailTerm.scrollTop = _taskDetailTerm.scrollHeight;
refreshTaskLiveCharts();
});
_taskDetailES.onerror = function(){
if (_taskChartTimer) clearInterval(_taskChartTimer);
if (_taskDetailES) {
_taskDetailES.close();
_taskDetailES = null;
}
};
loadTaskLiveCharts('` + html.EscapeString(task.ID) + `'); loadTaskLiveCharts('` + html.EscapeString(task.ID) + `');
_taskChartTimer = setInterval(function(){ refreshTaskLiveCharts(); loadTaskLiveCharts('` + html.EscapeString(task.ID) + `'); }, 2000); _taskChartTimer = setInterval(function(){
if (_taskChartsFrozen) return;
loadTaskLiveCharts('` + html.EscapeString(task.ID) + `');
refreshTaskLiveCharts();
}, 2000);
</script>`) </script>`)
} }

View File

@@ -230,6 +230,9 @@ func renderTaskReportFragment(report taskReport, charts map[string]string, logTe
b.WriteString(`<div style="margin-top:14px;font-size:13px;color:var(--muted)">`) b.WriteString(`<div style="margin-top:14px;font-size:13px;color:var(--muted)">`)
b.WriteString(`Started: ` + formatTaskTime(report.StartedAt, report.CreatedAt) + ` | Finished: ` + formatTaskTime(report.DoneAt, time.Time{}) + ` | Duration: ` + formatTaskDuration(report.DurationSec)) b.WriteString(`Started: ` + formatTaskTime(report.StartedAt, report.CreatedAt) + ` | Finished: ` + formatTaskTime(report.DoneAt, time.Time{}) + ` | Duration: ` + formatTaskDuration(report.DurationSec))
b.WriteString(`</div></div></div>`) b.WriteString(`</div></div></div>`)
if benchmarkCard := renderTaskBenchmarkResultsCard(report.Target, logText); benchmarkCard != "" {
b.WriteString(benchmarkCard)
}
if len(report.Charts) > 0 { if len(report.Charts) > 0 {
for _, chart := range report.Charts { for _, chart := range report.Charts {
@@ -247,6 +250,57 @@ func renderTaskReportFragment(report taskReport, charts map[string]string, logTe
return b.String() return b.String()
} }
func renderTaskBenchmarkResultsCard(target, logText string) string {
if strings.TrimSpace(target) != "nvidia-benchmark" {
return ""
}
resultPath := taskBenchmarkResultPath(logText)
if strings.TrimSpace(resultPath) == "" {
return ""
}
columns, runs := loadBenchmarkHistoryFromPaths([]string{resultPath})
if len(runs) == 0 {
return ""
}
return renderBenchmarkResultsCardFromRuns(
"Benchmark Results",
"Composite score for this benchmark task.",
"No benchmark results were saved for this task.",
columns,
runs,
)
}
func taskBenchmarkResultPath(logText string) string {
archivePath := taskArchivePathFromLog(logText)
if archivePath == "" {
return ""
}
runDir := strings.TrimSuffix(archivePath, ".tar.gz")
if runDir == archivePath {
return ""
}
return filepath.Join(runDir, "result.json")
}
func taskArchivePathFromLog(logText string) string {
lines := strings.Split(logText, "\n")
for i := len(lines) - 1; i >= 0; i-- {
line := strings.TrimSpace(lines[i])
if line == "" || !strings.HasPrefix(line, "Archive:") {
continue
}
path := strings.TrimSpace(strings.TrimPrefix(line, "Archive:"))
if strings.HasPrefix(path, "Archive written to ") {
path = strings.TrimSpace(strings.TrimPrefix(path, "Archive written to "))
}
if strings.HasSuffix(path, ".tar.gz") {
return path
}
}
return ""
}
func renderTaskStatusBadge(status string) string { func renderTaskStatusBadge(status string) string {
className := map[string]string{ className := map[string]string{
TaskRunning: "badge-ok", TaskRunning: "badge-ok",

View File

@@ -423,13 +423,14 @@ func (q *taskQueue) worker() {
setCPUGovernor("performance") setCPUGovernor("performance")
defer setCPUGovernor("powersave") defer setCPUGovernor("powersave")
// Drain all pending tasks and start them in parallel.
q.mu.Lock()
var batch []*Task
for { for {
q.mu.Lock()
t := q.nextPending() t := q.nextPending()
if t == nil { if t == nil {
break q.prune()
q.persistLocked()
q.mu.Unlock()
return
} }
now := time.Now() now := time.Now()
t.Status = TaskRunning t.Status = TaskRunning
@@ -438,29 +439,14 @@ func (q *taskQueue) worker() {
t.ErrMsg = "" t.ErrMsg = ""
j := newTaskJobState(t.LogPath, taskSerialPrefix(t)) j := newTaskJobState(t.LogPath, taskSerialPrefix(t))
t.job = j t.job = j
batch = append(batch, t)
}
if len(batch) > 0 {
q.persistLocked() q.persistLocked()
} q.mu.Unlock()
q.mu.Unlock()
var wg sync.WaitGroup
for _, t := range batch {
t := t
j := t.job
taskCtx, taskCancel := context.WithCancel(context.Background()) taskCtx, taskCancel := context.WithCancel(context.Background())
j.cancel = taskCancel j.cancel = taskCancel
wg.Add(1) q.executeTask(t, j, taskCtx)
goRecoverOnce("task "+t.Target, func() { taskCancel()
defer wg.Done()
defer taskCancel()
q.executeTask(t, j, taskCtx)
})
}
wg.Wait()
if len(batch) > 0 {
q.mu.Lock() q.mu.Lock()
q.prune() q.prune()
q.persistLocked() q.persistLocked()
@@ -1163,7 +1149,32 @@ func taskArtifactsDir(root string, t *Task, status string) string {
if strings.TrimSpace(root) == "" || t == nil { if strings.TrimSpace(root) == "" || t == nil {
return "" return ""
} }
return filepath.Join(root, fmt.Sprintf("%s_%s_%s", t.ID, sanitizeTaskFolderPart(t.Name), taskFolderStatus(status))) prefix := taskFolderNumberPrefix(t.ID)
return filepath.Join(root, fmt.Sprintf("%s_%s_%s", prefix, sanitizeTaskFolderPart(t.Name), taskFolderStatus(status)))
}
func taskFolderNumberPrefix(taskID string) string {
taskID = strings.TrimSpace(taskID)
if strings.HasPrefix(taskID, "TASK-") && len(taskID) >= len("TASK-000") {
num := strings.TrimSpace(strings.TrimPrefix(taskID, "TASK-"))
if len(num) == 3 {
allDigits := true
for _, r := range num {
if r < '0' || r > '9' {
allDigits = false
break
}
}
if allDigits {
return num
}
}
}
fallback := sanitizeTaskFolderPart(taskID)
if fallback == "" {
return "000"
}
return fallback
} }
func ensureTaskReportPaths(t *Task) { func ensureTaskReportPaths(t *Task) {

View File

@@ -163,6 +163,40 @@ func TestTaskQueueSnapshotSortsNewestFirst(t *testing.T) {
} }
} }
func TestNewJobIDUsesTASKPrefixAndZeroPadding(t *testing.T) {
globalQueue.mu.Lock()
origTasks := globalQueue.tasks
globalQueue.tasks = nil
globalQueue.mu.Unlock()
origCounter := jobCounter.Load()
jobCounter.Store(0)
t.Cleanup(func() {
globalQueue.mu.Lock()
globalQueue.tasks = origTasks
globalQueue.mu.Unlock()
jobCounter.Store(origCounter)
})
if got := newJobID("ignored"); got != "TASK-000" {
t.Fatalf("id=%q want TASK-000", got)
}
if got := newJobID("ignored"); got != "TASK-001" {
t.Fatalf("id=%q want TASK-001", got)
}
}
func TestTaskArtifactsDirStartsWithTaskNumber(t *testing.T) {
root := t.TempDir()
task := &Task{
ID: "TASK-007",
Name: "NVIDIA Benchmark",
}
got := filepath.Base(taskArtifactsDir(root, task, TaskDone))
if !strings.HasPrefix(got, "007_") {
t.Fatalf("artifacts dir=%q want prefix 007_", got)
}
}
func TestHandleAPITasksStreamReplaysPersistedLogWithoutLiveJob(t *testing.T) { func TestHandleAPITasksStreamReplaysPersistedLogWithoutLiveJob(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
logPath := filepath.Join(dir, "task.log") logPath := filepath.Join(dir, "task.log")
@@ -325,6 +359,78 @@ func TestFinalizeTaskRunCreatesReportFolderAndArtifacts(t *testing.T) {
} }
} }
func TestWriteTaskReportArtifactsIncludesBenchmarkResultsForTask(t *testing.T) {
dir := t.TempDir()
metricsPath := filepath.Join(dir, "metrics.db")
prevMetricsPath := taskReportMetricsDBPath
taskReportMetricsDBPath = metricsPath
t.Cleanup(func() { taskReportMetricsDBPath = prevMetricsPath })
benchmarkDir := filepath.Join(dir, "bee-benchmark", "gpu-benchmark-20260406-120000")
if err := os.MkdirAll(benchmarkDir, 0755); err != nil {
t.Fatal(err)
}
result := platform.NvidiaBenchmarkResult{
GeneratedAt: time.Date(2026, time.April, 6, 12, 0, 0, 0, time.UTC),
BenchmarkProfile: "standard",
OverallStatus: "OK",
GPUs: []platform.BenchmarkGPUResult{
{
Index: 0,
Name: "NVIDIA H100 PCIe",
Scores: platform.BenchmarkScorecard{
CompositeScore: 1176.25,
},
},
},
}
raw, err := json.Marshal(result)
if err != nil {
t.Fatal(err)
}
if err := os.WriteFile(filepath.Join(benchmarkDir, "result.json"), raw, 0644); err != nil {
t.Fatal(err)
}
artifactsDir := filepath.Join(dir, "tasks", "task-bench_done")
if err := os.MkdirAll(artifactsDir, 0755); err != nil {
t.Fatal(err)
}
task := &Task{
ID: "task-bench",
Name: "NVIDIA Benchmark",
Target: "nvidia-benchmark",
Status: TaskDone,
CreatedAt: time.Now().UTC().Add(-time.Minute),
ArtifactsDir: artifactsDir,
}
ensureTaskReportPaths(task)
logText := "line-1\nArchive: " + filepath.Join(dir, "bee-benchmark", "gpu-benchmark-20260406-120000.tar.gz") + "\n"
if err := os.WriteFile(task.LogPath, []byte(logText), 0644); err != nil {
t.Fatal(err)
}
if err := writeTaskReportArtifacts(task); err != nil {
t.Fatalf("writeTaskReportArtifacts: %v", err)
}
body, err := os.ReadFile(task.ReportHTMLPath)
if err != nil {
t.Fatalf("ReadFile(report.html): %v", err)
}
html := string(body)
for _, needle := range []string{
`Benchmark Results`,
`Composite score for this benchmark task.`,
`NVIDIA H100 PCIe / GPU 0`,
`1176.25`,
} {
if !strings.Contains(html, needle) {
t.Fatalf("report missing %q: %s", needle, html)
}
}
}
func TestTaskLifecycleMirrorsToSerialConsole(t *testing.T) { func TestTaskLifecycleMirrorsToSerialConsole(t *testing.T) {
var lines []string var lines []string
prev := taskSerialWriteLine prev := taskSerialWriteLine

View File

@@ -41,15 +41,15 @@ while [ $# -gt 0 ]; do
;; ;;
*) *)
echo "unknown arg: $1" >&2 echo "unknown arg: $1" >&2
echo "usage: $0 [--cache-dir /path] [--rebuild-image] [--clean-build] [--authorized-keys /path/to/authorized_keys] [--variant nvidia|amd|all]" >&2 echo "usage: $0 [--cache-dir /path] [--rebuild-image] [--clean-build] [--authorized-keys /path/to/authorized_keys] [--variant nvidia|nvidia-legacy|amd|nogpu|all]" >&2
exit 1 exit 1
;; ;;
esac esac
done done
case "$VARIANT" in case "$VARIANT" in
nvidia|amd|nogpu|all) ;; nvidia|nvidia-legacy|amd|nogpu|all) ;;
*) echo "unknown variant: $VARIANT (expected nvidia, amd, nogpu, or all)" >&2; exit 1 ;; *) echo "unknown variant: $VARIANT (expected nvidia, nvidia-legacy, amd, nogpu, or all)" >&2; exit 1 ;;
esac esac
if [ "$CLEAN_CACHE" = "1" ]; then if [ "$CLEAN_CACHE" = "1" ]; then
@@ -61,8 +61,13 @@ if [ "$CLEAN_CACHE" = "1" ]; then
"${CACHE_DIR:?}/lb-packages" "${CACHE_DIR:?}/lb-packages"
echo "=== cleaning live-build work dirs ===" echo "=== cleaning live-build work dirs ==="
rm -rf "${REPO_ROOT}/dist/live-build-work-nvidia" rm -rf "${REPO_ROOT}/dist/live-build-work-nvidia"
rm -rf "${REPO_ROOT}/dist/live-build-work-nvidia-legacy"
rm -rf "${REPO_ROOT}/dist/live-build-work-amd" rm -rf "${REPO_ROOT}/dist/live-build-work-amd"
rm -rf "${REPO_ROOT}/dist/live-build-work-nogpu" rm -rf "${REPO_ROOT}/dist/live-build-work-nogpu"
rm -rf "${REPO_ROOT}/dist/overlay-stage-nvidia"
rm -rf "${REPO_ROOT}/dist/overlay-stage-nvidia-legacy"
rm -rf "${REPO_ROOT}/dist/overlay-stage-amd"
rm -rf "${REPO_ROOT}/dist/overlay-stage-nogpu"
echo "=== caches cleared, proceeding with build ===" echo "=== caches cleared, proceeding with build ==="
fi fi
@@ -180,6 +185,9 @@ case "$VARIANT" in
nvidia) nvidia)
run_variant nvidia run_variant nvidia
;; ;;
nvidia-legacy)
run_variant nvidia-legacy
;;
amd) amd)
run_variant amd run_variant amd
;; ;;
@@ -188,6 +196,7 @@ case "$VARIANT" in
;; ;;
all) all)
run_variant nvidia run_variant nvidia
run_variant nvidia-legacy
run_variant amd run_variant amd
run_variant nogpu run_variant nogpu
;; ;;

View File

@@ -1,8 +1,10 @@
#!/bin/sh #!/bin/sh
# build-nvidia-module.sh — compile NVIDIA proprietary driver modules for Debian 12 # build-nvidia-module.sh — compile NVIDIA kernel modules for Debian 12
# #
# Downloads the official NVIDIA .run installer, extracts kernel modules and # Downloads the official NVIDIA .run installer, extracts kernel modules and
# userspace tools (nvidia-smi, libnvidia-ml). Everything is proprietary NVIDIA. # userspace tools (nvidia-smi, libnvidia-ml). Supports both:
# - open -> kernel-open/ sources from the .run installer
# - proprietary -> traditional proprietary kernel sources from the .run installer
# #
# Output is cached in DIST_DIR/nvidia-<version>-<kver>/ so subsequent builds # Output is cached in DIST_DIR/nvidia-<version>-<kver>/ so subsequent builds
# are instant unless NVIDIA_DRIVER_VERSION or kernel version changes. # are instant unless NVIDIA_DRIVER_VERSION or kernel version changes.
@@ -17,10 +19,19 @@ set -e
NVIDIA_VERSION="$1" NVIDIA_VERSION="$1"
DIST_DIR="$2" DIST_DIR="$2"
DEBIAN_KERNEL_ABI="$3" DEBIAN_KERNEL_ABI="$3"
NVIDIA_FLAVOR="${4:-open}"
[ -n "$NVIDIA_VERSION" ] || { echo "usage: $0 <nvidia-version> <dist-dir> <debian-kernel-abi>"; exit 1; } [ -n "$NVIDIA_VERSION" ] || { echo "usage: $0 <nvidia-version> <dist-dir> <debian-kernel-abi> [open|proprietary]"; exit 1; }
[ -n "$DIST_DIR" ] || { echo "usage: $0 <nvidia-version> <dist-dir> <debian-kernel-abi>"; exit 1; } [ -n "$DIST_DIR" ] || { echo "usage: $0 <nvidia-version> <dist-dir> <debian-kernel-abi> [open|proprietary]"; exit 1; }
[ -n "$DEBIAN_KERNEL_ABI" ] || { echo "usage: $0 <nvidia-version> <dist-dir> <debian-kernel-abi>"; exit 1; } [ -n "$DEBIAN_KERNEL_ABI" ] || { echo "usage: $0 <nvidia-version> <dist-dir> <debian-kernel-abi> [open|proprietary]"; exit 1; }
case "$NVIDIA_FLAVOR" in
open|proprietary) ;;
*)
echo "unsupported NVIDIA flavor: $NVIDIA_FLAVOR (expected open or proprietary)" >&2
exit 1
;;
esac
KVER="${DEBIAN_KERNEL_ABI}-amd64" KVER="${DEBIAN_KERNEL_ABI}-amd64"
# On Debian, kernel headers are split into two packages: # On Debian, kernel headers are split into two packages:
@@ -31,22 +42,13 @@ KVER="${DEBIAN_KERNEL_ABI}-amd64"
KDIR_ARCH="/usr/src/linux-headers-${KVER}" KDIR_ARCH="/usr/src/linux-headers-${KVER}"
KDIR_COMMON="/usr/src/linux-headers-${DEBIAN_KERNEL_ABI}-common" KDIR_COMMON="/usr/src/linux-headers-${DEBIAN_KERNEL_ABI}-common"
echo "=== NVIDIA ${NVIDIA_VERSION} (proprietary) for kernel ${KVER} ===" echo "=== NVIDIA ${NVIDIA_VERSION} (${NVIDIA_FLAVOR}) for kernel ${KVER} ==="
if [ ! -d "$KDIR_ARCH" ] || [ ! -d "$KDIR_COMMON" ]; then CACHE_DIR="${DIST_DIR}/nvidia-${NVIDIA_FLAVOR}-${NVIDIA_VERSION}-${KVER}"
echo "=== installing linux-headers-${KVER} ==="
DEBIAN_FRONTEND=noninteractive apt-get install -y \
"linux-headers-${KVER}" \
gcc make perl
fi
echo "kernel headers (arch): $KDIR_ARCH"
echo "kernel headers (common): $KDIR_COMMON"
CACHE_DIR="${DIST_DIR}/nvidia-${NVIDIA_VERSION}-${KVER}"
CACHE_ROOT="${BEE_CACHE_DIR:-${DIST_DIR}/cache}" CACHE_ROOT="${BEE_CACHE_DIR:-${DIST_DIR}/cache}"
DOWNLOAD_CACHE_DIR="${CACHE_ROOT}/nvidia-downloads" DOWNLOAD_CACHE_DIR="${CACHE_ROOT}/nvidia-downloads"
EXTRACT_CACHE_DIR="${CACHE_ROOT}/nvidia-extract" EXTRACT_CACHE_DIR="${CACHE_ROOT}/nvidia-extract"
CACHE_LAYOUT_VERSION="2" CACHE_LAYOUT_VERSION="3"
CACHE_LAYOUT_MARKER="${CACHE_DIR}/.cache-layout-v${CACHE_LAYOUT_VERSION}" CACHE_LAYOUT_MARKER="${CACHE_DIR}/.cache-layout-v${CACHE_LAYOUT_VERSION}"
if [ -d "$CACHE_DIR/modules" ] && [ -f "$CACHE_DIR/bin/nvidia-smi" ] \ if [ -d "$CACHE_DIR/modules" ] && [ -f "$CACHE_DIR/bin/nvidia-smi" ] \
&& [ -f "$CACHE_LAYOUT_MARKER" ] \ && [ -f "$CACHE_LAYOUT_MARKER" ] \
@@ -57,6 +59,15 @@ if [ -d "$CACHE_DIR/modules" ] && [ -f "$CACHE_DIR/bin/nvidia-smi" ] \
exit 0 exit 0
fi fi
if [ ! -d "$KDIR_ARCH" ] || [ ! -d "$KDIR_COMMON" ]; then
echo "=== installing linux-headers-${KVER} ==="
DEBIAN_FRONTEND=noninteractive apt-get install -y \
"linux-headers-${KVER}" \
gcc make perl
fi
echo "kernel headers (arch): $KDIR_ARCH"
echo "kernel headers (common): $KDIR_COMMON"
# Download official NVIDIA .run installer with sha256 verification # Download official NVIDIA .run installer with sha256 verification
BASE_URL="https://download.nvidia.com/XFree86/Linux-x86_64/${NVIDIA_VERSION}" BASE_URL="https://download.nvidia.com/XFree86/Linux-x86_64/${NVIDIA_VERSION}"
mkdir -p "$DOWNLOAD_CACHE_DIR" "$EXTRACT_CACHE_DIR" mkdir -p "$DOWNLOAD_CACHE_DIR" "$EXTRACT_CACHE_DIR"
@@ -90,12 +101,18 @@ EXTRACT_DIR="${EXTRACT_CACHE_DIR}/nvidia-extract-${NVIDIA_VERSION}"
rm -rf "$EXTRACT_DIR" rm -rf "$EXTRACT_DIR"
"$RUN_FILE" --extract-only --target "$EXTRACT_DIR" "$RUN_FILE" --extract-only --target "$EXTRACT_DIR"
# Find kernel source directory (proprietary: kernel/, open: kernel-open/) # Find kernel source directory for the selected flavor.
KERNEL_SRC="" KERNEL_SRC=""
for d in "$EXTRACT_DIR/kernel" "$EXTRACT_DIR/kernel-modules-sources" "$EXTRACT_DIR/kernel-source"; do if [ "$NVIDIA_FLAVOR" = "open" ]; then
[ -f "$d/Makefile" ] && KERNEL_SRC="$d" && break for d in "$EXTRACT_DIR/kernel-open" "$EXTRACT_DIR/kernel-open/"*; do
done [ -f "$d/Makefile" ] && KERNEL_SRC="$d" && break
[ -n "$KERNEL_SRC" ] || { echo "ERROR: kernel source dir not found in:"; ls "$EXTRACT_DIR/"; exit 1; } done
else
for d in "$EXTRACT_DIR/kernel" "$EXTRACT_DIR/kernel-modules-sources" "$EXTRACT_DIR/kernel-source"; do
[ -f "$d/Makefile" ] && KERNEL_SRC="$d" && break
done
fi
[ -n "$KERNEL_SRC" ] || { echo "ERROR: kernel source dir not found for flavor ${NVIDIA_FLAVOR} in:"; ls "$EXTRACT_DIR/"; exit 1; }
echo "kernel source: $KERNEL_SRC" echo "kernel source: $KERNEL_SRC"
# Build kernel modules # Build kernel modules

View File

@@ -15,26 +15,46 @@ DIST_DIR="${REPO_ROOT}/dist"
VENDOR_DIR="${REPO_ROOT}/iso/vendor" VENDOR_DIR="${REPO_ROOT}/iso/vendor"
CACHE_ROOT="${BEE_CACHE_DIR:-${DIST_DIR}/cache}" CACHE_ROOT="${BEE_CACHE_DIR:-${DIST_DIR}/cache}"
AUTH_KEYS="" AUTH_KEYS=""
BUILD_VARIANT="nvidia"
BEE_GPU_VENDOR="nvidia" BEE_GPU_VENDOR="nvidia"
BEE_NVIDIA_MODULE_FLAVOR="open"
# parse args # parse args
while [ $# -gt 0 ]; do while [ $# -gt 0 ]; do
case "$1" in case "$1" in
--authorized-keys) AUTH_KEYS="$2"; shift 2 ;; --authorized-keys) AUTH_KEYS="$2"; shift 2 ;;
--variant) BEE_GPU_VENDOR="$2"; shift 2 ;; --variant) BUILD_VARIANT="$2"; shift 2 ;;
*) echo "unknown arg: $1"; exit 1 ;; *) echo "unknown arg: $1"; exit 1 ;;
esac esac
done done
case "$BEE_GPU_VENDOR" in case "$BUILD_VARIANT" in
nvidia|amd|nogpu) ;; nvidia)
*) echo "unknown variant: $BEE_GPU_VENDOR (expected nvidia, amd, or nogpu)" >&2; exit 1 ;; BEE_GPU_VENDOR="nvidia"
BEE_NVIDIA_MODULE_FLAVOR="open"
;;
nvidia-legacy)
BEE_GPU_VENDOR="nvidia"
BEE_NVIDIA_MODULE_FLAVOR="proprietary"
;;
amd)
BEE_GPU_VENDOR="amd"
BEE_NVIDIA_MODULE_FLAVOR=""
;;
nogpu)
BEE_GPU_VENDOR="nogpu"
BEE_NVIDIA_MODULE_FLAVOR=""
;;
*)
echo "unknown variant: $BUILD_VARIANT (expected nvidia, nvidia-legacy, amd, or nogpu)" >&2
exit 1
;;
esac esac
BUILD_WORK_DIR="${DIST_DIR}/live-build-work-${BEE_GPU_VENDOR}" BUILD_WORK_DIR="${DIST_DIR}/live-build-work-${BUILD_VARIANT}"
OVERLAY_STAGE_DIR="${DIST_DIR}/overlay-stage-${BEE_GPU_VENDOR}" OVERLAY_STAGE_DIR="${DIST_DIR}/overlay-stage-${BUILD_VARIANT}"
export BEE_GPU_VENDOR export BEE_GPU_VENDOR BEE_NVIDIA_MODULE_FLAVOR BUILD_VARIANT
. "${BUILDER_DIR}/VERSIONS" . "${BUILDER_DIR}/VERSIONS"
export PATH="$PATH:/usr/local/go/bin" export PATH="$PATH:/usr/local/go/bin"
@@ -627,7 +647,7 @@ recover_iso_memtest() {
AUDIT_VERSION_EFFECTIVE="$(resolve_audit_version)" AUDIT_VERSION_EFFECTIVE="$(resolve_audit_version)"
ISO_VERSION_EFFECTIVE="$(resolve_iso_version)" ISO_VERSION_EFFECTIVE="$(resolve_iso_version)"
ISO_BASENAME="easy-bee-${BEE_GPU_VENDOR}-v${ISO_VERSION_EFFECTIVE}-amd64" ISO_BASENAME="easy-bee-${BUILD_VARIANT}-v${ISO_VERSION_EFFECTIVE}-amd64"
# Versioned output directory: dist/easy-bee-v4.1/ — all final artefacts live here. # Versioned output directory: dist/easy-bee-v4.1/ — all final artefacts live here.
OUT_DIR="${DIST_DIR}/easy-bee-v${ISO_VERSION_EFFECTIVE}" OUT_DIR="${DIST_DIR}/easy-bee-v${ISO_VERSION_EFFECTIVE}"
mkdir -p "${OUT_DIR}" mkdir -p "${OUT_DIR}"
@@ -801,7 +821,7 @@ if [ ! -d "/usr/src/linux-headers-${KVER}" ]; then
apt-get install -y "linux-headers-${KVER}" apt-get install -y "linux-headers-${KVER}"
fi fi
echo "=== bee ISO build (variant: ${BEE_GPU_VENDOR}) ===" echo "=== bee ISO build (variant: ${BUILD_VARIANT}) ==="
echo "Debian: ${DEBIAN_VERSION}, Kernel ABI: ${DEBIAN_KERNEL_ABI}, Go: ${GO_VERSION}" echo "Debian: ${DEBIAN_VERSION}, Kernel ABI: ${DEBIAN_KERNEL_ABI}, Go: ${GO_VERSION}"
echo "Audit version: ${AUDIT_VERSION_EFFECTIVE}, ISO version: ${ISO_VERSION_EFFECTIVE}" echo "Audit version: ${AUDIT_VERSION_EFFECTIVE}, ISO version: ${ISO_VERSION_EFFECTIVE}"
echo "" echo ""
@@ -871,7 +891,7 @@ if [ "$BEE_GPU_VENDOR" = "nvidia" ]; then
fi fi
fi fi
echo "=== preparing staged overlay (${BEE_GPU_VENDOR}) ===" echo "=== preparing staged overlay (${BUILD_VARIANT}) ==="
mkdir -p "${BUILD_WORK_DIR}" "${OVERLAY_STAGE_DIR}" mkdir -p "${BUILD_WORK_DIR}" "${OVERLAY_STAGE_DIR}"
# Sync builder config into variant work dir, preserving lb cache. # Sync builder config into variant work dir, preserving lb cache.
@@ -981,10 +1001,10 @@ done
# --- NVIDIA kernel modules and userspace libs --- # --- NVIDIA kernel modules and userspace libs ---
if [ "$BEE_GPU_VENDOR" = "nvidia" ]; then if [ "$BEE_GPU_VENDOR" = "nvidia" ]; then
run_step "build NVIDIA ${NVIDIA_DRIVER_VERSION} modules" "40-nvidia-module" \ run_step "build NVIDIA ${NVIDIA_DRIVER_VERSION} modules" "40-nvidia-module" \
sh "${BUILDER_DIR}/build-nvidia-module.sh" "${NVIDIA_DRIVER_VERSION}" "${DIST_DIR}" "${DEBIAN_KERNEL_ABI}" sh "${BUILDER_DIR}/build-nvidia-module.sh" "${NVIDIA_DRIVER_VERSION}" "${DIST_DIR}" "${DEBIAN_KERNEL_ABI}" "${BEE_NVIDIA_MODULE_FLAVOR}"
KVER="${DEBIAN_KERNEL_ABI}-amd64" KVER="${DEBIAN_KERNEL_ABI}-amd64"
NVIDIA_CACHE="${DIST_DIR}/nvidia-${NVIDIA_DRIVER_VERSION}-${KVER}" NVIDIA_CACHE="${DIST_DIR}/nvidia-${BEE_NVIDIA_MODULE_FLAVOR}-${NVIDIA_DRIVER_VERSION}-${KVER}"
# Inject .ko files into overlay at /usr/local/lib/nvidia/ # Inject .ko files into overlay at /usr/local/lib/nvidia/
OVERLAY_KMOD_DIR="${OVERLAY_STAGE_DIR}/usr/local/lib/nvidia" OVERLAY_KMOD_DIR="${OVERLAY_STAGE_DIR}/usr/local/lib/nvidia"
@@ -1055,13 +1075,14 @@ GIT_COMMIT="$(git -C "${REPO_ROOT}" rev-parse --short HEAD 2>/dev/null || echo u
if [ "$BEE_GPU_VENDOR" = "nvidia" ]; then if [ "$BEE_GPU_VENDOR" = "nvidia" ]; then
GPU_VERSION_LINE="NVIDIA_DRIVER_VERSION=${NVIDIA_DRIVER_VERSION} GPU_VERSION_LINE="NVIDIA_DRIVER_VERSION=${NVIDIA_DRIVER_VERSION}
NVIDIA_KERNEL_MODULES_FLAVOR=${BEE_NVIDIA_MODULE_FLAVOR}
NCCL_VERSION=${NCCL_VERSION} NCCL_VERSION=${NCCL_VERSION}
NCCL_CUDA_VERSION=${NCCL_CUDA_VERSION} NCCL_CUDA_VERSION=${NCCL_CUDA_VERSION}
CUBLAS_VERSION=${CUBLAS_VERSION} CUBLAS_VERSION=${CUBLAS_VERSION}
CUDA_USERSPACE_VERSION=${CUDA_USERSPACE_VERSION} CUDA_USERSPACE_VERSION=${CUDA_USERSPACE_VERSION}
NCCL_TESTS_VERSION=${NCCL_TESTS_VERSION} NCCL_TESTS_VERSION=${NCCL_TESTS_VERSION}
JOHN_JUMBO_COMMIT=${JOHN_JUMBO_COMMIT}" JOHN_JUMBO_COMMIT=${JOHN_JUMBO_COMMIT}"
GPU_BUILD_INFO="nvidia:${NVIDIA_DRIVER_VERSION}" GPU_BUILD_INFO="nvidia-${BEE_NVIDIA_MODULE_FLAVOR}:${NVIDIA_DRIVER_VERSION}"
elif [ "$BEE_GPU_VENDOR" = "amd" ]; then elif [ "$BEE_GPU_VENDOR" = "amd" ]; then
GPU_VERSION_LINE="ROCM_VERSION=${ROCM_VERSION}" GPU_VERSION_LINE="ROCM_VERSION=${ROCM_VERSION}"
GPU_BUILD_INFO="rocm:${ROCM_VERSION}" GPU_BUILD_INFO="rocm:${ROCM_VERSION}"
@@ -1073,6 +1094,7 @@ fi
cat > "${OVERLAY_STAGE_DIR}/etc/bee-release" <<EOF cat > "${OVERLAY_STAGE_DIR}/etc/bee-release" <<EOF
BEE_ISO_VERSION=${ISO_VERSION_EFFECTIVE} BEE_ISO_VERSION=${ISO_VERSION_EFFECTIVE}
BEE_AUDIT_VERSION=${AUDIT_VERSION_EFFECTIVE} BEE_AUDIT_VERSION=${AUDIT_VERSION_EFFECTIVE}
BEE_BUILD_VARIANT=${BUILD_VARIANT}
BEE_GPU_VENDOR=${BEE_GPU_VENDOR} BEE_GPU_VENDOR=${BEE_GPU_VENDOR}
BUILD_DATE=${BUILD_DATE} BUILD_DATE=${BUILD_DATE}
GIT_COMMIT=${GIT_COMMIT} GIT_COMMIT=${GIT_COMMIT}
@@ -1083,6 +1105,11 @@ EOF
# Write GPU vendor marker for hooks # Write GPU vendor marker for hooks
echo "${BEE_GPU_VENDOR}" > "${OVERLAY_STAGE_DIR}/etc/bee-gpu-vendor" echo "${BEE_GPU_VENDOR}" > "${OVERLAY_STAGE_DIR}/etc/bee-gpu-vendor"
if [ "$BEE_GPU_VENDOR" = "nvidia" ]; then
echo "${BEE_NVIDIA_MODULE_FLAVOR}" > "${OVERLAY_STAGE_DIR}/etc/bee-nvidia-modules-flavor"
else
rm -f "${OVERLAY_STAGE_DIR}/etc/bee-nvidia-modules-flavor"
fi
# Patch motd with build info # Patch motd with build info
BEE_BUILD_INFO="${BUILD_DATE} git:${GIT_COMMIT} debian:${DEBIAN_VERSION} ${GPU_BUILD_INFO}" BEE_BUILD_INFO="${BUILD_DATE} git:${GIT_COMMIT} debian:${DEBIAN_VERSION} ${GPU_BUILD_INFO}"
@@ -1153,10 +1180,10 @@ fi
# --- build ISO using live-build --- # --- build ISO using live-build ---
echo "" echo ""
echo "=== building ISO (live-build, variant: ${BEE_GPU_VENDOR}) ===" echo "=== building ISO (variant: ${BUILD_VARIANT}) ==="
# Export for auto/config # Export for auto/config
BEE_GPU_VENDOR_UPPER="$(echo "${BEE_GPU_VENDOR}" | tr 'a-z' 'A-Z')" BEE_GPU_VENDOR_UPPER="$(echo "${BUILD_VARIANT}" | tr 'a-z-' 'A-Z_')"
export BEE_GPU_VENDOR_UPPER export BEE_GPU_VENDOR_UPPER
cd "${LB_DIR}" cd "${LB_DIR}"
@@ -1191,7 +1218,7 @@ if [ -f "$ISO_RAW" ]; then
validate_iso_nvidia_runtime "$ISO_RAW" validate_iso_nvidia_runtime "$ISO_RAW"
cp "$ISO_RAW" "$ISO_OUT" cp "$ISO_RAW" "$ISO_OUT"
echo "" echo ""
echo "=== done (${BEE_GPU_VENDOR}) ===" echo "=== done (${BUILD_VARIANT}) ==="
echo "ISO: $ISO_OUT" echo "ISO: $ISO_OUT"
if command -v stat >/dev/null 2>&1; then if command -v stat >/dev/null 2>&1; then
ISO_SIZE_BYTES="$(stat -c '%s' "$ISO_OUT" 2>/dev/null || stat -f '%z' "$ISO_OUT")" ISO_SIZE_BYTES="$(stat -c '%s' "$ISO_OUT" 2>/dev/null || stat -f '%z' "$ISO_OUT")"

View File

@@ -11,18 +11,18 @@ echo " Hardware Audit LiveCD"
echo "" echo ""
menuentry "EASY-BEE" { menuentry "EASY-BEE" {
linux @KERNEL_LIVE@ @APPEND_LIVE@ bee.nvidia.mode=normal net.ifnames=0 biosdevname=0 mitigations=off transparent_hugepage=always numa_balancing=disable nowatchdog nosoftlockup linux @KERNEL_LIVE@ @APPEND_LIVE@ nomodeset bee.nvidia.mode=normal net.ifnames=0 biosdevname=0 mitigations=off transparent_hugepage=always numa_balancing=disable nowatchdog nosoftlockup
initrd @INITRD_LIVE@ initrd @INITRD_LIVE@
} }
submenu "EASY-BEE (advanced options) -->" { submenu "EASY-BEE (advanced options) -->" {
menuentry "EASY-BEE — GSP=off" { menuentry "EASY-BEE — GSP=off" {
linux @KERNEL_LIVE@ @APPEND_LIVE@ bee.nvidia.mode=gsp-off net.ifnames=0 biosdevname=0 mitigations=off transparent_hugepage=always numa_balancing=disable nowatchdog nosoftlockup linux @KERNEL_LIVE@ @APPEND_LIVE@ nomodeset bee.nvidia.mode=gsp-off net.ifnames=0 biosdevname=0 mitigations=off transparent_hugepage=always numa_balancing=disable nowatchdog nosoftlockup
initrd @INITRD_LIVE@ initrd @INITRD_LIVE@
} }
menuentry "EASY-BEE — nomodeset" { menuentry "EASY-BEE — KMS (no nomodeset)" {
linux @KERNEL_LIVE@ @APPEND_LIVE@ nomodeset bee.nvidia.mode=normal net.ifnames=0 biosdevname=0 mitigations=off transparent_hugepage=always numa_balancing=disable nowatchdog nosoftlockup linux @KERNEL_LIVE@ @APPEND_LIVE@ bee.nvidia.mode=normal net.ifnames=0 biosdevname=0 mitigations=off transparent_hugepage=always numa_balancing=disable nowatchdog nosoftlockup
initrd @INITRD_LIVE@ initrd @INITRD_LIVE@
} }

View File

@@ -44,23 +44,27 @@ else:
img = Image.new('RGB', (W, H), (0, 0, 0)) img = Image.new('RGB', (W, H), (0, 0, 0))
draw = ImageDraw.Draw(img) draw = ImageDraw.Draw(img)
# Measure logo block # Measure logo block line by line to avoid font ascender offset
lines = LOGO.split('\n') lines = LOGO.split('\n')
bbox = draw.textbbox((0, 0), LOGO, font=font_logo) logo_lines = lines[:6]
text_w = bbox[2] - bbox[0] sub_line = lines[6] if len(lines) > 6 else ''
text_h = bbox[3] - bbox[1]
x = (W - text_w) // 2 line_h = SIZE + 2
y = (H - text_h) // 2 block_h = len(logo_lines) * line_h + 8 + (SIZE if sub_line else 0)
# Draw logo lines: first 6 in amber, last line (subtitle) dimmer # Width: measure the widest logo line
logo_lines = lines[:6] max_w = 0
sub_line = lines[6] if len(lines) > 6 else '' for line in logo_lines:
bb = draw.textbbox((0, 0), line, font=font_logo)
max_w = max(max_w, bb[2] - bb[0])
x = (W - max_w) // 2
y = (H - block_h) // 2
cy = y cy = y
for line in logo_lines: for line in logo_lines:
draw.text((x, cy), line, font=font_logo, fill=(0xf6, 0xc9, 0x0e)) draw.text((x, cy), line, font=font_logo, fill=(0xf6, 0xc9, 0x0e))
cy += SIZE + 2 cy += line_h
cy += 8 cy += 8
if sub_line: if sub_line:
draw.text((x, cy), sub_line, font=font_sub, fill=(0x80, 0x68, 0x18)) draw.text((x, cy), sub_line, font=font_sub, fill=(0x80, 0x68, 0x18))

View File

@@ -62,6 +62,8 @@ done
echo "loader=bee-gpu-burn" echo "loader=bee-gpu-burn"
echo "selected_gpus=${FINAL}" echo "selected_gpus=${FINAL}"
export CUDA_DEVICE_ORDER="PCI_BUS_ID"
TMP_DIR=$(mktemp -d) TMP_DIR=$(mktemp -d)
trap 'rm -rf "${TMP_DIR}"' EXIT INT TERM trap 'rm -rf "${TMP_DIR}"' EXIT INT TERM
@@ -78,7 +80,8 @@ for id in $(echo "${FINAL}" | tr ',' ' '); do
fi fi
fi fi
echo "starting gpu ${id} size=${gpu_size_mb}MB" echo "starting gpu ${id} size=${gpu_size_mb}MB"
"${WORKER}" --device "${id}" --seconds "${SECONDS}" --size-mb "${gpu_size_mb}" >"${log}" 2>&1 & CUDA_VISIBLE_DEVICES="${id}" \
"${WORKER}" --device 0 --seconds "${SECONDS}" --size-mb "${gpu_size_mb}" >"${log}" 2>&1 &
pid=$! pid=$!
WORKERS="${WORKERS} ${pid}:${id}:${log}" WORKERS="${WORKERS} ${pid}:${id}:${log}"
done done

View File

@@ -152,14 +152,19 @@ done
[ -n "${FINAL}" ] || { echo "no NVIDIA GPUs selected after filters" >&2; exit 1; } [ -n "${FINAL}" ] || { echo "no NVIDIA GPUs selected after filters" >&2; exit 1; }
export CUDA_DEVICE_ORDER="PCI_BUS_ID"
export CUDA_VISIBLE_DEVICES="${FINAL}"
JOHN_DEVICES="" JOHN_DEVICES=""
local_id=1
for id in $(echo "${FINAL}" | tr ',' ' '); do for id in $(echo "${FINAL}" | tr ',' ' '); do
opencl_id=$((id + 1)) opencl_id="${local_id}"
if [ -z "${JOHN_DEVICES}" ]; then if [ -z "${JOHN_DEVICES}" ]; then
JOHN_DEVICES="${opencl_id}" JOHN_DEVICES="${opencl_id}"
else else
JOHN_DEVICES="${JOHN_DEVICES},${opencl_id}" JOHN_DEVICES="${JOHN_DEVICES},${opencl_id}"
fi fi
local_id=$((local_id + 1))
done done
echo "loader=john" echo "loader=john"

View File

@@ -70,6 +70,8 @@ echo "gpu_count=${GPU_COUNT}"
echo "range=${MIN_BYTES}..${MAX_BYTES}" echo "range=${MIN_BYTES}..${MAX_BYTES}"
echo "iters=${ITERS}" echo "iters=${ITERS}"
export CUDA_DEVICE_ORDER="PCI_BUS_ID"
deadline=$(( $(date +%s) + SECONDS )) deadline=$(( $(date +%s) + SECONDS ))
round=0 round=0

View File

@@ -209,6 +209,18 @@ fi
ldconfig 2>/dev/null || true ldconfig 2>/dev/null || true
log "ldconfig refreshed" log "ldconfig refreshed"
# Keep persistence mode enabled across the session so dcgmi / stress tools do
# not fail with deployment warnings on otherwise healthy GPUs.
if command -v nvidia-smi >/dev/null 2>&1; then
if nvidia-smi -pm 1 >/dev/null 2>&1; then
log "enabled NVIDIA persistence mode"
else
log "WARN: failed to enable NVIDIA persistence mode"
fi
else
log "WARN: nvidia-smi not found — cannot enable persistence mode"
fi
# Start DCGM host engine so dcgmi can discover GPUs. # Start DCGM host engine so dcgmi can discover GPUs.
# nv-hostengine must run after the NVIDIA modules and device nodes are ready. # nv-hostengine must run after the NVIDIA modules and device nodes are ready.
# If it started too early (for example via systemd before bee-nvidia-load), it can # If it started too early (for example via systemd before bee-nvidia-load), it can