Compare commits
44 Commits
iso/v1.0.1
...
v2.7
| Author | SHA1 | Date | |
|---|---|---|---|
| ec0b7f7ff9 | |||
| e7a7ff54b9 | |||
| b4371e291e | |||
| c22b53a406 | |||
| ff0acc3698 | |||
| d50760e7c6 | |||
| ed4f8be019 | |||
| 883592d029 | |||
| a6dcaf1c7e | |||
| 88727fb590 | |||
| c9f5224c42 | |||
| 7cb5c02a9b | |||
| c1aa3cf491 | |||
| f7eb75c57c | |||
| 004cc4910d | |||
| ed1cceed8c | |||
| 9fe9f061f8 | |||
| 837a1fb981 | |||
| 1f43b4e050 | |||
| 83bbc8a1bc | |||
| 896bdb6ee8 | |||
| 5407c26e25 | |||
| 4fddaba9c5 | |||
| d2f384b6eb | |||
| 25f0f30aaf | |||
| a57b037a91 | |||
| 5644231f9a | |||
| eea98e6d76 | |||
| 967455194c | |||
| 79dabf3efb | |||
| 1336f5b95c | |||
| 31486a31c1 | |||
| aa3fc332ba | |||
| 62c57b87f2 | |||
| f600261546 | |||
| d7ca04bdfb | |||
| 5433652c70 | |||
| b25f014dbd | |||
| d69a46f211 | |||
|
|
fc5c2019aa | ||
|
|
67a215c66f | ||
|
|
8b4bfdf5ad | ||
|
|
0a52a4f3ba | ||
|
|
b132f7973a |
18
audit/Makefile
Normal file
18
audit/Makefile
Normal file
@@ -0,0 +1,18 @@
|
||||
LISTEN ?= :8080
|
||||
AUDIT_PATH ?=
|
||||
|
||||
RUN_ARGS := web --listen $(LISTEN)
|
||||
ifneq ($(AUDIT_PATH),)
|
||||
RUN_ARGS += --audit-path $(AUDIT_PATH)
|
||||
endif
|
||||
|
||||
.PHONY: run build test
|
||||
|
||||
run:
|
||||
go run ./cmd/bee $(RUN_ARGS)
|
||||
|
||||
build:
|
||||
go build -o bee ./cmd/bee
|
||||
|
||||
test:
|
||||
go test ./...
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"bee/audit/internal/app"
|
||||
"bee/audit/internal/platform"
|
||||
"bee/audit/internal/runtimeenv"
|
||||
"bee/audit/internal/tui"
|
||||
"bee/audit/internal/webui"
|
||||
)
|
||||
|
||||
@@ -40,8 +39,6 @@ func run(args []string, stdout, stderr io.Writer) int {
|
||||
return 0
|
||||
case "audit":
|
||||
return runAudit(args[1:], stdout, stderr)
|
||||
case "tui":
|
||||
return runTUI(args[1:], stdout, stderr)
|
||||
case "export":
|
||||
return runExport(args[1:], stdout, stderr)
|
||||
case "preflight":
|
||||
@@ -66,7 +63,6 @@ func printRootUsage(w io.Writer) {
|
||||
fmt.Fprintln(w, `bee commands:
|
||||
bee audit --runtime auto|local|livecd --output stdout|file:<path>
|
||||
bee preflight --output stdout|file:<path>
|
||||
bee tui --runtime auto|local|livecd
|
||||
bee export --target <device>
|
||||
bee support-bundle --output stdout|file:<path>
|
||||
bee web --listen :80 --audit-path `+app.DefaultAuditJSONPath+`
|
||||
@@ -79,8 +75,6 @@ func runHelp(args []string, stdout, stderr io.Writer) int {
|
||||
switch args[0] {
|
||||
case "audit":
|
||||
return runAudit([]string{"--help"}, stdout, stdout)
|
||||
case "tui":
|
||||
return runTUI([]string{"--help"}, stdout, stdout)
|
||||
case "export":
|
||||
return runExport([]string{"--help"}, stdout, stdout)
|
||||
case "preflight":
|
||||
@@ -145,42 +139,6 @@ func runAudit(args []string, stdout, stderr io.Writer) int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func runTUI(args []string, stdout, stderr io.Writer) int {
|
||||
fs := flag.NewFlagSet("tui", flag.ContinueOnError)
|
||||
fs.SetOutput(stderr)
|
||||
runtimeFlag := fs.String("runtime", "auto", "runtime environment: auto, local, livecd")
|
||||
fs.Usage = func() {
|
||||
fmt.Fprintln(stderr, "usage: bee tui [--runtime auto|local|livecd]")
|
||||
fs.PrintDefaults()
|
||||
}
|
||||
if err := fs.Parse(args); err != nil {
|
||||
if err == flag.ErrHelp {
|
||||
return 0
|
||||
}
|
||||
return 2
|
||||
}
|
||||
if fs.NArg() != 0 {
|
||||
fs.Usage()
|
||||
return 2
|
||||
}
|
||||
|
||||
runtimeInfo, err := runtimeenv.Detect(*runtimeFlag)
|
||||
if err != nil {
|
||||
slog.Error("resolve runtime", "err", err)
|
||||
return 1
|
||||
}
|
||||
|
||||
slog.SetDefault(slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{
|
||||
Level: slog.LevelInfo,
|
||||
})))
|
||||
|
||||
application := app.New(platform.New())
|
||||
if err := tui.Run(application, runtimeInfo.Mode); err != nil {
|
||||
slog.Error("run tui", "err", err)
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func runExport(args []string, stdout, stderr io.Writer) int {
|
||||
fs := flag.NewFlagSet("export", flag.ContinueOnError)
|
||||
@@ -333,10 +291,18 @@ func runWeb(args []string, stdout, stderr io.Writer) int {
|
||||
}
|
||||
|
||||
slog.Info("starting bee web", "listen", *listenAddr, "audit_path", *auditPath)
|
||||
|
||||
runtimeInfo, err := runtimeenv.Detect("auto")
|
||||
if err != nil {
|
||||
slog.Warn("resolve runtime for web", "err", err)
|
||||
}
|
||||
|
||||
if err := webui.ListenAndServe(*listenAddr, webui.HandlerOptions{
|
||||
Title: *title,
|
||||
AuditPath: *auditPath,
|
||||
ExportDir: *exportDir,
|
||||
Title: *title,
|
||||
AuditPath: *auditPath,
|
||||
ExportDir: *exportDir,
|
||||
App: app.New(platform.New()),
|
||||
RuntimeMode: runtimeInfo.Mode,
|
||||
}); err != nil {
|
||||
slog.Error("run web", "err", err)
|
||||
return 1
|
||||
|
||||
27
audit/go.mod
27
audit/go.mod
@@ -4,25 +4,14 @@ go 1.24.0
|
||||
|
||||
replace reanimator/chart => ../internal/chart
|
||||
|
||||
require github.com/charmbracelet/bubbletea v1.3.4
|
||||
require github.com/charmbracelet/lipgloss v1.0.0
|
||||
require reanimator/chart v0.0.0
|
||||
require (
|
||||
github.com/go-analyze/charts v0.5.26
|
||||
reanimator/chart v0.0.0-00010101000000-000000000000
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
||||
github.com/charmbracelet/lipgloss v1.0.0 // promoted to direct — used for TUI colors
|
||||
github.com/charmbracelet/x/ansi v0.8.0 // indirect
|
||||
github.com/charmbracelet/x/term v0.2.1 // indirect
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-localereader v0.0.1 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
|
||||
github.com/muesli/cancelreader v0.2.2 // indirect
|
||||
github.com/muesli/termenv v0.15.2 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
golang.org/x/sync v0.11.0 // indirect
|
||||
golang.org/x/sys v0.30.0 // indirect
|
||||
golang.org/x/text v0.3.8 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/go-analyze/bulk v0.1.3 // indirect
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
|
||||
golang.org/x/image v0.24.0 // indirect
|
||||
)
|
||||
|
||||
55
audit/go.sum
55
audit/go.sum
@@ -1,37 +1,18 @@
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
|
||||
github.com/charmbracelet/bubbletea v1.3.4 h1:kCg7B+jSCFPLYRA52SDZjr51kG/fMUEoPoZrkaDHyoI=
|
||||
github.com/charmbracelet/bubbletea v1.3.4/go.mod h1:dtcUCyCGEX3g9tosuYiut3MXgY/Jsv9nKVdibKKRRXo=
|
||||
github.com/charmbracelet/lipgloss v1.0.0 h1:O7VkGDvqEdGi93X+DeqsQ7PKHDgtQfF8j8/O2qFMQNg=
|
||||
github.com/charmbracelet/lipgloss v1.0.0/go.mod h1:U5fy9Z+C38obMs+T+tJqst9VGzlOYGj4ri9reL3qUlo=
|
||||
github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE=
|
||||
github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q=
|
||||
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
|
||||
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
|
||||
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
|
||||
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
|
||||
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
|
||||
github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo=
|
||||
github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
|
||||
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/go-analyze/bulk v0.1.3 h1:pzRdBqzHDAT9PyROt0SlWE0YqPtdmTcEpIJY0C3vF0c=
|
||||
github.com/go-analyze/bulk v0.1.3/go.mod h1:afon/KtFJYnekIyN20H/+XUvcLFjE8sKR1CfpqfClgM=
|
||||
github.com/go-analyze/charts v0.5.26 h1:rSwZikLQuFX6cJzwI8OAgaWZneG1kDYxD857ms00ZxY=
|
||||
github.com/go-analyze/charts v0.5.26/go.mod h1:s1YvQhjiSwtLx1f2dOKfiV9x2TT49nVSL6v2rlRpTbY=
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
golang.org/x/image v0.24.0 h1:AN7zRgVsbvmTfNyqIbbOraYL8mSwcKncEj8ofjgzcMQ=
|
||||
golang.org/x/image v0.24.0/go.mod h1:4b/ITuLfqYq1hqZcjofwctIhi7sZh2WaCjvsBNjjya8=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
@@ -33,12 +33,13 @@ var (
|
||||
)
|
||||
|
||||
type App struct {
|
||||
network networkManager
|
||||
services serviceManager
|
||||
exports exportManager
|
||||
tools toolManager
|
||||
sat satRunner
|
||||
runtime runtimeChecker
|
||||
network networkManager
|
||||
services serviceManager
|
||||
exports exportManager
|
||||
tools toolManager
|
||||
sat satRunner
|
||||
runtime runtimeChecker
|
||||
installer installer
|
||||
}
|
||||
|
||||
type ActionResult struct {
|
||||
@@ -56,6 +57,7 @@ type networkManager interface {
|
||||
|
||||
type serviceManager interface {
|
||||
ListBeeServices() ([]string, error)
|
||||
ServiceState(name string) string
|
||||
ServiceStatus(name string) (string, error)
|
||||
ServiceDo(name string, action platform.ServiceAction) (string, error)
|
||||
}
|
||||
@@ -70,9 +72,14 @@ type toolManager interface {
|
||||
CheckTools(names []string) []platform.ToolStatus
|
||||
}
|
||||
|
||||
type installer interface {
|
||||
ListInstallDisks() ([]platform.InstallDisk, error)
|
||||
InstallToDisk(ctx context.Context, device string, logFile string) error
|
||||
}
|
||||
|
||||
type satRunner interface {
|
||||
RunNvidiaAcceptancePack(baseDir string) (string, error)
|
||||
RunNvidiaAcceptancePackWithOptions(ctx context.Context, baseDir string, durationSec int, sizeMB int, gpuIndices []int) (string, error)
|
||||
RunNvidiaAcceptancePackWithOptions(ctx context.Context, baseDir string, diagLevel int, gpuIndices []int) (string, error)
|
||||
RunMemoryAcceptancePack(baseDir string) (string, error)
|
||||
RunStorageAcceptancePack(baseDir string) (string, error)
|
||||
RunCPUAcceptancePack(baseDir string, durationSec int) (string, error)
|
||||
@@ -81,6 +88,7 @@ type satRunner interface {
|
||||
ListAMDGPUs() ([]platform.AMDGPUInfo, error)
|
||||
RunAMDAcceptancePack(baseDir string) (string, error)
|
||||
RunFanStressTest(ctx context.Context, baseDir string, opts platform.FanStressOptions) (string, error)
|
||||
RunNCCLTests(ctx context.Context, baseDir string) (string, error)
|
||||
}
|
||||
|
||||
type runtimeChecker interface {
|
||||
@@ -90,12 +98,13 @@ type runtimeChecker interface {
|
||||
|
||||
func New(platform *platform.System) *App {
|
||||
return &App{
|
||||
network: platform,
|
||||
services: platform,
|
||||
exports: platform,
|
||||
tools: platform,
|
||||
sat: platform,
|
||||
runtime: platform,
|
||||
network: platform,
|
||||
services: platform,
|
||||
exports: platform,
|
||||
tools: platform,
|
||||
sat: platform,
|
||||
runtime: platform,
|
||||
installer: platform,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -231,8 +240,11 @@ func (a *App) ExportLatestAudit(target platform.RemovableTarget) (string, error)
|
||||
|
||||
func (a *App) ExportLatestAuditResult(target platform.RemovableTarget) (ActionResult, error) {
|
||||
path, err := a.ExportLatestAudit(target)
|
||||
body := "Audit exported."
|
||||
if path != "" {
|
||||
body := "Audit export failed."
|
||||
if err == nil {
|
||||
body = "Audit exported."
|
||||
}
|
||||
if err == nil && path != "" {
|
||||
body = "Audit exported to " + path
|
||||
}
|
||||
return ActionResult{Title: "Export audit", Body: body}, err
|
||||
@@ -249,8 +261,11 @@ func (a *App) ExportSupportBundle(target platform.RemovableTarget) (string, erro
|
||||
|
||||
func (a *App) ExportSupportBundleResult(target platform.RemovableTarget) (ActionResult, error) {
|
||||
path, err := a.ExportSupportBundle(target)
|
||||
body := "Support bundle exported. USB target unmounted and safe to remove."
|
||||
if path != "" {
|
||||
body := "Support bundle export failed."
|
||||
if err == nil {
|
||||
body = "Support bundle exported. USB target unmounted and safe to remove."
|
||||
}
|
||||
if err == nil && path != "" {
|
||||
body = "Support bundle exported to " + path + ".\n\nUSB target unmounted and safe to remove."
|
||||
}
|
||||
return ActionResult{Title: "Export support bundle", Body: body}, err
|
||||
@@ -342,6 +357,10 @@ func (a *App) ListBeeServices() ([]string, error) {
|
||||
return a.services.ListBeeServices()
|
||||
}
|
||||
|
||||
func (a *App) ServiceState(name string) string {
|
||||
return a.services.ServiceState(name)
|
||||
}
|
||||
|
||||
func (a *App) ServiceStatus(name string) (string, error) {
|
||||
return a.services.ServiceStatus(name)
|
||||
}
|
||||
@@ -417,23 +436,16 @@ func (a *App) ListNvidiaGPUs() ([]platform.NvidiaGPU, error) {
|
||||
return a.sat.ListNvidiaGPUs()
|
||||
}
|
||||
|
||||
func (a *App) RunNvidiaAcceptancePackWithOptions(ctx context.Context, baseDir string, durationSec int, sizeMB int, gpuIndices []int) (ActionResult, error) {
|
||||
func (a *App) RunNvidiaAcceptancePackWithOptions(ctx context.Context, baseDir string, diagLevel int, gpuIndices []int) (ActionResult, error) {
|
||||
if strings.TrimSpace(baseDir) == "" {
|
||||
baseDir = DefaultSATBaseDir
|
||||
}
|
||||
path, err := a.sat.RunNvidiaAcceptancePackWithOptions(ctx, baseDir, durationSec, sizeMB, gpuIndices)
|
||||
path, err := a.sat.RunNvidiaAcceptancePackWithOptions(ctx, baseDir, diagLevel, gpuIndices)
|
||||
body := "Archive written."
|
||||
if path != "" {
|
||||
body = "Archive written to " + path
|
||||
}
|
||||
// Include terminal chart if available (runDir = archive path without .tar.gz).
|
||||
if path != "" {
|
||||
termPath := filepath.Join(strings.TrimSuffix(path, ".tar.gz"), "gpu-metrics-term.txt")
|
||||
if chart, readErr := os.ReadFile(termPath); readErr == nil && len(chart) > 0 {
|
||||
body += "\n\n" + string(chart)
|
||||
}
|
||||
}
|
||||
return ActionResult{Title: "NVIDIA SAT", Body: body}, err
|
||||
return ActionResult{Title: "NVIDIA DCGM", Body: body}, err
|
||||
}
|
||||
|
||||
func (a *App) RunMemoryAcceptancePack(baseDir string) (string, error) {
|
||||
@@ -499,6 +511,15 @@ func (a *App) RunFanStressTest(ctx context.Context, baseDir string, opts platfor
|
||||
return a.sat.RunFanStressTest(ctx, baseDir, opts)
|
||||
}
|
||||
|
||||
func (a *App) RunNCCLTestsResult(ctx context.Context) (ActionResult, error) {
|
||||
path, err := a.sat.RunNCCLTests(ctx, DefaultSATBaseDir)
|
||||
body := "Results: " + path
|
||||
if err != nil && err != context.Canceled {
|
||||
body += "\nERROR: " + err.Error()
|
||||
}
|
||||
return ActionResult{Title: "NCCL bandwidth test", Body: body}, err
|
||||
}
|
||||
|
||||
func (a *App) RunFanStressTestResult(ctx context.Context, opts platform.FanStressOptions) (ActionResult, error) {
|
||||
path, err := a.RunFanStressTest(ctx, "", opts)
|
||||
body := formatFanStressResult(path)
|
||||
@@ -994,3 +1015,11 @@ func firstNonEmpty(values ...string) string {
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (a *App) ListInstallDisks() ([]platform.InstallDisk, error) {
|
||||
return a.installer.ListInstallDisks()
|
||||
}
|
||||
|
||||
func (a *App) InstallToDisk(ctx context.Context, device string, logFile string) error {
|
||||
return a.installer.InstallToDisk(ctx, device, logFile)
|
||||
}
|
||||
|
||||
@@ -52,6 +52,10 @@ func (f fakeServices) ListBeeServices() ([]string, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (f fakeServices) ServiceState(name string) string {
|
||||
return "active"
|
||||
}
|
||||
|
||||
func (f fakeServices) ServiceStatus(name string) (string, error) {
|
||||
return f.serviceStatusFn(name)
|
||||
}
|
||||
@@ -123,7 +127,7 @@ func (f fakeSAT) RunNvidiaAcceptancePack(baseDir string) (string, error) {
|
||||
return f.runNvidiaFn(baseDir)
|
||||
}
|
||||
|
||||
func (f fakeSAT) RunNvidiaAcceptancePackWithOptions(_ context.Context, baseDir string, _ int, _ int, _ []int) (string, error) {
|
||||
func (f fakeSAT) RunNvidiaAcceptancePackWithOptions(_ context.Context, baseDir string, _ int, _ []int) (string, error) {
|
||||
return f.runNvidiaFn(baseDir)
|
||||
}
|
||||
|
||||
@@ -174,6 +178,10 @@ func (f fakeSAT) RunFanStressTest(_ context.Context, _ string, _ platform.FanStr
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (f fakeSAT) RunNCCLTests(_ context.Context, _ string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func TestNetworkStatusFormatsInterfacesAndRoute(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -470,6 +478,41 @@ func TestExportSupportBundleResultMentionsUnmountedUSB(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestExportSupportBundleResultDoesNotPretendSuccessOnError(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tmp := t.TempDir()
|
||||
oldExportDir := DefaultExportDir
|
||||
DefaultExportDir = tmp
|
||||
t.Cleanup(func() { DefaultExportDir = oldExportDir })
|
||||
|
||||
if err := os.WriteFile(filepath.Join(tmp, "bee-audit.json"), []byte("{}\n"), 0644); err != nil {
|
||||
t.Fatalf("write bee-audit.json: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(tmp, "bee-audit.log"), []byte("audit ok\n"), 0644); err != nil {
|
||||
t.Fatalf("write bee-audit.log: %v", err)
|
||||
}
|
||||
|
||||
a := &App{
|
||||
exports: fakeExports{
|
||||
exportToTargetFn: func(string, platform.RemovableTarget) (string, error) {
|
||||
return "", errors.New("mount /dev/sda1: exFAT support is missing in this ISO build")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result, err := a.ExportSupportBundleResult(platform.RemovableTarget{Device: "/dev/sda1", FSType: "exfat"})
|
||||
if err == nil {
|
||||
t.Fatal("expected export error")
|
||||
}
|
||||
if contains(result.Body, "exported to") {
|
||||
t.Fatalf("body should not claim success:\n%s", result.Body)
|
||||
}
|
||||
if result.Body != "Support bundle export failed." {
|
||||
t.Fatalf("body=%q want %q", result.Body, "Support bundle export failed.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunNvidiaAcceptancePackResult(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
@@ -11,8 +11,48 @@ import (
|
||||
|
||||
var exportExecCommand = exec.Command
|
||||
|
||||
func formatMountTargetError(target RemovableTarget, raw string, err error) error {
|
||||
msg := strings.TrimSpace(raw)
|
||||
fstype := strings.ToLower(strings.TrimSpace(target.FSType))
|
||||
if fstype == "exfat" && strings.Contains(strings.ToLower(msg), "unknown filesystem type 'exfat'") {
|
||||
return fmt.Errorf("mount %s: exFAT support is missing in this ISO build: %w", target.Device, err)
|
||||
}
|
||||
if msg == "" {
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("%s: %w", msg, err)
|
||||
}
|
||||
|
||||
func removableTargetReadOnly(fields map[string]string) bool {
|
||||
if fields["RO"] == "1" {
|
||||
return true
|
||||
}
|
||||
switch strings.ToLower(strings.TrimSpace(fields["FSTYPE"])) {
|
||||
case "iso9660", "squashfs":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func ensureWritableMountpoint(mountpoint string) error {
|
||||
probe, err := os.CreateTemp(mountpoint, ".bee-write-test-*")
|
||||
if err != nil {
|
||||
return fmt.Errorf("target filesystem is not writable: %w", err)
|
||||
}
|
||||
name := probe.Name()
|
||||
if closeErr := probe.Close(); closeErr != nil {
|
||||
_ = os.Remove(name)
|
||||
return closeErr
|
||||
}
|
||||
if err := os.Remove(name); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *System) ListRemovableTargets() ([]RemovableTarget, error) {
|
||||
raw, err := exportExecCommand("lsblk", "-P", "-o", "NAME,TYPE,PKNAME,RM,FSTYPE,MOUNTPOINT,SIZE,LABEL,MODEL").Output()
|
||||
raw, err := exportExecCommand("lsblk", "-P", "-o", "NAME,TYPE,PKNAME,RM,RO,FSTYPE,MOUNTPOINT,SIZE,LABEL,MODEL").Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -36,7 +76,7 @@ func (s *System) ListRemovableTargets() ([]RemovableTarget, error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
if !removable || fields["FSTYPE"] == "" {
|
||||
if !removable || fields["FSTYPE"] == "" || removableTargetReadOnly(fields) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -72,7 +112,7 @@ func (s *System) ExportFileToTarget(src string, target RemovableTarget) (dst str
|
||||
}
|
||||
if raw, err := exportExecCommand("mount", target.Device, mountpoint).CombinedOutput(); err != nil {
|
||||
_ = os.Remove(mountpoint)
|
||||
return string(raw), err
|
||||
return "", formatMountTargetError(target, string(raw), err)
|
||||
}
|
||||
mountedHere = true
|
||||
mounted = true
|
||||
@@ -95,6 +135,10 @@ func (s *System) ExportFileToTarget(src string, target RemovableTarget) (dst str
|
||||
}
|
||||
}()
|
||||
|
||||
if err := ensureWritableMountpoint(mountpoint); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
filename := filepath.Base(src)
|
||||
dst = filepath.Join(mountpoint, filename)
|
||||
data, err := os.ReadFile(src)
|
||||
|
||||
@@ -4,12 +4,11 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestExportFileToTargetUnmountsExistingMountpoint(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
tmp := t.TempDir()
|
||||
src := filepath.Join(tmp, "bundle.tar.gz")
|
||||
mountpoint := filepath.Join(tmp, "mnt")
|
||||
@@ -54,3 +53,60 @@ func TestExportFileToTargetUnmountsExistingMountpoint(t *testing.T) {
|
||||
t.Fatalf("expected umount %q call, got %#v", mountpoint, calls)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExportFileToTargetRejectsNonWritableMountpoint(t *testing.T) {
|
||||
tmp := t.TempDir()
|
||||
src := filepath.Join(tmp, "bundle.tar.gz")
|
||||
mountpoint := filepath.Join(tmp, "mnt")
|
||||
if err := os.MkdirAll(mountpoint, 0755); err != nil {
|
||||
t.Fatalf("mkdir mountpoint: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(src, []byte("bundle"), 0644); err != nil {
|
||||
t.Fatalf("write src: %v", err)
|
||||
}
|
||||
if err := os.Chmod(mountpoint, 0555); err != nil {
|
||||
t.Fatalf("chmod mountpoint: %v", err)
|
||||
}
|
||||
|
||||
oldExec := exportExecCommand
|
||||
exportExecCommand = func(name string, args ...string) *exec.Cmd {
|
||||
return exec.Command("sh", "-c", "exit 0")
|
||||
}
|
||||
t.Cleanup(func() { exportExecCommand = oldExec })
|
||||
|
||||
s := &System{}
|
||||
_, err := s.ExportFileToTarget(src, RemovableTarget{
|
||||
Device: "/dev/sdb1",
|
||||
Mountpoint: mountpoint,
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatal("expected error for non-writable mountpoint")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "target filesystem is not writable") {
|
||||
t.Fatalf("err=%q want writable message", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestListRemovableTargetsSkipsReadOnlyMedia(t *testing.T) {
|
||||
oldExec := exportExecCommand
|
||||
lsblkOut := `NAME="sda1" TYPE="part" PKNAME="sda" RM="1" RO="1" FSTYPE="iso9660" MOUNTPOINT="/run/live/medium" SIZE="3.7G" LABEL="BEE" MODEL=""
|
||||
NAME="sdb1" TYPE="part" PKNAME="sdb" RM="1" RO="0" FSTYPE="vfat" MOUNTPOINT="/media/bee/USB" SIZE="29.8G" LABEL="USB" MODEL=""`
|
||||
exportExecCommand = func(name string, args ...string) *exec.Cmd {
|
||||
cmd := exec.Command("sh", "-c", "printf '%s\n' \"$LSBLK_OUT\"")
|
||||
cmd.Env = append(os.Environ(), "LSBLK_OUT="+lsblkOut)
|
||||
return cmd
|
||||
}
|
||||
t.Cleanup(func() { exportExecCommand = oldExec })
|
||||
|
||||
s := &System{}
|
||||
targets, err := s.ListRemovableTargets()
|
||||
if err != nil {
|
||||
t.Fatalf("ListRemovableTargets error: %v", err)
|
||||
}
|
||||
if len(targets) != 1 {
|
||||
t.Fatalf("len(targets)=%d want 1 (%+v)", len(targets), targets)
|
||||
}
|
||||
if got := targets[0].Device; got != "/dev/sdb1" {
|
||||
t.Fatalf("device=%q want /dev/sdb1", got)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,18 +13,19 @@ import (
|
||||
|
||||
// GPUMetricRow is one telemetry sample from nvidia-smi during a stress test.
|
||||
type GPUMetricRow struct {
|
||||
ElapsedSec float64
|
||||
GPUIndex int
|
||||
TempC float64
|
||||
UsagePct float64
|
||||
PowerW float64
|
||||
ClockMHz float64
|
||||
ElapsedSec float64 `json:"elapsed_sec"`
|
||||
GPUIndex int `json:"index"`
|
||||
TempC float64 `json:"temp_c"`
|
||||
UsagePct float64 `json:"usage_pct"`
|
||||
MemUsagePct float64 `json:"mem_usage_pct"`
|
||||
PowerW float64 `json:"power_w"`
|
||||
ClockMHz float64 `json:"clock_mhz"`
|
||||
}
|
||||
|
||||
// sampleGPUMetrics runs nvidia-smi once and returns current metrics for each GPU.
|
||||
func sampleGPUMetrics(gpuIndices []int) ([]GPUMetricRow, error) {
|
||||
args := []string{
|
||||
"--query-gpu=index,temperature.gpu,utilization.gpu,power.draw,clocks.current.graphics",
|
||||
"--query-gpu=index,temperature.gpu,utilization.gpu,utilization.memory,power.draw,clocks.current.graphics",
|
||||
"--format=csv,noheader,nounits",
|
||||
}
|
||||
if len(gpuIndices) > 0 {
|
||||
@@ -45,16 +46,17 @@ func sampleGPUMetrics(gpuIndices []int) ([]GPUMetricRow, error) {
|
||||
continue
|
||||
}
|
||||
parts := strings.Split(line, ", ")
|
||||
if len(parts) < 5 {
|
||||
if len(parts) < 6 {
|
||||
continue
|
||||
}
|
||||
idx, _ := strconv.Atoi(strings.TrimSpace(parts[0]))
|
||||
rows = append(rows, GPUMetricRow{
|
||||
GPUIndex: idx,
|
||||
TempC: parseGPUFloat(parts[1]),
|
||||
UsagePct: parseGPUFloat(parts[2]),
|
||||
PowerW: parseGPUFloat(parts[3]),
|
||||
ClockMHz: parseGPUFloat(parts[4]),
|
||||
GPUIndex: idx,
|
||||
TempC: parseGPUFloat(parts[1]),
|
||||
UsagePct: parseGPUFloat(parts[2]),
|
||||
MemUsagePct: parseGPUFloat(parts[3]),
|
||||
PowerW: parseGPUFloat(parts[4]),
|
||||
ClockMHz: parseGPUFloat(parts[5]),
|
||||
})
|
||||
}
|
||||
return rows, nil
|
||||
@@ -69,6 +71,11 @@ func parseGPUFloat(s string) float64 {
|
||||
return v
|
||||
}
|
||||
|
||||
// SampleGPUMetrics runs nvidia-smi once and returns current metrics for each GPU.
|
||||
func SampleGPUMetrics(gpuIndices []int) ([]GPUMetricRow, error) {
|
||||
return sampleGPUMetrics(gpuIndices)
|
||||
}
|
||||
|
||||
// WriteGPUMetricsCSV writes collected rows as a CSV file.
|
||||
func WriteGPUMetricsCSV(path string, rows []GPUMetricRow) error {
|
||||
var b bytes.Buffer
|
||||
@@ -370,6 +377,162 @@ func RenderGPUTerminalChart(rows []GPUMetricRow) string {
|
||||
return strings.TrimRight(b.String(), "\n")
|
||||
}
|
||||
|
||||
// RenderGPULiveChart renders all GPU metrics on a single combined chart per GPU.
|
||||
// Each series is normalised to its own min–max and drawn in a different colour.
|
||||
// RenderGPULiveChart draws one combined line chart per GPU found in rows,
// overlaying the Usage/Temp/Power series with box-drawing characters.
// Each series is normalised to its own min–max range, so the Y-axis
// percentage labels are per-series relative positions, not absolute values.
// chartWidth controls the width of the plot area (Y-axis label uses 5 extra chars).
func RenderGPULiveChart(rows []GPUMetricRow, chartWidth int) string {
	// Fall back to a sane default width when the requested plot is unusably narrow.
	if chartWidth < 20 {
		chartWidth = 70
	}
	const chartHeight = 14

	// Group samples per GPU index, remembering first-seen order for stable output.
	seen := make(map[int]bool)
	var order []int
	gpuMap := make(map[int][]GPUMetricRow)
	for _, r := range rows {
		if !seen[r.GPUIndex] {
			seen[r.GPUIndex] = true
			order = append(order, r.GPUIndex)
		}
		gpuMap[r.GPUIndex] = append(gpuMap[r.GPUIndex], r)
	}

	// The three series drawn on every chart: display label, ANSI color,
	// unit suffix for the legend, and the field extractor.
	type seriesDef struct {
		label string
		color string
		unit  string
		fn    func(GPUMetricRow) float64
	}
	defs := []seriesDef{
		{"Usage", ansiBlue, "%", func(r GPUMetricRow) float64 { return r.UsagePct }},
		{"Temp", ansiRed, "°C", func(r GPUMetricRow) float64 { return r.TempC }},
		{"Power", ansiGreen, "W", func(r GPUMetricRow) float64 { return r.PowerW }},
	}

	var b strings.Builder
	for _, gpuIdx := range order {
		gr := gpuMap[gpuIdx]
		if len(gr) == 0 {
			continue
		}
		elapsed := gr[len(gr)-1].ElapsedSec

		// Build value slices for each series.
		type seriesData struct {
			seriesDef
			vals []float64
			mn   float64
			mx   float64
		}
		var series []seriesData
		for _, d := range defs {
			vals := extractGPUField(gr, d.fn)
			mn, mx := gpuMinMax(vals)
			// Widen a flat series so the (mx-mn) normalisation below never divides by zero.
			if mn == mx {
				mx = mn + 1
			}
			series = append(series, seriesData{d, vals, mn, mx})
		}

		// Shared character grid: row 0 = top (max), row chartHeight = bottom (min).
		type cell struct {
			ch    rune
			color string
		}
		grid := make([][]cell, chartHeight+1)
		for r := range grid {
			grid[r] = make([]cell, chartWidth)
			for c := range grid[r] {
				grid[r][c] = cell{' ', ""}
			}
		}

		// Plot each series onto the shared grid. Later series overwrite earlier
		// ones where they collide (Power is drawn on top).
		for _, s := range series {
			// Downsample only when there are more samples than plot columns.
			w := chartWidth
			if len(s.vals) < w {
				w = len(s.vals)
			}
			data := gpuDownsample(s.vals, w)
			prevRow := -1
			for x, v := range data {
				// Map the value into a grid row (0 = max, chartHeight = min), clamped.
				row := chartHeight - int(math.Round((v-s.mn)/(s.mx-s.mn)*float64(chartHeight)))
				if row < 0 {
					row = 0
				}
				if row > chartHeight {
					row = chartHeight
				}
				if prevRow < 0 || prevRow == row {
					// First column or flat segment: horizontal stroke.
					grid[row][x] = cell{'─', s.color}
				} else {
					// Level change: vertical connector plus corner glyphs,
					// asciigraph-style (╮╰ going down, ╯╭ going up).
					lo, hi := prevRow, row
					if lo > hi {
						lo, hi = hi, lo
					}
					for y := lo + 1; y < hi; y++ {
						grid[y][x] = cell{'│', s.color}
					}
					if prevRow < row {
						grid[prevRow][x] = cell{'╮', s.color}
						grid[row][x] = cell{'╰', s.color}
					} else {
						grid[prevRow][x] = cell{'╯', s.color}
						grid[row][x] = cell{'╭', s.color}
					}
				}
				prevRow = row
			}
		}

		// Render: Y axis + data rows.
		fmt.Fprintf(&b, "GPU %d (%.0fs) each series normalised to its range\n", gpuIdx, elapsed)
		for r := 0; r <= chartHeight; r++ {
			// Y axis label: 100% at top, 50% in middle, 0% at bottom.
			switch r {
			case 0:
				fmt.Fprintf(&b, "%4s┤", "100%")
			case chartHeight / 2:
				fmt.Fprintf(&b, "%4s┤", "50%")
			case chartHeight:
				fmt.Fprintf(&b, "%4s┤", "0%")
			default:
				fmt.Fprintf(&b, "%4s│", "")
			}
			for c := 0; c < chartWidth; c++ {
				cl := grid[r][c]
				if cl.color != "" {
					b.WriteString(cl.color)
					b.WriteRune(cl.ch)
					b.WriteString(ansiReset)
				} else {
					b.WriteRune(' ')
				}
			}
			b.WriteRune('\n')
		}
		// Bottom axis (indent matches the %4s label column above).
		b.WriteString("    └")
		b.WriteString(strings.Repeat("─", chartWidth))
		b.WriteRune('\n')

		// Legend with current (last) values.
		b.WriteString("    ")
		for i, s := range series {
			last := s.vals[len(s.vals)-1]
			b.WriteString(s.color)
			fmt.Fprintf(&b, "▐ %s: %.0f%s", s.label, last, s.unit)
			b.WriteString(ansiReset)
			if i < len(series)-1 {
				b.WriteString("  ")
			}
		}
		b.WriteRune('\n')
	}

	return strings.TrimRight(b.String(), "\n")
}
|
||||
|
||||
// renderLineChart draws a single time-series line chart using box-drawing characters.
|
||||
// Produces output in the style of asciigraph: ╭─╮ │ ╰─╯ with a Y axis and caption.
|
||||
func renderLineChart(vals []float64, color, caption string, height, width int) string {
|
||||
|
||||
105
audit/internal/platform/install.go
Normal file
105
audit/internal/platform/install.go
Normal file
@@ -0,0 +1,105 @@
|
||||
package platform
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// InstallDisk describes a candidate disk for installation.
type InstallDisk struct {
	Device string // e.g. /dev/sda
	Model  string // hardware model as reported by lsblk; may be empty
	Size   string // human-readable, e.g. "500G"
}
|
||||
|
||||
// ListInstallDisks returns block devices suitable for installation.
|
||||
// Excludes USB drives and the current live boot medium.
|
||||
func (s *System) ListInstallDisks() ([]InstallDisk, error) {
|
||||
out, err := exec.Command("lsblk", "-dn", "-o", "NAME,MODEL,SIZE,TYPE,TRAN").Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("lsblk: %w", err)
|
||||
}
|
||||
|
||||
bootDev := findLiveBootDevice()
|
||||
|
||||
var disks []InstallDisk
|
||||
for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
|
||||
fields := strings.Fields(line)
|
||||
// NAME MODEL SIZE TYPE TRAN — model may have spaces so we parse from end
|
||||
if len(fields) < 4 {
|
||||
continue
|
||||
}
|
||||
// Last field: TRAN, second-to-last: TYPE, third-to-last: SIZE
|
||||
tran := fields[len(fields)-1]
|
||||
typ := fields[len(fields)-2]
|
||||
size := fields[len(fields)-3]
|
||||
name := fields[0]
|
||||
model := strings.Join(fields[1:len(fields)-3], " ")
|
||||
|
||||
if typ != "disk" {
|
||||
continue
|
||||
}
|
||||
if strings.EqualFold(tran, "usb") {
|
||||
continue
|
||||
}
|
||||
|
||||
device := "/dev/" + name
|
||||
if device == bootDev {
|
||||
continue
|
||||
}
|
||||
|
||||
disks = append(disks, InstallDisk{
|
||||
Device: device,
|
||||
Model: strings.TrimSpace(model),
|
||||
Size: size,
|
||||
})
|
||||
}
|
||||
return disks, nil
|
||||
}
|
||||
|
||||
// findLiveBootDevice returns the whole-disk block device backing
// /run/live/medium, or "" when no live medium is mounted (or when the
// required tools are unavailable).
func findLiveBootDevice() string {
	srcOut, err := exec.Command("findmnt", "-n", "-o", "SOURCE", "/run/live/medium").Output()
	if err != nil {
		return ""
	}
	source := strings.TrimSpace(string(srcOut))
	if source == "" {
		return ""
	}
	// Resolve a partition to its parent disk via lsblk PKNAME,
	// e.g. /dev/sdb1 → /dev/sdb, /dev/nvme0n1p1 → /dev/nvme0n1.
	// If that fails, fall back to the mount source itself.
	parentOut, err := exec.Command("lsblk", "-no", "PKNAME", source).Output()
	parent := strings.TrimSpace(string(parentOut))
	if err != nil || parent == "" {
		return source
	}
	return "/dev/" + parent
}
|
||||
|
||||
// InstallToDisk runs bee-install <device> <logfile> and streams output to logFile.
// NOTE(review): this wrapper does not redirect stdout/stderr itself — it only
// passes logFile through as an argument, so bee-install is presumably the one
// writing the log; confirm against the bee-install script.
// The context can be used to cancel; cancellation kills the installer process.
func (s *System) InstallToDisk(ctx context.Context, device string, logFile string) error {
	cmd := exec.CommandContext(ctx, "bee-install", device, logFile)
	return cmd.Run()
}
|
||||
|
||||
// InstallLogPath returns the default install log path for a given device.
// Slashes and spaces in the device name are flattened to underscores,
// e.g. "/dev/sda" → "/tmp/bee-install_dev_sda.log".
func InstallLogPath(device string) string {
	sanitized := strings.Map(func(r rune) rune {
		if r == '/' || r == ' ' {
			return '_'
		}
		return r
	}, device)
	return "/tmp/bee-install" + sanitized + ".log"
}
|
||||
|
||||
// Label returns a display label for a disk: "<device> <size> <model>",
// substituting "Unknown" when the model is empty.
func (d InstallDisk) Label() string {
	model := d.Model
	if model == "" {
		model = "Unknown"
	}
	// NOTE(review): dead code — sizeBytes and err are computed and immediately
	// discarded. It is also the file's only strconv use, so deleting it must be
	// paired with dropping the strconv import.
	sizeBytes, err := strconv.ParseInt(strings.TrimSuffix(d.Size, "B"), 10, 64)
	_ = sizeBytes
	_ = err
	return fmt.Sprintf("%s %s %s", d.Device, d.Size, model)
}
|
||||
139
audit/internal/platform/live_metrics.go
Normal file
139
audit/internal/platform/live_metrics.go
Normal file
@@ -0,0 +1,139 @@
|
||||
package platform
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// LiveMetricSample is a single point-in-time snapshot of server metrics
// collected for the web UI metrics page.
type LiveMetricSample struct {
	Timestamp  time.Time      `json:"ts"`           // sample time (UTC; set by SampleLiveMetrics)
	Fans       []FanReading   `json:"fans"`         // fan speeds; nil when the source is unavailable
	Temps      []TempReading  `json:"temps"`        // named temperature sensors; nil when unavailable
	PowerW     float64        `json:"power_w"`      // system power draw in watts; 0 when unavailable
	CPULoadPct float64        `json:"cpu_load_pct"` // overall CPU utilisation, 0–100
	MemLoadPct float64        `json:"mem_load_pct"` // memory utilisation, 0–100
	GPUs       []GPUMetricRow `json:"gpus"`         // per-GPU metrics; nil when unavailable
}
||||
|
||||
// TempReading is a named temperature sensor value.
type TempReading struct {
	Name    string  `json:"name"`    // sensor label, e.g. "CPU"
	Celsius float64 `json:"celsius"` // temperature in degrees Celsius
}
||||
|
||||
// SampleLiveMetrics collects a single metrics snapshot from all available
|
||||
// sources: GPU (via nvidia-smi), fans and temperatures (via ipmitool/sensors),
|
||||
// and system power (via ipmitool dcmi). Missing sources are silently skipped.
|
||||
func SampleLiveMetrics() LiveMetricSample {
|
||||
s := LiveMetricSample{Timestamp: time.Now().UTC()}
|
||||
|
||||
// GPU metrics — skipped silently if nvidia-smi unavailable
|
||||
gpus, _ := SampleGPUMetrics(nil)
|
||||
s.GPUs = gpus
|
||||
|
||||
// Fan speeds — skipped silently if ipmitool unavailable
|
||||
fans, _ := sampleFanSpeeds()
|
||||
s.Fans = fans
|
||||
|
||||
// CPU/system temperature — returns 0 if unavailable
|
||||
cpuTemp := sampleCPUMaxTemp()
|
||||
if cpuTemp > 0 {
|
||||
s.Temps = append(s.Temps, TempReading{Name: "CPU", Celsius: cpuTemp})
|
||||
}
|
||||
|
||||
// System power — returns 0 if unavailable
|
||||
s.PowerW = sampleSystemPower()
|
||||
|
||||
// CPU load — from /proc/stat
|
||||
s.CPULoadPct = sampleCPULoadPct()
|
||||
|
||||
// Memory load — from /proc/meminfo
|
||||
s.MemLoadPct = sampleMemLoadPct()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// cpuStatPrev holds the aggregate /proc/stat counters ([total, idle]) from the
// previous sampleCPULoadPct call. Utilisation is the delta between successive
// calls, so the measurement window is the caller's poll interval — NOT a fixed
// 200ms as a previous comment claimed.
//
// NOTE(review): package-level mutable state; concurrent callers would race on
// it — confirm sampling happens from a single goroutine.
var cpuStatPrev [2]uint64 // [total, idle]

// sampleCPULoadPct returns overall CPU utilisation (0–100) since the previous
// call. The first call — and any call where /proc/stat is unreadable or the
// counters appear to have gone backwards — returns 0.
func sampleCPULoadPct() float64 {
	total, idle := readCPUStat()
	if total == 0 {
		return 0
	}
	prevTotal, prevIdle := cpuStatPrev[0], cpuStatPrev[1]
	cpuStatPrev = [2]uint64{total, idle}
	if prevTotal == 0 {
		return 0
	}
	// Guard against counter wraparound or a rolled-back snapshot: the unsigned
	// subtractions below would otherwise yield a huge bogus delta.
	if total < prevTotal || idle < prevIdle {
		return 0
	}
	dt := float64(total - prevTotal)
	di := float64(idle - prevIdle)
	if dt <= 0 {
		return 0
	}
	pct := (1 - di/dt) * 100
	if pct < 0 {
		return 0
	}
	if pct > 100 {
		return 100
	}
	return pct
}

// readCPUStat parses the aggregate "cpu " line of /proc/stat, returning the
// sum of all time counters and the idle portion (idle + iowait), in clock
// ticks. Returns (0, 0) when the file is missing or has no aggregate line.
func readCPUStat() (total, idle uint64) {
	f, err := os.Open("/proc/stat")
	if err != nil {
		return 0, 0
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := sc.Text()
		if !strings.HasPrefix(line, "cpu ") {
			continue
		}
		fields := strings.Fields(line)[1:] // skip "cpu"
		var vals [10]uint64
		for i := 0; i < len(fields) && i < 10; i++ {
			vals[i], _ = strconv.ParseUint(fields[i], 10, 64)
		}
		// idle = idle + iowait
		idle = vals[3] + vals[4]
		for _, v := range vals {
			total += v
		}
		return total, idle
	}
	return 0, 0
}
|
||||
|
||||
// sampleMemLoadPct returns memory utilisation (0–100) from /proc/meminfo,
// computed as (MemTotal - MemAvailable) / MemTotal. Returns 0 when the file
// is unreadable or MemTotal is absent.
func sampleMemLoadPct() float64 {
	f, err := os.Open("/proc/meminfo")
	if err != nil {
		return 0
	}
	defer f.Close()
	vals := map[string]uint64{}
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		if len(fields) >= 2 {
			v, _ := strconv.ParseUint(fields[1], 10, 64)
			vals[strings.TrimSuffix(fields[0], ":")] = v
		}
	}
	total := vals["MemTotal"]
	if total == 0 {
		return 0
	}
	avail, ok := vals["MemAvailable"]
	if !ok {
		// Pre-3.14 kernels have no MemAvailable; previously this read as 0 and
		// the function reported 100% load. Approximate availability instead.
		avail = vals["MemFree"] + vals["Buffers"] + vals["Cached"]
	}
	if avail > total {
		avail = total // guard the unsigned subtraction below
	}
	used := total - avail
	return float64(used) / float64(total) * 100
}
|
||||
@@ -121,14 +121,34 @@ func (s *System) ListNvidiaGPUs() ([]NvidiaGPU, error) {
|
||||
return gpus, nil
|
||||
}
|
||||
|
||||
// RunNCCLTests runs nccl-tests all_reduce_perf across all NVIDIA GPUs.
|
||||
// Measures collective communication bandwidth over NVLink/PCIe.
|
||||
func (s *System) RunNCCLTests(ctx context.Context, baseDir string) (string, error) {
|
||||
// detect GPU count
|
||||
out, _ := exec.Command("nvidia-smi", "--query-gpu=index", "--format=csv,noheader").Output()
|
||||
gpuCount := len(strings.Split(strings.TrimSpace(string(out)), "\n"))
|
||||
if gpuCount < 1 {
|
||||
gpuCount = 1
|
||||
}
|
||||
return runAcceptancePackCtx(ctx, baseDir, "nccl-tests", []satJob{
|
||||
{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}},
|
||||
{name: "02-all-reduce-perf.log", cmd: []string{
|
||||
"all_reduce_perf", "-b", "512M", "-e", "4G", "-f", "2",
|
||||
"-g", strconv.Itoa(gpuCount), "--iters", "20",
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
func (s *System) RunNvidiaAcceptancePack(baseDir string) (string, error) {
|
||||
return runAcceptancePack(baseDir, "gpu-nvidia", nvidiaSATJobs())
|
||||
}
|
||||
|
||||
// RunNvidiaAcceptancePackWithOptions runs the NVIDIA SAT with explicit duration,
|
||||
// GPU memory size, and GPU index selection. ctx cancellation kills the running job.
|
||||
func (s *System) RunNvidiaAcceptancePackWithOptions(ctx context.Context, baseDir string, durationSec int, sizeMB int, gpuIndices []int) (string, error) {
|
||||
return runAcceptancePackCtx(ctx, baseDir, "gpu-nvidia", nvidiaSATJobsWithOptions(durationSec, sizeMB, gpuIndices))
|
||||
// RunNvidiaAcceptancePackWithOptions runs the NVIDIA diagnostics via DCGM.
|
||||
// diagLevel: 1=quick, 2=medium, 3=targeted stress, 4=extended stress.
|
||||
// gpuIndices: specific GPU indices to test (empty = all GPUs).
|
||||
// ctx cancellation kills the running job.
|
||||
func (s *System) RunNvidiaAcceptancePackWithOptions(ctx context.Context, baseDir string, diagLevel int, gpuIndices []int) (string, error) {
|
||||
return runAcceptancePackCtx(ctx, baseDir, "gpu-nvidia", nvidiaDCGMJobs(diagLevel, gpuIndices))
|
||||
}
|
||||
|
||||
func (s *System) RunMemoryAcceptancePack(baseDir string) (string, error) {
|
||||
@@ -275,27 +295,23 @@ func runAcceptancePack(baseDir, prefix string, jobs []satJob) (string, error) {
|
||||
return archive, nil
|
||||
}
|
||||
|
||||
func nvidiaSATJobsWithOptions(durationSec, sizeMB int, gpuIndices []int) []satJob {
|
||||
var env []string
|
||||
func nvidiaDCGMJobs(diagLevel int, gpuIndices []int) []satJob {
|
||||
if diagLevel < 1 || diagLevel > 4 {
|
||||
diagLevel = 3
|
||||
}
|
||||
diagArgs := []string{"dcgmi", "diag", "-r", strconv.Itoa(diagLevel)}
|
||||
if len(gpuIndices) > 0 {
|
||||
ids := make([]string, len(gpuIndices))
|
||||
for i, idx := range gpuIndices {
|
||||
ids[i] = strconv.Itoa(idx)
|
||||
}
|
||||
env = []string{"CUDA_VISIBLE_DEVICES=" + strings.Join(ids, ",")}
|
||||
diagArgs = append(diagArgs, "-i", strings.Join(ids, ","))
|
||||
}
|
||||
return []satJob{
|
||||
{name: "01-nvidia-smi-q.log", cmd: []string{"nvidia-smi", "-q"}},
|
||||
{name: "02-dmidecode-baseboard.log", cmd: []string{"dmidecode", "-t", "baseboard"}},
|
||||
{name: "03-dmidecode-system.log", cmd: []string{"dmidecode", "-t", "system"}},
|
||||
{name: "04-nvidia-bug-report.log", cmd: []string{"nvidia-bug-report.sh", "--output-file", "{{run_dir}}/nvidia-bug-report.log"}},
|
||||
{
|
||||
name: "05-bee-gpu-stress.log",
|
||||
cmd: []string{"bee-gpu-stress", "--seconds", strconv.Itoa(durationSec), "--size-mb", strconv.Itoa(sizeMB)},
|
||||
env: env,
|
||||
collectGPU: true,
|
||||
gpuIndices: gpuIndices,
|
||||
},
|
||||
{name: "04-dcgmi-diag.log", cmd: diagArgs},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,203 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"bee/audit/internal/platform"
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
)
|
||||
|
||||
func (m model) updateStaticForm(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
switch msg.String() {
|
||||
case "esc":
|
||||
m.screen = screenNetwork
|
||||
m.formFields = nil
|
||||
m.formIndex = 0
|
||||
return m, nil
|
||||
case "up", "shift+tab":
|
||||
if m.formIndex > 0 {
|
||||
m.formIndex--
|
||||
}
|
||||
case "down", "tab":
|
||||
if m.formIndex < len(m.formFields)-1 {
|
||||
m.formIndex++
|
||||
}
|
||||
case "enter":
|
||||
if m.formIndex < len(m.formFields)-1 {
|
||||
m.formIndex++
|
||||
return m, nil
|
||||
}
|
||||
cfg := m.app.ParseStaticIPv4Config(m.selectedIface, []string{
|
||||
m.formFields[0].Value,
|
||||
m.formFields[1].Value,
|
||||
m.formFields[2].Value,
|
||||
m.formFields[3].Value,
|
||||
})
|
||||
m.busy = true
|
||||
m.busyTitle = "Static IPv4: " + m.selectedIface
|
||||
return m, func() tea.Msg {
|
||||
result, err := m.app.SetStaticIPv4Result(cfg)
|
||||
return resultMsg{title: result.Title, body: result.Body, err: err, back: screenNetwork}
|
||||
}
|
||||
case "backspace":
|
||||
field := &m.formFields[m.formIndex]
|
||||
if len(field.Value) > 0 {
|
||||
field.Value = field.Value[:len(field.Value)-1]
|
||||
}
|
||||
default:
|
||||
if msg.Type == tea.KeyRunes && len(msg.Runes) > 0 {
|
||||
m.formFields[m.formIndex].Value += string(msg.Runes)
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m model) updateConfirm(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
switch msg.String() {
|
||||
case "left", "up", "tab":
|
||||
if m.cursor > 0 {
|
||||
m.cursor--
|
||||
}
|
||||
case "right", "down":
|
||||
if m.cursor < 1 {
|
||||
m.cursor++
|
||||
}
|
||||
case "esc":
|
||||
m.screen = m.confirmCancelTarget()
|
||||
m.cursor = 0
|
||||
m.pendingAction = actionNone
|
||||
return m, nil
|
||||
case "enter":
|
||||
if m.cursor == 1 { // Cancel
|
||||
m.screen = m.confirmCancelTarget()
|
||||
m.cursor = 0
|
||||
m.pendingAction = actionNone
|
||||
return m, nil
|
||||
}
|
||||
m.busy = true
|
||||
switch m.pendingAction {
|
||||
case actionExportBundle:
|
||||
m.busyTitle = "Export support bundle"
|
||||
target := *m.selectedTarget
|
||||
return m, func() tea.Msg {
|
||||
result, err := m.app.ExportSupportBundleResult(target)
|
||||
return resultMsg{title: result.Title, body: result.Body, err: err, back: screenMain}
|
||||
}
|
||||
case actionRunAll:
|
||||
return m.executeRunAll()
|
||||
case actionRunMemorySAT:
|
||||
m.busyTitle = "Memory test"
|
||||
m.progressPrefix = "memory"
|
||||
m.progressSince = time.Now()
|
||||
m.progressLines = nil
|
||||
since := m.progressSince
|
||||
return m, tea.Batch(
|
||||
func() tea.Msg {
|
||||
result, err := m.app.RunMemoryAcceptancePackResult("")
|
||||
return resultMsg{title: result.Title, body: result.Body, err: err, back: screenHealthCheck}
|
||||
},
|
||||
pollSATProgress("memory", since),
|
||||
)
|
||||
case actionRunStorageSAT:
|
||||
m.busyTitle = "Storage test"
|
||||
m.progressPrefix = "storage"
|
||||
m.progressSince = time.Now()
|
||||
m.progressLines = nil
|
||||
since := m.progressSince
|
||||
return m, tea.Batch(
|
||||
func() tea.Msg {
|
||||
result, err := m.app.RunStorageAcceptancePackResult("")
|
||||
return resultMsg{title: result.Title, body: result.Body, err: err, back: screenHealthCheck}
|
||||
},
|
||||
pollSATProgress("storage", since),
|
||||
)
|
||||
case actionRunCPUSAT:
|
||||
m.busyTitle = "CPU test"
|
||||
m.progressPrefix = "cpu"
|
||||
m.progressSince = time.Now()
|
||||
m.progressLines = nil
|
||||
since := m.progressSince
|
||||
durationSec := hcCPUDurations[m.hcMode]
|
||||
return m, tea.Batch(
|
||||
func() tea.Msg {
|
||||
result, err := m.app.RunCPUAcceptancePackResult("", durationSec)
|
||||
return resultMsg{title: result.Title, body: result.Body, err: err, back: screenHealthCheck}
|
||||
},
|
||||
pollSATProgress("cpu", since),
|
||||
)
|
||||
case actionRunAMDGPUSAT:
|
||||
m.busyTitle = "AMD GPU test"
|
||||
m.progressPrefix = "gpu-amd"
|
||||
m.progressSince = time.Now()
|
||||
m.progressLines = nil
|
||||
since := m.progressSince
|
||||
return m, tea.Batch(
|
||||
func() tea.Msg {
|
||||
result, err := m.app.RunAMDAcceptancePackResult("")
|
||||
return resultMsg{title: result.Title, body: result.Body, err: err, back: screenHealthCheck}
|
||||
},
|
||||
pollSATProgress("gpu-amd", since),
|
||||
)
|
||||
case actionRunFanStress:
|
||||
return m.startGPUStressTest()
|
||||
}
|
||||
case "ctrl+c":
|
||||
return m, tea.Quit
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m model) confirmCancelTarget() screen {
|
||||
switch m.pendingAction {
|
||||
case actionExportBundle:
|
||||
return screenExportTargets
|
||||
case actionRunAll, actionRunMemorySAT, actionRunStorageSAT, actionRunCPUSAT, actionRunAMDGPUSAT, actionRunFanStress:
|
||||
return screenHealthCheck
|
||||
default:
|
||||
return screenMain
|
||||
}
|
||||
}
|
||||
|
||||
// hcFanStressOpts builds FanStressOptions for the selected mode, auto-detecting all GPUs.
|
||||
func hcFanStressOpts(hcMode int, application interface {
|
||||
ListNvidiaGPUs() ([]platform.NvidiaGPU, error)
|
||||
}) platform.FanStressOptions {
|
||||
// Phase durations per mode: [baseline, load1, pause, load2]
|
||||
type durations struct{ baseline, load1, pause, load2 int }
|
||||
modes := [3]durations{
|
||||
{30, 120, 30, 120}, // Quick: ~5 min total
|
||||
{60, 300, 60, 300}, // Standard: ~12 min total
|
||||
{60, 600, 120, 600}, // Express: ~24 min total
|
||||
}
|
||||
if hcMode < 0 || hcMode >= len(modes) {
|
||||
hcMode = 0
|
||||
}
|
||||
d := modes[hcMode]
|
||||
|
||||
// Use all detected NVIDIA GPUs.
|
||||
var indices []int
|
||||
if gpus, err := application.ListNvidiaGPUs(); err == nil {
|
||||
for _, g := range gpus {
|
||||
indices = append(indices, g.Index)
|
||||
}
|
||||
}
|
||||
|
||||
// Use minimum GPU memory size to fit all GPUs.
|
||||
sizeMB := 64
|
||||
if gpus, err := application.ListNvidiaGPUs(); err == nil {
|
||||
for _, g := range gpus {
|
||||
if g.MemoryMB > 0 && (sizeMB == 64 || g.MemoryMB < sizeMB) {
|
||||
sizeMB = g.MemoryMB / 16 // allocate 1/16 of VRAM per GPU
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return platform.FanStressOptions{
|
||||
BaselineSec: d.baseline,
|
||||
Phase1DurSec: d.load1,
|
||||
PauseSec: d.pause,
|
||||
Phase2DurSec: d.load2,
|
||||
SizeMB: sizeMB,
|
||||
GPUIndices: indices,
|
||||
}
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"bee/audit/internal/app"
|
||||
"bee/audit/internal/platform"
|
||||
)
|
||||
|
||||
type resultMsg struct {
|
||||
title string
|
||||
body string
|
||||
err error
|
||||
back screen
|
||||
}
|
||||
|
||||
type servicesMsg struct {
|
||||
services []string
|
||||
err error
|
||||
}
|
||||
|
||||
type interfacesMsg struct {
|
||||
ifaces []platform.InterfaceInfo
|
||||
err error
|
||||
}
|
||||
|
||||
type exportTargetsMsg struct {
|
||||
targets []platform.RemovableTarget
|
||||
err error
|
||||
}
|
||||
|
||||
type snapshotMsg struct {
|
||||
banner string
|
||||
panel app.HardwarePanelData
|
||||
}
|
||||
|
||||
type nvidiaGPUsMsg struct {
|
||||
gpus []platform.NvidiaGPU
|
||||
err error
|
||||
}
|
||||
|
||||
type nvtopClosedMsg struct{}
|
||||
|
||||
type nvidiaSATDoneMsg struct {
|
||||
title string
|
||||
body string
|
||||
err error
|
||||
}
|
||||
|
||||
type gpuStressDoneMsg struct {
|
||||
title string
|
||||
body string
|
||||
err error
|
||||
}
|
||||
@@ -1,131 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"bee/audit/internal/app"
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
)
|
||||
|
||||
type satProgressMsg struct {
|
||||
lines []string
|
||||
}
|
||||
|
||||
// pollSATProgress returns a Cmd that waits 300ms then reads the latest verbose.log
|
||||
// for the given SAT prefix and returns parsed step progress lines.
|
||||
func pollSATProgress(prefix string, since time.Time) tea.Cmd {
|
||||
return tea.Tick(300*time.Millisecond, func(_ time.Time) tea.Msg {
|
||||
return satProgressMsg{lines: readSATProgressLines(prefix, since)}
|
||||
})
|
||||
}
|
||||
|
||||
func readSATProgressLines(prefix string, since time.Time) []string {
|
||||
pattern := filepath.Join(app.DefaultSATBaseDir, prefix+"-*/verbose.log")
|
||||
matches, err := filepath.Glob(pattern)
|
||||
if err != nil || len(matches) == 0 {
|
||||
return nil
|
||||
}
|
||||
sort.Strings(matches)
|
||||
// Find the latest file created at or after (since - 5s) to account for clock skew.
|
||||
cutoff := since.Add(-5 * time.Second)
|
||||
candidate := ""
|
||||
for _, m := range matches {
|
||||
info, statErr := os.Stat(m)
|
||||
if statErr == nil && info.ModTime().After(cutoff) {
|
||||
candidate = m
|
||||
}
|
||||
}
|
||||
if candidate == "" {
|
||||
return nil
|
||||
}
|
||||
raw, err := os.ReadFile(candidate)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return parseSATVerboseProgress(string(raw))
|
||||
}
|
||||
|
||||
// parseSATVerboseProgress parses verbose.log content and returns display lines like:
|
||||
//
|
||||
// "PASS lscpu (234ms)"
|
||||
// "FAIL stress-ng (60.0s)"
|
||||
// "... sensors-after"
|
||||
func parseSATVerboseProgress(content string) []string {
|
||||
type step struct {
|
||||
name string
|
||||
rc int
|
||||
durationMs int
|
||||
done bool
|
||||
}
|
||||
|
||||
lines := strings.Split(content, "\n")
|
||||
var steps []step
|
||||
stepIdx := map[string]int{}
|
||||
|
||||
for i, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if idx := strings.Index(line, "] start "); idx >= 0 {
|
||||
name := strings.TrimSpace(line[idx+len("] start "):])
|
||||
if _, exists := stepIdx[name]; !exists {
|
||||
stepIdx[name] = len(steps)
|
||||
steps = append(steps, step{name: name})
|
||||
}
|
||||
} else if idx := strings.Index(line, "] finish "); idx >= 0 {
|
||||
name := strings.TrimSpace(line[idx+len("] finish "):])
|
||||
si, exists := stepIdx[name]
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
steps[si].done = true
|
||||
for j := i + 1; j < len(lines) && j <= i+3; j++ {
|
||||
l := strings.TrimSpace(lines[j])
|
||||
if strings.HasPrefix(l, "rc: ") {
|
||||
steps[si].rc, _ = strconv.Atoi(strings.TrimPrefix(l, "rc: "))
|
||||
} else if strings.HasPrefix(l, "duration_ms: ") {
|
||||
steps[si].durationMs, _ = strconv.Atoi(strings.TrimPrefix(l, "duration_ms: "))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var result []string
|
||||
for _, s := range steps {
|
||||
display := cleanSATStepName(s.name)
|
||||
if s.done {
|
||||
status := "PASS"
|
||||
if s.rc != 0 {
|
||||
status = "FAIL"
|
||||
}
|
||||
result = append(result, fmt.Sprintf("%-4s %s (%s)", status, display, fmtDurMs(s.durationMs)))
|
||||
} else {
|
||||
result = append(result, fmt.Sprintf("... %s", display))
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// cleanSATStepName strips leading digits and dash: "01-lscpu.log" → "lscpu".
|
||||
func cleanSATStepName(name string) string {
|
||||
name = strings.TrimSuffix(name, ".log")
|
||||
i := 0
|
||||
for i < len(name) && name[i] >= '0' && name[i] <= '9' {
|
||||
i++
|
||||
}
|
||||
if i < len(name) && name[i] == '-' {
|
||||
name = name[i+1:]
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func fmtDurMs(ms int) string {
|
||||
if ms < 1000 {
|
||||
return fmt.Sprintf("%dms", ms)
|
||||
}
|
||||
return fmt.Sprintf("%.1fs", float64(ms)/1000)
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
package tui
|
||||
|
||||
import tea "github.com/charmbracelet/bubbletea"
|
||||
|
||||
func (m model) handleExportTargetsMenu() (tea.Model, tea.Cmd) {
|
||||
if len(m.targets) == 0 {
|
||||
return m, resultCmd("Export support bundle", "No removable filesystems found", nil, screenMain)
|
||||
}
|
||||
target := m.targets[m.cursor]
|
||||
m.selectedTarget = &target
|
||||
m.pendingAction = actionExportBundle
|
||||
m.screen = screenConfirm
|
||||
return m, nil
|
||||
}
|
||||
@@ -1,386 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
)
|
||||
|
||||
// Component indices.
|
||||
const (
|
||||
hcGPU = 0
|
||||
hcMemory = 1
|
||||
hcStorage = 2
|
||||
hcCPU = 3
|
||||
)
|
||||
|
||||
// Cursor positions in Health Check screen.
|
||||
const (
|
||||
hcCurGPU = 0
|
||||
hcCurMemory = 1
|
||||
hcCurStorage = 2
|
||||
hcCurCPU = 3
|
||||
hcCurSelectAll = 4
|
||||
hcCurModeQuick = 5
|
||||
hcCurModeStd = 6
|
||||
hcCurModeExpr = 7
|
||||
hcCurRunAll = 8
|
||||
hcCurFanStress = 9
|
||||
hcCurTotal = 10
|
||||
)
|
||||
|
||||
// hcModeDurations maps mode index (0=Quick,1=Standard,2=Express) to GPU stress seconds.
|
||||
var hcModeDurations = [3]int{600, 3600, 28800}
|
||||
|
||||
// hcCPUDurations maps mode index to CPU stress-ng seconds.
|
||||
var hcCPUDurations = [3]int{60, 300, 900}
|
||||
|
||||
func (m model) enterHealthCheck() (tea.Model, tea.Cmd) {
|
||||
m.screen = screenHealthCheck
|
||||
if !m.hcInitialized {
|
||||
m.hcSel = [4]bool{true, true, true, true}
|
||||
m.hcMode = 0
|
||||
m.hcCursor = 0
|
||||
m.hcInitialized = true
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m model) updateHealthCheck(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
switch msg.String() {
|
||||
case "up", "k":
|
||||
if m.hcCursor > 0 {
|
||||
m.hcCursor--
|
||||
}
|
||||
case "down", "j":
|
||||
if m.hcCursor < hcCurTotal-1 {
|
||||
m.hcCursor++
|
||||
}
|
||||
case " ":
|
||||
switch m.hcCursor {
|
||||
case hcCurGPU, hcCurMemory, hcCurStorage, hcCurCPU:
|
||||
m.hcSel[m.hcCursor] = !m.hcSel[m.hcCursor]
|
||||
case hcCurSelectAll:
|
||||
allOn := m.hcSel[0] && m.hcSel[1] && m.hcSel[2] && m.hcSel[3]
|
||||
for i := range m.hcSel {
|
||||
m.hcSel[i] = !allOn
|
||||
}
|
||||
case hcCurModeQuick, hcCurModeStd, hcCurModeExpr:
|
||||
m.hcMode = m.hcCursor - hcCurModeQuick
|
||||
}
|
||||
case "enter":
|
||||
switch m.hcCursor {
|
||||
case hcCurGPU, hcCurMemory, hcCurStorage, hcCurCPU:
|
||||
return m.hcRunSingle(m.hcCursor)
|
||||
case hcCurSelectAll:
|
||||
allOn := m.hcSel[0] && m.hcSel[1] && m.hcSel[2] && m.hcSel[3]
|
||||
for i := range m.hcSel {
|
||||
m.hcSel[i] = !allOn
|
||||
}
|
||||
case hcCurModeQuick, hcCurModeStd, hcCurModeExpr:
|
||||
m.hcMode = m.hcCursor - hcCurModeQuick
|
||||
case hcCurRunAll:
|
||||
return m.hcRunAll()
|
||||
case hcCurFanStress:
|
||||
return m.hcRunFanStress()
|
||||
}
|
||||
case "g", "G":
|
||||
return m.hcRunSingle(hcGPU)
|
||||
case "m", "M":
|
||||
return m.hcRunSingle(hcMemory)
|
||||
case "s", "S":
|
||||
return m.hcRunSingle(hcStorage)
|
||||
case "c", "C":
|
||||
return m.hcRunSingle(hcCPU)
|
||||
case "r", "R":
|
||||
return m.hcRunAll()
|
||||
case "f", "F":
|
||||
return m.hcRunFanStress()
|
||||
case "a", "A":
|
||||
allOn := m.hcSel[0] && m.hcSel[1] && m.hcSel[2] && m.hcSel[3]
|
||||
for i := range m.hcSel {
|
||||
m.hcSel[i] = !allOn
|
||||
}
|
||||
case "1":
|
||||
m.hcMode = 0
|
||||
case "2":
|
||||
m.hcMode = 1
|
||||
case "3":
|
||||
m.hcMode = 2
|
||||
case "esc":
|
||||
m.screen = screenMain
|
||||
m.cursor = 0
|
||||
case "q", "ctrl+c":
|
||||
return m, tea.Quit
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m model) hcRunSingle(idx int) (tea.Model, tea.Cmd) {
|
||||
switch idx {
|
||||
case hcGPU:
|
||||
if m.app.DetectGPUVendor() == "amd" {
|
||||
m.pendingAction = actionRunAMDGPUSAT
|
||||
m.screen = screenConfirm
|
||||
m.cursor = 0
|
||||
return m, nil
|
||||
}
|
||||
m.nvidiaDurIdx = m.hcMode
|
||||
return m.enterNvidiaSATSetup()
|
||||
case hcMemory:
|
||||
m.pendingAction = actionRunMemorySAT
|
||||
m.screen = screenConfirm
|
||||
m.cursor = 0
|
||||
return m, nil
|
||||
case hcStorage:
|
||||
m.pendingAction = actionRunStorageSAT
|
||||
m.screen = screenConfirm
|
||||
m.cursor = 0
|
||||
return m, nil
|
||||
case hcCPU:
|
||||
m.pendingAction = actionRunCPUSAT
|
||||
m.screen = screenConfirm
|
||||
m.cursor = 0
|
||||
return m, nil
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m model) hcRunFanStress() (tea.Model, tea.Cmd) {
|
||||
m.pendingAction = actionRunFanStress
|
||||
m.screen = screenConfirm
|
||||
m.cursor = 0
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// startGPUStressTest launches the GPU Platform Stress Test and nvtop concurrently.
|
||||
// nvtop occupies the full terminal as a live chart; the stress test runs in background.
|
||||
func (m model) startGPUStressTest() (tea.Model, tea.Cmd) {
|
||||
opts := hcFanStressOpts(m.hcMode, m.app)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
m.gpuStressCancel = cancel
|
||||
m.gpuStressAborted = false
|
||||
m.screen = screenGPUStressRunning
|
||||
m.nvidiaSATCursor = 0
|
||||
|
||||
stressCmd := func() tea.Msg {
|
||||
result, err := m.app.RunFanStressTestResult(ctx, opts)
|
||||
return gpuStressDoneMsg{title: result.Title, body: result.Body, err: err}
|
||||
}
|
||||
|
||||
nvtopPath, lookErr := exec.LookPath("nvtop")
|
||||
if lookErr != nil {
|
||||
return m, stressCmd
|
||||
}
|
||||
|
||||
return m, tea.Batch(
|
||||
stressCmd,
|
||||
tea.ExecProcess(exec.Command(nvtopPath), func(_ error) tea.Msg {
|
||||
return nvtopClosedMsg{}
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
// updateGPUStressRunning handles keys on the GPU stress running screen.
|
||||
func (m model) updateGPUStressRunning(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
switch msg.String() {
|
||||
case "o", "O":
|
||||
nvtopPath, err := exec.LookPath("nvtop")
|
||||
if err != nil {
|
||||
return m, nil
|
||||
}
|
||||
return m, tea.ExecProcess(exec.Command(nvtopPath), func(_ error) tea.Msg {
|
||||
return nvtopClosedMsg{}
|
||||
})
|
||||
case "a", "A":
|
||||
if m.gpuStressCancel != nil {
|
||||
m.gpuStressCancel()
|
||||
m.gpuStressCancel = nil
|
||||
}
|
||||
m.gpuStressAborted = true
|
||||
m.screen = screenHealthCheck
|
||||
m.cursor = 0
|
||||
case "ctrl+c":
|
||||
return m, tea.Quit
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// renderGPUStressRunning renders the static "in progress" screen shown while
// the GPU platform stress test runs.
func renderGPUStressRunning() string {
	const (
		header = "GPU PLATFORM STRESS TEST"
		status = "Test is running..."
		hints  = "[o] Open nvtop [a] Abort test [ctrl+c] quit"
	)
	return header + "\n\n" + status + "\n\n" + hints + "\n"
}
|
||||
|
||||
func (m model) hcRunAll() (tea.Model, tea.Cmd) {
|
||||
for _, sel := range m.hcSel {
|
||||
if sel {
|
||||
m.pendingAction = actionRunAll
|
||||
m.screen = screenConfirm
|
||||
m.cursor = 0
|
||||
return m, nil
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m model) executeRunAll() (tea.Model, tea.Cmd) {
|
||||
durationSec := hcModeDurations[m.hcMode]
|
||||
durationIdx := m.hcMode
|
||||
sel := m.hcSel
|
||||
app := m.app
|
||||
m.busy = true
|
||||
m.busyTitle = "Health Check"
|
||||
return m, func() tea.Msg {
|
||||
var parts []string
|
||||
if sel[hcGPU] {
|
||||
vendor := app.DetectGPUVendor()
|
||||
if vendor == "amd" {
|
||||
r, err := app.RunAMDAcceptancePackResult("")
|
||||
body := r.Body
|
||||
if err != nil {
|
||||
body += "\nERROR: " + err.Error()
|
||||
}
|
||||
parts = append(parts, "=== GPU (AMD) ===\n"+body)
|
||||
} else {
|
||||
gpus, err := app.ListNvidiaGPUs()
|
||||
if err != nil || len(gpus) == 0 {
|
||||
parts = append(parts, "=== GPU ===\nNo NVIDIA GPUs detected or driver not loaded.")
|
||||
} else {
|
||||
var indices []int
|
||||
sizeMB := 0
|
||||
for _, g := range gpus {
|
||||
indices = append(indices, g.Index)
|
||||
if sizeMB == 0 || g.MemoryMB < sizeMB {
|
||||
sizeMB = g.MemoryMB
|
||||
}
|
||||
}
|
||||
if sizeMB == 0 {
|
||||
sizeMB = 64
|
||||
}
|
||||
r, err := app.RunNvidiaAcceptancePackWithOptions(context.Background(), "", durationSec, sizeMB, indices)
|
||||
body := r.Body
|
||||
if err != nil {
|
||||
body += "\nERROR: " + err.Error()
|
||||
}
|
||||
parts = append(parts, "=== GPU ===\n"+body)
|
||||
}
|
||||
}
|
||||
}
|
||||
if sel[hcMemory] {
|
||||
r, err := app.RunMemoryAcceptancePackResult("")
|
||||
body := r.Body
|
||||
if err != nil {
|
||||
body += "\nERROR: " + err.Error()
|
||||
}
|
||||
parts = append(parts, "=== MEMORY ===\n"+body)
|
||||
}
|
||||
if sel[hcStorage] {
|
||||
r, err := app.RunStorageAcceptancePackResult("")
|
||||
body := r.Body
|
||||
if err != nil {
|
||||
body += "\nERROR: " + err.Error()
|
||||
}
|
||||
parts = append(parts, "=== STORAGE ===\n"+body)
|
||||
}
|
||||
if sel[hcCPU] {
|
||||
cpuDur := hcCPUDurations[durationIdx]
|
||||
r, err := app.RunCPUAcceptancePackResult("", cpuDur)
|
||||
body := r.Body
|
||||
if err != nil {
|
||||
body += "\nERROR: " + err.Error()
|
||||
}
|
||||
parts = append(parts, "=== CPU ===\n"+body)
|
||||
}
|
||||
combined := strings.Join(parts, "\n\n")
|
||||
if combined == "" {
|
||||
combined = "No components selected."
|
||||
}
|
||||
return resultMsg{title: "Health Check", body: combined, back: screenHealthCheck}
|
||||
}
|
||||
}
|
||||
|
||||
// renderHealthCheck draws the health-check menu: the per-component
// checkboxes, a select-all row, the run-mode radio group, and the
// RUN ALL / GPU stress action rows, with "> " marking the cursor row.
func renderHealthCheck(m model) string {
	var b strings.Builder

	fmt.Fprintln(&b, "HEALTH CHECK")
	fmt.Fprintln(&b)
	fmt.Fprintln(&b, " Diagnostics:")
	fmt.Fprintln(&b)

	// One row per testable component; key is the component's hotkey letter.
	type comp struct{ name, desc, key string }
	comps := []comp{
		{"GPU", "nvidia/amd auto-detect", "G"},
		{"MEMORY", "memtester", "M"},
		{"STORAGE", "smartctl + NVMe self-test", "S"},
		{"CPU", "audit diagnostics", "C"},
	}
	for i, c := range comps {
		pfx := " "
		if m.hcCursor == i {
			pfx = "> "
		}
		ch := "[ ]"
		if m.hcSel[i] {
			ch = "[x]"
		}
		fmt.Fprintf(&b, "%s%s %-8s %-28s [%s]\n", pfx, ch, c.name, c.desc, c.key)
	}

	fmt.Fprintln(&b, " ─────────────────────────────────────────────────")
	// Select/Deselect-All row: checked only when all four components are on.
	{
		pfx := " "
		if m.hcCursor == hcCurSelectAll {
			pfx = "> "
		}
		allOn := m.hcSel[0] && m.hcSel[1] && m.hcSel[2] && m.hcSel[3]
		ch := "[ ]"
		if allOn {
			ch = "[x]"
		}
		fmt.Fprintf(&b, "%s%s Select / Deselect All [A]\n", pfx, ch)
	}

	fmt.Fprintln(&b)
	fmt.Fprintln(&b, " Mode:")
	// Run-mode radio group; index matches m.hcMode.
	modes := []struct{ label, key string }{
		{"Quick", "1"},
		{"Standard", "2"},
		{"Express", "3"},
	}
	for i, mode := range modes {
		pfx := " "
		if m.hcCursor == hcCurModeQuick+i {
			pfx = "> "
		}
		radio := "( )"
		if m.hcMode == i {
			radio = "(*)"
		}
		fmt.Fprintf(&b, "%s%s %-10s [%s]\n", pfx, radio, mode.label, mode.key)
	}

	fmt.Fprintln(&b)
	// RUN ALL action row.
	{
		pfx := " "
		if m.hcCursor == hcCurRunAll {
			pfx = "> "
		}
		fmt.Fprintf(&b, "%s[ RUN ALL [R] ]\n", pfx)
	}

	// GPU platform stress test action row.
	{
		pfx := " "
		if m.hcCursor == hcCurFanStress {
			pfx = "> "
		}
		fmt.Fprintf(&b, "%s[ GPU PLATFORM STRESS TEST [F] ] (thermal cycling, fan lag, throttle check)\n", pfx)
	}

	fmt.Fprintln(&b)
	fmt.Fprintln(&b, "─────────────────────────────────────────────────────────────────")
	fmt.Fprint(&b, "[↑↓] move [space/enter] toggle [letter] single test [R] run all [F] gpu stress [Esc] back")
	return b.String()
}
|
||||
@@ -1,27 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
)
|
||||
|
||||
func (m model) handleMainMenu() (tea.Model, tea.Cmd) {
|
||||
switch m.cursor {
|
||||
case 0: // Health Check
|
||||
return m.enterHealthCheck()
|
||||
case 1: // Export support bundle
|
||||
m.pendingAction = actionExportBundle
|
||||
m.busy = true
|
||||
m.busyTitle = "Export support bundle"
|
||||
return m, func() tea.Msg {
|
||||
targets, err := m.app.ListRemovableTargets()
|
||||
return exportTargetsMsg{targets: targets, err: err}
|
||||
}
|
||||
case 2: // Settings
|
||||
m.screen = screenSettings
|
||||
m.cursor = 0
|
||||
return m, nil
|
||||
case 3: // Exit
|
||||
return m, tea.Quit
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
@@ -1,76 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
)
|
||||
|
||||
func (m model) handleNetworkMenu() (tea.Model, tea.Cmd) {
|
||||
switch m.cursor {
|
||||
case 0:
|
||||
m.busy = true
|
||||
m.busyTitle = "Network status"
|
||||
return m, func() tea.Msg {
|
||||
result, err := m.app.NetworkStatus()
|
||||
return resultMsg{title: result.Title, body: result.Body, err: err, back: screenNetwork}
|
||||
}
|
||||
case 1:
|
||||
m.busy = true
|
||||
m.busyTitle = "DHCP all interfaces"
|
||||
return m, func() tea.Msg {
|
||||
result, err := m.app.DHCPAllResult()
|
||||
return resultMsg{title: result.Title, body: result.Body, err: err, back: screenNetwork}
|
||||
}
|
||||
case 2:
|
||||
m.pendingAction = actionDHCPOne
|
||||
m.busy = true
|
||||
m.busyTitle = "Interfaces"
|
||||
return m, func() tea.Msg {
|
||||
ifaces, err := m.app.ListInterfaces()
|
||||
return interfacesMsg{ifaces: ifaces, err: err}
|
||||
}
|
||||
case 3:
|
||||
m.pendingAction = actionStaticIPv4
|
||||
m.busy = true
|
||||
m.busyTitle = "Interfaces"
|
||||
return m, func() tea.Msg {
|
||||
ifaces, err := m.app.ListInterfaces()
|
||||
return interfacesMsg{ifaces: ifaces, err: err}
|
||||
}
|
||||
case 4:
|
||||
m.screen = screenSettings
|
||||
m.cursor = 0
|
||||
return m, nil
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m model) handleInterfacePickMenu() (tea.Model, tea.Cmd) {
|
||||
if len(m.interfaces) == 0 {
|
||||
return m, resultCmd("interfaces", "No physical interfaces found", nil, screenNetwork)
|
||||
}
|
||||
m.selectedIface = m.interfaces[m.cursor].Name
|
||||
switch m.pendingAction {
|
||||
case actionDHCPOne:
|
||||
m.busy = true
|
||||
m.busyTitle = "DHCP on " + m.selectedIface
|
||||
return m, func() tea.Msg {
|
||||
result, err := m.app.DHCPOneResult(m.selectedIface)
|
||||
return resultMsg{title: result.Title, body: result.Body, err: err, back: screenNetwork}
|
||||
}
|
||||
case actionStaticIPv4:
|
||||
defaults := m.app.DefaultStaticIPv4FormFields(m.selectedIface)
|
||||
m.formFields = []formField{
|
||||
{Label: "IPv4 address", Value: defaults[0]},
|
||||
{Label: "Prefix", Value: defaults[1]},
|
||||
{Label: "Gateway", Value: strings.TrimSpace(defaults[2])},
|
||||
{Label: "DNS (space-separated)", Value: defaults[3]},
|
||||
}
|
||||
m.formIndex = 0
|
||||
m.screen = screenStaticForm
|
||||
return m, nil
|
||||
default:
|
||||
return m, nil
|
||||
}
|
||||
}
|
||||
@@ -1,218 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"bee/audit/internal/platform"
|
||||
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
)
|
||||
|
||||
// nvidiaDurationOptions is the fixed set of run lengths offered on the
// NVIDIA SAT setup screen, in display order.
var nvidiaDurationOptions = []struct {
	label   string // text shown next to the radio button
	seconds int    // duration passed to the acceptance pack
}{
	{"10 minutes", 600},
	{"1 hour", 3600},
	{"8 hours", 28800},
	{"24 hours", 86400},
}
|
||||
|
||||
// enterNvidiaSATSetup resets the setup screen and starts loading GPU list.
//
// NOTE(review): this unconditionally resets nvidiaDurIdx to 0, which clobbers
// the duration preset a caller assigns immediately before calling (the
// health-check screen does m.nvidiaDurIdx = m.hcMode first) — confirm whether
// the mode-based default is supposed to survive this reset.
func (m model) enterNvidiaSATSetup() (tea.Model, tea.Cmd) {
	m.screen = screenNvidiaSATSetup
	m.nvidiaGPUs = nil
	m.nvidiaGPUSel = nil
	m.nvidiaDurIdx = 0
	m.nvidiaSATCursor = 0
	m.busy = true // show the spinner while the GPU list loads
	m.busyTitle = "NVIDIA SAT"
	return m, func() tea.Msg {
		gpus, err := m.app.ListNvidiaGPUs()
		return nvidiaGPUsMsg{gpus: gpus, err: err}
	}
}
|
||||
|
||||
// handleNvidiaGPUsMsg processes the GPU list response.
|
||||
func (m model) handleNvidiaGPUsMsg(msg nvidiaGPUsMsg) (tea.Model, tea.Cmd) {
|
||||
m.busy = false
|
||||
m.busyTitle = ""
|
||||
if msg.err != nil {
|
||||
m.title = "NVIDIA SAT"
|
||||
m.body = fmt.Sprintf("Failed to list GPUs: %v", msg.err)
|
||||
m.prevScreen = screenHealthCheck
|
||||
m.screen = screenOutput
|
||||
return m, nil
|
||||
}
|
||||
m.nvidiaGPUs = msg.gpus
|
||||
m.nvidiaGPUSel = make([]bool, len(msg.gpus))
|
||||
for i := range m.nvidiaGPUSel {
|
||||
m.nvidiaGPUSel[i] = true // all selected by default
|
||||
}
|
||||
m.nvidiaSATCursor = 0
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// updateNvidiaSATSetup handles keys on the setup screen.
|
||||
func (m model) updateNvidiaSATSetup(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
numDur := len(nvidiaDurationOptions)
|
||||
numGPU := len(m.nvidiaGPUs)
|
||||
totalItems := numDur + numGPU + 2 // +2: Start, Cancel
|
||||
switch msg.String() {
|
||||
case "up", "k":
|
||||
if m.nvidiaSATCursor > 0 {
|
||||
m.nvidiaSATCursor--
|
||||
}
|
||||
case "down", "j":
|
||||
if m.nvidiaSATCursor < totalItems-1 {
|
||||
m.nvidiaSATCursor++
|
||||
}
|
||||
case " ":
|
||||
switch {
|
||||
case m.nvidiaSATCursor < numDur:
|
||||
m.nvidiaDurIdx = m.nvidiaSATCursor
|
||||
case m.nvidiaSATCursor < numDur+numGPU:
|
||||
i := m.nvidiaSATCursor - numDur
|
||||
m.nvidiaGPUSel[i] = !m.nvidiaGPUSel[i]
|
||||
}
|
||||
case "enter":
|
||||
startIdx := numDur + numGPU
|
||||
cancelIdx := startIdx + 1
|
||||
switch {
|
||||
case m.nvidiaSATCursor < numDur:
|
||||
m.nvidiaDurIdx = m.nvidiaSATCursor
|
||||
case m.nvidiaSATCursor < startIdx:
|
||||
i := m.nvidiaSATCursor - numDur
|
||||
m.nvidiaGPUSel[i] = !m.nvidiaGPUSel[i]
|
||||
case m.nvidiaSATCursor == startIdx:
|
||||
return m.startNvidiaSAT()
|
||||
case m.nvidiaSATCursor == cancelIdx:
|
||||
m.screen = screenHealthCheck
|
||||
m.cursor = 0
|
||||
}
|
||||
case "esc":
|
||||
m.screen = screenHealthCheck
|
||||
m.cursor = 0
|
||||
case "ctrl+c", "q":
|
||||
return m, tea.Quit
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// startNvidiaSAT launches the NVIDIA acceptance pack.
|
||||
func (m model) startNvidiaSAT() (tea.Model, tea.Cmd) {
|
||||
var selectedGPUs []platform.NvidiaGPU
|
||||
for i, sel := range m.nvidiaGPUSel {
|
||||
if sel {
|
||||
selectedGPUs = append(selectedGPUs, m.nvidiaGPUs[i])
|
||||
}
|
||||
}
|
||||
if len(selectedGPUs) == 0 {
|
||||
selectedGPUs = m.nvidiaGPUs // fallback: use all if none explicitly selected
|
||||
}
|
||||
|
||||
sizeMB := 0
|
||||
for _, g := range selectedGPUs {
|
||||
if sizeMB == 0 || g.MemoryMB < sizeMB {
|
||||
sizeMB = g.MemoryMB
|
||||
}
|
||||
}
|
||||
if sizeMB == 0 {
|
||||
sizeMB = 64
|
||||
}
|
||||
|
||||
var gpuIndices []int
|
||||
for _, g := range selectedGPUs {
|
||||
gpuIndices = append(gpuIndices, g.Index)
|
||||
}
|
||||
|
||||
durationSec := nvidiaDurationOptions[m.nvidiaDurIdx].seconds
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
m.nvidiaSATCancel = cancel
|
||||
m.nvidiaSATAborted = false
|
||||
m.screen = screenNvidiaSATRunning
|
||||
m.nvidiaSATCursor = 0
|
||||
|
||||
satCmd := func() tea.Msg {
|
||||
result, err := m.app.RunNvidiaAcceptancePackWithOptions(ctx, "", durationSec, sizeMB, gpuIndices)
|
||||
return nvidiaSATDoneMsg{title: result.Title, body: result.Body, err: err}
|
||||
}
|
||||
|
||||
return m, satCmd
|
||||
}
|
||||
|
||||
// updateNvidiaSATRunning handles keys on the running screen.
|
||||
func (m model) updateNvidiaSATRunning(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
switch msg.String() {
|
||||
case "a", "A":
|
||||
if m.nvidiaSATCancel != nil {
|
||||
m.nvidiaSATCancel()
|
||||
m.nvidiaSATCancel = nil
|
||||
}
|
||||
m.nvidiaSATAborted = true
|
||||
m.screen = screenHealthCheck
|
||||
m.cursor = 0
|
||||
case "ctrl+c":
|
||||
return m, tea.Quit
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// renderNvidiaSATSetup renders the setup screen: the duration radio group,
// the GPU checkbox list, and the Start/Cancel rows, with "> " marking the
// row under m.nvidiaSATCursor.
func renderNvidiaSATSetup(m model) string {
	var b strings.Builder
	fmt.Fprintln(&b, "NVIDIA SAT")
	fmt.Fprintln(&b)
	fmt.Fprintln(&b, "Duration:")
	// Duration radios occupy cursor positions [0, len(nvidiaDurationOptions)).
	for i, opt := range nvidiaDurationOptions {
		radio := "( )"
		if i == m.nvidiaDurIdx {
			radio = "(*)"
		}
		prefix := " "
		if m.nvidiaSATCursor == i {
			prefix = "> "
		}
		fmt.Fprintf(&b, "%s%s %s\n", prefix, radio, opt.label)
	}
	fmt.Fprintln(&b)
	if len(m.nvidiaGPUs) == 0 {
		fmt.Fprintln(&b, "GPUs: (none detected)")
	} else {
		fmt.Fprintln(&b, "GPUs:")
		// GPU checkboxes follow the duration radios in cursor order.
		for i, gpu := range m.nvidiaGPUs {
			check := "[ ]"
			if m.nvidiaGPUSel[i] {
				check = "[x]"
			}
			prefix := " "
			if m.nvidiaSATCursor == len(nvidiaDurationOptions)+i {
				prefix = "> "
			}
			fmt.Fprintf(&b, "%s%s %d: %s (%d MB)\n", prefix, check, gpu.Index, gpu.Name, gpu.MemoryMB)
		}
	}
	fmt.Fprintln(&b)
	// Start and Cancel are the last two cursor positions.
	startIdx := len(nvidiaDurationOptions) + len(m.nvidiaGPUs)
	startPfx := " "
	cancelPfx := " "
	if m.nvidiaSATCursor == startIdx {
		startPfx = "> "
	}
	if m.nvidiaSATCursor == startIdx+1 {
		cancelPfx = "> "
	}
	fmt.Fprintf(&b, "%sStart\n", startPfx)
	fmt.Fprintf(&b, "%sCancel\n", cancelPfx)
	fmt.Fprintln(&b)
	b.WriteString("[↑/↓] move [space] toggle [enter] select [esc] cancel\n")
	return b.String()
}
|
||||
|
||||
// renderNvidiaSATRunning renders the static "in progress" screen shown while
// the NVIDIA acceptance pack runs.
func renderNvidiaSATRunning() string {
	const (
		header = "NVIDIA SAT"
		status = "Test is running..."
		hints  = "[a] Abort test [ctrl+c] quit"
	)
	return header + "\n\n" + status + "\n\n" + hints + "\n"
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"bee/audit/internal/platform"
|
||||
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
)
|
||||
|
||||
func (m model) handleServicesMenu() (tea.Model, tea.Cmd) {
|
||||
if len(m.services) == 0 {
|
||||
return m, resultCmd("Services", "No bee-* services found.", nil, screenSettings)
|
||||
}
|
||||
m.selectedService = m.services[m.cursor]
|
||||
m.screen = screenServiceAction
|
||||
m.cursor = 0
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (m model) handleServiceActionMenu() (tea.Model, tea.Cmd) {
|
||||
action := m.serviceMenu[m.cursor]
|
||||
if action == "back" {
|
||||
m.screen = screenServices
|
||||
m.cursor = 0
|
||||
return m, nil
|
||||
}
|
||||
|
||||
m.busy = true
|
||||
m.busyTitle = "service: " + m.selectedService
|
||||
return m, func() tea.Msg {
|
||||
switch action {
|
||||
case "Status":
|
||||
result, err := m.app.ServiceStatusResult(m.selectedService)
|
||||
return resultMsg{title: result.Title, body: result.Body, err: err, back: screenServiceAction}
|
||||
case "Restart":
|
||||
result, err := m.app.ServiceActionResult(m.selectedService, platform.ServiceRestart)
|
||||
return resultMsg{title: result.Title, body: result.Body, err: err, back: screenServiceAction}
|
||||
case "Start":
|
||||
result, err := m.app.ServiceActionResult(m.selectedService, platform.ServiceStart)
|
||||
return resultMsg{title: result.Title, body: result.Body, err: err, back: screenServiceAction}
|
||||
case "Stop":
|
||||
result, err := m.app.ServiceActionResult(m.selectedService, platform.ServiceStop)
|
||||
return resultMsg{title: result.Title, body: result.Body, err: err, back: screenServiceAction}
|
||||
default:
|
||||
return resultMsg{title: "Service", body: "Unknown action.", back: screenServiceAction}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,64 +0,0 @@
|
||||
package tui
|
||||
|
||||
import tea "github.com/charmbracelet/bubbletea"
|
||||
|
||||
// handleSettingsMenu dispatches the Enter key on the settings menu. Each
// asynchronous case flips the busy flag and returns a tea.Cmd that performs
// the work and reports back via resultMsg (or a list-specific message).
func (m model) handleSettingsMenu() (tea.Model, tea.Cmd) {
	switch m.cursor {
	case 0: // Network
		m.screen = screenNetwork
		m.cursor = 0
		return m, nil
	case 1: // Services
		m.busy = true
		m.busyTitle = "Services"
		return m, func() tea.Msg {
			services, err := m.app.ListBeeServices()
			return servicesMsg{services: services, err: err}
		}
	case 2: // Re-run audit
		m.busy = true
		m.busyTitle = "Re-run audit"
		// Copied into a local before the closure so the command captures the
		// mode as of the key press.
		runtimeMode := m.runtimeMode
		return m, func() tea.Msg {
			result, err := m.app.RunAuditNow(runtimeMode)
			return resultMsg{title: result.Title, body: result.Body, err: err, back: screenSettings}
		}
	case 3: // Run self-check
		m.busy = true
		m.busyTitle = "Self-check"
		return m, func() tea.Msg {
			result, err := m.app.RunRuntimePreflightResult()
			return resultMsg{title: result.Title, body: result.Body, err: err, back: screenSettings}
		}
	case 4: // Runtime issues
		m.busy = true
		m.busyTitle = "Runtime issues"
		return m, func() tea.Msg {
			result := m.app.RuntimeHealthResult()
			return resultMsg{title: result.Title, body: result.Body, back: screenSettings}
		}
	case 5: // Audit logs
		m.busy = true
		m.busyTitle = "Audit logs"
		return m, func() tea.Msg {
			result := m.app.AuditLogTailResult()
			return resultMsg{title: result.Title, body: result.Body, back: screenSettings}
		}
	case 6: // Check tools
		m.busy = true
		m.busyTitle = "Check tools"
		return m, func() tea.Msg {
			// Fixed list of external binaries the audit workflows rely on.
			result := m.app.ToolCheckResult([]string{
				"dmidecode", "smartctl", "nvme", "ipmitool", "lspci",
				"ethtool", "bee", "nvidia-smi", "bee-gpu-stress",
				"memtester", "dhclient", "lsblk", "mount",
			})
			return resultMsg{title: result.Title, body: result.Body, back: screenSettings}
		}
	case 7: // Back
		m.screen = screenMain
		m.cursor = 0
		return m, nil
	}
	return m, nil
}
|
||||
@@ -1,30 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"bee/audit/internal/app"
|
||||
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
)
|
||||
|
||||
func (m model) refreshSnapshotCmd() tea.Cmd {
|
||||
if m.app == nil {
|
||||
return nil
|
||||
}
|
||||
return func() tea.Msg {
|
||||
return snapshotMsg{
|
||||
banner: m.app.MainBanner(),
|
||||
panel: m.app.LoadHardwarePanel(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func shouldRefreshSnapshot(prev, next model) bool {
|
||||
return prev.screen != next.screen || prev.busy != next.busy
|
||||
}
|
||||
|
||||
func emptySnapshot() snapshotMsg {
|
||||
return snapshotMsg{
|
||||
banner: "",
|
||||
panel: app.HardwarePanelData{},
|
||||
}
|
||||
}
|
||||
@@ -1,628 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"bee/audit/internal/app"
|
||||
"bee/audit/internal/platform"
|
||||
"bee/audit/internal/runtimeenv"
|
||||
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
)
|
||||
|
||||
func newTestModel() model {
|
||||
return newModel(app.New(platform.New()), runtimeenv.ModeLocal)
|
||||
}
|
||||
|
||||
func sendKey(t *testing.T, m model, key tea.KeyType) model {
|
||||
t.Helper()
|
||||
|
||||
next, _ := m.Update(tea.KeyMsg{Type: key})
|
||||
return next.(model)
|
||||
}
|
||||
|
||||
// TestUpdateMainMenuCursorNavigation verifies that the up/down keys move the
// main-menu cursor one row at a time.
func TestUpdateMainMenuCursorNavigation(t *testing.T) {
	t.Parallel()

	m := newTestModel()

	m = sendKey(t, m, tea.KeyDown)
	if m.cursor != 1 {
		t.Fatalf("cursor=%d want 1 after down", m.cursor)
	}

	m = sendKey(t, m, tea.KeyDown)
	if m.cursor != 2 {
		t.Fatalf("cursor=%d want 2 after second down", m.cursor)
	}

	m = sendKey(t, m, tea.KeyUp)
	if m.cursor != 1 {
		t.Fatalf("cursor=%d want 1 after up", m.cursor)
	}
}
|
||||
|
||||
// TestUpdateMainMenuEnterActions table-tests Enter on each main-menu row:
// expected destination screen, busy flag, and whether a command is returned.
func TestUpdateMainMenuEnterActions(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name       string
		cursor     int
		wantScreen screen
		wantBusy   bool
		wantCmd    bool
	}{
		{name: "health_check", cursor: 0, wantScreen: screenHealthCheck, wantCmd: true},
		{name: "export", cursor: 1, wantScreen: screenMain, wantBusy: true, wantCmd: true},
		{name: "settings", cursor: 2, wantScreen: screenSettings, wantCmd: true},
		{name: "exit", cursor: 3, wantScreen: screenMain, wantCmd: true},
	}

	for _, test := range tests {
		test := test // pin range variable for the parallel subtest
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()

			m := newTestModel()
			m.cursor = test.cursor

			next, cmd := m.Update(tea.KeyMsg{Type: tea.KeyEnter})
			got := next.(model)

			if got.screen != test.wantScreen {
				t.Fatalf("screen=%q want %q", got.screen, test.wantScreen)
			}
			if got.busy != test.wantBusy {
				t.Fatalf("busy=%v want %v", got.busy, test.wantBusy)
			}
			if (cmd != nil) != test.wantCmd {
				t.Fatalf("cmd present=%v want %v", cmd != nil, test.wantCmd)
			}
		})
	}
}
|
||||
|
||||
// TestUpdateConfirmCancelViaKeys verifies right-arrow selects the Cancel
// button on the confirm screen and Enter then returns to health check.
func TestUpdateConfirmCancelViaKeys(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.screen = screenConfirm
	m.pendingAction = actionRunMemorySAT

	next, _ := m.Update(tea.KeyMsg{Type: tea.KeyRight})
	got := next.(model)
	if got.cursor != 1 {
		t.Fatalf("cursor=%d want 1 after right", got.cursor)
	}

	next, _ = got.Update(tea.KeyMsg{Type: tea.KeyEnter})
	got = next.(model)
	if got.screen != screenHealthCheck {
		t.Fatalf("screen=%q want %q", got.screen, screenHealthCheck)
	}
	if got.cursor != 0 {
		t.Fatalf("cursor=%d want 0 after cancel", got.cursor)
	}
}
|
||||
|
||||
// TestMainMenuSimpleTransitions table-tests the synchronous main-menu rows
// (no command, screen switch, cursor reset).
func TestMainMenuSimpleTransitions(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name       string
		cursor     int
		wantScreen screen
	}{
		{name: "health_check", cursor: 0, wantScreen: screenHealthCheck},
		{name: "settings", cursor: 2, wantScreen: screenSettings},
	}

	for _, test := range tests {
		test := test // pin range variable for the parallel subtest
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()

			m := newTestModel()
			m.cursor = test.cursor

			next, cmd := m.handleMainMenu()
			got := next.(model)

			if cmd != nil {
				t.Fatalf("expected nil cmd for %s", test.name)
			}
			if got.screen != test.wantScreen {
				t.Fatalf("screen=%q want %q", got.screen, test.wantScreen)
			}
			if got.cursor != 0 {
				t.Fatalf("cursor=%d want 0", got.cursor)
			}
		})
	}
}
|
||||
|
||||
// TestMainMenuExportSetsBusy verifies the export row flips the busy flag and
// returns an async command that loads removable targets.
func TestMainMenuExportSetsBusy(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.cursor = 1 // Export support bundle

	next, cmd := m.handleMainMenu()
	got := next.(model)

	if !got.busy {
		t.Fatal("busy=false for export")
	}
	if cmd == nil {
		t.Fatal("expected async cmd for export")
	}
}
|
||||
|
||||
// TestMainViewRendersTwoColumns spot-checks the main view for the menu
// entries, the column separator, and the key-hint footer.
func TestMainViewRendersTwoColumns(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.cursor = 1

	view := m.View()
	for _, want := range []string{
		"bee",
		"Health Check",
		"> Export support bundle",
		"Settings",
		"Exit",
		"│",
		"[↑↓] move",
	} {
		if !strings.Contains(view, want) {
			t.Fatalf("view missing %q\nview:\n%s", want, view)
		}
	}
}
|
||||
|
||||
// TestEscapeNavigation table-tests Esc from each submenu: expected parent
// screen and cursor reset to the top row.
func TestEscapeNavigation(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name       string
		screen     screen
		wantScreen screen
	}{
		{name: "network to settings", screen: screenNetwork, wantScreen: screenSettings},
		{name: "services to settings", screen: screenServices, wantScreen: screenSettings},
		{name: "settings to main", screen: screenSettings, wantScreen: screenMain},
		{name: "service action to services", screen: screenServiceAction, wantScreen: screenServices},
		{name: "export targets to main", screen: screenExportTargets, wantScreen: screenMain},
		{name: "interface pick to network", screen: screenInterfacePick, wantScreen: screenNetwork},
	}

	for _, test := range tests {
		test := test // pin range variable for the parallel subtest
		t.Run(test.name, func(t *testing.T) {
			t.Parallel()

			m := newTestModel()
			m.screen = test.screen
			m.cursor = 3

			next, _ := m.updateKey(tea.KeyMsg{Type: tea.KeyEsc})
			got := next.(model)

			if got.screen != test.wantScreen {
				t.Fatalf("screen=%q want %q", got.screen, test.wantScreen)
			}
			if got.cursor != 0 {
				t.Fatalf("cursor=%d want 0", got.cursor)
			}
		})
	}
}
|
||||
|
||||
// TestHealthCheckEscReturnsToMain verifies Esc on the health-check screen
// returns to the main menu with the cursor reset.
func TestHealthCheckEscReturnsToMain(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.screen = screenHealthCheck
	m.hcCursor = 3

	next, _ := m.updateHealthCheck(tea.KeyMsg{Type: tea.KeyEsc})
	got := next.(model)

	if got.screen != screenMain {
		t.Fatalf("screen=%q want %q", got.screen, screenMain)
	}
	if got.cursor != 0 {
		t.Fatalf("cursor=%d want 0", got.cursor)
	}
}
|
||||
|
||||
// TestOutputScreenReturnsToPreviousScreen verifies Enter dismisses the output
// screen, restores prevScreen, and clears the stored title/body.
func TestOutputScreenReturnsToPreviousScreen(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.screen = screenOutput
	m.prevScreen = screenNetwork
	m.title = "title"
	m.body = "body"

	next, _ := m.updateKey(tea.KeyMsg{Type: tea.KeyEnter})
	got := next.(model)

	if got.screen != screenNetwork {
		t.Fatalf("screen=%q want %q", got.screen, screenNetwork)
	}
	if got.title != "" || got.body != "" {
		t.Fatalf("expected output state cleared, got title=%q body=%q", got.title, got.body)
	}
}
|
||||
|
||||
// TestHealthCheckGPUOpensNvidiaSATSetup verifies the GPU single test opens
// the NVIDIA SAT setup screen (with a GPU-list loader command) and that Esc
// from that screen returns to health check.
func TestHealthCheckGPUOpensNvidiaSATSetup(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.screen = screenHealthCheck
	m.hcInitialized = true
	m.hcSel = [4]bool{true, true, true, true}

	next, cmd := m.hcRunSingle(hcGPU)
	got := next.(model)

	if cmd == nil {
		t.Fatal("expected non-nil cmd (GPU list loader)")
	}
	if got.screen != screenNvidiaSATSetup {
		t.Fatalf("screen=%q want %q", got.screen, screenNvidiaSATSetup)
	}

	// esc from setup returns to health check
	next, _ = got.updateNvidiaSATSetup(tea.KeyMsg{Type: tea.KeyEsc})
	got = next.(model)
	if got.screen != screenHealthCheck {
		t.Fatalf("screen after esc=%q want %q", got.screen, screenHealthCheck)
	}
}
|
||||
|
||||
// TestHealthCheckRunSingleMapsActions verifies single-component runs map to
// the matching pending action and open the confirm screen.
func TestHealthCheckRunSingleMapsActions(t *testing.T) {
	t.Parallel()

	tests := []struct {
		idx  int
		want actionKind
	}{
		{idx: hcMemory, want: actionRunMemorySAT},
		{idx: hcStorage, want: actionRunStorageSAT},
	}

	for _, test := range tests {
		m := newTestModel()
		m.screen = screenHealthCheck
		m.hcInitialized = true

		next, _ := m.hcRunSingle(test.idx)
		got := next.(model)
		if got.pendingAction != test.want {
			t.Fatalf("idx=%d pendingAction=%q want %q", test.idx, got.pendingAction, test.want)
		}
		if got.screen != screenConfirm {
			t.Fatalf("idx=%d screen=%q want %q", test.idx, got.screen, screenConfirm)
		}
	}
}
|
||||
|
||||
// TestExportTargetSelectionOpensConfirm verifies choosing a removable target
// stores the selection and opens the confirm screen for the export action.
func TestExportTargetSelectionOpensConfirm(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.screen = screenExportTargets
	m.targets = []platform.RemovableTarget{{Device: "/dev/sdb1", FSType: "vfat", Size: "16G"}}

	next, cmd := m.handleExportTargetsMenu()
	got := next.(model)

	if cmd != nil {
		t.Fatal("expected nil cmd")
	}
	if got.screen != screenConfirm {
		t.Fatalf("screen=%q want %q", got.screen, screenConfirm)
	}
	if got.pendingAction != actionExportBundle {
		t.Fatalf("pendingAction=%q want %q", got.pendingAction, actionExportBundle)
	}
	if got.selectedTarget == nil || got.selectedTarget.Device != "/dev/sdb1" {
		t.Fatalf("selectedTarget=%+v want /dev/sdb1", got.selectedTarget)
	}
}
|
||||
|
||||
// TestInterfacePickStaticIPv4OpensForm verifies picking an interface for the
// static-IPv4 action opens the four-field address form synchronously.
func TestInterfacePickStaticIPv4OpensForm(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.pendingAction = actionStaticIPv4
	m.interfaces = []platform.InterfaceInfo{{Name: "eth0"}}

	next, cmd := m.handleInterfacePickMenu()
	got := next.(model)

	if cmd != nil {
		t.Fatal("expected nil cmd")
	}
	if got.screen != screenStaticForm {
		t.Fatalf("screen=%q want %q", got.screen, screenStaticForm)
	}
	if got.selectedIface != "eth0" {
		t.Fatalf("selectedIface=%q want eth0", got.selectedIface)
	}
	if len(got.formFields) != 4 {
		t.Fatalf("len(formFields)=%d want 4", len(got.formFields))
	}
}
|
||||
|
||||
// TestResultMsgUsesExplicitBackScreen verifies a resultMsg with an explicit
// back target shows the output screen and records that target as prevScreen.
func TestResultMsgUsesExplicitBackScreen(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.screen = screenConfirm

	next, _ := m.Update(resultMsg{title: "done", body: "ok", back: screenNetwork})
	got := next.(model)

	if got.screen != screenOutput {
		t.Fatalf("screen=%q want %q", got.screen, screenOutput)
	}
	if got.prevScreen != screenNetwork {
		t.Fatalf("prevScreen=%q want %q", got.prevScreen, screenNetwork)
	}
}
|
||||
|
||||
// TestConfirmCancelTarget verifies the screen the confirm dialog returns to on
// cancel, for each pending action kind.
func TestConfirmCancelTarget(t *testing.T) {
	t.Parallel()

	m := newTestModel()

	m.pendingAction = actionExportBundle
	if got := m.confirmCancelTarget(); got != screenExportTargets {
		t.Fatalf("export cancel target=%q want %q", got, screenExportTargets)
	}

	m.pendingAction = actionRunAll
	if got := m.confirmCancelTarget(); got != screenHealthCheck {
		t.Fatalf("run all cancel target=%q want %q", got, screenHealthCheck)
	}

	m.pendingAction = actionRunMemorySAT
	if got := m.confirmCancelTarget(); got != screenHealthCheck {
		t.Fatalf("memory sat cancel target=%q want %q", got, screenHealthCheck)
	}

	m.pendingAction = actionRunStorageSAT
	if got := m.confirmCancelTarget(); got != screenHealthCheck {
		t.Fatalf("storage sat cancel target=%q want %q", got, screenHealthCheck)
	}

	// Unknown/empty action falls back to the main screen.
	m.pendingAction = actionNone
	if got := m.confirmCancelTarget(); got != screenMain {
		t.Fatalf("default cancel target=%q want %q", got, screenMain)
	}
}
|
||||
|
||||
// TestViewBusyStateIsMinimal verifies the exact minimal output rendered while
// the model is busy with no busy title set.
func TestViewBusyStateIsMinimal(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.busy = true

	view := m.View()
	want := "bee\n\nWorking...\n\n[ctrl+c] quit\n"
	if view != want {
		t.Fatalf("busy view mismatch\nwant:\n%s\ngot:\n%s", want, view)
	}
}
|
||||
|
||||
// TestViewBusyStateUsesBusyTitle verifies that a set busyTitle replaces the
// default "bee" heading in the busy view.
func TestViewBusyStateUsesBusyTitle(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.busy = true
	m.busyTitle = "Export support bundle"

	view := m.View()

	for _, want := range []string{
		"Export support bundle",
		"Working...",
		"[ctrl+c] quit",
	} {
		if !strings.Contains(view, want) {
			t.Fatalf("view missing %q\nview:\n%s", want, view)
		}
	}
}
|
||||
|
||||
// TestViewOutputScreenRendersBodyAndBackHint verifies the output screen shows
// the title, the body text, and the back/quit key hints.
func TestViewOutputScreenRendersBodyAndBackHint(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.screen = screenOutput
	m.title = "Run audit"
	m.body = "audit output: /appdata/bee/export/bee-audit.json\n"

	view := m.View()

	for _, want := range []string{
		"Run audit",
		"audit output: /appdata/bee/export/bee-audit.json",
		"[enter/esc] back [ctrl+c] quit",
	} {
		if !strings.Contains(view, want) {
			t.Fatalf("view missing %q\nview:\n%s", want, view)
		}
	}
}
|
||||
|
||||
// TestViewRendersBannerModuleAboveScreenBody verifies that a non-empty banner
// is rendered as a boxed MOTD module alongside the normal main-screen content.
func TestViewRendersBannerModuleAboveScreenBody(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.banner = "System: Demo Server\nIP: 10.0.0.10"
	m.width = 60

	view := m.View()

	for _, want := range []string{
		"┌ MOTD ",
		"System: Demo Server",
		"IP: 10.0.0.10",
		"Health Check",
		"Export support bundle",
	} {
		if !strings.Contains(view, want) {
			t.Fatalf("view missing %q\nview:\n%s", want, view)
		}
	}
}
|
||||
|
||||
// TestSnapshotMsgUpdatesBannerAndPanel verifies that a snapshotMsg copies its
// banner and hardware-panel payload into the model without emitting a command.
func TestSnapshotMsgUpdatesBannerAndPanel(t *testing.T) {
	t.Parallel()

	m := newTestModel()

	next, cmd := m.Update(snapshotMsg{
		banner: "System: Demo",
		panel: app.HardwarePanelData{
			Header: []string{"Demo header"},
			Rows: []app.ComponentRow{
				{Key: "CPU", Status: "PASS", Detail: "ok"},
			},
		},
	})
	got := next.(model)

	if cmd != nil {
		t.Fatal("expected nil cmd")
	}
	if got.banner != "System: Demo" {
		t.Fatalf("banner=%q want %q", got.banner, "System: Demo")
	}
	if len(got.panel.Rows) != 1 || got.panel.Rows[0].Key != "CPU" {
		t.Fatalf("panel rows=%+v", got.panel.Rows)
	}
}
|
||||
|
||||
// TestViewExportTargetsRendersDeviceMetadata verifies the export-targets menu
// renders device, fstype, size, label, and mountpoint on one selectable line.
func TestViewExportTargetsRendersDeviceMetadata(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.screen = screenExportTargets
	m.targets = []platform.RemovableTarget{
		{
			Device:     "/dev/sdb1",
			FSType:     "vfat",
			Size:       "29G",
			Label:      "BEEUSB",
			Mountpoint: "/media/bee",
		},
	}

	view := m.View()

	for _, want := range []string{
		"Export support bundle",
		"Select removable filesystem",
		"> /dev/sdb1 [vfat 29G] label=BEEUSB mounted=/media/bee",
	} {
		if !strings.Contains(view, want) {
			t.Fatalf("view missing %q\nview:\n%s", want, view)
		}
	}
}
|
||||
|
||||
// TestViewStaticFormRendersFields verifies the static-IPv4 form renders all
// fields, marks the focused field with ">", and shows the form key hints.
func TestViewStaticFormRendersFields(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.screen = screenStaticForm
	m.selectedIface = "enp1s0"
	m.formFields = []formField{
		{Label: "Address", Value: "192.0.2.10/24"},
		{Label: "Gateway", Value: "192.0.2.1"},
		{Label: "DNS", Value: "1.1.1.1"},
	}
	m.formIndex = 1

	view := m.View()

	for _, want := range []string{
		"Static IPv4: enp1s0",
		"  Address: 192.0.2.10/24",
		"> Gateway: 192.0.2.1",
		"  DNS: 1.1.1.1",
		"[tab/↑/↓] move [enter] next/submit [backspace] delete [esc] cancel",
	} {
		if !strings.Contains(view, want) {
			t.Fatalf("view missing %q\nview:\n%s", want, view)
		}
	}
}
|
||||
|
||||
// TestViewConfirmScreenMatchesPendingExport verifies the confirm screen prompt
// mentions the selected export device and shows Confirm/Cancel options.
func TestViewConfirmScreenMatchesPendingExport(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.screen = screenConfirm
	m.pendingAction = actionExportBundle
	m.selectedTarget = &platform.RemovableTarget{Device: "/dev/sdb1"}

	view := m.View()

	for _, want := range []string{
		"Export support bundle",
		"Copy support bundle to /dev/sdb1?",
		"> Confirm",
		"  Cancel",
	} {
		if !strings.Contains(view, want) {
			t.Fatalf("view missing %q\nview:\n%s", want, view)
		}
	}
}
|
||||
|
||||
// TestResultMsgClearsBusyAndPendingAction verifies a resultMsg resets the busy
// flag, the busy title, and the pending action.
func TestResultMsgClearsBusyAndPendingAction(t *testing.T) {
	t.Parallel()

	m := newTestModel()
	m.busy = true
	m.busyTitle = "Export support bundle"
	m.pendingAction = actionExportBundle
	m.screen = screenConfirm

	next, _ := m.Update(resultMsg{title: "Export support bundle", body: "done", back: screenMain})
	got := next.(model)

	if got.busy {
		t.Fatal("busy=true want false")
	}
	if got.busyTitle != "" {
		t.Fatalf("busyTitle=%q want empty", got.busyTitle)
	}
	if got.pendingAction != actionNone {
		t.Fatalf("pendingAction=%q want empty", got.pendingAction)
	}
}
|
||||
|
||||
// TestResultMsgErrorWithoutBodyFormatsCleanly verifies that an error result
// with an empty body produces "ERROR: ..." with no leading blank lines.
func TestResultMsgErrorWithoutBodyFormatsCleanly(t *testing.T) {
	t.Parallel()

	m := newTestModel()

	next, _ := m.Update(resultMsg{title: "Export support bundle", err: assertErr("boom"), back: screenMain})
	got := next.(model)

	if got.body != "ERROR: boom" {
		t.Fatalf("body=%q want %q", got.body, "ERROR: boom")
	}
}
|
||||
|
||||
// assertErr is a minimal error implementation used only by tests in this file.
type assertErr string

func (e assertErr) Error() string { return string(e) }
|
||||
@@ -1,205 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"bee/audit/internal/app"
|
||||
"bee/audit/internal/platform"
|
||||
"bee/audit/internal/runtimeenv"
|
||||
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
)
|
||||
|
||||
// screen identifies which TUI screen is currently displayed.
type screen string

const (
	screenMain             screen = "main"
	screenHealthCheck      screen = "health_check"
	screenSettings         screen = "settings"
	screenNetwork          screen = "network"
	screenInterfacePick    screen = "interface_pick"
	screenServices         screen = "services"
	screenServiceAction    screen = "service_action"
	screenExportTargets    screen = "export_targets"
	screenOutput           screen = "output"
	screenStaticForm       screen = "static_form"
	screenConfirm          screen = "confirm"
	screenNvidiaSATSetup   screen = "nvidia_sat_setup"
	screenNvidiaSATRunning screen = "nvidia_sat_running"
	screenGPUStressRunning screen = "gpu_stress_running"
)
|
||||
|
||||
// actionKind identifies the action awaiting confirmation (or in flight).
// The empty string means no action is pending.
type actionKind string

const (
	actionNone          actionKind = ""
	actionDHCPOne       actionKind = "dhcp_one"
	actionStaticIPv4    actionKind = "static_ipv4"
	actionExportBundle  actionKind = "export_bundle"
	actionRunAll        actionKind = "run_all"
	actionRunMemorySAT  actionKind = "run_memory_sat"
	actionRunStorageSAT actionKind = "run_storage_sat"
	actionRunCPUSAT     actionKind = "run_cpu_sat"
	actionRunAMDGPUSAT  actionKind = "run_amd_gpu_sat"
	actionRunFanStress  actionKind = "run_fan_stress"
)
|
||||
|
||||
// model is the single Bubble Tea model for the TUI. It holds navigation state
// (current/previous screen, cursors), transient busy/output state, static menu
// definitions, data cached from the platform layer, and per-feature state for
// the health-check and GPU test flows. It is used by value, so Update returns
// a mutated copy.
type model struct {
	app         *app.App
	runtimeMode runtimeenv.Mode

	screen     screen
	prevScreen screen // screen to return to from the output screen
	cursor     int
	busy       bool
	busyTitle  string
	title      string
	body       string
	mainMenu     []string
	settingsMenu []string
	networkMenu  []string
	serviceMenu  []string

	services       []string
	interfaces     []platform.InterfaceInfo
	targets        []platform.RemovableTarget
	selectedService string
	selectedIface   string
	selectedTarget  *platform.RemovableTarget
	pendingAction   actionKind

	formFields []formField
	formIndex  int

	// Hardware panel (right column)
	panel       app.HardwarePanelData
	panelFocus  bool
	panelCursor int
	banner      string

	// Health Check screen
	hcSel         [4]bool // component toggles; index order presumably GPU/Memory/Storage/CPU (see confirmBody)
	hcMode        int     // 0=Quick, 1=Standard, 2=Express (see confirmBody)
	hcCursor      int
	hcInitialized bool

	// NVIDIA SAT setup
	nvidiaGPUs      []platform.NvidiaGPU
	nvidiaGPUSel    []bool
	nvidiaDurIdx    int
	nvidiaSATCursor int

	// NVIDIA SAT running
	nvidiaSATCancel  func()
	nvidiaSATAborted bool

	// GPU Platform Stress Test running
	gpuStressCancel  func()
	gpuStressAborted bool

	// SAT verbose progress (CPU / Memory / Storage / AMD GPU)
	progressLines  []string
	progressPrefix string
	progressSince  time.Time

	// Terminal size
	width int
}
|
||||
|
||||
// formField is one labeled text input in the static-IPv4 form.
type formField struct {
	Label string
	Value string
}
|
||||
|
||||
func Run(application *app.App, runtimeMode runtimeenv.Mode) error {
|
||||
options := []tea.ProgramOption{}
|
||||
if runtimeMode != runtimeenv.ModeLiveCD {
|
||||
options = append(options, tea.WithAltScreen())
|
||||
}
|
||||
program := tea.NewProgram(newModel(application, runtimeMode), options...)
|
||||
_, err := program.Run()
|
||||
return err
|
||||
}
|
||||
|
||||
// newModel builds the initial model on the main screen with the static menu
// item lists. Dynamic data (services, interfaces, panel) is loaded later via
// messages.
func newModel(application *app.App, runtimeMode runtimeenv.Mode) model {
	return model{
		app:         application,
		runtimeMode: runtimeMode,
		screen:      screenMain,
		mainMenu: []string{
			"Health Check",
			"Export support bundle",
			"Settings",
			"Exit",
		},
		settingsMenu: []string{
			"Network",
			"Services",
			"Re-run audit",
			"Run self-check",
			"Runtime issues",
			"Audit logs",
			"Check tools",
			"Back",
		},
		networkMenu: []string{
			"Show status",
			"DHCP on all interfaces",
			"DHCP on one interface",
			"Set static IPv4",
			"Back",
		},
		serviceMenu: []string{
			"Status",
			"Restart",
			"Start",
			"Stop",
			"Back",
		},
	}
}
|
||||
|
||||
func (m model) Init() tea.Cmd {
|
||||
return m.refreshSnapshotCmd()
|
||||
}
|
||||
|
||||
// confirmBody returns the (title, prompt) pair shown on the confirm screen for
// the currently pending action.
func (m model) confirmBody() (string, string) {
	switch m.pendingAction {
	case actionExportBundle:
		if m.selectedTarget == nil {
			return "Export support bundle", "No target selected"
		}
		return "Export support bundle", "Copy support bundle to " + m.selectedTarget.Device + "?"
	case actionRunAll:
		// NOTE(review): indexing modes[m.hcMode] assumes hcMode is 0..2 —
		// confirm the health-check screen clamps it.
		modes := []string{"Quick", "Standard", "Express"}
		mode := modes[m.hcMode]
		var sel []string
		// Index order must match the hcSel toggle layout.
		names := []string{"GPU", "Memory", "Storage", "CPU"}
		for i, on := range m.hcSel {
			if on {
				sel = append(sel, names[i])
			}
		}
		if len(sel) == 0 {
			return "Health Check", "No components selected."
		}
		return "Health Check", "Run: " + strings.Join(sel, " + ") + "\nMode: " + mode
	case actionRunMemorySAT:
		return "Memory test", "Run memtester?"
	case actionRunStorageSAT:
		return "Storage test", "Run storage diagnostic pack?"
	case actionRunCPUSAT:
		modes := []string{"Quick (60s)", "Standard (300s)", "Express (900s)"}
		return "CPU test", "Run stress-ng? Mode: " + modes[m.hcMode]
	case actionRunAMDGPUSAT:
		return "AMD GPU test", "Run AMD GPU diagnostic pack (rocm-smi)?"
	case actionRunFanStress:
		modes := []string{"Quick (2×2min)", "Standard (2×5min)", "Express (2×10min)"}
		return "GPU Platform Stress Test", "Two-phase GPU thermal cycling test.\n" +
			"Monitors fans, temps, power — detects throttling.\n" +
			"Mode: " + modes[m.hcMode] + "\n\nAll NVIDIA GPUs will be stressed."
	default:
		return "Confirm", "Proceed?"
	}
}
|
||||
@@ -1,284 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
)
|
||||
|
||||
// Update implements tea.Model. It routes terminal-size changes, key presses,
// and asynchronous job/result messages. The model is a value receiver, so a
// mutated copy is returned.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.WindowSizeMsg:
		m.width = msg.Width
		return m, nil
	case tea.KeyMsg:
		// While a background job runs, only ctrl+c is honored.
		if m.busy {
			if msg.String() == "ctrl+c" {
				return m, tea.Quit
			}
			return m, nil
		}
		next, cmd := m.updateKey(msg)
		nextModel := next.(model)
		if shouldRefreshSnapshot(m, nextModel) {
			return nextModel, tea.Batch(cmd, nextModel.refreshSnapshotCmd())
		}
		return nextModel, cmd
	case satProgressMsg:
		// Keep polling for progress only while a prefix-tagged SAT job is busy.
		if m.busy && m.progressPrefix != "" {
			if len(msg.lines) > 0 {
				m.progressLines = msg.lines
			}
			return m, pollSATProgress(m.progressPrefix, m.progressSince)
		}
		return m, nil
	case snapshotMsg:
		m.banner = msg.banner
		m.panel = msg.panel
		return m, nil
	case resultMsg:
		// A job finished: clear busy/progress state and show the output screen.
		m.busy = false
		m.busyTitle = ""
		m.progressLines = nil
		m.progressPrefix = ""
		m.title = msg.title
		if msg.err != nil {
			body := strings.TrimSpace(msg.body)
			if body == "" {
				m.body = fmt.Sprintf("ERROR: %v", msg.err)
			} else {
				m.body = fmt.Sprintf("%s\n\nERROR: %v", body, msg.err)
			}
		} else {
			m.body = msg.body
		}
		m.pendingAction = actionNone
		// An explicit back screen wins over the screen we were on.
		if msg.back != "" {
			m.prevScreen = msg.back
		} else {
			m.prevScreen = m.screen
		}
		m.screen = screenOutput
		m.cursor = 0
		return m, m.refreshSnapshotCmd()
	case servicesMsg:
		m.busy = false
		m.busyTitle = ""
		if msg.err != nil {
			m.title = "Services"
			m.body = msg.err.Error()
			m.prevScreen = screenSettings
			m.screen = screenOutput
			return m, m.refreshSnapshotCmd()
		}
		m.services = msg.services
		m.screen = screenServices
		m.cursor = 0
		return m, m.refreshSnapshotCmd()
	case interfacesMsg:
		m.busy = false
		m.busyTitle = ""
		if msg.err != nil {
			m.title = "interfaces"
			m.body = msg.err.Error()
			m.prevScreen = screenNetwork
			m.screen = screenOutput
			return m, m.refreshSnapshotCmd()
		}
		m.interfaces = msg.ifaces
		m.screen = screenInterfacePick
		m.cursor = 0
		return m, m.refreshSnapshotCmd()
	case exportTargetsMsg:
		m.busy = false
		m.busyTitle = ""
		if msg.err != nil {
			m.title = "export"
			m.body = msg.err.Error()
			m.prevScreen = screenMain
			m.screen = screenOutput
			return m, m.refreshSnapshotCmd()
		}
		m.targets = msg.targets
		m.screen = screenExportTargets
		m.cursor = 0
		return m, m.refreshSnapshotCmd()
	case nvidiaGPUsMsg:
		return m.handleNvidiaGPUsMsg(msg)
	case nvtopClosedMsg:
		return m, nil
	case gpuStressDoneMsg:
		// Ignore completion after a user abort.
		if m.gpuStressAborted {
			return m, nil
		}
		if m.gpuStressCancel != nil {
			m.gpuStressCancel()
			m.gpuStressCancel = nil
		}
		m.prevScreen = screenHealthCheck
		m.screen = screenOutput
		m.title = msg.title
		if msg.err != nil {
			body := strings.TrimSpace(msg.body)
			if body == "" {
				m.body = fmt.Sprintf("ERROR: %v", msg.err)
			} else {
				m.body = fmt.Sprintf("%s\n\nERROR: %v", body, msg.err)
			}
		} else {
			m.body = msg.body
		}
		return m, m.refreshSnapshotCmd()
	case nvidiaSATDoneMsg:
		// Ignore completion after a user abort.
		if m.nvidiaSATAborted {
			return m, nil
		}
		if m.nvidiaSATCancel != nil {
			m.nvidiaSATCancel()
			m.nvidiaSATCancel = nil
		}
		m.prevScreen = screenHealthCheck
		m.screen = screenOutput
		m.title = msg.title
		if msg.err != nil {
			body := strings.TrimSpace(msg.body)
			if body == "" {
				m.body = fmt.Sprintf("ERROR: %v", msg.err)
			} else {
				m.body = fmt.Sprintf("%s\n\nERROR: %v", body, msg.err)
			}
		} else {
			m.body = msg.body
		}
		return m, m.refreshSnapshotCmd()
	}
	return m, nil
}
|
||||
|
||||
// updateKey dispatches a key press to the handler for the current screen.
// Screens not listed fall through to a global ctrl+c check.
func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
	switch m.screen {
	case screenMain:
		return m.updateMain(msg)
	case screenHealthCheck:
		return m.updateHealthCheck(msg)
	case screenSettings:
		return m.updateMenu(msg, len(m.settingsMenu), m.handleSettingsMenu)
	case screenNetwork:
		return m.updateMenu(msg, len(m.networkMenu), m.handleNetworkMenu)
	case screenServices:
		return m.updateMenu(msg, len(m.services), m.handleServicesMenu)
	case screenServiceAction:
		return m.updateMenu(msg, len(m.serviceMenu), m.handleServiceActionMenu)
	case screenNvidiaSATSetup:
		return m.updateNvidiaSATSetup(msg)
	case screenNvidiaSATRunning:
		return m.updateNvidiaSATRunning(msg)
	case screenGPUStressRunning:
		return m.updateGPUStressRunning(msg)
	case screenExportTargets:
		return m.updateMenu(msg, len(m.targets), m.handleExportTargetsMenu)
	case screenInterfacePick:
		return m.updateMenu(msg, len(m.interfaces), m.handleInterfacePickMenu)
	case screenOutput:
		switch msg.String() {
		case "esc", "enter", "q":
			// Leave the output screen, returning to the recorded screen.
			m.screen = m.prevScreen
			m.body = ""
			m.title = ""
			m.pendingAction = actionNone
			return m, nil
		case "ctrl+c":
			return m, tea.Quit
		}
	case screenStaticForm:
		return m.updateStaticForm(msg)
	case screenConfirm:
		return m.updateConfirm(msg)
	}
	if msg.String() == "ctrl+c" {
		return m, tea.Quit
	}
	return m, nil
}
|
||||
|
||||
// updateMain handles keys on the main (two-column) screen.
func (m model) updateMain(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
	if m.panelFocus {
		return m.updateMainPanel(msg)
	}
	// Switch focus to right panel.
	if (msg.String() == "tab" || msg.String() == "right" || msg.String() == "l") && len(m.panel.Rows) > 0 {
		m.panelFocus = true
		return m, nil
	}
	return m.updateMenu(msg, len(m.mainMenu), m.handleMainMenu)
}
|
||||
|
||||
// updateMainPanel handles keys when right panel has focus.
func (m model) updateMainPanel(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
	switch msg.String() {
	case "up", "k":
		if m.panelCursor > 0 {
			m.panelCursor--
		}
	case "down", "j":
		if m.panelCursor < len(m.panel.Rows)-1 {
			m.panelCursor++
		}
	case "enter":
		// Open the detail view for the highlighted hardware row.
		if m.panelCursor < len(m.panel.Rows) {
			key := m.panel.Rows[m.panelCursor].Key
			m.busy = true
			m.busyTitle = key
			return m, func() tea.Msg {
				r := m.app.ComponentDetailResult(key)
				return resultMsg{title: r.Title, body: r.Body, back: screenMain}
			}
		}
	case "tab", "left", "h", "esc":
		// Return focus to the left menu column.
		m.panelFocus = false
	case "q", "ctrl+c":
		return m, tea.Quit
	}
	return m, nil
}
|
||||
|
||||
// updateMenu implements generic up/down/enter/esc navigation for a menu of
// `size` items, calling onEnter when an item is chosen. Esc routing depends on
// which screen is active.
func (m model) updateMenu(msg tea.KeyMsg, size int, onEnter func() (tea.Model, tea.Cmd)) (tea.Model, tea.Cmd) {
	// Treat an empty menu as a single slot so the cursor math stays valid.
	if size == 0 {
		size = 1
	}
	switch msg.String() {
	case "up", "k":
		if m.cursor > 0 {
			m.cursor--
		}
	case "down", "j":
		if m.cursor < size-1 {
			m.cursor++
		}
	case "enter":
		return onEnter()
	case "esc":
		// Per-screen "back" destinations.
		switch m.screen {
		case screenNetwork, screenServices:
			m.screen = screenSettings
			m.cursor = 0
		case screenSettings:
			m.screen = screenMain
			m.cursor = 0
		case screenServiceAction:
			m.screen = screenServices
			m.cursor = 0
		case screenExportTargets:
			m.screen = screenMain
			m.cursor = 0
		case screenInterfacePick:
			m.screen = screenNetwork
			m.cursor = 0
		}
	case "q", "ctrl+c":
		return m, tea.Quit
	}
	return m, nil
}
|
||||
@@ -1,296 +0,0 @@
|
||||
package tui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"bee/audit/internal/platform"
|
||||
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
"github.com/charmbracelet/lipgloss"
|
||||
)
|
||||
|
||||
// Column widths for two-column main layout.
const leftColWidth = 30

// Status-cell color styles for the hardware panel.
var (
	stylePass   = lipgloss.NewStyle().Foreground(lipgloss.Color("10")) // bright green
	styleFail   = lipgloss.NewStyle().Foreground(lipgloss.Color("9"))  // bright red
	styleCancel = lipgloss.NewStyle().Foreground(lipgloss.Color("11")) // bright yellow
	styleNA     = lipgloss.NewStyle().Foreground(lipgloss.Color("8"))  // dark gray
)
|
||||
|
||||
func colorStatus(status string) string {
|
||||
switch status {
|
||||
case "PASS":
|
||||
return stylePass.Render("PASS")
|
||||
case "FAIL":
|
||||
return styleFail.Render("FAIL")
|
||||
case "CANCEL":
|
||||
return styleCancel.Render("CANC")
|
||||
default:
|
||||
return styleNA.Render("N/A ")
|
||||
}
|
||||
}
|
||||
|
||||
// View implements tea.Model. It renders either the busy overlay (with optional
// streamed progress lines) or the current screen's body, then wraps the result
// with the MOTD banner.
func (m model) View() string {
	var body string
	if m.busy {
		title := "bee"
		if m.busyTitle != "" {
			title = m.busyTitle
		}
		if len(m.progressLines) > 0 {
			// Verbose SAT progress: show the latest lines under the title.
			var b strings.Builder
			fmt.Fprintf(&b, "%s\n\n", title)
			for _, l := range m.progressLines {
				fmt.Fprintf(&b, "  %s\n", l)
			}
			b.WriteString("\n[ctrl+c] quit\n")
			body = b.String()
		} else {
			body = fmt.Sprintf("%s\n\nWorking...\n\n[ctrl+c] quit\n", title)
		}
	} else {
		switch m.screen {
		case screenMain:
			body = renderTwoColumnMain(m)
		case screenHealthCheck:
			body = renderHealthCheck(m)
		case screenSettings:
			body = renderMenu("Settings", "Select action", m.settingsMenu, m.cursor)
		case screenNetwork:
			body = renderMenu("Network", "Select action", m.networkMenu, m.cursor)
		case screenServices:
			body = renderMenu("Services", "Select service", m.services, m.cursor)
		case screenServiceAction:
			body = renderMenu("Service: "+m.selectedService, "Select action", m.serviceMenu, m.cursor)
		case screenExportTargets:
			body = renderMenu("Export support bundle", "Select removable filesystem", renderTargetItems(m.targets), m.cursor)
		case screenInterfacePick:
			body = renderMenu("Interfaces", "Select interface", renderInterfaceItems(m.interfaces), m.cursor)
		case screenStaticForm:
			body = renderForm("Static IPv4: "+m.selectedIface, m.formFields, m.formIndex)
		case screenConfirm:
			title, confirmBody := m.confirmBody()
			body = renderConfirm(title, confirmBody, m.cursor)
		case screenNvidiaSATSetup:
			body = renderNvidiaSATSetup(m)
		case screenNvidiaSATRunning:
			body = renderNvidiaSATRunning()
		case screenGPUStressRunning:
			body = renderGPUStressRunning()
		case screenOutput:
			body = fmt.Sprintf("%s\n\n%s\n\n[enter/esc] back [ctrl+c] quit\n", m.title, strings.TrimSpace(m.body))
		default:
			body = "bee\n"
		}
	}
	return m.renderWithBanner(body)
}
|
||||
|
||||
// renderTwoColumnMain renders the main screen with menu on the left and hardware panel on the right.
func renderTwoColumnMain(m model) string {
	// Left column lines
	leftLines := []string{"bee", ""}
	for i, item := range m.mainMenu {
		pfx := "  "
		if !m.panelFocus && m.cursor == i {
			pfx = "> "
		}
		leftLines = append(leftLines, pfx+item)
	}

	// Right column lines
	rightLines := buildPanelLines(m)

	// Render side by side
	var b strings.Builder
	maxRows := max(len(leftLines), len(rightLines))
	for i := 0; i < maxRows; i++ {
		l := ""
		if i < len(leftLines) {
			l = leftLines[i]
		}
		r := ""
		if i < len(rightLines) {
			r = rightLines[i]
		}
		// lipgloss.Width counts display cells, ignoring ANSI color codes.
		w := lipgloss.Width(l)
		if w < leftColWidth {
			l += strings.Repeat(" ", leftColWidth-w)
		}
		b.WriteString(l + " │ " + r + "\n")
	}

	sep := strings.Repeat("─", leftColWidth) + "─┴─" + strings.Repeat("─", 46)
	b.WriteString(sep + "\n")

	if m.panelFocus {
		b.WriteString("[↑↓] move  [enter] details  [tab/←] menu  [ctrl+c] quit\n")
	} else {
		b.WriteString("[↑↓] move  [enter] select  [tab/→] panel  [ctrl+c] quit\n")
	}

	return b.String()
}
|
||||
|
||||
func buildPanelLines(m model) []string {
|
||||
p := m.panel
|
||||
var lines []string
|
||||
|
||||
for _, h := range p.Header {
|
||||
lines = append(lines, h)
|
||||
}
|
||||
if len(p.Header) > 0 && len(p.Rows) > 0 {
|
||||
lines = append(lines, "")
|
||||
}
|
||||
|
||||
for i, row := range p.Rows {
|
||||
pfx := " "
|
||||
if m.panelFocus && m.panelCursor == i {
|
||||
pfx = "> "
|
||||
}
|
||||
status := colorStatus(row.Status)
|
||||
lines = append(lines, fmt.Sprintf("%s%s %-4s %s", pfx, status, row.Key, row.Detail))
|
||||
}
|
||||
|
||||
return lines
|
||||
}
|
||||
|
||||
func renderTargetItems(targets []platform.RemovableTarget) []string {
|
||||
items := make([]string, 0, len(targets))
|
||||
for _, target := range targets {
|
||||
desc := fmt.Sprintf("%s [%s %s]", target.Device, target.FSType, target.Size)
|
||||
if target.Label != "" {
|
||||
desc += " label=" + target.Label
|
||||
}
|
||||
if target.Mountpoint != "" {
|
||||
desc += " mounted=" + target.Mountpoint
|
||||
}
|
||||
items = append(items, desc)
|
||||
}
|
||||
return items
|
||||
}
|
||||
|
||||
func renderInterfaceItems(interfaces []platform.InterfaceInfo) []string {
|
||||
items := make([]string, 0, len(interfaces))
|
||||
for _, iface := range interfaces {
|
||||
label := iface.Name
|
||||
if len(iface.IPv4) > 0 {
|
||||
label += " [" + strings.Join(iface.IPv4, ", ") + "]"
|
||||
}
|
||||
items = append(items, label)
|
||||
}
|
||||
return items
|
||||
}
|
||||
|
||||
// renderMenu draws a titled, subtitled list of items with a "> " marker on the
// cursor row, followed by the standard navigation key hints. An empty list
// renders "(no items)".
func renderMenu(title, subtitle string, items []string, cursor int) string {
	var b strings.Builder
	fmt.Fprintf(&b, "%s\n\n%s\n\n", title, subtitle)
	if len(items) == 0 {
		b.WriteString("(no items)\n")
	} else {
		for idx, entry := range items {
			marker := "  "
			if idx == cursor {
				marker = "> "
			}
			b.WriteString(marker)
			b.WriteString(entry)
			b.WriteByte('\n')
		}
	}
	b.WriteString("\n[↑/↓] move  [enter] select  [esc] back  [ctrl+c] quit\n")
	return b.String()
}
|
||||
|
||||
func renderForm(title string, fields []formField, idx int) string {
|
||||
var body strings.Builder
|
||||
fmt.Fprintf(&body, "%s\n\n", title)
|
||||
for i, field := range fields {
|
||||
prefix := " "
|
||||
if i == idx {
|
||||
prefix = "> "
|
||||
}
|
||||
fmt.Fprintf(&body, "%s%s: %s\n", prefix, field.Label, field.Value)
|
||||
}
|
||||
body.WriteString("\n[tab/↑/↓] move [enter] next/submit [backspace] delete [esc] cancel\n")
|
||||
return body.String()
|
||||
}
|
||||
|
||||
// renderConfirm draws a yes/no dialog: title, body, then Confirm/Cancel rows
// with a "> " marker on the cursor row, plus key hints.
func renderConfirm(title, body string, cursor int) string {
	var b strings.Builder
	fmt.Fprintf(&b, "%s\n\n%s\n\n", title, body)
	for i, choice := range []string{"Confirm", "Cancel"} {
		if i == cursor {
			b.WriteString("> ")
		} else {
			b.WriteString("  ")
		}
		b.WriteString(choice)
		b.WriteByte('\n')
	}
	b.WriteString("\n[←/→/↑/↓] move  [enter] select  [esc] cancel\n")
	return b.String()
}
|
||||
|
||||
func resultCmd(title, body string, err error, back screen) tea.Cmd {
|
||||
return func() tea.Msg {
|
||||
return resultMsg{title: title, body: body, err: err, back: back}
|
||||
}
|
||||
}
|
||||
|
||||
func (m model) renderWithBanner(body string) string {
|
||||
body = strings.TrimRight(body, "\n")
|
||||
banner := renderBannerModule(m.banner, m.width)
|
||||
if banner == "" {
|
||||
if body == "" {
|
||||
return ""
|
||||
}
|
||||
return body + "\n"
|
||||
}
|
||||
if body == "" {
|
||||
return banner + "\n"
|
||||
}
|
||||
return banner + "\n\n" + body + "\n"
|
||||
}
|
||||
|
||||
// renderBannerModule draws the banner text inside a box-drawing frame labeled
// " MOTD ". The box stretches to the widest banner line, or to the terminal
// width minus margins when that is wider, with a floor of 20 columns.
// Returns "" for an empty banner. No trailing newline.
func renderBannerModule(banner string, width int) string {
	banner = strings.TrimSpace(banner)
	if banner == "" {
		return ""
	}

	lines := strings.Split(banner, "\n")
	contentWidth := 0
	for _, line := range lines {
		// lipgloss.Width counts display cells, not bytes.
		if w := lipgloss.Width(line); w > contentWidth {
			contentWidth = w
		}
	}
	// Widen to the terminal (minus 4 for borders/padding) when available.
	if width > 0 && width-4 > contentWidth {
		contentWidth = width - 4
	}
	if contentWidth < 20 {
		contentWidth = 20
	}

	label := " MOTD "
	// Fill the rest of the top border after the label.
	topFill := contentWidth + 2 - lipgloss.Width(label)
	if topFill < 0 {
		topFill = 0
	}

	var b strings.Builder
	b.WriteString("┌" + label + strings.Repeat("─", topFill) + "┐\n")
	for _, line := range lines {
		b.WriteString("│ " + padRight(line, contentWidth) + " │\n")
	}
	b.WriteString("└" + strings.Repeat("─", contentWidth+2) + "┘")
	return b.String()
}
|
||||
|
||||
func padRight(value string, width int) string {
|
||||
if gap := width - lipgloss.Width(value); gap > 0 {
|
||||
return value + strings.Repeat(" ", gap)
|
||||
}
|
||||
return value
|
||||
}
|
||||
474
audit/internal/webui/api.go
Normal file
474
audit/internal/webui/api.go
Normal file
@@ -0,0 +1,474 @@
|
||||
package webui
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"bee/audit/internal/app"
|
||||
"bee/audit/internal/platform"
|
||||
)
|
||||
|
||||
// ── Job ID counter ────────────────────────────────────────────────────────────

// jobCounter issues process-wide monotonically increasing job sequence numbers.
var jobCounter atomic.Uint64

// newJobID returns a unique job identifier of the form "<prefix>-<n>", where n
// starts at 1 and increases for the lifetime of the process.
func newJobID(prefix string) string {
	seq := jobCounter.Add(1)
	return fmt.Sprintf("%s-%d", prefix, seq)
}
|
||||
|
||||
// ── SSE helpers ───────────────────────────────────────────────────────────────

// sseWrite emits one Server-Sent Event frame ("event: ...\n" when event is
// non-empty, then "data: ...\n\n") and flushes it. Returns false when the
// writer does not support flushing.
func sseWrite(w http.ResponseWriter, event, data string) bool {
	flusher, ok := w.(http.Flusher)
	if !ok {
		return false
	}
	if event != "" {
		fmt.Fprintf(w, "event: %s\n", event)
	}
	fmt.Fprintf(w, "data: %s\n\n", data)
	flusher.Flush()
	return true
}
|
||||
|
||||
func sseStart(w http.ResponseWriter) bool {
|
||||
_, ok := w.(http.Flusher)
|
||||
if !ok {
|
||||
http.Error(w, "streaming not supported", http.StatusInternalServerError)
|
||||
return false
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/event-stream")
|
||||
w.Header().Set("Cache-Control", "no-cache")
|
||||
w.Header().Set("Connection", "keep-alive")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
return true
|
||||
}
|
||||
|
||||
// streamJob streams lines from a jobState to a SSE response.
|
||||
func streamJob(w http.ResponseWriter, r *http.Request, j *jobState) {
|
||||
if !sseStart(w) {
|
||||
return
|
||||
}
|
||||
existing, ch := j.subscribe()
|
||||
for _, line := range existing {
|
||||
sseWrite(w, "", line)
|
||||
}
|
||||
if ch == nil {
|
||||
// Job already finished
|
||||
sseWrite(w, "done", j.err)
|
||||
return
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case line, ok := <-ch:
|
||||
if !ok {
|
||||
sseWrite(w, "done", j.err)
|
||||
return
|
||||
}
|
||||
sseWrite(w, "", line)
|
||||
case <-r.Context().Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// runCmdJob runs an exec.Cmd as a background job, streaming stdout+stderr lines.
|
||||
func runCmdJob(j *jobState, cmd *exec.Cmd) {
|
||||
pr, pw := io.Pipe()
|
||||
cmd.Stdout = pw
|
||||
cmd.Stderr = pw
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
j.finish(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
scanner := bufio.NewScanner(pr)
|
||||
for scanner.Scan() {
|
||||
j.append(scanner.Text())
|
||||
}
|
||||
}()
|
||||
|
||||
err := cmd.Wait()
|
||||
_ = pw.Close()
|
||||
if err != nil {
|
||||
j.finish(err.Error())
|
||||
} else {
|
||||
j.finish("")
|
||||
}
|
||||
}
|
||||
|
||||
// ── Audit ─────────────────────────────────────────────────────────────────────
|
||||
|
||||
func (h *handler) handleAPIAuditRun(w http.ResponseWriter, r *http.Request) {
|
||||
if h.opts.App == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "app not configured")
|
||||
return
|
||||
}
|
||||
id := newJobID("audit")
|
||||
j := globalJobs.create(id)
|
||||
go func() {
|
||||
j.append("Running audit...")
|
||||
result, err := h.opts.App.RunAuditNow(h.opts.RuntimeMode)
|
||||
if err != nil {
|
||||
j.append("ERROR: " + err.Error())
|
||||
j.finish(err.Error())
|
||||
return
|
||||
}
|
||||
for _, line := range strings.Split(result.Body, "\n") {
|
||||
if line != "" {
|
||||
j.append(line)
|
||||
}
|
||||
}
|
||||
j.finish("")
|
||||
}()
|
||||
writeJSON(w, map[string]string{"job_id": id})
|
||||
}
|
||||
|
||||
func (h *handler) handleAPIAuditStream(w http.ResponseWriter, r *http.Request) {
|
||||
id := r.URL.Query().Get("job_id")
|
||||
j, ok := globalJobs.get(id)
|
||||
if !ok {
|
||||
http.Error(w, "job not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
streamJob(w, r, j)
|
||||
}
|
||||
|
||||
// ── SAT ───────────────────────────────────────────────────────────────────────
|
||||
|
||||
func (h *handler) handleAPISATRun(target string) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if h.opts.App == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "app not configured")
|
||||
return
|
||||
}
|
||||
id := newJobID("sat-" + target)
|
||||
j := globalJobs.create(id)
|
||||
|
||||
go func() {
|
||||
j.append(fmt.Sprintf("Starting %s acceptance test...", target))
|
||||
var (
|
||||
archive string
|
||||
err error
|
||||
)
|
||||
|
||||
// Parse optional parameters
|
||||
var body struct {
|
||||
Duration int `json:"duration"`
|
||||
DiagLevel int `json:"diag_level"`
|
||||
GPUIndices []int `json:"gpu_indices"`
|
||||
}
|
||||
body.DiagLevel = 1
|
||||
if r.ContentLength > 0 {
|
||||
_ = json.NewDecoder(r.Body).Decode(&body)
|
||||
}
|
||||
|
||||
switch target {
|
||||
case "nvidia":
|
||||
if len(body.GPUIndices) > 0 || body.DiagLevel > 0 {
|
||||
result, e := h.opts.App.RunNvidiaAcceptancePackWithOptions(
|
||||
context.Background(), "", body.DiagLevel, body.GPUIndices,
|
||||
)
|
||||
if e != nil {
|
||||
err = e
|
||||
} else {
|
||||
archive = result.Body
|
||||
}
|
||||
} else {
|
||||
archive, err = h.opts.App.RunNvidiaAcceptancePack("")
|
||||
}
|
||||
case "memory":
|
||||
archive, err = h.opts.App.RunMemoryAcceptancePack("")
|
||||
case "storage":
|
||||
archive, err = h.opts.App.RunStorageAcceptancePack("")
|
||||
case "cpu":
|
||||
dur := body.Duration
|
||||
if dur <= 0 {
|
||||
dur = 60
|
||||
}
|
||||
archive, err = h.opts.App.RunCPUAcceptancePack("", dur)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
j.append("ERROR: " + err.Error())
|
||||
j.finish(err.Error())
|
||||
return
|
||||
}
|
||||
j.append(fmt.Sprintf("Archive written: %s", archive))
|
||||
j.finish("")
|
||||
}()
|
||||
|
||||
writeJSON(w, map[string]string{"job_id": id})
|
||||
}
|
||||
}
|
||||
|
||||
func (h *handler) handleAPISATStream(w http.ResponseWriter, r *http.Request) {
|
||||
id := r.URL.Query().Get("job_id")
|
||||
j, ok := globalJobs.get(id)
|
||||
if !ok {
|
||||
http.Error(w, "job not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
streamJob(w, r, j)
|
||||
}
|
||||
|
||||
// ── Services ──────────────────────────────────────────────────────────────────
|
||||
|
||||
func (h *handler) handleAPIServicesList(w http.ResponseWriter, r *http.Request) {
|
||||
if h.opts.App == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "app not configured")
|
||||
return
|
||||
}
|
||||
names, err := h.opts.App.ListBeeServices()
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
type serviceInfo struct {
|
||||
Name string `json:"name"`
|
||||
State string `json:"state"`
|
||||
Body string `json:"body"`
|
||||
}
|
||||
result := make([]serviceInfo, 0, len(names))
|
||||
for _, name := range names {
|
||||
state := h.opts.App.ServiceState(name)
|
||||
body, _ := h.opts.App.ServiceStatus(name)
|
||||
result = append(result, serviceInfo{Name: name, State: state, Body: body})
|
||||
}
|
||||
writeJSON(w, result)
|
||||
}
|
||||
|
||||
func (h *handler) handleAPIServicesAction(w http.ResponseWriter, r *http.Request) {
|
||||
if h.opts.App == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "app not configured")
|
||||
return
|
||||
}
|
||||
var req struct {
|
||||
Name string `json:"name"`
|
||||
Action string `json:"action"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid request body")
|
||||
return
|
||||
}
|
||||
var action platform.ServiceAction
|
||||
switch req.Action {
|
||||
case "start":
|
||||
action = platform.ServiceStart
|
||||
case "stop":
|
||||
action = platform.ServiceStop
|
||||
case "restart":
|
||||
action = platform.ServiceRestart
|
||||
default:
|
||||
writeError(w, http.StatusBadRequest, "action must be start|stop|restart")
|
||||
return
|
||||
}
|
||||
result, err := h.opts.App.ServiceActionResult(req.Name, action)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
writeJSON(w, map[string]string{"status": "ok", "output": result.Body})
|
||||
}
|
||||
|
||||
// ── Network ───────────────────────────────────────────────────────────────────
|
||||
|
||||
func (h *handler) handleAPINetworkStatus(w http.ResponseWriter, r *http.Request) {
|
||||
if h.opts.App == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "app not configured")
|
||||
return
|
||||
}
|
||||
ifaces, err := h.opts.App.ListInterfaces()
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
writeJSON(w, map[string]any{
|
||||
"interfaces": ifaces,
|
||||
"default_route": h.opts.App.DefaultRoute(),
|
||||
})
|
||||
}
|
||||
|
||||
func (h *handler) handleAPINetworkDHCP(w http.ResponseWriter, r *http.Request) {
|
||||
if h.opts.App == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "app not configured")
|
||||
return
|
||||
}
|
||||
var req struct {
|
||||
Interface string `json:"interface"`
|
||||
}
|
||||
_ = json.NewDecoder(r.Body).Decode(&req)
|
||||
|
||||
var result app.ActionResult
|
||||
var err error
|
||||
if req.Interface == "" || req.Interface == "all" {
|
||||
result, err = h.opts.App.DHCPAllResult()
|
||||
} else {
|
||||
result, err = h.opts.App.DHCPOneResult(req.Interface)
|
||||
}
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
writeJSON(w, map[string]string{"status": "ok", "output": result.Body})
|
||||
}
|
||||
|
||||
func (h *handler) handleAPINetworkStatic(w http.ResponseWriter, r *http.Request) {
|
||||
if h.opts.App == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "app not configured")
|
||||
return
|
||||
}
|
||||
var req struct {
|
||||
Interface string `json:"interface"`
|
||||
Address string `json:"address"`
|
||||
Prefix string `json:"prefix"`
|
||||
Gateway string `json:"gateway"`
|
||||
DNS []string `json:"dns"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeError(w, http.StatusBadRequest, "invalid request body")
|
||||
return
|
||||
}
|
||||
cfg := platform.StaticIPv4Config{
|
||||
Interface: req.Interface,
|
||||
Address: req.Address,
|
||||
Prefix: req.Prefix,
|
||||
Gateway: req.Gateway,
|
||||
DNS: req.DNS,
|
||||
}
|
||||
result, err := h.opts.App.SetStaticIPv4Result(cfg)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
writeJSON(w, map[string]string{"status": "ok", "output": result.Body})
|
||||
}
|
||||
|
||||
// ── Export ────────────────────────────────────────────────────────────────────
|
||||
|
||||
func (h *handler) handleAPIExportList(w http.ResponseWriter, r *http.Request) {
|
||||
entries, err := listExportFiles(h.opts.ExportDir)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
writeJSON(w, entries)
|
||||
}
|
||||
|
||||
func (h *handler) handleAPIExportBundle(w http.ResponseWriter, r *http.Request) {
|
||||
archive, err := app.BuildSupportBundle(h.opts.ExportDir)
|
||||
if err != nil {
|
||||
writeError(w, http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
writeJSON(w, map[string]string{
|
||||
"status": "ok",
|
||||
"path": archive,
|
||||
"url": "/export/support.tar.gz",
|
||||
})
|
||||
}
|
||||
|
||||
// ── Tools ─────────────────────────────────────────────────────────────────────

// standardTools lists the external diagnostic binaries whose presence is
// checked by the tools endpoint (see handleAPIToolsCheck).
var standardTools = []string{
    "dmidecode", "smartctl", "nvme", "lspci", "ipmitool",
    "nvidia-smi", "memtester", "stress-ng", "nvtop",
    "mstflint", "qrencode",
}
|
||||
|
||||
func (h *handler) handleAPIToolsCheck(w http.ResponseWriter, r *http.Request) {
|
||||
if h.opts.App == nil {
|
||||
writeError(w, http.StatusServiceUnavailable, "app not configured")
|
||||
return
|
||||
}
|
||||
statuses := h.opts.App.CheckTools(standardTools)
|
||||
writeJSON(w, statuses)
|
||||
}
|
||||
|
||||
// ── Preflight ─────────────────────────────────────────────────────────────────
|
||||
|
||||
func (h *handler) handleAPIPreflight(w http.ResponseWriter, r *http.Request) {
|
||||
data, err := loadSnapshot(filepath.Join(h.opts.ExportDir, "runtime-health.json"))
|
||||
if err != nil {
|
||||
writeError(w, http.StatusNotFound, "runtime health not found")
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
w.Header().Set("Cache-Control", "no-store")
|
||||
_, _ = w.Write(data)
|
||||
}
|
||||
|
||||
// ── Metrics SSE ───────────────────────────────────────────────────────────────

// handleAPIMetricsStream pushes a live hardware-metrics sample to the
// client once per second as an SSE "metrics" event (JSON-encoded), and
// feeds the same sample into the handler's in-memory ring buffers that
// back the chart endpoints. The loop ends when the client disconnects or
// the writer stops accepting flushes.
func (h *handler) handleAPIMetricsStream(w http.ResponseWriter, r *http.Request) {
    if !sseStart(w) {
        return
    }
    ticker := time.NewTicker(time.Second)
    defer ticker.Stop()
    for {
        select {
        case <-r.Context().Done():
            // Client disconnected; stop sampling for this stream.
            return
        case <-ticker.C:
            sample := platform.SampleLiveMetrics()

            // Feed server ring buffers. Only the sensor named "CPU" feeds
            // the CPU-temperature ring; other temperature sensors are
            // ignored here.
            // NOTE(review): these scalar rings are pushed without holding
            // ringsMu — presumably they are created once at construction
            // and push is safe concurrently; confirm against the ring type.
            for _, t := range sample.Temps {
                if t.Name == "CPU" {
                    h.ringCPUTemp.push(t.Celsius)
                    break
                }
            }
            h.ringPower.push(sample.PowerW)
            h.ringCPULoad.push(sample.CPULoadPct)
            h.ringMemLoad.push(sample.MemLoadPct)

            // Feed fan ring buffers (grow on first sight). ringsMu guards
            // the slices themselves, which are appended to as new fans and
            // GPUs are first observed.
            h.ringsMu.Lock()
            for i, fan := range sample.Fans {
                for len(h.ringFans) <= i {
                    h.ringFans = append(h.ringFans, newMetricsRing(120))
                    h.fanNames = append(h.fanNames, fan.Name)
                }
                h.ringFans[i].push(float64(fan.RPM))
            }
            // Feed per-GPU ring buffers (grow on first sight). Rings are
            // addressed by the GPU's reported index, so any gap below it
            // is filled with fresh rings.
            for _, gpu := range sample.GPUs {
                idx := gpu.GPUIndex
                for len(h.gpuRings) <= idx {
                    h.gpuRings = append(h.gpuRings, &gpuRings{
                        Temp:    newMetricsRing(120),
                        Util:    newMetricsRing(120),
                        MemUtil: newMetricsRing(120),
                        Power:   newMetricsRing(120),
                    })
                }
                h.gpuRings[idx].Temp.push(gpu.TempC)
                h.gpuRings[idx].Util.push(gpu.UsagePct)
                h.gpuRings[idx].MemUtil.push(gpu.MemUsagePct)
                h.gpuRings[idx].Power.push(gpu.PowerW)
            }
            h.ringsMu.Unlock()

            b, err := json.Marshal(sample)
            if err != nil {
                // Marshal failure is unexpected; skip this tick rather
                // than tear down the stream.
                continue
            }
            if !sseWrite(w, "metrics", string(b)) {
                return
            }
        }
    }
}
|
||||
84
audit/internal/webui/jobs.go
Normal file
84
audit/internal/webui/jobs.go
Normal file
@@ -0,0 +1,84 @@
|
||||
package webui
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// jobState holds the output lines and completion status of an async job.
// All fields are guarded by mu.
type jobState struct {
    lines []string
    done  bool
    err   string
    mu    sync.Mutex
    // subs is a list of channels that receive new lines as they arrive.
    subs []chan string
}

// append records a new output line and fans it out to every subscriber.
// Subscribers whose buffers are full simply miss the line.
func (j *jobState) append(line string) {
    j.mu.Lock()
    defer j.mu.Unlock()
    j.lines = append(j.lines, line)
    for _, sub := range j.subs {
        select {
        case sub <- line:
        default: // subscriber buffer full; drop rather than block
        }
    }
}

// finish marks the job complete with an optional error message and
// closes every subscriber channel to signal completion.
func (j *jobState) finish(errMsg string) {
    j.mu.Lock()
    defer j.mu.Unlock()
    j.done, j.err = true, errMsg
    for _, sub := range j.subs {
        close(sub)
    }
    j.subs = nil
}

// subscribe returns a snapshot of the lines emitted so far plus a channel
// carrying all future lines. The channel is nil when the job has already
// finished.
func (j *jobState) subscribe() ([]string, <-chan string) {
    j.mu.Lock()
    defer j.mu.Unlock()
    snapshot := append([]string(nil), j.lines...)
    if j.done {
        return snapshot, nil
    }
    sub := make(chan string, 256)
    j.subs = append(j.subs, sub)
    return snapshot, sub
}
|
||||
|
||||
// jobManager manages async jobs identified by string IDs.
|
||||
type jobManager struct {
|
||||
mu sync.Mutex
|
||||
jobs map[string]*jobState
|
||||
}
|
||||
|
||||
var globalJobs = &jobManager{jobs: make(map[string]*jobState)}
|
||||
|
||||
func (m *jobManager) create(id string) *jobState {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
j := &jobState{}
|
||||
m.jobs[id] = j
|
||||
// Schedule cleanup after 30 minutes
|
||||
go func() {
|
||||
time.Sleep(30 * time.Minute)
|
||||
m.mu.Lock()
|
||||
delete(m.jobs, id)
|
||||
m.mu.Unlock()
|
||||
}()
|
||||
return j
|
||||
}
|
||||
|
||||
func (m *jobManager) get(id string) (*jobState, bool) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
j, ok := m.jobs[id]
|
||||
return j, ok
|
||||
}
|
||||
680
audit/internal/webui/pages.go
Normal file
680
audit/internal/webui/pages.go
Normal file
@@ -0,0 +1,680 @@
|
||||
package webui
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ── Layout ────────────────────────────────────────────────────────────────────

// layoutHead returns the shared HTML document prefix for every page:
// doctype, <head> with the (HTML-escaped) title, the full embedded
// stylesheet, and the opening <body> tag. Callers append the sidebar,
// page content, and closing tags.
func layoutHead(title string) string {
    return `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width,initial-scale=1">
<title>` + html.EscapeString(title) + `</title>
<style>
*{box-sizing:border-box;margin:0;padding:0}
body{font-family:system-ui,-apple-system,sans-serif;background:#0f1117;color:#e2e8f0;display:flex;min-height:100vh}
a{color:inherit;text-decoration:none}
/* Sidebar */
.sidebar{width:200px;min-height:100vh;background:#161b25;border-right:1px solid #252d3d;flex-shrink:0;display:flex;flex-direction:column}
.sidebar-logo{padding:20px 16px 12px;font-size:20px;font-weight:700;color:#60a5fa;letter-spacing:-0.5px}
.sidebar-logo span{color:#94a3b8;font-weight:400;font-size:13px;display:block;margin-top:2px}
.nav{flex:1}
.nav-item{display:block;padding:10px 16px;color:#94a3b8;font-size:14px;border-left:3px solid transparent;transition:all .15s}
.nav-item:hover,.nav-item.active{background:#1e2535;color:#e2e8f0;border-left-color:#3b82f6}
.nav-icon{margin-right:8px;opacity:.7}
/* Content */
.main{flex:1;display:flex;flex-direction:column;overflow:auto}
.topbar{padding:16px 24px;border-bottom:1px solid #1e2535;display:flex;align-items:center;gap:12px}
.topbar h1{font-size:18px;font-weight:600}
.content{padding:24px;flex:1}
/* Cards */
.card{background:#161b25;border:1px solid #1e2535;border-radius:10px;margin-bottom:16px}
.card-head{padding:14px 18px;border-bottom:1px solid #1e2535;font-weight:600;font-size:14px;display:flex;align-items:center;gap:8px}
.card-body{padding:18px}
/* Buttons */
.btn{display:inline-flex;align-items:center;gap:6px;padding:8px 16px;border-radius:6px;font-size:13px;font-weight:600;cursor:pointer;border:none;transition:background .15s}
.btn-primary{background:#3b82f6;color:#fff}.btn-primary:hover{background:#2563eb}
.btn-danger{background:#ef4444;color:#fff}.btn-danger:hover{background:#dc2626}
.btn-secondary{background:#1e2535;color:#94a3b8;border:1px solid #252d3d}.btn-secondary:hover{background:#252d3d;color:#e2e8f0}
.btn-sm{padding:5px 10px;font-size:12px}
/* Tables */
table{width:100%;border-collapse:collapse;font-size:13px}
th{text-align:left;padding:8px 12px;color:#64748b;font-weight:600;border-bottom:1px solid #1e2535}
td{padding:8px 12px;border-bottom:1px solid #1a2030}
tr:last-child td{border:none}
tr:hover td{background:#1a2030}
/* Status badges */
.badge{display:inline-block;padding:2px 8px;border-radius:999px;font-size:11px;font-weight:600}
.badge-ok{background:#166534;color:#86efac}
.badge-warn{background:#713f12;color:#fde68a}
.badge-err{background:#7f1d1d;color:#fca5a5}
.badge-unknown{background:#1e293b;color:#64748b}
/* Output terminal */
.terminal{background:#0a0d14;border:1px solid #1e2535;border-radius:8px;padding:14px;font-family:monospace;font-size:12px;color:#86efac;max-height:400px;overflow-y:auto;white-space:pre-wrap;word-break:break-all}
/* Forms */
.form-row{margin-bottom:14px}
.form-row label{display:block;font-size:12px;color:#64748b;margin-bottom:5px}
.form-row input,.form-row select{width:100%;padding:8px 10px;background:#0f1117;border:1px solid #252d3d;border-radius:6px;color:#e2e8f0;font-size:13px;outline:none}
.form-row input:focus,.form-row select:focus{border-color:#3b82f6}
.chart-legend{font-size:11px;color:#64748b;padding:4px 0}
/* Grid */
.grid2{display:grid;grid-template-columns:1fr 1fr;gap:16px}
.grid3{display:grid;grid-template-columns:1fr 1fr 1fr;gap:16px}
@media(max-width:900px){.grid2,.grid3{grid-template-columns:1fr}}
/* iframe viewer */
.viewer-frame{width:100%;height:calc(100vh - 160px);border:0;border-radius:8px;background:#1a1f2e}
/* Alerts */
.alert{padding:10px 14px;border-radius:8px;font-size:13px;margin-bottom:14px}
.alert-info{background:#1e3a5f;border:1px solid #2563eb;color:#93c5fd}
.alert-warn{background:#451a03;border:1px solid #d97706;color:#fde68a}
</style>
</head>
<body>
`
}
|
||||
|
||||
func layoutNav(active string) string {
|
||||
items := []struct{ id, icon, label string }{
|
||||
{"dashboard", "", "Dashboard"},
|
||||
{"metrics", "", "Metrics"},
|
||||
{"tests", "", "Acceptance Tests"},
|
||||
{"burn-in", "", "Burn-in"},
|
||||
{"network", "", "Network"},
|
||||
{"services", "", "Services"},
|
||||
{"export", "", "Export"},
|
||||
{"tools", "", "Tools"},
|
||||
}
|
||||
var b strings.Builder
|
||||
b.WriteString(`<aside class="sidebar">`)
|
||||
b.WriteString(`<div class="sidebar-logo">bee<span>hardware audit</span></div>`)
|
||||
b.WriteString(`<nav class="nav">`)
|
||||
for _, item := range items {
|
||||
cls := "nav-item"
|
||||
if item.id == active {
|
||||
cls += " active"
|
||||
}
|
||||
href := "/"
|
||||
if item.id != "dashboard" {
|
||||
href = "/" + item.id
|
||||
}
|
||||
b.WriteString(fmt.Sprintf(`<a class="%s" href="%s">%s</a>`,
|
||||
cls, href, item.label))
|
||||
}
|
||||
b.WriteString(`</nav></aside>`)
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// renderPage dispatches to the appropriate page renderer.
|
||||
func renderPage(page string, opts HandlerOptions) string {
|
||||
var pageID, title, body string
|
||||
switch page {
|
||||
case "dashboard", "":
|
||||
pageID = "dashboard"
|
||||
title = "Dashboard"
|
||||
body = renderDashboard(opts)
|
||||
case "metrics":
|
||||
pageID = "metrics"
|
||||
title = "Live Metrics"
|
||||
body = renderMetrics()
|
||||
case "tests":
|
||||
pageID = "tests"
|
||||
title = "Acceptance Tests"
|
||||
body = renderTests()
|
||||
case "burn-in":
|
||||
pageID = "burn-in"
|
||||
title = "Burn-in Tests"
|
||||
body = renderBurnIn()
|
||||
case "network":
|
||||
pageID = "network"
|
||||
title = "Network"
|
||||
body = renderNetwork()
|
||||
case "services":
|
||||
pageID = "services"
|
||||
title = "Services"
|
||||
body = renderServices()
|
||||
case "export":
|
||||
pageID = "export"
|
||||
title = "Export"
|
||||
body = renderExport(opts.ExportDir)
|
||||
case "tools":
|
||||
pageID = "tools"
|
||||
title = "Tools"
|
||||
body = renderTools()
|
||||
default:
|
||||
pageID = "dashboard"
|
||||
title = "Not Found"
|
||||
body = `<div class="alert alert-warn">Page not found.</div>`
|
||||
}
|
||||
|
||||
return layoutHead(opts.Title+" — "+title) +
|
||||
layoutNav(pageID) +
|
||||
`<div class="main"><div class="topbar"><h1>` + html.EscapeString(title) + `</h1></div><div class="content">` +
|
||||
body +
|
||||
`</div></div></body></html>`
|
||||
}
|
||||
|
||||
// ── Dashboard ─────────────────────────────────────────────────────────────────
|
||||
|
||||
func renderDashboard(opts HandlerOptions) string {
|
||||
var b strings.Builder
|
||||
b.WriteString(`<div class="grid2">`)
|
||||
// Left: health summary
|
||||
b.WriteString(`<div>`)
|
||||
b.WriteString(renderHealthCard(opts))
|
||||
b.WriteString(`</div>`)
|
||||
// Right: quick actions
|
||||
b.WriteString(`<div>`)
|
||||
b.WriteString(`<div class="card"><div class="card-head">Quick Actions</div><div class="card-body">`)
|
||||
b.WriteString(`<a class="btn btn-primary" href="/export/support.tar.gz" style="display:block;margin-bottom:10px">⬇ Download Support Bundle</a>`)
|
||||
b.WriteString(`<a class="btn btn-secondary" href="/audit.json" style="display:block;margin-bottom:10px" target="_blank">📄 Open audit.json</a>`)
|
||||
b.WriteString(`<a class="btn btn-secondary" href="/export/" style="display:block">📁 Browse Export Files</a>`)
|
||||
b.WriteString(`<div style="margin-top:14px"><button class="btn btn-secondary" onclick="runAudit()">▶ Re-run Audit</button></div>`)
|
||||
b.WriteString(`</div></div>`)
|
||||
b.WriteString(`</div>`)
|
||||
b.WriteString(`</div>`)
|
||||
// Audit viewer iframe
|
||||
b.WriteString(`<div class="card"><div class="card-head">Audit Snapshot</div><div class="card-body" style="padding:0">`)
|
||||
b.WriteString(`<iframe class="viewer-frame" src="/viewer" loading="eager" referrerpolicy="same-origin"></iframe>`)
|
||||
b.WriteString(`</div></div>`)
|
||||
|
||||
// Audit run output div
|
||||
b.WriteString(`<div id="audit-output" style="display:none" class="card"><div class="card-head">Audit Output</div><div class="card-body"><div id="audit-terminal" class="terminal"></div></div></div>`)
|
||||
|
||||
b.WriteString(`<script>
|
||||
function runAudit() {
|
||||
document.getElementById('audit-output').style.display='block';
|
||||
const term = document.getElementById('audit-terminal');
|
||||
term.textContent = 'Starting audit...\n';
|
||||
fetch('/api/audit/run', {method:'POST'})
|
||||
.then(r => r.json())
|
||||
.then(d => {
|
||||
const es = new EventSource('/api/audit/stream?job_id=' + d.job_id);
|
||||
es.onmessage = e => { term.textContent += e.data + '\n'; term.scrollTop = term.scrollHeight; };
|
||||
es.addEventListener('done', e => { es.close(); term.textContent += (e.data ? '\\nERROR: ' + e.data : '\\nDone.') + '\n'; location.reload(); });
|
||||
});
|
||||
}
|
||||
</script>`)
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func renderHealthCard(opts HandlerOptions) string {
|
||||
data, err := loadSnapshot(filepath.Join(opts.ExportDir, "runtime-health.json"))
|
||||
if err != nil {
|
||||
return `<div class="card"><div class="card-head">Runtime Health</div><div class="card-body"><span class="badge badge-unknown">No data</span></div></div>`
|
||||
}
|
||||
var health map[string]any
|
||||
if err := json.Unmarshal(data, &health); err != nil {
|
||||
return `<div class="card"><div class="card-head">Runtime Health</div><div class="card-body"><span class="badge badge-err">Parse error</span></div></div>`
|
||||
}
|
||||
status := fmt.Sprintf("%v", health["status"])
|
||||
badge := "badge-ok"
|
||||
if status == "PARTIAL" {
|
||||
badge = "badge-warn"
|
||||
} else if status == "FAIL" || status == "FAILED" {
|
||||
badge = "badge-err"
|
||||
}
|
||||
var b strings.Builder
|
||||
b.WriteString(`<div class="card"><div class="card-head">Runtime Health</div><div class="card-body">`)
|
||||
b.WriteString(fmt.Sprintf(`<div style="margin-bottom:10px"><span class="badge %s">%s</span></div>`, badge, html.EscapeString(status)))
|
||||
if issues, ok := health["issues"].([]any); ok && len(issues) > 0 {
|
||||
b.WriteString(`<div style="font-size:12px;color:#f87171">Issues:<br>`)
|
||||
for _, issue := range issues {
|
||||
if m, ok := issue.(map[string]any); ok {
|
||||
b.WriteString(html.EscapeString(fmt.Sprintf("%v: %v", m["code"], m["message"])) + "<br>")
|
||||
}
|
||||
}
|
||||
b.WriteString(`</div>`)
|
||||
}
|
||||
b.WriteString(`</div></div>`)
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// ── Metrics ───────────────────────────────────────────────────────────────────

// renderMetrics returns the live-metrics page: a server chart card, a
// container into which per-GPU chart cards are added dynamically, and
// inline JS that (a) refreshes the SVG chart images every 2 seconds via
// cache-busting query strings and (b) consumes /api/metrics/stream SSE
// events to populate the numeric tables.
func renderMetrics() string {
    return `<p style="color:#64748b;font-size:13px;margin-bottom:16px">Live metrics — updated every 2 seconds. Charts use go-analyze/charts (grafana theme).</p>

<div class="card" style="margin-bottom:16px">
<div class="card-head">Server</div>
<div class="card-body" style="padding:8px">
<img id="chart-server" src="/api/metrics/chart/server.svg" style="width:100%;display:block;border-radius:6px" alt="Server metrics">
<div id="sys-table" style="margin-top:8px;font-size:12px"></div>
</div>
</div>

<div id="gpu-charts"></div>

<script>
let knownGPUs = [];

function refreshCharts() {
  const t = '?t=' + Date.now();
  const srv = document.getElementById('chart-server');
  if (srv) srv.src = srv.src.split('?')[0] + t;
  knownGPUs.forEach(idx => {
    const el = document.getElementById('chart-gpu-' + idx);
    if (el) el.src = el.src.split('?')[0] + t;
  });
}
setInterval(refreshCharts, 2000);

const es = new EventSource('/api/metrics/stream');
es.addEventListener('metrics', e => {
  const d = JSON.parse(e.data);

  // Add GPU chart cards as GPUs appear
  (d.gpus||[]).forEach(g => {
    if (knownGPUs.includes(g.index)) return;
    knownGPUs.push(g.index);
    const div = document.createElement('div');
    div.className = 'card';
    div.style.marginBottom = '16px';
    div.innerHTML = '<div class="card-head">GPU ' + g.index + '</div>' +
      '<div class="card-body" style="padding:8px">' +
      '<img id="chart-gpu-' + g.index + '" src="/api/metrics/chart/gpu/' + g.index + '.svg" style="width:100%;display:block;border-radius:6px" alt="GPU ' + g.index + '">' +
      '<div id="gpu-table-' + g.index + '" style="margin-top:8px;font-size:12px"></div>' +
      '</div>';
    document.getElementById('gpu-charts').appendChild(div);
  });

  // Update numeric tables
  let sysHTML = '';
  const cpuTemp = (d.temps||[]).find(t => t.name==='CPU');
  if (cpuTemp) sysHTML += '<tr><td>CPU Temp</td><td>'+cpuTemp.celsius.toFixed(1)+'°C</td></tr>';
  if (d.cpu_load_pct) sysHTML += '<tr><td>CPU Load</td><td>'+d.cpu_load_pct.toFixed(1)+'%</td></tr>';
  if (d.mem_load_pct) sysHTML += '<tr><td>Mem Load</td><td>'+d.mem_load_pct.toFixed(1)+'%</td></tr>';
  (d.fans||[]).forEach(f => sysHTML += '<tr><td>'+f.name+'</td><td>'+f.rpm+' RPM</td></tr>');
  if (d.power_w) sysHTML += '<tr><td>Power</td><td>'+d.power_w.toFixed(0)+' W</td></tr>';
  const st = document.getElementById('sys-table');
  if (st) st.innerHTML = sysHTML ? '<table>'+sysHTML+'</table>' : '<p style="color:#64748b">No sensor data (ipmitool/sensors required)</p>';

  (d.gpus||[]).forEach(g => {
    const t = document.getElementById('gpu-table-' + g.index);
    if (!t) return;
    t.innerHTML = '<table>' +
      '<tr><td>Temp</td><td>'+g.temp_c+'°C</td>' +
      '<td>Load</td><td>'+g.usage_pct+'%</td>' +
      '<td>Mem</td><td>'+g.mem_usage_pct+'%</td>' +
      '<td>Power</td><td>'+g.power_w+' W</td></tr></table>';
  });
});
es.onerror = () => {};
</script>`
}
|
||||
|
||||
// ── Acceptance Tests ──────────────────────────────────────────────────────────

// renderTests returns the acceptance-tests page: one card per test target
// (nvidia/memory/storage/cpu, built by renderSATCard), a shared output
// terminal, and inline JS that POSTs the per-target options to
// /api/sat/<target>/run and then follows the returned job's SSE stream.
func renderTests() string {
    return `<p style="color:#64748b;font-size:13px;margin-bottom:16px">Run hardware acceptance tests and view results.</p>
<div class="grid2">
` + renderSATCard("nvidia", "NVIDIA GPU", `<div class="form-row"><label>Diag Level</label><select id="sat-nvidia-level"><option value="1">Level 1 — Quick</option><option value="2">Level 2 — Standard</option><option value="3">Level 3 — Extended</option><option value="4">Level 4 — Full</option></select></div>`) +
        renderSATCard("memory", "Memory", "") +
        renderSATCard("storage", "Storage", "") +
        renderSATCard("cpu", "CPU", `<div class="form-row"><label>Duration (seconds)</label><input type="number" id="sat-cpu-dur" value="60" min="10"></div>`) +
        `</div>
<div id="sat-output" style="display:none;margin-top:16px" class="card">
<div class="card-head">Test Output <span id="sat-title"></span></div>
<div class="card-body"><div id="sat-terminal" class="terminal"></div></div>
</div>
<script>
let satES = null;
function runSAT(target) {
  if (satES) satES.close();
  const body = {};
  if (target === 'nvidia') body.diag_level = parseInt(document.getElementById('sat-nvidia-level').value)||1;
  if (target === 'cpu') body.duration = parseInt(document.getElementById('sat-cpu-dur').value)||60;
  document.getElementById('sat-output').style.display='block';
  document.getElementById('sat-title').textContent = '— ' + target;
  const term = document.getElementById('sat-terminal');
  term.textContent = 'Starting ' + target + ' test...\n';
  fetch('/api/sat/'+target+'/run', {method:'POST',headers:{'Content-Type':'application/json'},body:JSON.stringify(body)})
    .then(r => r.json())
    .then(d => {
      satES = new EventSource('/api/sat/stream?job_id='+d.job_id);
      satES.onmessage = e => { term.textContent += e.data+'\n'; term.scrollTop=term.scrollHeight; };
      satES.addEventListener('done', e => { satES.close(); term.textContent += (e.data ? '\nERROR: '+e.data : '\nCompleted.')+'\n'; });
    });
}
</script>`
}
|
||||
|
||||
// renderSATCard builds a single acceptance-test card: a header carrying the
// human-readable label, any extra form controls supplied by the caller, and
// a run button wired to the page-level runSAT(id) script function.
func renderSATCard(id, label, extra string) string {
	card := `<div class="card"><div class="card-head">` + label + `</div><div class="card-body">` + extra
	card += `<button class="btn btn-primary" onclick="runSAT('` + id + `')">▶ Run Test</button></div></div>`
	return card
}
|
||||
|
||||
// ── Burn-in ───────────────────────────────────────────────────────────────────
|
||||
|
||||
// renderBurnIn returns the HTML fragment for the burn-in page: duration
// pickers for long-running GPU and CPU stress runs plus an output terminal.
// It reuses the SAT run/stream API (/api/sat/<target>/run and
// /api/sat/stream) rather than a dedicated burn-in endpoint.
func renderBurnIn() string {
	return `<p style="color:#64748b;font-size:13px;margin-bottom:16px">Long-running GPU and system stress tests. Check <a href="/metrics" style="color:#60a5fa">Metrics</a> page for live telemetry.</p>
<div class="grid2">
<div class="card"><div class="card-head">GPU Platform Stress</div><div class="card-body">
<div class="form-row"><label>Duration</label><select id="bi-dur"><option value="600">10 minutes</option><option value="3600">1 hour</option><option value="28800">8 hours</option><option value="86400">24 hours</option></select></div>
<button class="btn btn-primary" onclick="runBurnIn('nvidia')">▶ Start GPU Stress</button>
</div></div>
<div class="card"><div class="card-head">CPU Stress</div><div class="card-body">
<div class="form-row"><label>Duration (seconds)</label><input type="number" id="bi-cpu-dur" value="300" min="60"></div>
<button class="btn btn-primary" onclick="runBurnIn('cpu')">▶ Start CPU Stress</button>
</div></div>
</div>
<div id="bi-output" style="display:none;margin-top:16px" class="card">
<div class="card-head">Output</div>
<div class="card-body"><div id="bi-terminal" class="terminal"></div></div>
</div>
<script>
let biES = null;
function runBurnIn(target) {
if (biES) biES.close();
const body = {};
if (target === 'nvidia') body.duration = parseInt(document.getElementById('bi-dur').value)||600;
if (target === 'cpu') body.duration = parseInt(document.getElementById('bi-cpu-dur').value)||300;
document.getElementById('bi-output').style.display='block';
const term = document.getElementById('bi-terminal');
term.textContent = 'Starting ' + target + ' burn-in...\n';
fetch('/api/sat/'+target+'/run', {method:'POST',headers:{'Content-Type':'application/json'},body:JSON.stringify(body)})
.then(r => r.json())
.then(d => {
biES = new EventSource('/api/sat/stream?job_id='+d.job_id);
biES.onmessage = e => { term.textContent += e.data+'\n'; term.scrollTop=term.scrollHeight; };
biES.addEventListener('done', e => { biES.close(); term.textContent += (e.data ? '\nERROR: '+e.data : '\nCompleted.')+'\n'; });
});
}
</script>`
}
|
||||
|
||||
// ── Network ───────────────────────────────────────────────────────────────────
|
||||
|
||||
// renderNetwork returns the HTML fragment for the network page: a live
// interface table (refreshed from GET /api/network) and two forms that POST
// to /api/network/dhcp and /api/network/static. The interface rows read
// exported field names (i.Name, i.State, i.IPv4), so the backing Go struct
// is evidently marshalled without json tags.
func renderNetwork() string {
	return `<div class="card"><div class="card-head">Network Interfaces</div><div class="card-body">
<div id="iface-table"><p style="color:#64748b;font-size:13px">Loading...</p></div>
</div></div>
<div class="grid2">
<div class="card"><div class="card-head">DHCP</div><div class="card-body">
<div class="form-row"><label>Interface (leave empty for all)</label><input type="text" id="dhcp-iface" placeholder="eth0"></div>
<button class="btn btn-primary" onclick="runDHCP()">▶ Run DHCP</button>
<div id="dhcp-out" style="margin-top:10px;font-size:12px;color:#86efac"></div>
</div></div>
<div class="card"><div class="card-head">Static IPv4</div><div class="card-body">
<div class="form-row"><label>Interface</label><input type="text" id="st-iface" placeholder="eth0"></div>
<div class="form-row"><label>Address</label><input type="text" id="st-addr" placeholder="192.168.1.100"></div>
<div class="form-row"><label>Prefix length</label><input type="text" id="st-prefix" placeholder="24"></div>
<div class="form-row"><label>Gateway</label><input type="text" id="st-gw" placeholder="192.168.1.1"></div>
<div class="form-row"><label>DNS (comma-separated)</label><input type="text" id="st-dns" placeholder="8.8.8.8,8.8.4.4"></div>
<button class="btn btn-primary" onclick="setStatic()">Apply Static IP</button>
<div id="static-out" style="margin-top:10px;font-size:12px;color:#86efac"></div>
</div></div>
</div>
<script>
function loadNetwork() {
fetch('/api/network').then(r=>r.json()).then(d => {
const rows = (d.interfaces||[]).map(i =>
'<tr><td>'+i.Name+'</td><td><span class="badge '+(i.State==='up'?'badge-ok':'badge-warn')+'">'+i.State+'</span></td><td>'+(i.IPv4||[]).join(', ')+'</td></tr>'
).join('');
document.getElementById('iface-table').innerHTML =
'<table><tr><th>Interface</th><th>State</th><th>Addresses</th></tr>'+rows+'</table>' +
(d.default_route ? '<p style="font-size:12px;color:#64748b;margin-top:8px">Default route: '+d.default_route+'</p>' : '');
});
}
function runDHCP() {
const iface = document.getElementById('dhcp-iface').value.trim();
fetch('/api/network/dhcp',{method:'POST',headers:{'Content-Type':'application/json'},body:JSON.stringify({interface:iface||'all'})})
.then(r=>r.json()).then(d => {
document.getElementById('dhcp-out').textContent = d.output || d.error || 'Done.';
loadNetwork();
});
}
function setStatic() {
const dns = document.getElementById('st-dns').value.split(',').map(s=>s.trim()).filter(Boolean);
fetch('/api/network/static',{method:'POST',headers:{'Content-Type':'application/json'},body:JSON.stringify({
interface: document.getElementById('st-iface').value,
address: document.getElementById('st-addr').value,
prefix: document.getElementById('st-prefix').value,
gateway: document.getElementById('st-gw').value,
dns: dns,
})}).then(r=>r.json()).then(d => {
document.getElementById('static-out').textContent = d.output || d.error || 'Done.';
loadNetwork();
});
}
loadNetwork();
</script>`
}
|
||||
|
||||
// ── Services ──────────────────────────────────────────────────────────────────
|
||||
|
||||
// renderServices returns the HTML fragment for the services page: a table of
// bee systemd units (GET /api/services) with start/stop/restart buttons that
// POST to /api/services/action, plus a collapsible per-unit body viewer.
//
// Fix: the unit body is injected into a <pre> via innerHTML, but the
// original escaping calls were no-ops (`replace(/</g,'<')` replaces '<'
// with itself — almost certainly entity-decoded somewhere along the way),
// leaving the markup-sensitive characters unescaped. Escape '&' first, then
// '<' and '>', so arbitrary unit-file content cannot inject markup.
func renderServices() string {
	return `<div class="card"><div class="card-head">Bee Services <button class="btn btn-sm btn-secondary" onclick="loadServices()" style="margin-left:auto">↻ Refresh</button></div>
<div class="card-body">
<div id="svc-table"><p style="color:#64748b;font-size:13px">Loading...</p></div>
</div></div>
<div id="svc-out" style="display:none;margin-top:8px" class="card">
<div class="card-head">Output</div>
<div class="card-body" style="padding:10px"><div id="svc-terminal" class="terminal" style="max-height:150px"></div></div>
</div>
<script>
function loadServices() {
fetch('/api/services').then(r=>r.json()).then(svcs => {
const rows = svcs.map(s => {
const st = s.state||'unknown';
const badge = st==='active' ? 'badge-ok' : st==='failed' ? 'badge-err' : 'badge-warn';
const id = 'svc-body-'+s.name.replace(/[^a-z0-9]/g,'-');
const body = (s.body||'').replace(/&/g,'&amp;').replace(/</g,'&lt;').replace(/>/g,'&gt;');
return '<tr>' +
'<td style="white-space:nowrap">'+s.name+'</td>' +
'<td style="white-space:nowrap"><span class="badge '+badge+'" style="cursor:pointer" onclick="toggleBody(\''+id+'\')">'+st+' ▾</span>' +
'<div id="'+id+'" style="display:none;margin-top:6px"><pre style="font-size:11px;white-space:pre-wrap;word-break:break-all;max-height:200px;overflow-y:auto;background:#0a0d14;padding:8px;border-radius:6px;color:#94a3b8">'+body+'</pre></div>' +
'</td>' +
'<td style="white-space:nowrap">' +
'<button class="btn btn-sm btn-secondary" onclick="svcAction(\''+s.name+'\',\'start\')">Start</button> ' +
'<button class="btn btn-sm btn-secondary" onclick="svcAction(\''+s.name+'\',\'stop\')">Stop</button> ' +
'<button class="btn btn-sm btn-secondary" onclick="svcAction(\''+s.name+'\',\'restart\')">Restart</button>' +
'</td></tr>';
}).join('');
document.getElementById('svc-table').innerHTML =
'<table><tr><th>Service</th><th>Status</th><th>Actions</th></tr>'+rows+'</table>';
});
}
function toggleBody(id) {
const el = document.getElementById(id);
if (el) el.style.display = el.style.display==='none' ? 'block' : 'none';
}
function svcAction(name, action) {
fetch('/api/services/action',{method:'POST',headers:{'Content-Type':'application/json'},body:JSON.stringify({name,action})})
.then(r=>r.json()).then(d => {
document.getElementById('svc-out').style.display='block';
document.getElementById('svc-terminal').textContent = d.output || d.error || action+' '+name;
setTimeout(loadServices, 1000);
});
}
loadServices();
</script>`
}
|
||||
|
||||
// ── Export ────────────────────────────────────────────────────────────────────
|
||||
|
||||
func renderExport(exportDir string) string {
|
||||
entries, _ := listExportFiles(exportDir)
|
||||
var rows strings.Builder
|
||||
for _, e := range entries {
|
||||
rows.WriteString(fmt.Sprintf(`<tr><td><a href="/export/file?path=%s" target="_blank">%s</a></td></tr>`,
|
||||
url.QueryEscape(e), html.EscapeString(e)))
|
||||
}
|
||||
if len(entries) == 0 {
|
||||
rows.WriteString(`<tr><td style="color:#64748b">No export files found.</td></tr>`)
|
||||
}
|
||||
return `<div class="grid2">
|
||||
<div class="card"><div class="card-head">Support Bundle</div><div class="card-body">
|
||||
<p style="font-size:13px;color:#94a3b8;margin-bottom:12px">Creates a tar.gz archive of all audit files, SAT results, and logs.</p>
|
||||
<a class="btn btn-primary" href="/export/support.tar.gz">⬇ Download Support Bundle</a>
|
||||
</div></div>
|
||||
<div class="card"><div class="card-head">Export Files</div><div class="card-body">
|
||||
<table><tr><th>File</th></tr>` + rows.String() + `</table>
|
||||
</div></div>
|
||||
</div>`
|
||||
}
|
||||
|
||||
// listExportFiles walks exportDir and returns every regular file as a
// sorted list of paths relative to exportDir. A missing directory is not
// treated as an error — it simply yields an empty list, so fresh installs
// render cleanly.
func listExportFiles(exportDir string) ([]string, error) {
	root := strings.TrimSpace(exportDir)
	var files []string
	walkErr := filepath.Walk(root, func(p string, info os.FileInfo, err error) error {
		switch {
		case err != nil:
			return err
		case info.IsDir():
			return nil
		}
		// Note: Rel is computed against the untrimmed exportDir, matching
		// how renderExport passes the value through.
		rel, relErr := filepath.Rel(exportDir, p)
		if relErr != nil {
			return relErr
		}
		files = append(files, rel)
		return nil
	})
	if walkErr != nil && !os.IsNotExist(walkErr) {
		return nil, walkErr
	}
	sort.Strings(files)
	return files, nil
}
|
||||
|
||||
// ── Tools ─────────────────────────────────────────────────────────────────────
|
||||
|
||||
// renderTools returns the HTML fragment for the tools page: a table that is
// populated by fetching GET /api/tools/check, showing for each external
// tool whether it was found on PATH (t.OK / t.Path — exported field names,
// so the backing struct is marshalled without json tags). The check runs
// once on load and again on demand via the Check button.
func renderTools() string {
	return `<div class="card"><div class="card-head">Tool Check <button class="btn btn-sm btn-secondary" onclick="checkTools()" style="margin-left:auto">↻ Check</button></div>
<div class="card-body"><div id="tools-table"><p style="color:#64748b;font-size:13px">Click Check to verify installed tools.</p></div></div></div>
<script>
function checkTools() {
document.getElementById('tools-table').innerHTML = '<p style="color:#64748b;font-size:13px">Checking...</p>';
fetch('/api/tools/check').then(r=>r.json()).then(tools => {
const rows = tools.map(t =>
'<tr><td>'+t.Name+'</td><td><span class="badge '+(t.OK ? 'badge-ok' : 'badge-err')+'">'+(t.OK ? '✓ '+t.Path : '✗ missing')+'</span></td></tr>'
).join('');
document.getElementById('tools-table').innerHTML =
'<table><tr><th>Tool</th><th>Status</th></tr>'+rows+'</table>';
});
}
checkTools();
</script>`
}
|
||||
|
||||
// ── Viewer (compatibility) ────────────────────────────────────────────────────
|
||||
|
||||
// renderViewerPage renders the audit snapshot as a styled HTML page.
// This endpoint is embedded as an iframe on the Dashboard page.
//
// Fallback ladder: an empty snapshot renders a placeholder message; a
// snapshot that is not valid JSON is shown escaped inside a <pre>; a valid
// snapshot gets hardware cards (via renderHWCards) followed by the full
// pretty-printed JSON.
func renderViewerPage(title string, snapshot []byte) string {
	var b strings.Builder
	b.WriteString(`<!DOCTYPE html><html><head><meta charset="utf-8">`)
	// Title is caller-supplied; escape it before embedding.
	b.WriteString(`<title>` + html.EscapeString(title) + `</title>`)
	b.WriteString(`<style>
*{box-sizing:border-box;margin:0;padding:0}
body{font-family:system-ui,sans-serif;background:#0f1117;color:#e2e8f0;padding:20px}
h2{font-size:14px;color:#64748b;margin-bottom:8px;margin-top:16px;text-transform:uppercase;letter-spacing:.05em}
.grid{display:grid;grid-template-columns:repeat(auto-fill,minmax(280px,1fr));gap:12px}
.card{background:#161b25;border:1px solid #1e2535;border-radius:8px;padding:14px}
.card-title{font-size:12px;color:#64748b;margin-bottom:6px}
.card-value{font-size:15px;font-weight:600}
.badge{display:inline-block;padding:2px 8px;border-radius:999px;font-size:11px;font-weight:600}
.ok{background:#166534;color:#86efac}.warn{background:#713f12;color:#fde68a}.err{background:#7f1d1d;color:#fca5a5}
pre{background:#0a0d14;border:1px solid #1e2535;border-radius:6px;padding:12px;font-size:11px;overflow-x:auto;color:#94a3b8;white-space:pre-wrap;word-break:break-word;max-height:400px;overflow-y:auto}
</style></head><body>
`)
	if len(snapshot) == 0 {
		b.WriteString(`<p style="color:#64748b">No audit snapshot available yet. Re-run audit from the Dashboard.</p>`)
		b.WriteString(`</body></html>`)
		return b.String()
	}

	var data map[string]any
	if err := json.Unmarshal(snapshot, &data); err != nil {
		// Fallback: render raw JSON
		b.WriteString(`<pre>` + html.EscapeString(string(snapshot)) + `</pre>`)
		b.WriteString(`</body></html>`)
		return b.String()
	}

	// Collected at
	if t, ok := data["collected_at"].(string); ok {
		b.WriteString(`<p style="font-size:12px;color:#64748b;margin-bottom:16px">Collected: ` + html.EscapeString(t) + `</p>`)
	}

	// Hardware section — older snapshots apparently keep the hardware keys
	// at the top level, so fall back to the whole document when "hardware"
	// is absent or not an object.
	hw, _ := data["hardware"].(map[string]any)
	if hw == nil {
		hw = data
	}

	renderHWCards(&b, hw)

	// Full JSON below
	b.WriteString(`<h2>Raw JSON</h2>`)
	pretty, _ := json.MarshalIndent(data, "", " ")
	b.WriteString(`<pre>` + html.EscapeString(string(pretty)) + `</pre>`)
	b.WriteString(`</body></html>`)
	return b.String()
}
|
||||
|
||||
func renderHWCards(b *strings.Builder, hw map[string]any) {
|
||||
sections := []struct{ key, label string }{
|
||||
{"board", "Board"},
|
||||
{"cpus", "CPUs"},
|
||||
{"memory", "Memory"},
|
||||
{"storage", "Storage"},
|
||||
{"gpus", "GPUs"},
|
||||
{"nics", "NICs"},
|
||||
{"psus", "Power Supplies"},
|
||||
}
|
||||
for _, s := range sections {
|
||||
v, ok := hw[s.key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
b.WriteString(`<h2>` + s.label + `</h2><div class="grid">`)
|
||||
renderValue(b, v)
|
||||
b.WriteString(`</div>`)
|
||||
}
|
||||
}
|
||||
|
||||
func renderValue(b *strings.Builder, v any) {
|
||||
switch val := v.(type) {
|
||||
case []any:
|
||||
for _, item := range val {
|
||||
renderValue(b, item)
|
||||
}
|
||||
case map[string]any:
|
||||
b.WriteString(`<div class="card">`)
|
||||
for k, vv := range val {
|
||||
b.WriteString(fmt.Sprintf(`<div class="card-title">%s</div><div class="card-value">%s</div>`,
|
||||
html.EscapeString(k), html.EscapeString(fmt.Sprintf("%v", vv))))
|
||||
}
|
||||
b.WriteString(`</div>`)
|
||||
}
|
||||
}
|
||||
|
||||
// ── Export index (compatibility) ──────────────────────────────────────────────
|
||||
|
||||
func renderExportIndex(exportDir string) (string, error) {
|
||||
entries, err := listExportFiles(exportDir)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var body strings.Builder
|
||||
body.WriteString(`<!DOCTYPE html><html><head><meta charset="utf-8"><title>Bee Export Files</title></head><body>`)
|
||||
body.WriteString(`<h1>Bee Export Files</h1><ul>`)
|
||||
for _, entry := range entries {
|
||||
body.WriteString(`<li><a href="/export/file?path=` + url.QueryEscape(entry) + `">` + html.EscapeString(entry) + `</a></li>`)
|
||||
}
|
||||
if len(entries) == 0 {
|
||||
body.WriteString(`<li>No export files found.</li>`)
|
||||
}
|
||||
body.WriteString(`</ul></body></html>`)
|
||||
return body.String(), nil
|
||||
}
|
||||
@@ -1,138 +1,396 @@
|
||||
package webui
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"html"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"bee/audit/internal/app"
|
||||
"bee/audit/internal/runtimeenv"
|
||||
gocharts "github.com/go-analyze/charts"
|
||||
"reanimator/chart/viewer"
|
||||
chartweb "reanimator/chart/web"
|
||||
"reanimator/chart/web"
|
||||
)
|
||||
|
||||
const defaultTitle = "Bee Hardware Audit"
|
||||
|
||||
// HandlerOptions configures the web UI handler.
|
||||
type HandlerOptions struct {
|
||||
Title string
|
||||
AuditPath string
|
||||
ExportDir string
|
||||
Title string
|
||||
AuditPath string
|
||||
ExportDir string
|
||||
App *app.App
|
||||
RuntimeMode runtimeenv.Mode
|
||||
}
|
||||
|
||||
// metricsRing holds a rolling window of live metric samples: parallel
// slices of values and their "HH:MM" capture-time labels, capped at size.
// All methods are safe for concurrent use.
type metricsRing struct {
	mu     sync.Mutex
	vals   []float64
	labels []string
	size   int
}

// newMetricsRing returns an empty ring that retains at most size samples.
func newMetricsRing(size int) *metricsRing {
	return &metricsRing{
		size:   size,
		vals:   make([]float64, 0, size),
		labels: make([]string, 0, size),
	}
}

// push appends a sample stamped with the current wall-clock hour:minute,
// evicting the oldest sample once the ring is at capacity.
func (r *metricsRing) push(v float64) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if len(r.vals) >= r.size {
		r.vals, r.labels = r.vals[1:], r.labels[1:]
	}
	r.vals = append(r.vals, v)
	r.labels = append(r.labels, time.Now().Format("15:04"))
}

// snapshot returns independent copies of the current values and labels so
// callers can read them without holding the lock.
func (r *metricsRing) snapshot() ([]float64, []string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	vals := append([]float64(nil), r.vals...)
	labels := append([]string(nil), r.labels...)
	return vals, labels
}
|
||||
|
||||
// gpuRings holds per-GPU ring buffers, one ring per charted series. The
// series names used when charting are "Temp °C", "Load %", "Mem %", and
// "Power W" respectively.
type gpuRings struct {
	Temp    *metricsRing
	Util    *metricsRing
	MemUtil *metricsRing
	Power   *metricsRing
}
|
||||
|
||||
// handler is the HTTP handler for the web UI. It owns the route mux plus
// the rolling metric windows sampled for the metrics charts.
type handler struct {
	opts HandlerOptions
	mux  *http.ServeMux
	// server rings — host-level telemetry windows
	ringCPUTemp *metricsRing
	ringCPULoad *metricsRing
	ringMemLoad *metricsRing
	ringPower   *metricsRing
	ringFans    []*metricsRing
	fanNames    []string
	// per-GPU rings (index = GPU index)
	gpuRings []*gpuRings
	// ringsMu guards the dynamically-sized collections (ringFans, fanNames,
	// gpuRings); the fixed rings above are internally synchronized.
	ringsMu sync.Mutex
}
|
||||
|
||||
// NewHandler creates the HTTP mux with all routes.
|
||||
func NewHandler(opts HandlerOptions) http.Handler {
|
||||
title := strings.TrimSpace(opts.Title)
|
||||
if title == "" {
|
||||
title = defaultTitle
|
||||
if strings.TrimSpace(opts.Title) == "" {
|
||||
opts.Title = defaultTitle
|
||||
}
|
||||
if strings.TrimSpace(opts.ExportDir) == "" {
|
||||
opts.ExportDir = app.DefaultExportDir
|
||||
}
|
||||
if opts.RuntimeMode == "" {
|
||||
opts.RuntimeMode = runtimeenv.ModeAuto
|
||||
}
|
||||
|
||||
auditPath := strings.TrimSpace(opts.AuditPath)
|
||||
exportDir := strings.TrimSpace(opts.ExportDir)
|
||||
if exportDir == "" {
|
||||
exportDir = app.DefaultExportDir
|
||||
h := &handler{
|
||||
opts: opts,
|
||||
ringCPUTemp: newMetricsRing(120),
|
||||
ringCPULoad: newMetricsRing(120),
|
||||
ringMemLoad: newMetricsRing(120),
|
||||
ringPower: newMetricsRing(120),
|
||||
}
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("GET /static/", http.StripPrefix("/static/", chartweb.Static()))
|
||||
mux.HandleFunc("GET /healthz", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Cache-Control", "no-store")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write([]byte("ok"))
|
||||
})
|
||||
mux.HandleFunc("GET /audit.json", func(w http.ResponseWriter, r *http.Request) {
|
||||
data, err := loadSnapshot(auditPath)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
http.Error(w, "audit snapshot not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
http.Error(w, fmt.Sprintf("read audit snapshot: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Cache-Control", "no-store")
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
_, _ = w.Write(data)
|
||||
})
|
||||
mux.HandleFunc("GET /export/support.tar.gz", func(w http.ResponseWriter, r *http.Request) {
|
||||
archive, err := app.BuildSupportBundle(exportDir)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("build support bundle: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Cache-Control", "no-store")
|
||||
w.Header().Set("Content-Type", "application/gzip")
|
||||
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", filepath.Base(archive)))
|
||||
http.ServeFile(w, r, archive)
|
||||
})
|
||||
mux.HandleFunc("GET /runtime-health.json", func(w http.ResponseWriter, r *http.Request) {
|
||||
data, err := loadSnapshot(filepath.Join(exportDir, "runtime-health.json"))
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
http.Error(w, "runtime health not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
http.Error(w, fmt.Sprintf("read runtime health: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Cache-Control", "no-store")
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
_, _ = w.Write(data)
|
||||
})
|
||||
mux.HandleFunc("GET /export/", func(w http.ResponseWriter, r *http.Request) {
|
||||
body, err := renderExportIndex(exportDir)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("render export index: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Cache-Control", "no-store")
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
_, _ = w.Write([]byte(body))
|
||||
})
|
||||
mux.HandleFunc("GET /export/file", func(w http.ResponseWriter, r *http.Request) {
|
||||
rel := strings.TrimSpace(r.URL.Query().Get("path"))
|
||||
if rel == "" {
|
||||
http.Error(w, "path is required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
clean := filepath.Clean(rel)
|
||||
if clean == "." || strings.HasPrefix(clean, "..") {
|
||||
http.Error(w, "invalid path", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
http.ServeFile(w, r, filepath.Join(exportDir, clean))
|
||||
})
|
||||
mux.HandleFunc("GET /viewer", func(w http.ResponseWriter, r *http.Request) {
|
||||
snapshot, err := loadSnapshot(auditPath)
|
||||
if err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
http.Error(w, fmt.Sprintf("read audit snapshot: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
html, err := viewer.RenderHTML(snapshot, title)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("render snapshot: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Cache-Control", "no-store")
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
_, _ = w.Write(html)
|
||||
})
|
||||
mux.HandleFunc("GET /", func(w http.ResponseWriter, r *http.Request) {
|
||||
noticeTitle, noticeBody := runtimeNotice(filepath.Join(exportDir, "runtime-health.json"))
|
||||
body := renderShellPage(title, noticeTitle, noticeBody)
|
||||
w.Header().Set("Cache-Control", "no-store")
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
_, _ = w.Write([]byte(body))
|
||||
})
|
||||
|
||||
// ── Infrastructure ──────────────────────────────────────────────────────
|
||||
mux.HandleFunc("GET /healthz", h.handleHealthz)
|
||||
|
||||
// ── Existing read-only endpoints (preserved for compatibility) ──────────
|
||||
mux.HandleFunc("GET /audit.json", h.handleAuditJSON)
|
||||
mux.HandleFunc("GET /runtime-health.json", h.handleRuntimeHealthJSON)
|
||||
mux.HandleFunc("GET /export/support.tar.gz", h.handleSupportBundleDownload)
|
||||
mux.HandleFunc("GET /export/file", h.handleExportFile)
|
||||
mux.HandleFunc("GET /export/", h.handleExportIndex)
|
||||
mux.HandleFunc("GET /viewer", h.handleViewer)
|
||||
|
||||
// ── API ──────────────────────────────────────────────────────────────────
|
||||
// Audit
|
||||
mux.HandleFunc("POST /api/audit/run", h.handleAPIAuditRun)
|
||||
mux.HandleFunc("GET /api/audit/stream", h.handleAPIAuditStream)
|
||||
|
||||
// SAT
|
||||
mux.HandleFunc("POST /api/sat/nvidia/run", h.handleAPISATRun("nvidia"))
|
||||
mux.HandleFunc("POST /api/sat/memory/run", h.handleAPISATRun("memory"))
|
||||
mux.HandleFunc("POST /api/sat/storage/run", h.handleAPISATRun("storage"))
|
||||
mux.HandleFunc("POST /api/sat/cpu/run", h.handleAPISATRun("cpu"))
|
||||
mux.HandleFunc("GET /api/sat/stream", h.handleAPISATStream)
|
||||
|
||||
// Services
|
||||
mux.HandleFunc("GET /api/services", h.handleAPIServicesList)
|
||||
mux.HandleFunc("POST /api/services/action", h.handleAPIServicesAction)
|
||||
|
||||
// Network
|
||||
mux.HandleFunc("GET /api/network", h.handleAPINetworkStatus)
|
||||
mux.HandleFunc("POST /api/network/dhcp", h.handleAPINetworkDHCP)
|
||||
mux.HandleFunc("POST /api/network/static", h.handleAPINetworkStatic)
|
||||
|
||||
// Export
|
||||
mux.HandleFunc("GET /api/export/list", h.handleAPIExportList)
|
||||
mux.HandleFunc("POST /api/export/bundle", h.handleAPIExportBundle)
|
||||
|
||||
// Tools
|
||||
mux.HandleFunc("GET /api/tools/check", h.handleAPIToolsCheck)
|
||||
|
||||
// Preflight
|
||||
mux.HandleFunc("GET /api/preflight", h.handleAPIPreflight)
|
||||
|
||||
// Metrics — SSE stream of live sensor data + server-side SVG charts
|
||||
mux.HandleFunc("GET /api/metrics/stream", h.handleAPIMetricsStream)
|
||||
mux.HandleFunc("GET /api/metrics/chart/", h.handleMetricsChartSVG)
|
||||
|
||||
// Reanimator chart static assets
|
||||
mux.Handle("GET /chart/static/", http.StripPrefix("/chart/static/", web.Static()))
|
||||
|
||||
// ── Pages ────────────────────────────────────────────────────────────────
|
||||
mux.HandleFunc("GET /", h.handlePage)
|
||||
|
||||
h.mux = mux
|
||||
return mux
|
||||
}
|
||||
|
||||
// ListenAndServe starts the HTTP server and blocks until it fails.
// NOTE(review): no Read/Write timeouts are configured; a blanket
// WriteTimeout would kill the long-lived SSE endpoints (/api/*/stream),
// so this may be deliberate — confirm before hardening.
func ListenAndServe(addr string, opts HandlerOptions) error {
	return http.ListenAndServe(addr, NewHandler(opts))
}
|
||||
|
||||
// ── Infrastructure handlers ──────────────────────────────────────────────────
|
||||
|
||||
func (h *handler) handleHealthz(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Cache-Control", "no-store")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write([]byte("ok"))
|
||||
}
|
||||
|
||||
// ── Compatibility endpoints ──────────────────────────────────────────────────
|
||||
|
||||
func (h *handler) handleAuditJSON(w http.ResponseWriter, r *http.Request) {
|
||||
data, err := loadSnapshot(h.opts.AuditPath)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
http.Error(w, "audit snapshot not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
http.Error(w, fmt.Sprintf("read audit snapshot: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Cache-Control", "no-store")
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
_, _ = w.Write(data)
|
||||
}
|
||||
|
||||
func (h *handler) handleRuntimeHealthJSON(w http.ResponseWriter, r *http.Request) {
|
||||
data, err := loadSnapshot(filepath.Join(h.opts.ExportDir, "runtime-health.json"))
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
http.Error(w, "runtime health not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
http.Error(w, fmt.Sprintf("read runtime health: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Cache-Control", "no-store")
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
_, _ = w.Write(data)
|
||||
}
|
||||
|
||||
// handleSupportBundleDownload builds a fresh support bundle from the export
// directory on every request and serves the resulting archive as an
// attachment download.
func (h *handler) handleSupportBundleDownload(w http.ResponseWriter, r *http.Request) {
	archive, err := app.BuildSupportBundle(h.opts.ExportDir)
	if err != nil {
		http.Error(w, fmt.Sprintf("build support bundle: %v", err), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Cache-Control", "no-store")
	w.Header().Set("Content-Type", "application/gzip")
	// Suggest the archive's own base name as the download filename.
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", filepath.Base(archive)))
	http.ServeFile(w, r, archive)
}
|
||||
|
||||
func (h *handler) handleExportFile(w http.ResponseWriter, r *http.Request) {
|
||||
rel := strings.TrimSpace(r.URL.Query().Get("path"))
|
||||
if rel == "" {
|
||||
http.Error(w, "path is required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
clean := filepath.Clean(rel)
|
||||
if clean == "." || strings.HasPrefix(clean, "..") {
|
||||
http.Error(w, "invalid path", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
http.ServeFile(w, r, filepath.Join(h.opts.ExportDir, clean))
|
||||
}
|
||||
|
||||
func (h *handler) handleExportIndex(w http.ResponseWriter, r *http.Request) {
|
||||
body, err := renderExportIndex(h.opts.ExportDir)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("render export index: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Cache-Control", "no-store")
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
_, _ = w.Write([]byte(body))
|
||||
}
|
||||
|
||||
// handleViewer renders the audit snapshot through the reanimator viewer.
// A snapshot read error is deliberately ignored: viewer.RenderHTML is given
// whatever bytes were read (possibly nil) and is expected to render its own
// empty state. NOTE(review): confirm RenderHTML tolerates nil/partial input.
func (h *handler) handleViewer(w http.ResponseWriter, r *http.Request) {
	snapshot, _ := loadSnapshot(h.opts.AuditPath)
	body, err := viewer.RenderHTML(snapshot, h.opts.Title)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Cache-Control", "no-store")
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	_, _ = w.Write(body)
}
|
||||
|
||||
func (h *handler) handleMetricsChartSVG(w http.ResponseWriter, r *http.Request) {
|
||||
path := strings.TrimPrefix(r.URL.Path, "/api/metrics/chart/")
|
||||
path = strings.TrimSuffix(path, ".svg")
|
||||
|
||||
var datasets [][]float64
|
||||
var names []string
|
||||
var labels []string
|
||||
var title string
|
||||
|
||||
switch {
|
||||
case path == "server":
|
||||
title = "Server"
|
||||
vCPUTemp, l := h.ringCPUTemp.snapshot()
|
||||
vCPULoad, _ := h.ringCPULoad.snapshot()
|
||||
vMemLoad, _ := h.ringMemLoad.snapshot()
|
||||
vPower, _ := h.ringPower.snapshot()
|
||||
labels = l
|
||||
datasets = [][]float64{vCPUTemp, vCPULoad, vMemLoad, vPower}
|
||||
names = []string{"CPU Temp °C", "CPU Load %", "Mem Load %", "Power W"}
|
||||
|
||||
h.ringsMu.Lock()
|
||||
for i, fr := range h.ringFans {
|
||||
fv, _ := fr.snapshot()
|
||||
datasets = append(datasets, fv)
|
||||
name := "Fan"
|
||||
if i < len(h.fanNames) {
|
||||
name = h.fanNames[i]
|
||||
}
|
||||
names = append(names, name+" RPM")
|
||||
}
|
||||
h.ringsMu.Unlock()
|
||||
|
||||
case strings.HasPrefix(path, "gpu/"):
|
||||
idxStr := strings.TrimPrefix(path, "gpu/")
|
||||
idx := 0
|
||||
fmt.Sscanf(idxStr, "%d", &idx)
|
||||
h.ringsMu.Lock()
|
||||
var gr *gpuRings
|
||||
if idx < len(h.gpuRings) {
|
||||
gr = h.gpuRings[idx]
|
||||
}
|
||||
h.ringsMu.Unlock()
|
||||
if gr == nil {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
vTemp, l := gr.Temp.snapshot()
|
||||
vUtil, _ := gr.Util.snapshot()
|
||||
vMemUtil, _ := gr.MemUtil.snapshot()
|
||||
vPower, _ := gr.Power.snapshot()
|
||||
labels = l
|
||||
title = fmt.Sprintf("GPU %d", idx)
|
||||
datasets = [][]float64{vTemp, vUtil, vMemUtil, vPower}
|
||||
names = []string{"Temp °C", "Load %", "Mem %", "Power W"}
|
||||
|
||||
default:
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure all datasets same length as labels
|
||||
n := len(labels)
|
||||
if n == 0 {
|
||||
n = 1
|
||||
labels = []string{""}
|
||||
}
|
||||
for i := range datasets {
|
||||
if len(datasets[i]) == 0 {
|
||||
datasets[i] = make([]float64, n)
|
||||
}
|
||||
}
|
||||
|
||||
sparse := sparseLabels(labels, 6)
|
||||
|
||||
opt := gocharts.NewLineChartOptionWithData(datasets)
|
||||
opt.Title = gocharts.TitleOption{Text: title}
|
||||
opt.XAxis.Labels = sparse
|
||||
opt.Legend = gocharts.LegendOption{SeriesNames: names}
|
||||
|
||||
p := gocharts.NewPainter(gocharts.PainterOptions{
|
||||
OutputFormat: gocharts.ChartOutputSVG,
|
||||
Width: 1400,
|
||||
Height: 280,
|
||||
}, gocharts.PainterThemeOption(gocharts.GetTheme("grafana")))
|
||||
if err := p.LineChart(opt); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
buf, err := p.Bytes()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "image/svg+xml")
|
||||
w.Header().Set("Cache-Control", "no-store")
|
||||
_, _ = w.Write(buf)
|
||||
}
|
||||
|
||||
// safeIdx returns s[i], or 0 when i is out of range. Unlike the previous
// version it also treats negative indexes as out of range instead of
// panicking, so chart code can read aligned samples without bounds checks.
func safeIdx(s []float64, i int) float64 {
	if i >= 0 && i < len(s) {
		return s[i]
	}
	return 0
}
|
||||
|
||||
// sparseLabels returns a copy of labels in which only every step-th label
// is kept and the rest are blanked, so the x-axis shows at most roughly n
// tick labels. n < 1 is clamped to 1; the previous version divided by n
// unchecked and panicked with a division by zero when n == 0.
func sparseLabels(labels []string, n int) []string {
	if n < 1 {
		n = 1
	}
	out := make([]string, len(labels))
	step := len(labels) / n
	if step < 1 {
		step = 1
	}
	for i, l := range labels {
		if i%step == 0 {
			out[i] = l
		}
	}
	return out
}
|
||||
|
||||
// ── Page handler ─────────────────────────────────────────────────────────────
|
||||
|
||||
func (h *handler) handlePage(w http.ResponseWriter, r *http.Request) {
|
||||
page := strings.TrimPrefix(r.URL.Path, "/")
|
||||
if page == "" {
|
||||
page = "dashboard"
|
||||
}
|
||||
body := renderPage(page, h.opts)
|
||||
w.Header().Set("Cache-Control", "no-store")
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
_, _ = w.Write([]byte(body))
|
||||
}
|
||||
|
||||
// ── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
func loadSnapshot(path string) ([]byte, error) {
|
||||
if strings.TrimSpace(path) == "" {
|
||||
return nil, os.ErrNotExist
|
||||
@@ -140,101 +398,17 @@ func loadSnapshot(path string) ([]byte, error) {
|
||||
return os.ReadFile(path)
|
||||
}
|
||||
|
||||
func runtimeNotice(path string) (string, string) {
|
||||
health, err := app.ReadRuntimeHealth(path)
|
||||
if err != nil {
|
||||
return "Runtime Health", "No runtime health snapshot found yet."
|
||||
}
|
||||
body := fmt.Sprintf("Status: %s. Export dir: %s. Driver ready: %t. CUDA ready: %t. Network: %s. Export files: /export/",
|
||||
firstNonEmpty(health.Status, "UNKNOWN"),
|
||||
firstNonEmpty(health.ExportDir, app.DefaultExportDir),
|
||||
health.DriverReady,
|
||||
health.CUDAReady,
|
||||
firstNonEmpty(health.NetworkStatus, "UNKNOWN"),
|
||||
)
|
||||
if len(health.Issues) > 0 {
|
||||
body += " Issues: "
|
||||
parts := make([]string, 0, len(health.Issues))
|
||||
for _, issue := range health.Issues {
|
||||
parts = append(parts, issue.Code)
|
||||
}
|
||||
body += strings.Join(parts, ", ")
|
||||
}
|
||||
return "Runtime Health", body
|
||||
// writeJSON sends v as JSON with status 200.
func writeJSON(w http.ResponseWriter, v any) {
	hdr := w.Header()
	hdr.Set("Cache-Control", "no-store")
	hdr.Set("Content-Type", "application/json; charset=utf-8")
	// Encode errors mid-response are not recoverable; ignore deliberately.
	_ = json.NewEncoder(w).Encode(v)
}
|
||||
|
||||
// renderExportIndex walks exportDir and returns an HTML page listing every
// regular file as a link to /export/file. A missing export directory is not
// treated as an error; it simply yields an empty listing.
func renderExportIndex(exportDir string) (string, error) {
	var files []string
	walkErr := filepath.Walk(strings.TrimSpace(exportDir), func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		rel, relErr := filepath.Rel(exportDir, p)
		if relErr != nil {
			return relErr
		}
		files = append(files, rel)
		return nil
	})
	if walkErr != nil && !errors.Is(walkErr, os.ErrNotExist) {
		return "", walkErr
	}
	sort.Strings(files)

	var page strings.Builder
	page.WriteString("<!DOCTYPE html><html><head><meta charset=\"utf-8\"><title>Bee Export Files</title></head><body>")
	page.WriteString("<h1>Bee Export Files</h1><ul>")
	if len(files) == 0 {
		page.WriteString("<li>No export files found.</li>")
	} else {
		for _, f := range files {
			// Escape both the query value and the visible text.
			page.WriteString("<li><a href=\"/export/file?path=" + url.QueryEscape(f) + "\">" + html.EscapeString(f) + "</a></li>")
		}
	}
	page.WriteString("</ul></body></html>")
	return page.String(), nil
}
|
||||
|
||||
// renderShellPage assembles the operator shell page: a header, the action
// links (support bundle, raw JSON, export browser), an optional notice
// section, and the audit viewer iframe.
func renderShellPage(title, noticeTitle, noticeBody string) string {
	escTitle := html.EscapeString(title)
	parts := []string{
		"<!DOCTYPE html><html><head><meta charset=\"utf-8\"><meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">",
		"<title>" + escTitle + "</title>",
		`<style>
body{margin:0;font-family:system-ui,-apple-system,BlinkMacSystemFont,"Segoe UI",sans-serif;background:#f4f1ea;color:#1b1b18}
.shell{min-height:100vh;display:grid;grid-template-rows:auto auto 1fr}
.header{padding:18px 20px 12px;border-bottom:1px solid rgba(0,0,0,.08);background:#fbf8f2}
.header h1{margin:0;font-size:24px}
.header p{margin:6px 0 0;color:#5a5a52}
.actions{display:flex;flex-wrap:wrap;gap:10px;padding:12px 20px;background:#fbf8f2}
.actions a{display:inline-block;text-decoration:none;padding:10px 14px;border-radius:999px;background:#1f5f4a;color:#fff;font-weight:600}
.actions a.secondary{background:#d8e5dd;color:#17372b}
.notice{margin:16px 20px 0;padding:14px 16px;border-radius:14px;background:#fff7df;border:1px solid #ead9a4}
.notice h2{margin:0 0 6px;font-size:16px}
.notice p{margin:0;color:#4f4a37}
.viewer-wrap{padding:16px 20px 20px}
.viewer{width:100%;height:calc(100vh - 170px);border:0;border-radius:18px;background:#fff;box-shadow:0 12px 40px rgba(0,0,0,.08)}
@media (max-width:720px){.viewer{height:calc(100vh - 240px)}}
</style></head><body><div class="shell">`,
		"<header class=\"header\"><h1>" + escTitle + "</h1><p>Audit viewer with support bundle and raw export access.</p></header>",
		"<nav class=\"actions\">",
		"<a href=\"/export/support.tar.gz\">Download support bundle</a>",
		"<a class=\"secondary\" href=\"/audit.json\">Open audit.json</a>",
		"<a class=\"secondary\" href=\"/runtime-health.json\">Open runtime-health.json</a>",
		"<a class=\"secondary\" href=\"/export/\">Browse export files</a>",
		"</nav>",
	}
	// The notice section is rendered only when a title is supplied.
	if strings.TrimSpace(noticeTitle) != "" {
		parts = append(parts, "<section class=\"notice\"><h2>"+html.EscapeString(noticeTitle)+"</h2><p>"+html.EscapeString(noticeBody)+"</p></section>")
	}
	parts = append(parts,
		"<main class=\"viewer-wrap\"><iframe class=\"viewer\" src=\"/viewer\" loading=\"eager\" referrerpolicy=\"same-origin\"></iframe></main>",
		"</div></body></html>",
	)
	return strings.Join(parts, "")
}
|
||||
|
||||
// firstNonEmpty returns value trimmed of surrounding whitespace, or fallback
// when the trimmed value is empty.
func firstNonEmpty(value, fallback string) string {
	if trimmed := strings.TrimSpace(value); trimmed != "" {
		return trimmed
	}
	return fallback
}
||||
// writeError sends a JSON error response.
func writeError(w http.ResponseWriter, status int, msg string) {
	hdr := w.Header()
	hdr.Set("Cache-Control", "no-store")
	hdr.Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(status)
	// Encode errors after WriteHeader are not recoverable; ignore deliberately.
	_ = json.NewEncoder(w).Encode(map[string]string{"error": msg})
}
|
||||
|
||||
2
bible
2
bible
Submodule bible updated: 688b87e98d...456c1f022c
38
bible-local/architecture/charting.md
Normal file
38
bible-local/architecture/charting.md
Normal file
@@ -0,0 +1,38 @@
|
||||
# Charting architecture
|
||||
|
||||
## Decision: one chart engine for all live metrics
|
||||
|
||||
**Engine:** `github.com/go-analyze/charts` (pure Go, no CGO, SVG output)
|
||||
**Theme:** `grafana` (dark background, coloured lines)
|
||||
|
||||
All live metrics charts in the web UI are server-side SVG images served by Go
|
||||
and polled by the browser every 2 seconds via `<img src="...?t=now">`.
|
||||
There is no client-side canvas or JS chart library.
|
||||
|
||||
### Why go-analyze/charts
|
||||
|
||||
- Pure Go, no CGO — builds cleanly inside the live-build container
|
||||
- SVG output — crisp at any display resolution, full-width without pixelation
|
||||
- Grafana theme matches the dark web UI colour scheme
|
||||
- Active fork of the archived wcharczuk/go-chart
|
||||
|
||||
### SAT stress-test charts
|
||||
|
||||
The `drawGPUChartSVG` function in `platform/gpu_metrics.go` is a separate
|
||||
self-contained SVG renderer used **only** for completed SAT run reports
|
||||
(HTML export, burn-in summaries). It is not used for live metrics.
|
||||
|
||||
### Live metrics chart endpoints
|
||||
|
||||
| Path | Content |
|
||||
|------|---------|
|
||||
| `GET /api/metrics/chart/server.svg` | CPU temp, CPU load %, mem load %, power W, fan RPMs |
|
||||
| `GET /api/metrics/chart/gpu/{idx}.svg` | GPU temp °C, load %, mem %, power W |
|
||||
|
||||
Charts are 1400 × 280 px SVG. The page renders them at `width: 100%` in a
|
||||
single-column layout so they always fill the viewport width.
|
||||
|
||||
### Ring buffers
|
||||
|
||||
Each metric is stored in a 120-sample ring buffer (2 minutes of history at 1 Hz).
|
||||
Buffers are per-server or per-GPU and grow dynamically as new GPUs appear.
|
||||
@@ -9,6 +9,8 @@ DHCP is used only for LAN (operator SSH access). Internet is NOT available.
|
||||
|
||||
## Boot sequence (single ISO)
|
||||
|
||||
The live system is expected to boot with `toram`, so `live-boot` copies the full read-only medium into RAM before mounting the root filesystem. After that point, runtime must not depend on the original USB/BMC virtual media staying readable.
|
||||
|
||||
`systemd` boot order:
|
||||
|
||||
```
|
||||
@@ -20,11 +22,12 @@ local-fs.target
|
||||
│ creates /dev/nvidia* nodes)
|
||||
├── bee-audit.service (runs `bee audit` → /var/log/bee-audit.json,
|
||||
│ never blocks boot on partial collector failures)
|
||||
└── bee-web.service (runs `bee web` on :80,
|
||||
reads the latest audit snapshot on each request)
|
||||
├── bee-web.service (runs `bee web` on :80 — full interactive web UI)
|
||||
└── bee-desktop.service (startx → openbox + chromium http://localhost/)
|
||||
```
|
||||
|
||||
**Critical invariants:**
|
||||
- The live ISO boots with `boot=live toram`. Runtime binaries must continue working even if the original boot media disappears after early boot.
|
||||
- OpenSSH MUST start without network. `bee-sshsetup.service` runs before `ssh.service`.
|
||||
- `bee-network.service` uses `dhclient -nw` (background) — network bring-up is best effort and non-blocking.
|
||||
- `bee-nvidia.service` loads modules via `insmod` with absolute paths — NOT `modprobe`.
|
||||
@@ -41,17 +44,21 @@ Local-console behavior:
|
||||
```text
|
||||
tty1
|
||||
└── live-config autologin → bee
|
||||
└── /home/bee/.profile
|
||||
└── exec menu
|
||||
└── /usr/local/bin/bee-tui
|
||||
└── sudo -n /usr/local/bin/bee tui --runtime livecd
|
||||
└── /home/bee/.profile (prints web UI URLs)
|
||||
|
||||
display :0
|
||||
└── bee-desktop.service (User=bee)
|
||||
└── startx /usr/local/bin/bee-openbox-session -- :0
|
||||
├── tint2 (taskbar)
|
||||
├── chromium http://localhost/
|
||||
└── openbox (WM)
|
||||
```
|
||||
|
||||
Rules:
|
||||
- local `tty1` lands in user `bee`, not directly in `root`
|
||||
- `menu` must work without typing `sudo`
|
||||
- TUI actions still run as `root` via `sudo -n`
|
||||
- SSH is independent from the tty1 path
|
||||
- `bee-desktop.service` starts X11 + openbox + Chromium automatically after `bee-web.service`
|
||||
- Chromium opens `http://localhost/` — the full interactive web UI
|
||||
- SSH is independent from the desktop path
|
||||
- serial console support is enabled for VM boot debugging
|
||||
|
||||
## ISO build sequence
|
||||
@@ -71,24 +78,39 @@ build-in-container.sh [--authorized-keys /path/to/keys]
|
||||
d. build kernel modules against Debian headers
|
||||
e. create `libnvidia-ml.so.1` / `libcuda.so.1` symlinks in cache
|
||||
f. cache in `dist/nvidia-<version>-<kver>/`
|
||||
7. inject NVIDIA `.ko` → staged `/usr/local/lib/nvidia/`
|
||||
8. inject `nvidia-smi` → staged `/usr/local/bin/nvidia-smi`
|
||||
9. inject `libnvidia-ml` + `libcuda` → staged `/usr/lib/`
|
||||
10. write staged `/etc/bee-release` (versions + git commit)
|
||||
11. patch staged `motd` with build metadata
|
||||
12. copy `iso/builder/` into a temporary live-build workdir under `dist/`
|
||||
13. sync staged overlay into workdir `config/includes.chroot/`
|
||||
14. run `lb config && lb build` inside the privileged builder container
|
||||
7. `build-cublas.sh`:
|
||||
a. download `libcublas`, `libcublasLt`, `libcudart` runtime + dev packages from the NVIDIA CUDA Debian repo
|
||||
b. verify packages against repo `Packages.gz`
|
||||
c. extract headers for `bee-gpu-stress` build
|
||||
d. cache userspace libs in `dist/cublas-<version>+cuda<series>/`
|
||||
8. build `bee-gpu-stress` against extracted cuBLASLt/cudart headers
|
||||
9. inject NVIDIA `.ko` → staged `/usr/local/lib/nvidia/`
|
||||
10. inject `nvidia-smi` → staged `/usr/local/bin/nvidia-smi`
|
||||
11. inject `libnvidia-ml` + `libcuda` + `libcublas` + `libcublasLt` + `libcudart` → staged `/usr/lib/`
|
||||
12. write staged `/etc/bee-release` (versions + git commit)
|
||||
13. patch staged `motd` with build metadata
|
||||
14. copy `iso/builder/` into a temporary live-build workdir under `dist/`
|
||||
15. sync staged overlay into workdir `config/includes.chroot/`
|
||||
16. run `lb config && lb build` inside the privileged builder container
|
||||
```
|
||||
|
||||
Build host notes:
|
||||
- `build-in-container.sh` targets `linux/amd64` builder containers by default, including Docker Desktop on macOS / Apple Silicon.
|
||||
- Override with `BEE_BUILDER_PLATFORM=<os/arch>` only if you intentionally need a different container platform.
|
||||
- If the local builder image under the same tag was previously built for the wrong architecture, the script rebuilds it automatically.
|
||||
|
||||
**Critical invariants:**
|
||||
- `DEBIAN_KERNEL_ABI` in `iso/builder/VERSIONS` pins the exact kernel ABI used in BOTH places:
|
||||
1. `build-in-container.sh` / `build-nvidia-module.sh` — Debian kernel headers for module build
|
||||
2. `auto/config` — `linux-image-${DEBIAN_KERNEL_ABI}` in the ISO
|
||||
- NVIDIA modules go to staged `usr/local/lib/nvidia/` — NOT to `/lib/modules/<kver>/extra/`.
|
||||
- `bee-gpu-stress` must be built against cached CUDA userspace headers from `build-cublas.sh`, not against random host-installed CUDA headers.
|
||||
- The live ISO must ship `libcublas`, `libcublasLt`, and `libcudart` together with `libcuda` so tensor-core stress works without internet or package installs at boot.
|
||||
- The source overlay in `iso/overlay/` is treated as immutable source. Build-time files are injected only into the staged overlay.
|
||||
- The live-build workdir under `dist/` is disposable; source files under `iso/builder/` stay clean.
|
||||
- Container build requires `--privileged` because `live-build` uses mounts/chroots/loop devices during ISO assembly.
|
||||
- On macOS / Docker Desktop, the builder still must run as `linux/amd64` so the shipped ISO binaries remain `amd64`.
|
||||
- Operators must provision enough RAM to hold the full compressed live medium plus normal runtime overhead, because `toram` copies the entire read-only ISO payload into memory before the system reaches steady state.
|
||||
|
||||
## Post-boot smoke test
|
||||
|
||||
@@ -131,10 +153,15 @@ Current validation state:
|
||||
Every collector returns `nil, nil` on tool-not-found. Errors are logged, never fatal.
|
||||
|
||||
Acceptance flows:
|
||||
- `bee sat nvidia` → diagnostic archive with `nvidia-smi -q` + `nvidia-bug-report` + lightweight `bee-gpu-stress`
|
||||
- `bee sat nvidia` → diagnostic archive with `nvidia-smi -q` + `nvidia-bug-report` + mixed-precision `bee-gpu-stress`
|
||||
- `bee sat memory` → `memtester` archive
|
||||
- `bee sat storage` → SMART/NVMe diagnostic archive and short self-test trigger where supported
|
||||
- SAT `summary.txt` now includes `overall_status` and per-job `*_status` values (`OK`, `FAILED`, `UNSUPPORTED`)
|
||||
- `bee-gpu-stress` should prefer cuBLASLt GEMM load over the old integer/PTX burn path:
|
||||
- Ampere: `fp16` + `fp32`/TF32 tensor-core load
|
||||
- Ada / Hopper: add `fp8`
|
||||
- Blackwell+: add `fp4`
|
||||
- PTX fallback is only for missing cuBLASLt/userspace or unsupported narrow datatypes
|
||||
- Runtime overrides:
|
||||
- `BEE_GPU_STRESS_SECONDS`
|
||||
- `BEE_GPU_STRESS_SIZE_MB`
|
||||
|
||||
@@ -21,13 +21,14 @@ Fills gaps where Redfish/logpile is blind:
|
||||
- Read-only hardware inventory: board, CPU, memory, storage, PCIe, PSU, GPU, NIC, RAID
|
||||
- Machine-readable health summary derived from collector verdicts
|
||||
- Operator-triggered acceptance tests for NVIDIA, memory, and storage
|
||||
- NVIDIA SAT includes both diagnostic collection and lightweight GPU stress via `bee-gpu-stress`
|
||||
- NVIDIA SAT includes both diagnostic collection and mixed-precision GPU stress via `bee-gpu-stress`
|
||||
- `bee-gpu-stress` should exercise tensor/inference paths (`fp16`, `fp32`/TF32, `fp8`, `fp4` when supported by the GPU/userspace stack) and fall back to Driver API PTX burn only if cuBLASLt is unavailable
|
||||
- Automatic boot audit with operator-facing local console and SSH access
|
||||
- NVIDIA proprietary driver loaded at boot for GPU enrichment via `nvidia-smi`
|
||||
- SSH access (OpenSSH) always available for inspection and debugging
|
||||
- Interactive Go TUI via `bee tui` for network setup, service management, and acceptance tests
|
||||
- Read-only web viewer via `bee web`, rendering the latest audit snapshot through the embedded Reanimator Chart
|
||||
- Local `tty1` operator UX: `bee` autologin, `menu` auto-start, privileged actions via `sudo -n`
|
||||
- Full web UI via `bee web` on port 80: interactive control panel with live metrics, SAT tests, network config, service management, export, and tools
|
||||
- Local operator desktop: openbox + Xorg + Chromium auto-opening `http://localhost/`
|
||||
- Local `tty1` operator UX: `bee` autologin, openbox desktop auto-starts with Chromium on `http://localhost/`
|
||||
|
||||
## Network isolation — CRITICAL
|
||||
|
||||
@@ -69,15 +70,18 @@ Fills gaps where Redfish/logpile is blind:
|
||||
| SSH | OpenSSH server |
|
||||
| NVIDIA driver | Proprietary `.run` installer, built against Debian kernel headers |
|
||||
| NVIDIA modules | Loaded via `insmod` from `/usr/local/lib/nvidia/` |
|
||||
| GPU stress backend | `bee-gpu-stress` + cuBLASLt/cuBLAS/cudart mixed-precision GEMM, with Driver API PTX fallback |
|
||||
| Builder | Debian 12 host/VM or Debian 12 container image |
|
||||
|
||||
## Operator UX
|
||||
|
||||
- On the live ISO, `tty1` autologins as `bee`
|
||||
- The login profile auto-runs `menu`, which enters the Go TUI
|
||||
- The TUI itself executes privileged actions as `root` via `sudo -n`
|
||||
- `bee-desktop.service` starts X11 + openbox + Chromium on display `:0`
|
||||
- Chromium opens `http://localhost/` — the full web UI
|
||||
- SSH remains available independently of the local console path
|
||||
- Remote operators can open `http://<ip>/` in any browser on the same LAN
|
||||
- VM-oriented builds also include `qemu-guest-agent` and serial console support for debugging
|
||||
- The ISO boots with `toram`, so loss of the original USB/BMC virtual media after boot should not break already-installed runtime binaries
|
||||
|
||||
## Runtime split
|
||||
|
||||
@@ -85,6 +89,7 @@ Fills gaps where Redfish/logpile is blind:
|
||||
- Live-ISO-only responsibilities stay in `iso/` integration code
|
||||
- Live ISO launches the Go CLI with `--runtime livecd`
|
||||
- Local/manual runs use `--runtime auto` or `--runtime local`
|
||||
- Live ISO targets must have enough RAM for the full compressed live medium plus runtime working set because the boot medium is copied into memory at startup
|
||||
|
||||
## Key paths
|
||||
|
||||
@@ -99,7 +104,10 @@ Fills gaps where Redfish/logpile is blind:
|
||||
| `internal/chart/` | Git submodule with `reanimator/chart`, embedded into `bee web` |
|
||||
| `iso/builder/VERSIONS` | Pinned versions: Debian, Go, NVIDIA driver, kernel ABI |
|
||||
| `iso/builder/smoketest.sh` | Post-boot smoke test — run via SSH to verify live ISO |
|
||||
| `iso/overlay/etc/profile.d/bee.sh` | `menu` helper + tty1 auto-start policy |
|
||||
| `iso/overlay/home/bee/.profile` | `bee` shell profile for local console startup |
|
||||
| `iso/overlay/etc/profile.d/bee.sh` | tty1 welcome message with web UI URLs |
|
||||
| `iso/overlay/home/bee/.profile` | `bee` shell profile (PATH only) |
|
||||
| `iso/overlay/etc/systemd/system/bee-desktop.service` | starts X11 + openbox + chromium |
|
||||
| `iso/overlay/usr/local/bin/bee-desktop` | startx wrapper for bee-desktop.service |
|
||||
| `iso/overlay/usr/local/bin/bee-openbox-session` | xinitrc: tint2 + chromium + openbox |
|
||||
| `dist/` | Build outputs (gitignored) |
|
||||
| `iso/out/` | Downloaded ISO files (gitignored) |
|
||||
|
||||
58
iso/README.md
Normal file
58
iso/README.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# ISO Build
|
||||
|
||||
`bee` ISO is built inside a Debian 12 builder container via `iso/builder/build-in-container.sh`.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Docker Desktop or another Docker-compatible container runtime
|
||||
- Privileged containers enabled
|
||||
- Enough free disk space for builder cache, Debian live-build artifacts, NVIDIA driver cache, and CUDA userspace packages
|
||||
|
||||
## Build on macOS
|
||||
|
||||
From the repository root:
|
||||
|
||||
```sh
|
||||
sh iso/builder/build-in-container.sh
|
||||
```
|
||||
|
||||
The script defaults to `linux/amd64` builder containers, so it works on:
|
||||
|
||||
- Intel Mac
|
||||
- Apple Silicon (`M1` / `M2` / `M3` / `M4`) via Docker Desktop's Linux VM
|
||||
|
||||
You do not need to pass `--platform` manually for normal ISO builds.
|
||||
|
||||
## Useful Options
|
||||
|
||||
Build with explicit SSH keys baked into the ISO:
|
||||
|
||||
```sh
|
||||
sh iso/builder/build-in-container.sh --authorized-keys ~/.ssh/id_ed25519.pub
|
||||
```
|
||||
|
||||
Rebuild the builder image:
|
||||
|
||||
```sh
|
||||
sh iso/builder/build-in-container.sh --rebuild-image
|
||||
```
|
||||
|
||||
Use a custom cache directory:
|
||||
|
||||
```sh
|
||||
sh iso/builder/build-in-container.sh --cache-dir /path/to/cache
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- The builder image is automatically rebuilt if the local tag exists for the wrong architecture.
|
||||
- The live ISO boots with Debian `live-boot` `toram`, so the read-only medium is copied into RAM during boot and the runtime no longer depends on the original USB/BMC virtual media staying present.
|
||||
- Target systems need enough RAM for the full compressed live medium plus normal runtime overhead, or boot may fail before reaching the web UI.
|
||||
- Override the container platform only if you know why:
|
||||
|
||||
```sh
|
||||
BEE_BUILDER_PLATFORM=linux/amd64 sh iso/builder/build-in-container.sh
|
||||
```
|
||||
|
||||
- The shipped ISO is still `amd64`.
|
||||
- Output ISO artifacts are written under `dist/`.
|
||||
@@ -26,6 +26,20 @@ RUN apt-get update -qq && apt-get install -y \
|
||||
linux-headers-amd64 \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Add NVIDIA CUDA repo and install nvcc (needed to compile nccl-tests)
|
||||
RUN wget -qO /tmp/cuda-keyring.gpg \
|
||||
https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64/3bf863cc.pub \
|
||||
&& gpg --dearmor < /tmp/cuda-keyring.gpg \
|
||||
> /usr/share/keyrings/nvidia-cuda.gpg \
|
||||
&& rm /tmp/cuda-keyring.gpg \
|
||||
&& echo "deb [signed-by=/usr/share/keyrings/nvidia-cuda.gpg] \
|
||||
https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64/ /" \
|
||||
> /etc/apt/sources.list.d/cuda.list \
|
||||
&& apt-get update -qq \
|
||||
&& apt-get install -y cuda-nvcc-12-8 \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& ln -sfn /usr/local/cuda-12.8 /usr/local/cuda
|
||||
|
||||
RUN arch="$(dpkg --print-architecture)" \
|
||||
&& case "$arch" in \
|
||||
amd64) goarch=amd64 ;; \
|
||||
|
||||
@@ -4,5 +4,9 @@ NVIDIA_DRIVER_VERSION=590.48.01
|
||||
NCCL_VERSION=2.28.9-1
|
||||
NCCL_CUDA_VERSION=13.0
|
||||
NCCL_SHA256=2e6faafd2c19cffc7738d9283976a3200ea9db9895907f337f0c7e5a25563186
|
||||
NCCL_TESTS_VERSION=2.13.10
|
||||
NVCC_VERSION=12.8
|
||||
CUBLAS_VERSION=13.0.2.14-1
|
||||
CUDA_USERSPACE_VERSION=13.0.96-1
|
||||
GO_VERSION=1.24.0
|
||||
AUDIT_VERSION=1.0.0
|
||||
|
||||
@@ -32,6 +32,6 @@ lb config noauto \
|
||||
--memtest none \
|
||||
--iso-volume "EASY-BEE" \
|
||||
--iso-application "EASY-BEE" \
|
||||
--bootappend-live "boot=live components console=tty0 console=ttyS0,115200n8 loglevel=3 username=bee user-fullname=Bee modprobe.blacklist=nouveau" \
|
||||
--bootappend-live "boot=live components quiet nomodeset video=1920x1080 console=tty0 console=ttyS0,115200n8 loglevel=3 username=bee user-fullname=Bee modprobe.blacklist=nouveau" \
|
||||
--apt-recommends false \
|
||||
"${@}"
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
190
iso/builder/build-cublas.sh
Normal file
190
iso/builder/build-cublas.sh
Normal file
@@ -0,0 +1,190 @@
|
||||
#!/bin/sh
|
||||
# build-cublas.sh — download cuBLASLt/cuBLAS/cudart runtime + headers for bee-gpu-stress.
|
||||
#
|
||||
# Downloads .deb packages from NVIDIA's CUDA apt repository (Debian 12, x86_64),
|
||||
# verifies them against Packages.gz, and extracts the small subset we need:
|
||||
# - headers for compiling bee-gpu-stress against cuBLASLt
|
||||
# - runtime libs for libcublas, libcublasLt, libcudart inside the ISO
|
||||
|
||||
set -e
|
||||
|
||||
CUBLAS_VERSION="$1"
|
||||
CUDA_USERSPACE_VERSION="$2"
|
||||
CUDA_SERIES="$3"
|
||||
DIST_DIR="$4"
|
||||
|
||||
[ -n "$CUBLAS_VERSION" ] || { echo "usage: $0 <cublas-version> <cuda-userspace-version> <cuda-series> <dist-dir>"; exit 1; }
|
||||
[ -n "$CUDA_USERSPACE_VERSION" ] || { echo "usage: $0 <cublas-version> <cuda-userspace-version> <cuda-series> <dist-dir>"; exit 1; }
|
||||
[ -n "$CUDA_SERIES" ] || { echo "usage: $0 <cublas-version> <cuda-userspace-version> <cuda-series> <dist-dir>"; exit 1; }
|
||||
[ -n "$DIST_DIR" ] || { echo "usage: $0 <cublas-version> <cuda-userspace-version> <cuda-series> <dist-dir>"; exit 1; }
|
||||
|
||||
CUDA_SERIES_DASH=$(printf '%s' "$CUDA_SERIES" | tr '.' '-')
|
||||
REPO_BASE="https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64"
|
||||
CACHE_DIR="${DIST_DIR}/cublas-${CUBLAS_VERSION}+cuda${CUDA_SERIES}"
|
||||
CACHE_ROOT="${BEE_CACHE_DIR:-${DIST_DIR}/cache}"
|
||||
DOWNLOAD_CACHE_DIR="${CACHE_ROOT}/cublas-downloads"
|
||||
PACKAGES_GZ="${DOWNLOAD_CACHE_DIR}/Packages.gz"
|
||||
|
||||
echo "=== cuBLAS ${CUBLAS_VERSION} / cudart ${CUDA_USERSPACE_VERSION} / CUDA ${CUDA_SERIES} ==="
|
||||
|
||||
if [ -f "${CACHE_DIR}/include/cublasLt.h" ] && [ -f "${CACHE_DIR}/include/cuda_runtime_api.h" ] \
|
||||
&& [ -f "${CACHE_DIR}/include/crt/host_defines.h" ] \
|
||||
&& [ -f "${CACHE_DIR}/include/nv/target" ] \
|
||||
&& [ "$(find "${CACHE_DIR}/lib" \( -name 'libcublas.so*' -o -name 'libcublasLt.so*' -o -name 'libcudart.so*' \) 2>/dev/null | wc -l)" -gt 0 ]; then
|
||||
echo "=== cuBLAS cached, skipping download ==="
|
||||
echo "cache: $CACHE_DIR"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
mkdir -p "${DOWNLOAD_CACHE_DIR}" "${CACHE_DIR}/include" "${CACHE_DIR}/lib"
|
||||
|
||||
echo "=== downloading Packages.gz ==="
|
||||
wget -q -O "${PACKAGES_GZ}" "${REPO_BASE}/Packages.gz"
|
||||
|
||||
lookup_pkg() {
|
||||
pkg="$1"
|
||||
ver="$2" # if empty, match any version (first found)
|
||||
gzip -dc "${PACKAGES_GZ}" | awk -v pkg="$pkg" -v ver="$ver" '
|
||||
/^Package: / { cur_pkg=$2; gsub(/\r/, "", cur_pkg) }
|
||||
/^Version: / { cur_ver=$2; gsub(/\r/, "", cur_ver) }
|
||||
/^Filename: / { cur_file=$2; gsub(/\r/, "", cur_file) }
|
||||
/^SHA256: / { cur_sha=$2; gsub(/\r/, "", cur_sha) }
|
||||
/^$/ {
|
||||
if (cur_pkg == pkg && (ver == "" || cur_ver == ver)) {
|
||||
print cur_file " " cur_sha
|
||||
printed=1
|
||||
exit
|
||||
}
|
||||
cur_pkg=""; cur_ver=""; cur_file=""; cur_sha=""
|
||||
}
|
||||
END {
|
||||
if (!printed && cur_pkg == pkg && (ver == "" || cur_ver == ver)) {
|
||||
print cur_file " " cur_sha
|
||||
}
|
||||
}'
|
||||
}
|
||||
|
||||
download_verified_pkg() {
|
||||
pkg="$1"
|
||||
ver="$2"
|
||||
|
||||
meta="$(lookup_pkg "$pkg" "$ver")"
|
||||
[ -n "$meta" ] || { echo "ERROR: package metadata not found for ${pkg} ${ver}"; exit 1; }
|
||||
|
||||
repo_file="$(printf '%s\n' "$meta" | awk '{print $1}')"
|
||||
repo_sha="$(printf '%s\n' "$meta" | awk '{print $2}')"
|
||||
[ -n "$repo_file" ] || { echo "ERROR: package filename missing for ${pkg}"; exit 1; }
|
||||
[ -n "$repo_sha" ] || { echo "ERROR: package sha missing for ${pkg}"; exit 1; }
|
||||
|
||||
out="${DOWNLOAD_CACHE_DIR}/$(basename "$repo_file")"
|
||||
if [ -f "$out" ]; then
|
||||
actual_sha="$(sha256sum "$out" | awk '{print $1}')"
|
||||
if [ "$actual_sha" = "$repo_sha" ]; then
|
||||
echo "=== using cached $(basename "$repo_file") ===" >&2
|
||||
printf '%s\n' "$out"
|
||||
return 0
|
||||
fi
|
||||
echo "=== removing stale $(basename "$repo_file") (sha256 mismatch) ===" >&2
|
||||
rm -f "$out"
|
||||
fi
|
||||
|
||||
echo "=== downloading $(basename "$repo_file") ===" >&2
|
||||
wget --show-progress -O "$out" "${REPO_BASE}/$(basename "$repo_file")"
|
||||
|
||||
actual_sha="$(sha256sum "$out" | awk '{print $1}')"
|
||||
if [ "$actual_sha" != "$repo_sha" ]; then
|
||||
echo "ERROR: sha256 mismatch for $(basename "$repo_file")" >&2
|
||||
echo " expected: $repo_sha" >&2
|
||||
echo " actual: $actual_sha" >&2
|
||||
rm -f "$out"
|
||||
exit 1
|
||||
fi
|
||||
echo "sha256 OK: $(basename "$repo_file")" >&2
|
||||
printf '%s\n' "$out"
|
||||
}
|
||||
|
||||
extract_deb() {
|
||||
deb="$1"
|
||||
dst="$2"
|
||||
mkdir -p "$dst"
|
||||
(
|
||||
cd "$dst"
|
||||
ar x "$deb"
|
||||
data_tar=$(ls data.tar.* 2>/dev/null | head -1)
|
||||
[ -n "$data_tar" ] || { echo "ERROR: data.tar.* not found in $deb"; exit 1; }
|
||||
tar xf "$data_tar"
|
||||
)
|
||||
}
|
||||
|
||||
copy_headers() {
|
||||
from="$1"
|
||||
if [ -d "${from}/usr/include" ]; then
|
||||
cp -a "${from}/usr/include/." "${CACHE_DIR}/include/"
|
||||
fi
|
||||
# NVIDIA CUDA packages install headers under /usr/local/cuda-X.Y/targets/x86_64-linux/include/
|
||||
find "$from" -type d -name include | while read -r inc_dir; do
|
||||
case "$inc_dir" in
|
||||
*/usr/include) ;; # already handled above
|
||||
*)
|
||||
if find "${inc_dir}" -maxdepth 3 \( -name '*.h' -o -type f \) | grep -q .; then
|
||||
cp -a "${inc_dir}/." "${CACHE_DIR}/include/"
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
||||
copy_libs() {
|
||||
from="$1"
|
||||
find "$from" \( -name 'libcublas.so*' -o -name 'libcublasLt.so*' -o -name 'libcudart.so*' \) \
|
||||
\( -type f -o -type l \) -exec cp -a {} "${CACHE_DIR}/lib/" \;
|
||||
}
|
||||
|
||||
make_links() {
|
||||
base="$1"
|
||||
versioned=$(find "${CACHE_DIR}/lib" -maxdepth 1 -name "${base}.so.[0-9]*" -type f | sort | head -1)
|
||||
[ -n "$versioned" ] || return 0
|
||||
soname=$(printf '%s\n' "$versioned" | sed -E "s#.*/(${base}\.so\.[0-9]+).*#\\1#")
|
||||
target=$(basename "$versioned")
|
||||
ln -sf "$target" "${CACHE_DIR}/lib/${soname}" 2>/dev/null || true
|
||||
ln -sf "${soname}" "${CACHE_DIR}/lib/${base}.so" 2>/dev/null || true
|
||||
}
|
||||
|
||||
TMP_DIR=$(mktemp -d)
|
||||
trap 'rm -rf "$TMP_DIR"' EXIT INT TERM
|
||||
|
||||
CUBLAS_RT_DEB=$(download_verified_pkg "libcublas-${CUDA_SERIES_DASH}" "${CUBLAS_VERSION}")
|
||||
CUBLAS_DEV_DEB=$(download_verified_pkg "libcublas-dev-${CUDA_SERIES_DASH}" "${CUBLAS_VERSION}")
|
||||
CUDART_RT_DEB=$(download_verified_pkg "cuda-cudart-${CUDA_SERIES_DASH}" "${CUDA_USERSPACE_VERSION}")
|
||||
CUDART_DEV_DEB=$(download_verified_pkg "cuda-cudart-dev-${CUDA_SERIES_DASH}" "${CUDA_USERSPACE_VERSION}")
|
||||
CUDA_CRT_DEB=$(download_verified_pkg "cuda-crt-${CUDA_SERIES_DASH}" "")
|
||||
CUDA_CCCL_DEB=$(download_verified_pkg "cuda-cccl-${CUDA_SERIES_DASH}" "")
|
||||
|
||||
extract_deb "$CUBLAS_RT_DEB" "${TMP_DIR}/cublas-rt"
|
||||
extract_deb "$CUBLAS_DEV_DEB" "${TMP_DIR}/cublas-dev"
|
||||
extract_deb "$CUDART_RT_DEB" "${TMP_DIR}/cudart-rt"
|
||||
extract_deb "$CUDART_DEV_DEB" "${TMP_DIR}/cudart-dev"
|
||||
extract_deb "$CUDA_CRT_DEB" "${TMP_DIR}/cuda-crt"
|
||||
extract_deb "$CUDA_CCCL_DEB" "${TMP_DIR}/cuda-cccl"
|
||||
|
||||
copy_headers "${TMP_DIR}/cublas-dev"
|
||||
copy_headers "${TMP_DIR}/cudart-dev"
|
||||
copy_headers "${TMP_DIR}/cuda-crt"
|
||||
copy_headers "${TMP_DIR}/cuda-cccl"
|
||||
copy_libs "${TMP_DIR}/cublas-rt"
|
||||
copy_libs "${TMP_DIR}/cudart-rt"
|
||||
|
||||
make_links "libcublas"
|
||||
make_links "libcublasLt"
|
||||
make_links "libcudart"
|
||||
|
||||
[ -f "${CACHE_DIR}/include/cublasLt.h" ] || { echo "ERROR: cublasLt.h not extracted"; exit 1; }
|
||||
[ -f "${CACHE_DIR}/include/cuda_runtime_api.h" ] || { echo "ERROR: cuda_runtime_api.h not extracted"; exit 1; }
|
||||
[ "$(find "${CACHE_DIR}/lib" -maxdepth 1 -name 'libcublasLt.so*' | wc -l)" -gt 0 ] || { echo "ERROR: libcublasLt not extracted"; exit 1; }
|
||||
[ "$(find "${CACHE_DIR}/lib" -maxdepth 1 -name 'libcublas.so*' | wc -l)" -gt 0 ] || { echo "ERROR: libcublas not extracted"; exit 1; }
|
||||
[ "$(find "${CACHE_DIR}/lib" -maxdepth 1 -name 'libcudart.so*' | wc -l)" -gt 0 ] || { echo "ERROR: libcudart not extracted"; exit 1; }
|
||||
|
||||
echo "=== cuBLAS extraction complete ==="
|
||||
echo "cache: $CACHE_DIR"
|
||||
echo "headers: $(find "${CACHE_DIR}/include" -type f | wc -l)"
|
||||
echo "libs: $(find "${CACHE_DIR}/lib" -maxdepth 1 \( -name 'libcublas*.so*' -o -name 'libcudart.so*' \) | wc -l)"
|
||||
@@ -7,6 +7,7 @@ REPO_ROOT="$(cd "$(dirname "$0")/../.." && pwd)"
|
||||
BUILDER_DIR="${REPO_ROOT}/iso/builder"
|
||||
CONTAINER_TOOL="${CONTAINER_TOOL:-docker}"
|
||||
IMAGE_TAG="${BEE_BUILDER_IMAGE:-bee-iso-builder}"
|
||||
BUILDER_PLATFORM="${BEE_BUILDER_PLATFORM:-linux/amd64}"
|
||||
CACHE_DIR="${BEE_BUILDER_CACHE_DIR:-${REPO_ROOT}/dist/container-cache}"
|
||||
AUTH_KEYS=""
|
||||
REBUILD_IMAGE=0
|
||||
@@ -40,6 +41,13 @@ if ! command -v "$CONTAINER_TOOL" >/dev/null 2>&1; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
PLATFORM_OS="${BUILDER_PLATFORM%/*}"
|
||||
PLATFORM_ARCH="${BUILDER_PLATFORM#*/}"
|
||||
if [ -z "$PLATFORM_OS" ] || [ -z "$PLATFORM_ARCH" ] || [ "$PLATFORM_OS" = "$BUILDER_PLATFORM" ]; then
|
||||
echo "invalid BEE_BUILDER_PLATFORM: ${BUILDER_PLATFORM} (expected os/arch, e.g. linux/amd64)" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -n "$AUTH_KEYS" ]; then
|
||||
[ -f "$AUTH_KEYS" ] || { echo "authorized_keys not found: $AUTH_KEYS" >&2; exit 1; }
|
||||
AUTH_KEYS_ABS="$(cd "$(dirname "$AUTH_KEYS")" && pwd)/$(basename "$AUTH_KEYS")"
|
||||
@@ -56,17 +64,35 @@ mkdir -p \
|
||||
|
||||
IMAGE_REF="${IMAGE_TAG}:debian${DEBIAN_VERSION}"
|
||||
|
||||
if [ "$REBUILD_IMAGE" = "1" ] || ! "$CONTAINER_TOOL" image inspect "${IMAGE_REF}" >/dev/null 2>&1; then
|
||||
image_matches_platform() {
|
||||
actual_platform="$("$CONTAINER_TOOL" image inspect --format '{{.Os}}/{{.Architecture}}' "${IMAGE_REF}" 2>/dev/null || true)"
|
||||
[ "$actual_platform" = "${BUILDER_PLATFORM}" ]
|
||||
}
|
||||
|
||||
NEED_BUILD_IMAGE=0
|
||||
if [ "$REBUILD_IMAGE" = "1" ]; then
|
||||
NEED_BUILD_IMAGE=1
|
||||
elif ! "$CONTAINER_TOOL" image inspect "${IMAGE_REF}" >/dev/null 2>&1; then
|
||||
NEED_BUILD_IMAGE=1
|
||||
elif ! image_matches_platform; then
|
||||
actual_platform="$("$CONTAINER_TOOL" image inspect --format '{{.Os}}/{{.Architecture}}' "${IMAGE_REF}" 2>/dev/null || echo unknown)"
|
||||
echo "=== rebuilding builder image ${IMAGE_REF}: platform mismatch (${actual_platform} != ${BUILDER_PLATFORM}) ==="
|
||||
NEED_BUILD_IMAGE=1
|
||||
fi
|
||||
|
||||
if [ "$NEED_BUILD_IMAGE" = "1" ]; then
|
||||
"$CONTAINER_TOOL" build \
|
||||
--platform "${BUILDER_PLATFORM}" \
|
||||
--build-arg GO_VERSION="${GO_VERSION}" \
|
||||
-t "${IMAGE_REF}" \
|
||||
"${BUILDER_DIR}"
|
||||
else
|
||||
echo "=== using existing builder image ${IMAGE_REF} ==="
|
||||
echo "=== using existing builder image ${IMAGE_REF} (${BUILDER_PLATFORM}) ==="
|
||||
fi
|
||||
|
||||
set -- \
|
||||
run --rm --privileged \
|
||||
--platform "${BUILDER_PLATFORM}" \
|
||||
-v "${REPO_ROOT}:/work" \
|
||||
-v "${CACHE_DIR}:/cache" \
|
||||
-e BEE_CONTAINER_BUILD=1 \
|
||||
@@ -80,6 +106,7 @@ set -- \
|
||||
|
||||
if [ -n "$AUTH_KEYS" ]; then
|
||||
set -- run --rm --privileged \
|
||||
--platform "${BUILDER_PLATFORM}" \
|
||||
-v "${REPO_ROOT}:/work" \
|
||||
-v "${CACHE_DIR}:/cache" \
|
||||
-v "${AUTH_KEYS_DIR}:/tmp/bee-authkeys:ro" \
|
||||
|
||||
138
iso/builder/build-nccl-tests.sh
Executable file
138
iso/builder/build-nccl-tests.sh
Executable file
@@ -0,0 +1,138 @@
|
||||
#!/bin/sh
|
||||
# build-nccl-tests.sh — build nccl-tests all_reduce_perf for the LiveCD.
|
||||
#
|
||||
# Downloads nccl-tests source from GitHub, downloads libnccl-dev .deb for
|
||||
# nccl.h, and compiles all_reduce_perf with nvcc (cuda-nvcc-13-0).
|
||||
#
|
||||
# Output is cached in DIST_DIR/nccl-tests-<version>/ so subsequent builds
|
||||
# are instant unless NCCL_TESTS_VERSION changes.
|
||||
#
|
||||
# Output layout:
|
||||
# $CACHE_DIR/bin/all_reduce_perf
|
||||
|
||||
set -e
|
||||
|
||||
NCCL_TESTS_VERSION="$1"
|
||||
NCCL_VERSION="$2"
|
||||
NCCL_CUDA_VERSION="$3"
|
||||
DIST_DIR="$4"
|
||||
|
||||
[ -n "$NCCL_TESTS_VERSION" ] || { echo "usage: $0 <nccl-tests-version> <nccl-version> <cuda-version> <dist-dir>"; exit 1; }
|
||||
[ -n "$NCCL_VERSION" ] || { echo "usage: $0 <nccl-tests-version> <nccl-version> <cuda-version> <dist-dir>"; exit 1; }
|
||||
[ -n "$NCCL_CUDA_VERSION" ] || { echo "usage: $0 <nccl-tests-version> <nccl-version> <cuda-version> <dist-dir>"; exit 1; }
|
||||
[ -n "$DIST_DIR" ] || { echo "usage: $0 <nccl-tests-version> <nccl-version> <cuda-version> <dist-dir>"; exit 1; }
|
||||
|
||||
echo "=== nccl-tests ${NCCL_TESTS_VERSION} ==="
|
||||
|
||||
CACHE_DIR="${DIST_DIR}/nccl-tests-${NCCL_TESTS_VERSION}"
|
||||
CACHE_ROOT="${BEE_CACHE_DIR:-${DIST_DIR}/cache}"
|
||||
DOWNLOAD_CACHE_DIR="${CACHE_ROOT}/nccl-tests-downloads"
|
||||
|
||||
if [ -f "${CACHE_DIR}/bin/all_reduce_perf" ]; then
|
||||
echo "=== nccl-tests cached, skipping build ==="
|
||||
echo "binary: ${CACHE_DIR}/bin/all_reduce_perf"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Resolve nvcc path (cuda-nvcc-12-8 installs to /usr/local/cuda-12.8/bin/nvcc)
|
||||
NVCC=""
|
||||
for candidate in nvcc /usr/local/cuda-12.8/bin/nvcc /usr/local/cuda-12/bin/nvcc /usr/local/cuda/bin/nvcc; do
|
||||
if command -v "$candidate" >/dev/null 2>&1 || [ -x "$candidate" ]; then
|
||||
NVCC="$candidate"
|
||||
break
|
||||
fi
|
||||
done
|
||||
[ -n "$NVCC" ] || { echo "ERROR: nvcc not found — install cuda-nvcc-13-0"; exit 1; }
|
||||
echo "nvcc: $NVCC"
|
||||
|
||||
# Determine CUDA_HOME from nvcc location
|
||||
CUDA_HOME="$(dirname "$(dirname "$NVCC")")"
|
||||
echo "CUDA_HOME: $CUDA_HOME"
|
||||
|
||||
# Download libnccl-dev for nccl.h
|
||||
REPO_BASE="https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64"
|
||||
DEV_PKG="libnccl-dev_${NCCL_VERSION}+cuda${NCCL_CUDA_VERSION}_amd64.deb"
|
||||
DEV_URL="${REPO_BASE}/${DEV_PKG}"
|
||||
|
||||
mkdir -p "$DOWNLOAD_CACHE_DIR"
|
||||
DEV_DEB="${DOWNLOAD_CACHE_DIR}/${DEV_PKG}"
|
||||
|
||||
if [ ! -f "$DEV_DEB" ]; then
|
||||
echo "=== downloading libnccl-dev ==="
|
||||
wget --show-progress -O "$DEV_DEB" "$DEV_URL"
|
||||
fi
|
||||
|
||||
# Extract nccl.h from libnccl-dev
|
||||
NCCL_INCLUDE_TMP=$(mktemp -d)
|
||||
trap 'rm -rf "$NCCL_INCLUDE_TMP" "$BUILD_TMP"' EXIT INT TERM
|
||||
|
||||
cd "$NCCL_INCLUDE_TMP"
|
||||
ar x "$DEV_DEB"
|
||||
DATA_TAR=$(ls data.tar.* 2>/dev/null | head -1)
|
||||
[ -n "$DATA_TAR" ] || { echo "ERROR: data.tar.* not found in libnccl-dev .deb"; exit 1; }
|
||||
tar xf "$DATA_TAR"
|
||||
|
||||
# nccl.h lands in ./usr/include/ or ./usr/local/cuda-X.Y/targets/.../include/
|
||||
NCCL_H=$(find . -name 'nccl.h' -type f 2>/dev/null | head -1)
|
||||
[ -n "$NCCL_H" ] || { echo "ERROR: nccl.h not found in libnccl-dev package"; exit 1; }
|
||||
NCCL_INCLUDE_DIR="$(pwd)/$(dirname "$NCCL_H")"
|
||||
echo "nccl.h: $NCCL_H"
|
||||
|
||||
# libnccl.so comes from the already-built NCCL cache (build-nccl.sh ran first)
|
||||
NCCL_LIB_DIR="${DIST_DIR}/nccl-${NCCL_VERSION}+cuda${NCCL_CUDA_VERSION}/lib"
|
||||
[ -d "$NCCL_LIB_DIR" ] || { echo "ERROR: NCCL lib dir not found at $NCCL_LIB_DIR — run build-nccl.sh first"; exit 1; }
|
||||
echo "nccl lib: $NCCL_LIB_DIR"
|
||||
|
||||
# Download nccl-tests source
|
||||
SRC_TAR="${DOWNLOAD_CACHE_DIR}/nccl-tests-v${NCCL_TESTS_VERSION}.tar.gz"
|
||||
SRC_URL="https://github.com/NVIDIA/nccl-tests/archive/refs/tags/v${NCCL_TESTS_VERSION}.tar.gz"
|
||||
|
||||
if [ ! -f "$SRC_TAR" ]; then
|
||||
echo "=== downloading nccl-tests v${NCCL_TESTS_VERSION} ==="
|
||||
wget --show-progress -O "$SRC_TAR" "$SRC_URL"
|
||||
fi
|
||||
|
||||
# Extract and build
|
||||
BUILD_TMP=$(mktemp -d)
|
||||
cd "$BUILD_TMP"
|
||||
tar xf "$SRC_TAR"
|
||||
SRC_DIR=$(ls -d nccl-tests-* 2>/dev/null | head -1)
|
||||
[ -n "$SRC_DIR" ] || { echo "ERROR: source directory not found in archive"; exit 1; }
|
||||
cd "$SRC_DIR"
|
||||
|
||||
echo "=== building all_reduce_perf ==="
|
||||
# Pick gencode based on the actual nvcc version:
|
||||
# CUDA 12.x — Volta..Blackwell (sm_70..sm_100)
|
||||
# CUDA 13.x — Hopper..Blackwell (sm_90..sm_100, Pascal/Volta/Ampere dropped)
|
||||
NVCC_MAJOR=$("$NVCC" --version 2>/dev/null | grep -oE 'release [0-9]+' | awk '{print $2}' | head -1)
|
||||
echo "nvcc major version: ${NVCC_MAJOR:-unknown}"
|
||||
if [ "${NVCC_MAJOR:-0}" -ge 13 ] 2>/dev/null; then
|
||||
GENCODE="-gencode=arch=compute_90,code=sm_90 \
|
||||
-gencode=arch=compute_100,code=sm_100"
|
||||
echo "gencode: sm_90 sm_100 (CUDA 13+)"
|
||||
else
|
||||
GENCODE="-gencode=arch=compute_70,code=sm_70 \
|
||||
-gencode=arch=compute_80,code=sm_80 \
|
||||
-gencode=arch=compute_86,code=sm_86 \
|
||||
-gencode=arch=compute_90,code=sm_90 \
|
||||
-gencode=arch=compute_100,code=sm_100"
|
||||
echo "gencode: sm_70..sm_100 (CUDA 12)"
|
||||
fi
|
||||
LIBRARY_PATH="$NCCL_LIB_DIR${LIBRARY_PATH:+:$LIBRARY_PATH}" \
|
||||
make MPI=0 \
|
||||
NVCC="$NVCC" \
|
||||
CUDA_HOME="$CUDA_HOME" \
|
||||
NCCL_HOME="$NCCL_INCLUDE_DIR/.." \
|
||||
NCCL_LIB="$NCCL_LIB_DIR" \
|
||||
NVCC_GENCODE="$GENCODE" \
|
||||
BUILDDIR="./build"
|
||||
|
||||
[ -f "./build/all_reduce_perf" ] || { echo "ERROR: all_reduce_perf not found after build"; exit 1; }
|
||||
|
||||
mkdir -p "${CACHE_DIR}/bin"
|
||||
cp "./build/all_reduce_perf" "${CACHE_DIR}/bin/all_reduce_perf"
|
||||
chmod +x "${CACHE_DIR}/bin/all_reduce_perf"
|
||||
|
||||
echo "=== nccl-tests build complete ==="
|
||||
echo "binary: ${CACHE_DIR}/bin/all_reduce_perf"
|
||||
ls -lh "${CACHE_DIR}/bin/all_reduce_perf"
|
||||
@@ -69,7 +69,36 @@ resolve_audit_version() {
|
||||
date +%Y%m%d
|
||||
}
|
||||
|
||||
# ISO image versioned separately from the audit binary (iso/v* tags).
|
||||
resolve_iso_version() {
|
||||
if [ -n "${BEE_ISO_VERSION:-}" ]; then
|
||||
echo "${BEE_ISO_VERSION}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
tag="$(git -C "${REPO_ROOT}" describe --tags --match 'iso/v*' --abbrev=7 --dirty 2>/dev/null || true)"
|
||||
case "${tag}" in
|
||||
iso/v*)
|
||||
echo "${tag#iso/v}"
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
|
||||
# Also accept plain v* tags (e.g. v2, v2.1 used for GUI releases)
|
||||
tag="$(git -C "${REPO_ROOT}" describe --tags --match 'v*' --abbrev=7 --dirty 2>/dev/null || true)"
|
||||
case "${tag}" in
|
||||
v*)
|
||||
echo "${tag#v}"
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
|
||||
# Fall back to audit version so the name is still meaningful
|
||||
resolve_audit_version
|
||||
}
|
||||
|
||||
AUDIT_VERSION_EFFECTIVE="$(resolve_audit_version)"
|
||||
ISO_VERSION_EFFECTIVE="$(resolve_iso_version)"
|
||||
|
||||
# Auto-detect kernel ABI: refresh apt index, then query current linux-image-amd64 dependency.
|
||||
# If headers for the detected ABI are not yet installed (kernel updated since image build),
|
||||
@@ -101,7 +130,7 @@ fi
|
||||
|
||||
echo "=== bee ISO build ==="
|
||||
echo "Debian: ${DEBIAN_VERSION}, Kernel ABI: ${DEBIAN_KERNEL_ABI}, Go: ${GO_VERSION}"
|
||||
echo "Audit version: ${AUDIT_VERSION_EFFECTIVE}"
|
||||
echo "Audit version: ${AUDIT_VERSION_EFFECTIVE}, ISO version: ${ISO_VERSION_EFFECTIVE}"
|
||||
echo ""
|
||||
|
||||
echo "=== syncing git submodules ==="
|
||||
@@ -139,6 +168,16 @@ else
|
||||
echo "=== bee binary up to date, skipping build ==="
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "=== downloading cuBLAS/cuBLASLt/cudart ${NCCL_CUDA_VERSION} userspace ==="
|
||||
sh "${BUILDER_DIR}/build-cublas.sh" \
|
||||
"${CUBLAS_VERSION}" \
|
||||
"${CUDA_USERSPACE_VERSION}" \
|
||||
"${NCCL_CUDA_VERSION}" \
|
||||
"${DIST_DIR}"
|
||||
|
||||
CUBLAS_CACHE="${DIST_DIR}/cublas-${CUBLAS_VERSION}+cuda${NCCL_CUDA_VERSION}"
|
||||
|
||||
GPU_STRESS_NEED_BUILD=1
|
||||
if [ -f "$GPU_STRESS_BIN" ] && [ "${BUILDER_DIR}/bee-gpu-stress.c" -ot "$GPU_STRESS_BIN" ]; then
|
||||
GPU_STRESS_NEED_BUILD=0
|
||||
@@ -147,9 +186,10 @@ fi
|
||||
if [ "$GPU_STRESS_NEED_BUILD" = "1" ]; then
|
||||
echo "=== building bee-gpu-stress ==="
|
||||
gcc -O2 -s -Wall -Wextra \
|
||||
-I"${CUBLAS_CACHE}/include" \
|
||||
-o "$GPU_STRESS_BIN" \
|
||||
"${BUILDER_DIR}/bee-gpu-stress.c" \
|
||||
-ldl
|
||||
-ldl -lm
|
||||
echo "binary: $GPU_STRESS_BIN"
|
||||
else
|
||||
echo "=== bee-gpu-stress up to date, skipping build ==="
|
||||
@@ -166,7 +206,8 @@ rm -f \
|
||||
"${OVERLAY_STAGE_DIR}/root/.ssh/authorized_keys" \
|
||||
"${OVERLAY_STAGE_DIR}/usr/local/bin/bee" \
|
||||
"${OVERLAY_STAGE_DIR}/usr/local/bin/bee-gpu-stress" \
|
||||
"${OVERLAY_STAGE_DIR}/usr/local/bin/bee-smoketest"
|
||||
"${OVERLAY_STAGE_DIR}/usr/local/bin/bee-smoketest" \
|
||||
"${OVERLAY_STAGE_DIR}/usr/local/bin/all_reduce_perf"
|
||||
|
||||
# --- inject authorized_keys for SSH access ---
|
||||
AUTHORIZED_KEYS_FILE="${OVERLAY_STAGE_DIR}/root/.ssh/authorized_keys"
|
||||
@@ -263,12 +304,30 @@ NCCL_CACHE="${DIST_DIR}/nccl-${NCCL_VERSION}+cuda${NCCL_CUDA_VERSION}"
|
||||
cp "${NCCL_CACHE}/lib/"* "${OVERLAY_STAGE_DIR}/usr/lib/"
|
||||
echo "=== NCCL: $(ls "${NCCL_CACHE}/lib/" | wc -l) files injected into /usr/lib/ ==="
|
||||
|
||||
# Inject cuBLAS/cuBLASLt/cudart runtime libs used by bee-gpu-stress tensor-core GEMM path
|
||||
cp "${CUBLAS_CACHE}/lib/"* "${OVERLAY_STAGE_DIR}/usr/lib/"
|
||||
echo "=== cuBLAS: $(ls "${CUBLAS_CACHE}/lib/" | wc -l) files injected into /usr/lib/ ==="
|
||||
|
||||
# --- build nccl-tests ---
|
||||
echo ""
|
||||
echo "=== building nccl-tests ${NCCL_TESTS_VERSION} ==="
|
||||
sh "${BUILDER_DIR}/build-nccl-tests.sh" \
|
||||
"${NCCL_TESTS_VERSION}" \
|
||||
"${NCCL_VERSION}" \
|
||||
"${NCCL_CUDA_VERSION}" \
|
||||
"${DIST_DIR}"
|
||||
|
||||
NCCL_TESTS_CACHE="${DIST_DIR}/nccl-tests-${NCCL_TESTS_VERSION}"
|
||||
cp "${NCCL_TESTS_CACHE}/bin/all_reduce_perf" "${OVERLAY_STAGE_DIR}/usr/local/bin/all_reduce_perf"
|
||||
chmod +x "${OVERLAY_STAGE_DIR}/usr/local/bin/all_reduce_perf"
|
||||
echo "=== all_reduce_perf injected ==="
|
||||
|
||||
# --- embed build metadata ---
|
||||
mkdir -p "${OVERLAY_STAGE_DIR}/etc"
|
||||
BUILD_DATE="$(date +%Y-%m-%d)"
|
||||
GIT_COMMIT="$(git -C "${REPO_ROOT}" rev-parse --short HEAD 2>/dev/null || echo unknown)"
|
||||
cat > "${OVERLAY_STAGE_DIR}/etc/bee-release" <<EOF
|
||||
BEE_ISO_VERSION=${AUDIT_VERSION_EFFECTIVE}
|
||||
BEE_ISO_VERSION=${ISO_VERSION_EFFECTIVE}
|
||||
BEE_AUDIT_VERSION=${AUDIT_VERSION_EFFECTIVE}
|
||||
BUILD_DATE=${BUILD_DATE}
|
||||
GIT_COMMIT=${GIT_COMMIT}
|
||||
@@ -277,6 +336,9 @@ DEBIAN_KERNEL_ABI=${DEBIAN_KERNEL_ABI}
|
||||
NVIDIA_DRIVER_VERSION=${NVIDIA_DRIVER_VERSION}
|
||||
NCCL_VERSION=${NCCL_VERSION}
|
||||
NCCL_CUDA_VERSION=${NCCL_CUDA_VERSION}
|
||||
CUBLAS_VERSION=${CUBLAS_VERSION}
|
||||
CUDA_USERSPACE_VERSION=${CUDA_USERSPACE_VERSION}
|
||||
NCCL_TESTS_VERSION=${NCCL_TESTS_VERSION}
|
||||
EOF
|
||||
|
||||
# Patch motd with build info
|
||||
@@ -310,7 +372,7 @@ lb build 2>&1
|
||||
|
||||
# live-build outputs live-image-amd64.hybrid.iso in LB_DIR
|
||||
ISO_RAW="${LB_DIR}/live-image-amd64.hybrid.iso"
|
||||
ISO_OUT="${DIST_DIR}/bee-debian${DEBIAN_VERSION}-v${AUDIT_VERSION_EFFECTIVE}-amd64.iso"
|
||||
ISO_OUT="${DIST_DIR}/bee-debian${DEBIAN_VERSION}-v${ISO_VERSION_EFFECTIVE}-amd64.iso"
|
||||
if [ -f "$ISO_RAW" ]; then
|
||||
cp "$ISO_RAW" "$ISO_OUT"
|
||||
echo ""
|
||||
|
||||
@@ -8,7 +8,7 @@ else
|
||||
fi
|
||||
|
||||
if loadfont $font ; then
|
||||
set gfxmode=800x600
|
||||
set gfxmode=1920x1080,1280x1024,auto
|
||||
set gfxpayload=keep
|
||||
insmod efi_gop
|
||||
insmod efi_uga
|
||||
|
||||
@@ -14,6 +14,11 @@ menuentry "EASY-BEE" {
|
||||
initrd @INITRD_LIVE@
|
||||
}
|
||||
|
||||
menuentry "EASY-BEE (load to RAM)" {
|
||||
linux @KERNEL_LIVE@ @APPEND_LIVE@ toram bee.nvidia.mode=normal
|
||||
initrd @INITRD_LIVE@
|
||||
}
|
||||
|
||||
menuentry "EASY-BEE (NVIDIA GSP=off)" {
|
||||
linux @KERNEL_LIVE@ @APPEND_LIVE@ bee.nvidia.mode=gsp-off
|
||||
initrd @INITRD_LIVE@
|
||||
|
||||
@@ -5,6 +5,12 @@ label live-@FLAVOUR@-normal
|
||||
initrd @INITRD@
|
||||
append @APPEND_LIVE@ bee.nvidia.mode=normal
|
||||
|
||||
label live-@FLAVOUR@-toram
|
||||
menu label EASY-BEE (^load to RAM)
|
||||
linux @LINUX@
|
||||
initrd @INITRD@
|
||||
append @APPEND_LIVE@ toram bee.nvidia.mode=normal
|
||||
|
||||
label live-@FLAVOUR@-gsp-off
|
||||
menu label EASY-BEE (^NVIDIA GSP=off)
|
||||
linux @LINUX@
|
||||
|
||||
@@ -15,12 +15,13 @@ ensure_bee_console_user() {
|
||||
mkdir -p /home/bee
|
||||
chown -R bee:bee /home/bee
|
||||
echo "bee:eeb" | chpasswd
|
||||
usermod -aG sudo bee 2>/dev/null || true
|
||||
usermod -aG sudo,video,input bee 2>/dev/null || true
|
||||
}
|
||||
|
||||
ensure_bee_console_user
|
||||
|
||||
# Enable bee services
|
||||
systemctl enable nvidia-dcgm.service 2>/dev/null || true
|
||||
systemctl enable bee-network.service
|
||||
systemctl enable bee-nvidia.service
|
||||
systemctl enable bee-preflight.service
|
||||
@@ -28,6 +29,7 @@ systemctl enable bee-audit.service
|
||||
systemctl enable bee-web.service
|
||||
systemctl enable bee-sshsetup.service
|
||||
systemctl enable ssh.service
|
||||
systemctl enable lightdm.service 2>/dev/null || true
|
||||
systemctl enable qemu-guest-agent.service 2>/dev/null || true
|
||||
systemctl enable serial-getty@ttyS0.service 2>/dev/null || true
|
||||
systemctl enable serial-getty@ttyS1.service 2>/dev/null || true
|
||||
@@ -38,7 +40,6 @@ chmod +x /usr/local/bin/bee-network.sh 2>/dev/null || true
|
||||
chmod +x /usr/local/bin/bee-nvidia-load 2>/dev/null || true
|
||||
chmod +x /usr/local/bin/bee-sshsetup 2>/dev/null || true
|
||||
chmod +x /usr/local/bin/bee-smoketest 2>/dev/null || true
|
||||
chmod +x /usr/local/bin/bee-tui 2>/dev/null || true
|
||||
chmod +x /usr/local/bin/bee 2>/dev/null || true
|
||||
chmod +x /usr/local/bin/bee-log-run 2>/dev/null || true
|
||||
|
||||
|
||||
66
iso/builder/config/hooks/normal/9002-nvidia-dcgm.hook.chroot
Executable file
66
iso/builder/config/hooks/normal/9002-nvidia-dcgm.hook.chroot
Executable file
@@ -0,0 +1,66 @@
|
||||
#!/bin/sh
|
||||
# 9002-nvidia-dcgm.hook.chroot — install NVIDIA DCGM inside the live-build chroot.
|
||||
# DCGM (Data Center GPU Manager) provides dcgmi diag for acceptance testing.
|
||||
# Adds NVIDIA's CUDA apt repository (debian12/x86_64) and installs datacenter-gpu-manager.
|
||||
|
||||
set -e
|
||||
|
||||
NVIDIA_KEYRING="/usr/share/keyrings/nvidia-cuda.gpg"
|
||||
NVIDIA_LIST="/etc/apt/sources.list.d/nvidia-cuda.list"
|
||||
NVIDIA_KEY_URL="https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64/3bf863cc.pub"
|
||||
NVIDIA_REPO="https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64/"
|
||||
APT_UPDATED=0
|
||||
|
||||
mkdir -p /usr/share/keyrings /etc/apt/sources.list.d
|
||||
|
||||
ensure_tool() {
|
||||
tool="$1"
|
||||
pkg="$2"
|
||||
if command -v "${tool}" >/dev/null 2>&1; then
|
||||
return 0
|
||||
fi
|
||||
if [ "${APT_UPDATED}" -eq 0 ]; then
|
||||
apt-get update -qq
|
||||
APT_UPDATED=1
|
||||
fi
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends "${pkg}"
|
||||
}
|
||||
|
||||
ensure_cert_bundle() {
|
||||
if [ -s /etc/ssl/certs/ca-certificates.crt ]; then
|
||||
return 0
|
||||
fi
|
||||
if [ "${APT_UPDATED}" -eq 0 ]; then
|
||||
apt-get update -qq
|
||||
APT_UPDATED=1
|
||||
fi
|
||||
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends ca-certificates
|
||||
}
|
||||
|
||||
if ! ensure_cert_bundle || ! ensure_tool wget wget || ! ensure_tool gpg gpg; then
|
||||
echo "WARN: prerequisites missing — skipping DCGM install"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Download and import NVIDIA GPG key
|
||||
if ! wget -qO- "${NVIDIA_KEY_URL}" | gpg --dearmor --yes --output "${NVIDIA_KEYRING}"; then
|
||||
echo "WARN: failed to fetch NVIDIA GPG key — skipping DCGM install"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
cat > "${NVIDIA_LIST}" <<EOF
|
||||
deb [signed-by=${NVIDIA_KEYRING}] ${NVIDIA_REPO} /
|
||||
EOF
|
||||
|
||||
apt-get update -qq
|
||||
|
||||
if DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends datacenter-gpu-manager; then
|
||||
echo "=== DCGM: datacenter-gpu-manager installed ==="
|
||||
dcgmi --version 2>/dev/null || true
|
||||
else
|
||||
echo "WARN: datacenter-gpu-manager install failed — DCGM unavailable"
|
||||
fi
|
||||
|
||||
# Clean up apt lists to keep ISO size down
|
||||
rm -f "${NVIDIA_LIST}"
|
||||
apt-get clean
|
||||
@@ -18,8 +18,15 @@ qemu-guest-agent
|
||||
# SSH
|
||||
openssh-server
|
||||
|
||||
# Disk installer
|
||||
squashfs-tools
|
||||
parted
|
||||
grub-pc-bin
|
||||
grub-efi-amd64-bin
|
||||
|
||||
# Filesystem support for USB export targets
|
||||
exfatprogs
|
||||
exfat-fuse
|
||||
ntfs-3g
|
||||
|
||||
# Utilities
|
||||
@@ -41,6 +48,16 @@ stress-ng
|
||||
# QR codes (for displaying audit results)
|
||||
qrencode
|
||||
|
||||
# Local desktop (openbox + chromium kiosk)
|
||||
openbox
|
||||
tint2
|
||||
xorg
|
||||
xterm
|
||||
chromium
|
||||
xserver-xorg-video-fbdev
|
||||
xserver-xorg-video-vesa
|
||||
lightdm
|
||||
|
||||
# Firmware
|
||||
firmware-linux-free
|
||||
firmware-amd-graphics
|
||||
|
||||
2
iso/overlay/etc/X11/Xwrapper.config
Normal file
2
iso/overlay/etc/X11/Xwrapper.config
Normal file
@@ -0,0 +1,2 @@
|
||||
allowed_users=anybody
|
||||
needs_root_rights=yes
|
||||
22
iso/overlay/etc/X11/xorg.conf.d/10-fbdev.conf
Normal file
22
iso/overlay/etc/X11/xorg.conf.d/10-fbdev.conf
Normal file
@@ -0,0 +1,22 @@
|
||||
Section "Device"
|
||||
Identifier "fbdev"
|
||||
Driver "fbdev"
|
||||
Option "fbdev" "/dev/fb0"
|
||||
EndSection
|
||||
|
||||
Section "Monitor"
|
||||
Identifier "monitor0"
|
||||
Modeline "1920x1080" 148.50 1920 2008 2052 2200 1080 1084 1089 1125 +hsync +vsync
|
||||
Option "PreferredMode" "1920x1080"
|
||||
EndSection
|
||||
|
||||
Section "Screen"
|
||||
Identifier "screen0"
|
||||
Device "fbdev"
|
||||
Monitor "monitor0"
|
||||
DefaultDepth 24
|
||||
SubSection "Display"
|
||||
Depth 24
|
||||
Modes "1920x1080" "1280x1024" "1024x768"
|
||||
EndSubSection
|
||||
EndSection
|
||||
@@ -0,0 +1,5 @@
|
||||
[Seat:*]
|
||||
autologin-user=bee
|
||||
autologin-user-timeout=0
|
||||
autologin-session=openbox
|
||||
user-session=openbox
|
||||
@@ -1,20 +1,18 @@
|
||||
export PATH="$PATH:/usr/local/bin:/opt/rocm/bin:/opt/rocm/sbin"
|
||||
|
||||
menu() {
|
||||
if [ -x /usr/local/bin/bee-tui ]; then
|
||||
/usr/local/bin/bee-tui "$@"
|
||||
else
|
||||
echo "bee-tui is not installed"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# On the local console, keep the shell visible and let the operator
|
||||
# start the TUI explicitly. This avoids black-screen failures if the
|
||||
# terminal implementation does not support the TUI well.
|
||||
# Print web UI URLs on the local console at login.
|
||||
if [ -z "${SSH_CONNECTION:-}" ] \
|
||||
&& [ -z "${SSH_TTY:-}" ] \
|
||||
&& [ "$(tty 2>/dev/null)" = "/dev/tty1" ]; then
|
||||
&& [ -z "${SSH_TTY:-}" ]; then
|
||||
echo "Bee live environment ready."
|
||||
echo "Run 'menu' to open the TUI."
|
||||
echo ""
|
||||
echo " Web UI (local): http://localhost/"
|
||||
# Print IP addresses for remote access
|
||||
_ips=$(ip -4 addr show scope global 2>/dev/null | awk '/inet /{print $2}' | cut -d/ -f1)
|
||||
for _ip in $_ips; do
|
||||
echo " Web UI (remote): http://$_ip/"
|
||||
done
|
||||
unset _ips _ip
|
||||
echo ""
|
||||
echo " Network setup: netconf"
|
||||
echo " Kernel logs: Alt+F2 | Extra shell: Alt+F3"
|
||||
fi
|
||||
|
||||
4
iso/overlay/etc/systemd/journald.conf.d/bee.conf
Normal file
4
iso/overlay/etc/systemd/journald.conf.d/bee.conf
Normal file
@@ -0,0 +1,4 @@
|
||||
[Journal]
|
||||
# Do not forward service logs to the console — bee-tui runs on tty1
|
||||
# and log spam makes the screen unusable on physical monitors.
|
||||
ForwardToConsole=no
|
||||
@@ -1,4 +1,4 @@
|
||||
[Journal]
|
||||
ForwardToConsole=yes
|
||||
TTYPath=/dev/ttyS0
|
||||
MaxLevelConsole=debug
|
||||
MaxLevelConsole=info
|
||||
|
||||
@@ -1,13 +1 @@
|
||||
export PATH="/usr/local/bin:$PATH"
|
||||
|
||||
if [ -z "${SSH_CONNECTION:-}" ] \
|
||||
&& [ -z "${SSH_TTY:-}" ] \
|
||||
&& [ "$(tty 2>/dev/null)" = "/dev/tty1" ]; then
|
||||
if command -v menu >/dev/null 2>&1; then
|
||||
menu
|
||||
elif [ -x /usr/local/bin/bee-tui ]; then
|
||||
/usr/local/bin/bee-tui
|
||||
else
|
||||
echo "Bee menu is unavailable."
|
||||
fi
|
||||
fi
|
||||
|
||||
189
iso/overlay/usr/local/bin/bee-install
Executable file
189
iso/overlay/usr/local/bin/bee-install
Executable file
@@ -0,0 +1,189 @@
|
||||
#!/bin/bash
# bee-install — install the live system to a local disk.
#
# Usage: bee-install <device> [logfile]
#   device  — target block device, e.g. /dev/sda (will be WIPED)
#   logfile — optional path to write progress log (default: /tmp/bee-install.log)
#
# Layout (UEFI): GPT, /dev/sdX1=EFI 512MB vfat, /dev/sdX2=root ext4
# Layout (BIOS): MBR, /dev/sdX1=root ext4
#
# Squashfs source: /run/live/medium/live/filesystem.squashfs

set -euo pipefail

DEVICE="${1:-}"
LOGFILE="${2:-/tmp/bee-install.log}"

if [ -z "$DEVICE" ]; then
    echo "Usage: bee-install <device> [logfile]" >&2
    exit 1
fi
if [ ! -b "$DEVICE" ]; then
    echo "ERROR: $DEVICE is not a block device" >&2
    exit 1
fi

SQUASHFS="/run/live/medium/live/filesystem.squashfs"
if [ ! -f "$SQUASHFS" ]; then
    echo "ERROR: squashfs not found at $SQUASHFS" >&2
    exit 1
fi

MOUNT_ROOT="/mnt/bee-install-root"

# ------------------------------------------------------------------
# log: timestamped line to stdout and the logfile.
# die: log an error and abort the install.
log() { echo "[$(date +%H:%M:%S)] $*" | tee -a "$LOGFILE"; }
die() { log "ERROR: $*"; exit 1; }

# ------------------------------------------------------------------
# Detect UEFI: the efi sysfs directory only exists on UEFI boots.
if [ -d /sys/firmware/efi ]; then
    UEFI=1
    log "Boot mode: UEFI"
else
    UEFI=0
    log "Boot mode: BIOS/legacy"
fi

# Determine partition names (nvme/mmc use a p-suffix: nvme0n1p1)
if echo "$DEVICE" | grep -qE 'nvme|mmcblk'; then
    PART_PREFIX="${DEVICE}p"
else
    PART_PREFIX="${DEVICE}"
fi

if [ "$UEFI" = "1" ]; then
    PART_EFI="${PART_PREFIX}1"
    PART_ROOT="${PART_PREFIX}2"
else
    PART_ROOT="${PART_PREFIX}1"
fi

# ------------------------------------------------------------------
log "=== BEE DISK INSTALLER ==="
log "Target device : $DEVICE"
log "Root partition: $PART_ROOT"
[ "$UEFI" = "1" ] && log "EFI partition : $PART_EFI"
log "Squashfs      : $SQUASHFS ($(du -sh "$SQUASHFS" | cut -f1))"
log "Log           : $LOGFILE"
log ""

# ------------------------------------------------------------------
log "--- Step 1/7: Unmounting target device ---"
# Unmount any partitions on target device (best-effort: a partition
# that is not mounted is fine).
for part in "${DEVICE}"* ; do
    if [ "$part" = "$DEVICE" ]; then continue; fi
    if mountpoint -q "$part" 2>/dev/null; then
        log "  umount $part"
        umount "$part" || true
    fi
done
# Also unmount our mount point if leftover from a previous run.
umount "${MOUNT_ROOT}" 2>/dev/null || true
umount "${MOUNT_ROOT}/boot/efi" 2>/dev/null || true

# ------------------------------------------------------------------
log "--- Step 2/7: Partitioning $DEVICE ---"
if [ "$UEFI" = "1" ]; then
    parted -s "$DEVICE" mklabel gpt
    parted -s "$DEVICE" mkpart EFI fat32 1MiB 513MiB
    parted -s "$DEVICE" set 1 esp on
    parted -s "$DEVICE" mkpart root ext4 513MiB 100%
else
    parted -s "$DEVICE" mklabel msdos
    parted -s "$DEVICE" mkpart primary ext4 1MiB 100%
    parted -s "$DEVICE" set 1 boot on
fi
# Wait for the kernel to see the new partitions.
sleep 1
partprobe "$DEVICE" 2>/dev/null || true
sleep 1
log "  Partitioning done."

# ------------------------------------------------------------------
log "--- Step 3/7: Formatting ---"
if [ "$UEFI" = "1" ]; then
    mkfs.vfat -F32 -n EFI "$PART_EFI"
    log "  $PART_EFI formatted as vfat (EFI)"
fi
mkfs.ext4 -F -L bee-root "$PART_ROOT"
log "  $PART_ROOT formatted as ext4"

# ------------------------------------------------------------------
log "--- Step 4/7: Mounting target ---"
mkdir -p "$MOUNT_ROOT"
mount "$PART_ROOT" "$MOUNT_ROOT"
if [ "$UEFI" = "1" ]; then
    mkdir -p "${MOUNT_ROOT}/boot/efi"
    mount "$PART_EFI" "${MOUNT_ROOT}/boot/efi"
fi
log "  Mounted."

# ------------------------------------------------------------------
log "--- Step 5/7: Unpacking filesystem (this takes 10-20 minutes) ---"
log "  Source: $SQUASHFS"
log "  Target: $MOUNT_ROOT"
# FIX: a blanket "|| true" on this pipeline used to swallow unsquashfs
# failures under pipefail, so a broken extraction still reported success.
# Only grep is allowed to "fail" (no matching progress lines); a non-zero
# unsquashfs status propagates via pipefail and aborts the install.
if ! unsquashfs -f -d "$MOUNT_ROOT" "$SQUASHFS" 2>&1 | \
    { grep -E '^\[|^inod|^created|^extract' || true; } | \
    while read -r line; do log "  $line"; done
then
    die "unsquashfs failed — target filesystem is incomplete"
fi
log "  Unpack complete."

# ------------------------------------------------------------------
log "--- Step 6/7: Configuring installed system ---"

# Write /etc/fstab
ROOT_UUID=$(blkid -s UUID -o value "$PART_ROOT")
log "  Root UUID: $ROOT_UUID"
cat > "${MOUNT_ROOT}/etc/fstab" <<FSTAB
# Generated by bee-install
UUID=${ROOT_UUID} / ext4 defaults,errors=remount-ro 0 1
tmpfs /tmp tmpfs defaults,size=512m 0 0
FSTAB
if [ "$UEFI" = "1" ]; then
    EFI_UUID=$(blkid -s UUID -o value "$PART_EFI")
    echo "UUID=${EFI_UUID} /boot/efi vfat umask=0077 0 1" >> "${MOUNT_ROOT}/etc/fstab"
fi
log "  fstab written."

# Remove live-boot persistence markers so installed system boots normally
rm -f "${MOUNT_ROOT}/etc/live/boot.conf" 2>/dev/null || true
rm -f "${MOUNT_ROOT}/etc/live/live.conf" 2>/dev/null || true

# Bind mount virtual filesystems for chroot
mount --bind /dev "${MOUNT_ROOT}/dev"
mount --bind /proc "${MOUNT_ROOT}/proc"
mount --bind /sys "${MOUNT_ROOT}/sys"
# efivars is optional (best-effort): grub-install can often cope without it.
[ "$UEFI" = "1" ] && mount --bind /sys/firmware/efi/efivars "${MOUNT_ROOT}/sys/firmware/efi/efivars" 2>/dev/null || true

# ------------------------------------------------------------------
log "--- Step 7/7: Installing GRUB bootloader ---"
# FIX: grub-install failures were previously masked with "|| true",
# producing an unbootable disk that still reported a successful install.
if [ "$UEFI" = "1" ]; then
    chroot "$MOUNT_ROOT" grub-install \
        --target=x86_64-efi \
        --efi-directory=/boot/efi \
        --bootloader-id=bee \
        --recheck 2>&1 | while read -r line; do log "  $line"; done \
        || die "grub-install (UEFI) failed"
else
    chroot "$MOUNT_ROOT" grub-install \
        --target=i386-pc \
        --recheck \
        "$DEVICE" 2>&1 | while read -r line; do log "  $line"; done \
        || die "grub-install (BIOS) failed"
fi
# update-grub stays best-effort: it can emit non-fatal warnings on live media.
chroot "$MOUNT_ROOT" update-grub 2>&1 | while read -r line; do log "  $line"; done || true
log "  GRUB installed."

# ------------------------------------------------------------------
# Cleanup: unwind the bind mounts and the target mounts (best-effort).
log "--- Cleanup ---"
umount "${MOUNT_ROOT}/sys/firmware/efi/efivars" 2>/dev/null || true
umount "${MOUNT_ROOT}/sys" 2>/dev/null || true
umount "${MOUNT_ROOT}/proc" 2>/dev/null || true
umount "${MOUNT_ROOT}/dev" 2>/dev/null || true
[ "$UEFI" = "1" ] && umount "${MOUNT_ROOT}/boot/efi" 2>/dev/null || true
umount "$MOUNT_ROOT" 2>/dev/null || true
rmdir "$MOUNT_ROOT" 2>/dev/null || true

log ""
log "=== INSTALLATION COMPLETE ==="
log "Remove the ISO and reboot to start the installed system."
|
||||
25
iso/overlay/usr/local/bin/bee-openbox-session
Executable file
25
iso/overlay/usr/local/bin/bee-openbox-session
Executable file
@@ -0,0 +1,25 @@
|
||||
#!/bin/sh
# openbox session: launch tint2 taskbar + chromium, then openbox as WM.
# This file is used as an xinitrc by bee-desktop.

# Give bee-web up to 15 seconds to start answering health checks
# before launching the browser (one probe + one second sleep per try).
tries=15
while [ "$tries" -gt 0 ]; do
    if curl -sf http://localhost/healthz >/dev/null 2>&1; then
        break
    fi
    sleep 1
    tries=$((tries-1))
done

# Taskbar and kiosk browser run in the background; openbox replaces
# this shell as the session's window manager.
tint2 &
chromium \
    --no-sandbox \
    --disable-infobars \
    --disable-translate \
    --no-first-run \
    --disable-session-crashed-bubble \
    --disable-features=TranslateUI \
    http://localhost/ &

exec openbox
|
||||
@@ -1,9 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
clear
|
||||
|
||||
if [ "$(id -u)" -ne 0 ]; then
|
||||
exec sudo -n /usr/local/bin/bee tui --runtime livecd "$@"
|
||||
fi
|
||||
|
||||
exec /usr/local/bin/bee tui --runtime livecd "$@"
|
||||
50
iso/overlay/usr/local/bin/netconf
Executable file
50
iso/overlay/usr/local/bin/netconf
Executable file
@@ -0,0 +1,50 @@
|
||||
#!/bin/sh
# netconf — quick interactive network configurator for the local console.
#
# Lists non-loopback interfaces, asks the operator to pick one, then
# either runs DHCP on it or applies a static IPv4 address, default
# gateway, and DNS server. Reads all input from stdin; must run as a
# user allowed to configure networking.
set -e

# List interfaces (exclude lo); "eth0@if2"-style names are trimmed
# to the part before '@'.
IFACES=$(ip -o link show | awk -F': ' '$2 != "lo" {print $2}' | cut -d@ -f1)

echo "Interfaces:"
i=1
for iface in $IFACES; do
    ip=$(ip -4 addr show "$iface" 2>/dev/null | awk '/inet /{print $2}' | head -1)
    echo "  $i) $iface  ${ip:-no IP}"
    i=$((i+1))
done
echo ""
printf "Interface name [or Enter to pick first]: "
# FIX: read -r throughout — plain read mangles backslashes in input.
read -r IFACE
if [ -z "$IFACE" ]; then
    IFACE=$(echo "$IFACES" | head -1)
fi
echo "Selected: $IFACE"
echo ""
echo "  1) DHCP"
echo "  2) Static"
printf "Mode [1]: "
read -r MODE
MODE=${MODE:-1}

if [ "$MODE" = "1" ]; then
    echo "Running DHCP on $IFACE..."
    dhclient -v "$IFACE"
else
    printf "IP address (e.g. 192.168.1.100/24): "
    read -r ADDR
    printf "Gateway (e.g. 192.168.1.1): "
    read -r GW
    printf "DNS [8.8.8.8]: "
    read -r DNS
    DNS=${DNS:-8.8.8.8}

    # FIX: validate before touching the interface. Previously an empty
    # address/gateway made "ip addr add"/"ip route add" fail under
    # set -e AFTER the interface had already been flushed, leaving the
    # box with no connectivity at all.
    if [ -z "$ADDR" ] || [ -z "$GW" ]; then
        echo "ERROR: address and gateway are required for static mode" >&2
        exit 1
    fi

    ip addr flush dev "$IFACE"
    ip addr add "$ADDR" dev "$IFACE"
    ip link set "$IFACE" up
    # FIX: "replace" instead of "add" — "ip route add default" fails
    # (and set -e aborts) when a default route already exists, e.g.
    # when reconfiguring after DHCP.
    ip route replace default via "$GW"
    echo "nameserver $DNS" > /etc/resolv.conf
    echo "Done."
fi

echo ""
ip -4 addr show "$IFACE"
|
||||
6
iso/overlay/usr/share/xsessions/openbox.desktop
Normal file
6
iso/overlay/usr/share/xsessions/openbox.desktop
Normal file
@@ -0,0 +1,6 @@
|
||||
[Desktop Entry]
|
||||
Name=Openbox
|
||||
Comment=Bee Hardware Audit Desktop
|
||||
Exec=/usr/local/bin/bee-openbox-session
|
||||
TryExec=openbox
|
||||
Type=Application
|
||||
Reference in New Issue
Block a user