feat(tui): live GPU chart during stress test, full VRAM allocation
- GPU Platform Stress Test now shows a live in-TUI chart instead of nvtop. nvidia-smi is polled every second; up to 60 data points per GPU kept. All three metrics (Usage %, Temp °C, Power W) drawn on a single plot, each normalised to its own range and rendered in a different colour. - Memory allocation changed from MemoryMB/16 to MemoryMB-512 (full VRAM minus 512 MB driver overhead) so bee-gpu-stress actually stresses memory. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -3,6 +3,7 @@ package tui
import (
	"fmt"
	"strings"
	"time"

	tea "github.com/charmbracelet/bubbletea"
)
@@ -130,6 +131,22 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
			m.body = msg.body
		}
		return m, m.refreshSnapshotCmd()
	case gpuLiveTickMsg:
		if m.screen == screenGPUStressRunning {
			if len(msg.rows) > 0 {
				elapsed := time.Since(m.gpuLiveStart).Seconds()
				for i := range msg.rows {
					msg.rows[i].ElapsedSec = elapsed
				}
				m.gpuLiveRows = append(m.gpuLiveRows, msg.rows...)
				n := max(1, len(msg.indices))
				if len(m.gpuLiveRows) > 60*n {
					m.gpuLiveRows = m.gpuLiveRows[len(m.gpuLiveRows)-60*n:]
				}
			}
			return m, pollGPULive(msg.indices)
		}
		return m, nil
	case nvidiaSATDoneMsg:
		if m.nvidiaSATAborted {
			return m, nil
Reference in New Issue
Block a user