package webui
import (
	"path/filepath"
	"testing"
	"time"

	"bee/audit/internal/platform"
)
func TestMetricsDBLoadSamplesKeepsChronologicalRangeForGPUs(t *testing.T) {
|
|
db, err := openMetricsDB(filepath.Join(t.TempDir(), "metrics.db"))
|
|
if err != nil {
|
|
t.Fatalf("openMetricsDB: %v", err)
|
|
}
|
|
defer db.Close()
|
|
|
|
base := time.Unix(1_700_000_000, 0).UTC()
|
|
for i := 0; i < 3; i++ {
|
|
err := db.Write(platform.LiveMetricSample{
|
|
Timestamp: base.Add(time.Duration(i) * time.Second),
|
|
CPULoadPct: float64(10 + i),
|
|
MemLoadPct: float64(20 + i),
|
|
PowerW: float64(300 + i),
|
|
GPUs: []platform.GPUMetricRow{
|
|
{GPUIndex: 0, PowerW: float64(100 + i)},
|
|
{GPUIndex: 2, PowerW: float64(200 + i)},
|
|
},
|
|
})
|
|
if err != nil {
|
|
t.Fatalf("Write(%d): %v", i, err)
|
|
}
|
|
}
|
|
|
|
all, err := db.LoadAll()
|
|
if err != nil {
|
|
t.Fatalf("LoadAll: %v", err)
|
|
}
|
|
if len(all) != 3 {
|
|
t.Fatalf("LoadAll len=%d want 3", len(all))
|
|
}
|
|
for i, sample := range all {
|
|
if len(sample.GPUs) != 2 {
|
|
t.Fatalf("LoadAll sample %d GPUs=%v want 2 rows", i, sample.GPUs)
|
|
}
|
|
if sample.GPUs[0].GPUIndex != 0 || sample.GPUs[0].PowerW != float64(100+i) {
|
|
t.Fatalf("LoadAll sample %d GPU0=%+v", i, sample.GPUs[0])
|
|
}
|
|
if sample.GPUs[1].GPUIndex != 2 || sample.GPUs[1].PowerW != float64(200+i) {
|
|
t.Fatalf("LoadAll sample %d GPU1=%+v", i, sample.GPUs[1])
|
|
}
|
|
}
|
|
|
|
recent, err := db.LoadRecent(2)
|
|
if err != nil {
|
|
t.Fatalf("LoadRecent: %v", err)
|
|
}
|
|
if len(recent) != 2 {
|
|
t.Fatalf("LoadRecent len=%d want 2", len(recent))
|
|
}
|
|
if !recent[0].Timestamp.Before(recent[1].Timestamp) {
|
|
t.Fatalf("LoadRecent timestamps not ascending: %v >= %v", recent[0].Timestamp, recent[1].Timestamp)
|
|
}
|
|
for i, sample := range recent {
|
|
if len(sample.GPUs) != 2 {
|
|
t.Fatalf("LoadRecent sample %d GPUs=%v want 2 rows", i, sample.GPUs)
|
|
}
|
|
}
|
|
}