Commit remaining workspace changes

Committed on 2026-04-23 20:32:26 +03:00
parent 749fc8a94d
commit be4b439804
9 changed files with 858 additions and 51 deletions

View File

@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"log/slog"
"net/http"
"os"
@@ -13,6 +14,7 @@ import (
"sort"
"strings"
"sync"
"syscall"
"time"
"bee/audit/internal/app"
@@ -110,8 +112,9 @@ type Task struct {
ReportHTMLPath string `json:"report_html_path,omitempty"`
// runtime fields (not serialised)
job *jobState
params taskParams
job *jobState
runnerPID int
params taskParams
}
// taskParams holds optional parameters parsed from the run request.
@@ -328,6 +331,13 @@ var (
installCommand = func(ctx context.Context, device string, logPath string) *exec.Cmd {
return exec.CommandContext(ctx, "bee-install", device, logPath)
}
// externalTaskRunnerCommand builds the bee-worker invocation that executes a
// task in a separate process. Declared as a variable so tests can stub it.
// Uses plain exec.Command (no context) — presumably so the runner is not tied
// to the server's lifetime and can survive a bee-web restart; confirm against
// the recovery path in loadLocked/resumeRunningTasksLocked.
externalTaskRunnerCommand = func(exportDir, taskID string) (*exec.Cmd, error) {
// Re-invoke the current binary in worker mode.
exe, err := os.Executable()
if err != nil {
return nil, err
}
return exec.Command(exe, "bee-worker", "--export-dir", exportDir, "--task-id", taskID), nil
}
)
// enqueue adds a task to the queue and notifies the worker.
@@ -365,6 +375,11 @@ func (q *taskQueue) prune() {
// nextPending returns the highest-priority pending task (nil if none).
func (q *taskQueue) nextPending() *Task {
for _, t := range q.tasks {
if t.Status == TaskRunning {
return nil
}
}
var best *Task
for _, t := range q.tasks {
if t.Status != TaskPending {
@@ -484,6 +499,7 @@ func (q *taskQueue) startWorker(opts *HandlerOptions) {
if !q.started {
q.loadLocked()
q.started = true
q.resumeRunningTasksLocked()
goRecoverLoop("task worker", 2*time.Second, q.worker)
}
hasPending := q.nextPending() != nil
@@ -517,15 +533,12 @@ func (q *taskQueue) worker() {
t.StartedAt = &now
t.DoneAt = nil
t.ErrMsg = ""
j := newTaskJobState(t.LogPath, taskSerialPrefix(t))
j := newTaskJobState(t.LogPath)
t.job = j
q.persistLocked()
q.mu.Unlock()
taskCtx, taskCancel := context.WithCancel(context.Background())
j.cancel = taskCancel
q.executeTask(t, j, taskCtx)
taskCancel()
q.runTaskExternal(t, j)
q.mu.Lock()
q.prune()
@@ -537,6 +550,207 @@ func (q *taskQueue) worker() {
}
}
// resumeRunningTasksLocked re-attaches in-memory controls (cancel hook and
// log monitor) to every task that was still running when queue state was
// loaded from disk. Caller must hold q.mu.
func (q *taskQueue) resumeRunningTasksLocked() {
	for _, task := range q.tasks {
		if task.Status != TaskRunning {
			continue
		}
		job := task.job
		if job == nil {
			// Recreate the job state lost across the restart.
			job = newTaskJobState(task.LogPath)
			task.job = job
		}
		q.attachExternalTaskControlsLocked(task, job)
		q.startRecoveredTaskMonitorLocked(task, job)
	}
}
// attachExternalTaskControlsLocked wires the job's cancel hook to signal the
// external runner process. Caller must hold q.mu when installing the hook.
func (q *taskQueue) attachExternalTaskControlsLocked(t *Task, j *jobState) {
if t == nil || j == nil {
return
}
j.cancel = func() {
// NOTE(review): t.runnerPID is read here without holding q.mu, while
// finishExternalTask zeroes it under the lock — confirm this race is
// benign (worst case: a stale PID receives a harmless SIGTERM).
pid := t.runnerPID
if pid <= 0 {
// PID not recorded in memory (e.g. hook installed before Start, or
// after a restart) — fall back to the on-disk runner state.
if state, ok := readTaskRunnerState(t); ok {
pid = state.PID
}
}
if pid > 0 {
// Best-effort: the runner may already have exited.
_ = syscall.Kill(pid, syscall.SIGTERM)
}
}
}
// startRecoveredTaskMonitorLocked watches a task whose external runner was
// already alive when bee-web restarted: it tails the task log while the
// runner PID stays alive, then finalizes the task from the runner state file.
// Caller must hold q.mu. No-op when there is no live runner to watch.
func (q *taskQueue) startRecoveredTaskMonitorLocked(t *Task, j *jobState) {
	if t == nil || j == nil || t.runnerPID <= 0 {
		return
	}
	// Capture the PID while the lock is held: finishExternalTask resets
	// t.runnerPID under q.mu, so reading the field inside the goroutine
	// would be a data race.
	pid := t.runnerPID
	goRecoverOnce("task runner monitor", func() {
		stopTail := make(chan struct{})
		doneTail := make(chan struct{})
		go q.followTaskLog(t, j, stopTail, doneTail)
		// The recovered runner is not our child process, so cmd.Wait is not
		// available; poll for liveness instead.
		for processAlive(pid) {
			time.Sleep(500 * time.Millisecond)
		}
		// Same grace period runTaskExternal uses after Wait: give the
		// runner's final state file a moment to become visible before
		// finishExternalTask reads it.
		time.Sleep(200 * time.Millisecond)
		close(stopTail)
		<-doneTail
		q.finishExternalTask(t, j, nil)
	})
}
// runTaskExternal launches the task in a separate bee-worker process and
// blocks until it exits, streaming the task log into j along the way. The
// runner's PID is persisted so cancellation and restart recovery can signal
// it; the final status is read back by finishExternalTask.
func (q *taskQueue) runTaskExternal(t *Task, j *jobState) {
	stopTail := make(chan struct{})
	doneTail := make(chan struct{})
	tailStopped := false
	// stopFollower asks the log follower for a final flush and waits for it
	// to exit. Idempotent so the deferred safety net can run after an
	// explicit call without closing stopTail twice.
	stopFollower := func() {
		if tailStopped {
			return
		}
		tailStopped = true
		close(stopTail)
		<-doneTail
	}
	// Safety net only; every return path below stops the follower BEFORE
	// finishExternalTask so no log line is appended after the job state has
	// been finished/closed (the previous code stopped the follower in a
	// defer, i.e. after finalization — matching the recovered-task monitor's
	// ordering fixes that).
	defer stopFollower()
	go q.followTaskLog(t, j, stopTail, doneTail)
	cmd, err := externalTaskRunnerCommand(q.opts.ExportDir, t.ID)
	if err != nil {
		j.appendFromLog("ERROR: " + err.Error())
		stopFollower()
		q.finishExternalTask(t, j, err)
		return
	}
	if err := cmd.Start(); err != nil {
		j.appendFromLog("ERROR: " + err.Error())
		stopFollower()
		q.finishExternalTask(t, j, err)
		return
	}
	// Record the runner PID and install the cancel hook now that the
	// process exists.
	q.mu.Lock()
	t.runnerPID = cmd.Process.Pid
	q.attachExternalTaskControlsLocked(t, j)
	q.persistLocked()
	q.mu.Unlock()
	waitErr := cmd.Wait()
	// Give the runner's final state file a moment to become visible before
	// finishExternalTask reads it.
	time.Sleep(200 * time.Millisecond)
	stopFollower()
	q.finishExternalTask(t, j, waitErr)
}
// followTaskLog tails the task's log file and forwards complete lines to the
// job state until stop is closed; done is closed when the follower exits.
// Tailing starts at the current end of file, so only output produced after
// the follower attaches is forwarded.
func (q *taskQueue) followTaskLog(t *Task, j *jobState, stop <-chan struct{}, done chan<- struct{}) {
	defer close(done)
	path := ""
	if t != nil {
		path = t.LogPath
	}
	if strings.TrimSpace(path) == "" {
		return
	}
	// Skip whatever the file already contains.
	offset := int64(0)
	if info, err := os.Stat(path); err == nil {
		offset = info.Size()
	}
	var partial string // trailing bytes of a not-yet-terminated last line
	ticker := time.NewTicker(250 * time.Millisecond)
	defer ticker.Stop()
	flush := func() {
		data, newOffset, _ := readTaskLogDelta(path, offset)
		// Consume whatever bytes were returned even when the read also
		// reported an error: readTaskLogDelta advances the offset past the
		// bytes it returns, so discarding them here (as the code previously
		// did on error) would permanently drop log output.
		offset = newOffset
		if len(data) == 0 {
			return
		}
		text := partial + strings.ReplaceAll(string(data), "\r\n", "\n")
		lines := strings.Split(text, "\n")
		// The final element is either "" (text ended in a newline) or an
		// incomplete line to be completed by the next read.
		partial = lines[len(lines)-1]
		for _, line := range lines[:len(lines)-1] {
			if line == "" {
				continue
			}
			j.appendFromLog(line)
		}
	}
	for {
		select {
		case <-ticker.C:
			flush()
		case <-stop:
			// Final drain, then emit any trailing unterminated line.
			flush()
			if strings.TrimSpace(partial) != "" {
				j.appendFromLog(partial)
			}
			return
		}
	}
}
// readTaskLogDelta returns the bytes appended to the file at path since
// offset, plus the new offset to resume from. A shrunken file (rotation or
// truncation) restarts the read from the beginning, and a single call reads
// at most 1 MiB so a fast-growing log cannot stall the caller.
func readTaskLogDelta(path string, offset int64) ([]byte, int64, error) {
	f, openErr := os.Open(path)
	if openErr != nil {
		return nil, offset, openErr
	}
	defer f.Close()
	st, statErr := f.Stat()
	if statErr != nil {
		return nil, offset, statErr
	}
	// File shrank below our position: assume truncation and start over.
	if st.Size() < offset {
		offset = 0
	}
	if _, seekErr := f.Seek(offset, io.SeekStart); seekErr != nil {
		return nil, offset, seekErr
	}
	chunk, readErr := io.ReadAll(io.LimitReader(f, 1<<20))
	// Advance only past bytes actually read, even on a partial-read error.
	return chunk, offset + int64(len(chunk)), readErr
}
// finishExternalTask records the final status of a task whose work ran in an
// external bee-worker process. The status is taken, in order of preference,
// from the runner's persisted state file, then from the process wait error,
// and otherwise recorded as a generic failure. Safe to call after the task
// has already been finalized elsewhere.
func (q *taskQueue) finishExternalTask(t *Task, j *jobState, waitErr error) {
q.mu.Lock()
defer q.mu.Unlock()
// Already finalized (e.g. by the cancel handler): only close the in-memory
// job state and nudge the worker loop.
if t.Status == TaskDone || t.Status == TaskFailed || t.Status == TaskCancelled {
if j != nil && !j.isDone() {
j.finish(t.ErrMsg)
j.closeLog()
}
// Non-blocking send: drop the signal if one is already queued.
select {
case q.trigger <- struct{}{}:
default:
}
return
}
state, ok := readTaskRunnerState(t)
switch {
case ok && state.Status != TaskRunning:
// Prefer the runner's own terminal verdict when it persisted one.
t.Status = state.Status
t.ErrMsg = state.Error
now := state.UpdatedAt
if now.IsZero() {
now = time.Now()
}
t.DoneAt = &now
case waitErr != nil:
// No terminal state on disk; fall back to the process exit error.
now := time.Now()
t.Status = TaskFailed
t.ErrMsg = waitErr.Error()
t.DoneAt = &now
default:
// Runner exited cleanly but never wrote a terminal state: treat as a
// failure rather than leaving the task stuck in Running.
now := time.Now()
t.Status = TaskFailed
t.ErrMsg = "task runner exited without final state"
t.DoneAt = &now
}
t.runnerPID = 0
q.finalizeTaskArtifactPathsLocked(t)
q.persistLocked()
if j != nil && !j.isDone() {
j.finish(t.ErrMsg)
j.closeLog()
}
if t.ErrMsg != "" {
taskSerialEvent(t, "finished with status="+t.Status+" error="+t.ErrMsg)
} else {
taskSerialEvent(t, "finished with status="+t.Status)
}
// Wake the worker so the next pending task can start.
select {
case q.trigger <- struct{}{}:
default:
}
}
func (q *taskQueue) executeTask(t *Task, j *jobState, ctx context.Context) {
startedKmsgWatch := false
defer q.finalizeTaskRun(t, j)
@@ -985,15 +1199,11 @@ func (h *handler) handleAPITasksCancel(w http.ResponseWriter, r *http.Request) {
taskSerialEvent(t, "finished with status="+t.Status)
writeJSON(w, map[string]string{"status": "cancelled"})
case TaskRunning:
if t.job != nil {
t.job.abort()
if t.job == nil || !t.job.abort() {
writeError(w, http.StatusConflict, "task is not cancellable")
return
}
t.Status = TaskCancelled
now := time.Now()
t.DoneAt = &now
globalQueue.persistLocked()
taskSerialEvent(t, "finished with status="+t.Status)
writeJSON(w, map[string]string{"status": "cancelled"})
writeJSON(w, map[string]string{"status": "aborting"})
default:
writeError(w, http.StatusConflict, "task is not running or pending")
}
@@ -1039,12 +1249,6 @@ func (h *handler) handleAPITasksCancelAll(w http.ResponseWriter, _ *http.Request
if t.job != nil {
t.job.abort()
}
if taskMayLeaveOrphanWorkers(t.Target) {
platform.KillTestWorkers()
}
t.Status = TaskCancelled
t.DoneAt = &now
taskSerialEvent(t, "finished with status="+t.Status)
n++
}
}
@@ -1175,18 +1379,29 @@ func (q *taskQueue) loadLocked() {
}
q.assignTaskLogPathLocked(t)
if t.Status == TaskRunning {
// The task was interrupted by a bee-web restart. Child processes
// (e.g. bee-gpu-burn-worker, dcgmi/nvvs) survive the restart in
// their own process groups. Kill any matching stale workers before
// marking the task failed so the next GPU test does not inherit a
// busy DCGM slot or duplicate workers.
if taskMayLeaveOrphanWorkers(t.Target) {
_ = platform.KillTestWorkers()
state, ok := readTaskRunnerState(t)
switch {
case ok && state.Status == TaskRunning && processAlive(state.PID):
t.runnerPID = state.PID
t.job = newTaskJobState(t.LogPath)
case ok && state.Status != TaskRunning:
t.runnerPID = state.PID
t.Status = state.Status
t.ErrMsg = state.Error
now := state.UpdatedAt
if now.IsZero() {
now = time.Now()
}
t.DoneAt = &now
default:
if taskMayLeaveOrphanWorkers(t.Target) {
_ = platform.KillTestWorkers()
}
now := time.Now()
t.Status = TaskFailed
t.DoneAt = &now
t.ErrMsg = "interrupted by bee-web restart"
}
now := time.Now()
t.Status = TaskFailed
t.DoneAt = &now
t.ErrMsg = "interrupted by bee-web restart"
} else if t.Status == TaskPending {
t.StartedAt = nil
t.DoneAt = nil