refactor(workspace-status): typed constants + AST-based drift gate

Eliminate raw 'awaiting_agent'/'hibernating'/'failed'/etc. string
literals from production status writes. Add a models.WorkspaceStatus
named string type and a models.AllWorkspaceStatuses canonical slice;
every UPDATE workspaces SET status = ... now passes a parameterized
$N typed value rather than a hard-coded SQL literal.

Defense-in-depth follow-up to migration 046 (#2388): the Postgres enum
type was missing 'awaiting_agent' and 'hibernating' for ~5 days because
sqlmock regex matching cannot enforce live enum constraints. The drift
gate now walks the Go AST of models/workspace_status.go instead of
regex-harvesting status literals out of SQL strings, asserting that the
codebase's status set ⊆ the migration enum and that every const appears
in the canonical slice. With status as a parameterized typed value,
future enum mismatches fail at the SQL layer in tests, not silently in
prod.

Test coverage: full suite passes with -race; drift gate green.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
Hongming Wang 2026-04-30 10:41:41 -07:00
parent 36fd658cc0
commit fdf1b5d76a
28 changed files with 348 additions and 225 deletions

View File

@ -7,6 +7,7 @@ import (
"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/provisioner"
"github.com/google/uuid"
)
@ -131,7 +132,8 @@ func buildBundleConfigFiles(b *Bundle) map[string][]byte {
func markFailed(ctx context.Context, wsID string, broadcaster *events.Broadcaster, err error) {
db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = 'failed', updated_at = now() WHERE id = $1`, wsID)
`UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2`,
models.StatusFailed, wsID)
broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISION_FAILED", wsID, map[string]interface{}{
"error": err.Error(),
})
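
The shape of the change at every write site, distilled (same ctx/wsID as
in markFailed above): a misspelled constant no longer compiles, where a
misspelled literal used to ship.

// Before: a typo such as 'failde' survives the compiler and every
// sqlmock regex, and only errors against a live enum at runtime.
db.DB.ExecContext(ctx,
	`UPDATE workspaces SET status = 'failed', updated_at = now() WHERE id = $1`, wsID)

// After: the same typo, models.StatusFailde, is "undefined:
// models.StatusFailde" at build time.
db.DB.ExecContext(ctx,
	`UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2`,
	models.StatusFailed, wsID)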

View File

@ -1,24 +1,42 @@
package db_test
// Static drift gate: every workspaces.status literal written in the Go
// tree must exist in the workspace_status enum defined by the migrations.
// Static drift gate: every value declared in models.AllWorkspaceStatuses
// must exist in the workspace_status enum after every migration applies.
//
// Why this exists: the `workspace_status` enum (migrations 043 + 046)
// shipped without 'awaiting_agent' even though application code wrote
// that value, and every UPDATE silently failed in production for five
// days before the gap surfaced (see 046_workspace_status_awaiting_agent.up.sql).
// The unit tests passed because sqlmock matches SQL by regex, not against
// a live enum constraint.
// Why this exists: the workspace_status enum (migration 043) initially
// shipped without 'awaiting_agent' and 'hibernating' even though
// application code already wrote both. Every UPDATE silently failed in
// production for five days because:
//
// Approach: extract every Go string literal whose body matches
// (?i)workspaces[^a-z_].*status (so "UPDATE workspaces SET status",
// "FROM workspaces WHERE ... status", "INSERT INTO workspaces ... status",
// CTEs that reference workspaces, etc.). For each such SQL fragment,
// pull the single-quoted status values out of `status =`, `status IN`,
// `THEN`, and `ELSE`. Every value must be in the union of CREATE TYPE +
// ALTER TYPE ADD VALUE across all migrations.
// - Status values were ad-hoc string literals scattered across raw
// SQL strings in 8+ files, with no compile-time check.
// - sqlmock matched SQL by regex, not against the live enum.
// - Errors were dropped or log-and-continued at every call site.
//
// The fix is layered. This gate is the static layer:
//
// - models.AllWorkspaceStatuses is the source of truth for the
// codebase side. Every status write goes through one of those
// typed constants (the parameterized-write refactor enforces this).
// - The migrations are the source of truth for the DB side.
// - This test parses both and asserts the codebase set ⊆ migration set.
//
// If you add a new status:
//
// 1. Add a `Status…` constant in models/workspace_status.go AND
// append it to AllWorkspaceStatuses.
// 2. Open a migration `ALTER TYPE workspace_status ADD VALUE 'X'`.
// 3. This test confirms both happened in the same PR.
//
// If you intend to retire a status: keep it in the enum as long as any
// row could legitimately still hold it, then drop it from
// AllWorkspaceStatuses (the gate runs the inclusion in one direction
// only — extras in the enum are fine).
import (
"go/ast"
"go/parser"
"go/token"
"os"
"path/filepath"
"regexp"
@ -32,32 +50,31 @@ func TestWorkspaceStatusEnum_NoLiteralDrift(t *testing.T) {
repoRoot := findRepoRoot(t)
migrationsDir := filepath.Join(repoRoot, "workspace-server", "migrations")
internalDir := filepath.Join(repoRoot, "workspace-server", "internal")
statusFile := filepath.Join(repoRoot, "workspace-server", "internal", "models", "workspace_status.go")
enum := loadWorkspaceStatusEnum(t, migrationsDir)
if len(enum) == 0 {
t.Fatalf("could not parse workspace_status enum from %s — gate is non-functional", migrationsDir)
}
literals := collectWorkspacesStatusLiterals(t, internalDir)
if len(literals) == 0 {
t.Fatalf("found zero workspaces.status literals under %s — gate is non-functional", internalDir)
codebase := loadAllWorkspaceStatuses(t, statusFile)
if len(codebase) == 0 {
t.Fatalf("could not parse models.AllWorkspaceStatuses from %s — gate is non-functional", statusFile)
}
var rogue []string
for lit := range literals {
if _, ok := enum[lit]; ok {
continue
for lit := range codebase {
if _, ok := enum[lit]; !ok {
rogue = append(rogue, lit)
}
rogue = append(rogue, lit)
}
if len(rogue) > 0 {
sort.Strings(rogue)
t.Errorf(
"workspaces.status literal(s) %v are written by Go code but not in the workspace_status enum.\n"+
"Add a migration `ALTER TYPE workspace_status ADD VALUE 'X';` (see 046 for shape).\n"+
"Enum currently is: %v",
rogue, sortedKeys(enum),
"workspace status constants %v are declared in models.AllWorkspaceStatuses but not in the workspace_status enum.\n"+
"Add a migration `ALTER TYPE workspace_status ADD VALUE 'X';` (see migration 046 for shape).\n"+
"Enum currently: %v\nCodebase declares: %v",
rogue, sortedKeys(enum), sortedKeys(codebase),
)
}
}
@ -101,99 +118,117 @@ func loadWorkspaceStatusEnum(t *testing.T, migrationsDir string) map[string]stru
return out
}
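loadWorkspaceStatusEnum itself is untouched by this commit and elided from
the hunk; a plausible sketch of its shape, under the names used at the call
site (file filtering and regexes here are illustrative, not the committed
code):

func loadWorkspaceStatusEnum(t *testing.T, migrationsDir string) map[string]struct{} {
	t.Helper()
	// CREATE TYPE workspace_status AS ENUM ('a', 'b', ...) in 043;
	// ALTER TYPE workspace_status ADD VALUE 'x' in 046 and later.
	createRE := regexp.MustCompile(`(?is)CREATE\s+TYPE\s+workspace_status\s+AS\s+ENUM\s*\(([^)]*)\)`)
	alterRE := regexp.MustCompile(`(?i)ALTER\s+TYPE\s+workspace_status\s+ADD\s+VALUE\s+(?:IF\s+NOT\s+EXISTS\s+)?'([a-z_]+)'`)
	valueRE := regexp.MustCompile(`'([a-z_]+)'`)
	out := make(map[string]struct{})
	entries, err := os.ReadDir(migrationsDir)
	if err != nil {
		t.Fatalf("read %s: %v", migrationsDir, err)
	}
	for _, e := range entries {
		if e.IsDir() || !strings.HasSuffix(e.Name(), ".up.sql") {
			continue
		}
		body, err := os.ReadFile(filepath.Join(migrationsDir, e.Name()))
		if err != nil {
			t.Fatalf("read %s: %v", e.Name(), err)
		}
		for _, m := range createRE.FindAllStringSubmatch(string(body), -1) {
			for _, v := range valueRE.FindAllStringSubmatch(m[1], -1) {
				out[v[1]] = struct{}{}
			}
		}
		for _, m := range alterRE.FindAllStringSubmatch(string(body), -1) {
			out[m[1]] = struct{}{}
		}
	}
	return out
}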
// collectWorkspacesStatusLiterals walks every non-test .go file under
// root, finds Go string literals that contain `UPDATE workspaces` or
// `INSERT INTO workspaces`, and extracts the status literals appearing
// inside the matching SQL statement.
// loadAllWorkspaceStatuses parses workspace_status.go and extracts:
//
// Why this scope: any UPDATE/INSERT against `workspaces` is the moment
// a status literal hits the column constrained by the enum. Read-side
// SQL (SELECT ... WHERE status = 'X') cannot fail on enum drift, so it's
// out of scope. JOINs to `workspaces` from other tables (e.g. approvals
// joining workspaces for display) write to a different table's status —
// also out of scope. Anchoring on the leading `UPDATE workspaces` /
// `INSERT INTO workspaces` keyword unambiguously identifies the writes
// we care about.
func collectWorkspacesStatusLiterals(t *testing.T, root string) map[string]struct{} {
// - Every `Status… WorkspaceStatus = "..."` declaration in the const block.
// - Every entry in the AllWorkspaceStatuses slice literal.
//
// The gate cross-checks both directions: every slice entry must
// resolve to a declared const, and every const must appear in the
// slice, so the two sets must be equal. A new status added to the
// const block but forgotten in AllWorkspaceStatuses surfaces here.
// AllWorkspaceStatuses is the canonical "what the codebase expects
// the DB to accept" list; a const missing from the slice would be
// unenforced by the gate.
func loadAllWorkspaceStatuses(t *testing.T, statusFile string) map[string]struct{} {
t.Helper()
// Match raw-string and double-quoted Go string literals. Backtick
// strings can span multiple lines. Both forms are extracted via the
// same DOTALL regex over the whole file body.
rawRE := regexp.MustCompile("(?s)`([^`]*?)`")
dquoteRE := regexp.MustCompile(`"((?:[^"\\]|\\.)*)"`)
fset := token.NewFileSet()
f, err := parser.ParseFile(fset, statusFile, nil, parser.ParseComments)
if err != nil {
t.Fatalf("parse %s: %v", statusFile, err)
}
// A SQL string is in scope if it begins (after optional leading
// whitespace) with UPDATE workspaces or INSERT INTO workspaces.
// `(?i)` is case-insensitive; `\s*` allows the format-friendly
// leading newline and indent that the codebase uses.
updateWorkspacesRE := regexp.MustCompile(`(?is)^\s*UPDATE\s+workspaces\b`)
insertWorkspacesRE := regexp.MustCompile(`(?is)^\s*INSERT\s+INTO\s+workspaces\b`)
consts := make(map[string]string) // const name → string value
var sliceEntries []string // identifiers used in AllWorkspaceStatuses
allWorkspaceStatusesFound := false
// Inside a scoped SQL fragment, status literals appear in:
// status = 'X' — assignment in SET (or filter in WHERE)
// status IN ('X', ...) — filter
// status NOT IN ('X') — filter
// THEN 'X' — CASE arm
// ELSE 'X' — CASE default
statusEqRE := regexp.MustCompile(`(?i)status\s*(?:=|!=|<>)\s*'([a-z_]+)'`)
statusInRE := regexp.MustCompile(`(?i)status\s+(?:NOT\s+)?IN\s*\(([^)]*)\)`)
thenRE := regexp.MustCompile(`(?i)THEN\s+'([a-z_]+)'`)
elseRE := regexp.MustCompile(`(?i)ELSE\s+'([a-z_]+)'`)
inListLiteralRE := regexp.MustCompile(`'([a-z_]+)'`)
out := make(map[string]struct{})
walkErr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
if !strings.HasSuffix(path, ".go") {
return nil
}
if strings.HasSuffix(path, "_test.go") {
return nil
}
body, err := os.ReadFile(path)
if err != nil {
return err
}
text := string(body)
harvest := func(fragment string) {
if !updateWorkspacesRE.MatchString(fragment) && !insertWorkspacesRE.MatchString(fragment) {
return
}
for _, m := range statusEqRE.FindAllStringSubmatch(fragment, -1) {
out[m[1]] = struct{}{}
}
for _, m := range statusInRE.FindAllStringSubmatch(fragment, -1) {
for _, lit := range inListLiteralRE.FindAllStringSubmatch(m[1], -1) {
out[lit[1]] = struct{}{}
ast.Inspect(f, func(n ast.Node) bool {
switch decl := n.(type) {
case *ast.GenDecl:
if decl.Tok == token.CONST {
for _, spec := range decl.Specs {
vs, ok := spec.(*ast.ValueSpec)
if !ok {
continue
}
for i, name := range vs.Names {
if !strings.HasPrefix(name.Name, "Status") {
continue
}
if i >= len(vs.Values) {
continue
}
lit, ok := vs.Values[i].(*ast.BasicLit)
if !ok || lit.Kind != token.STRING {
continue
}
unquoted := strings.Trim(lit.Value, `"`)
consts[name.Name] = unquoted
}
}
}
for _, m := range thenRE.FindAllStringSubmatch(fragment, -1) {
out[m[1]] = struct{}{}
}
for _, m := range elseRE.FindAllStringSubmatch(fragment, -1) {
out[m[1]] = struct{}{}
if decl.Tok == token.VAR {
for _, spec := range decl.Specs {
vs, ok := spec.(*ast.ValueSpec)
if !ok {
continue
}
for i, name := range vs.Names {
if name.Name != "AllWorkspaceStatuses" {
continue
}
allWorkspaceStatusesFound = true
if i >= len(vs.Values) {
continue
}
composite, ok := vs.Values[i].(*ast.CompositeLit)
if !ok {
continue
}
for _, elt := range composite.Elts {
ident, ok := elt.(*ast.Ident)
if !ok {
continue
}
sliceEntries = append(sliceEntries, ident.Name)
}
}
}
}
}
for _, m := range rawRE.FindAllStringSubmatch(text, -1) {
harvest(m[1])
}
for _, m := range dquoteRE.FindAllStringSubmatch(text, -1) {
harvest(m[1])
}
return nil
return true
})
if walkErr != nil {
t.Fatalf("walk %s: %v", root, walkErr)
if !allWorkspaceStatusesFound {
t.Fatalf("AllWorkspaceStatuses not found in %s", statusFile)
}
// Cross-check: every slice entry must resolve to a known const.
out := make(map[string]struct{})
for _, entry := range sliceEntries {
v, ok := consts[entry]
if !ok {
t.Errorf("AllWorkspaceStatuses references undefined identifier %q in %s", entry, statusFile)
continue
}
out[v] = struct{}{}
}
// Cross-check: every const must be in the slice (otherwise the
// gate runs against an outdated source-of-truth list).
sliceSet := make(map[string]struct{}, len(sliceEntries))
for _, e := range sliceEntries {
sliceSet[e] = struct{}{}
}
for name := range consts {
if _, ok := sliceSet[name]; !ok {
t.Errorf(
"const %q is declared but missing from AllWorkspaceStatuses in %s — "+
"add it to the slice or the drift gate cannot enforce migration coverage for it",
name, statusFile,
)
}
}
return out
}
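
One hardening note on the const harvesting above: strings.Trim is enough
while every status is a plain lowercase literal with no escapes, but
strconv.Unquote is the general-purpose parse of a Go string literal (a
hypothetical tightening, not in this commit):

unquoted, err := strconv.Unquote(lit.Value)
if err != nil {
	t.Fatalf("unquote %s in %s: %v", lit.Value, statusFile, err)
}
consts[name.Name] = unquoted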

View File

@ -187,7 +187,7 @@ func (h *WorkspaceHandler) maybeMarkContainerDead(ctx context.Context, workspace
return false
}
log.Printf("ProxyA2A: container for %s is dead — marking offline and triggering restart", workspaceID)
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = 'offline', updated_at = now() WHERE id = $1 AND status NOT IN ('removed', 'provisioning')`, workspaceID); err != nil {
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2 AND status NOT IN ('removed', 'provisioning')`, models.StatusOffline, workspaceID); err != nil {
log.Printf("ProxyA2A: failed to mark workspace %s offline: %v", workspaceID, err)
}
db.ClearWorkspaceKeys(ctx, workspaceID)

View File

@ -16,6 +16,7 @@ import (
"time"
"github.com/DATA-DOG/go-sqlmock"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/provisioner"
"github.com/gin-gonic/gin"
)
@ -281,8 +282,8 @@ func TestProxyA2A_Upstream502_TriggersContainerDeadCheck(t *testing.T) {
mock.ExpectQuery(`SELECT COALESCE\(runtime, 'langgraph'\) FROM workspaces WHERE id =`).
WithArgs("ws-tunnel-dead").
WillReturnRows(sqlmock.NewRows([]string{"runtime"}).AddRow("hermes"))
mock.ExpectExec(`UPDATE workspaces SET status = 'offline'`).
WithArgs("ws-tunnel-dead").
mock.ExpectExec(`UPDATE workspaces SET status =`).
WithArgs(models.StatusOffline, "ws-tunnel-dead").
WillReturnResult(sqlmock.NewResult(0, 1))
w := httptest.NewRecorder()
@ -1808,8 +1809,8 @@ func TestMaybeMarkContainerDead_CPOnly_NotRunning(t *testing.T) {
mock.ExpectQuery(`SELECT COALESCE\(runtime, 'langgraph'\) FROM workspaces WHERE id =`).
WithArgs("ws-saas-dead").
WillReturnRows(sqlmock.NewRows([]string{"runtime"}).AddRow("hermes"))
mock.ExpectExec(`UPDATE workspaces SET status = 'offline'`).
WithArgs("ws-saas-dead").
mock.ExpectExec(`UPDATE workspaces SET status =`).
WithArgs(models.StatusOffline, "ws-saas-dead").
WillReturnResult(sqlmock.NewResult(0, 1))
got := handler.maybeMarkContainerDead(context.Background(), "ws-saas-dead")

View File

@ -378,8 +378,8 @@ func TestHeartbeat_ExactThreshold_Degraded(t *testing.T) {
mock.ExpectQuery("SELECT status FROM workspaces WHERE id =").
WithArgs("ws-edge").
WillReturnRows(sqlmock.NewRows([]string{"status"}).AddRow("online"))
mock.ExpectExec("UPDATE workspaces SET status = 'degraded'").
WithArgs("ws-edge").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs(models.StatusDegraded, "ws-edge").
WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectExec("INSERT INTO structure_events").
WillReturnResult(sqlmock.NewResult(0, 1))
@ -419,8 +419,8 @@ func TestHeartbeat_DegradedRecovery(t *testing.T) {
mock.ExpectQuery("SELECT status FROM workspaces WHERE id =").
WithArgs("ws-rec").
WillReturnRows(sqlmock.NewRows([]string{"status"}).AddRow("degraded"))
mock.ExpectExec("UPDATE workspaces SET status = 'online'").
WithArgs("ws-rec").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs(models.StatusOnline, "ws-rec").
WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectExec("INSERT INTO structure_events").
WillReturnResult(sqlmock.NewResult(0, 1))
@ -1000,8 +1000,8 @@ func TestPause_Success(t *testing.T) {
WillReturnRows(sqlmock.NewRows([]string{"id", "name"}))
// UPDATE status to paused
mock.ExpectExec("UPDATE workspaces SET status = 'paused'").
WithArgs("ws-pause").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs(models.StatusPaused, "ws-pause").
WillReturnResult(sqlmock.NewResult(0, 1))
// RecordAndBroadcast
@ -1098,22 +1098,22 @@ func TestPause_WithDescendants(t *testing.T) {
AddRow("ws-worker-2", "Worker 2"))
// UPDATE + broadcast for parent (ws-team)
mock.ExpectExec("UPDATE workspaces SET status = 'paused'").
WithArgs("ws-team").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs(models.StatusPaused, "ws-team").
WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectExec("INSERT INTO structure_events").
WillReturnResult(sqlmock.NewResult(0, 1))
// UPDATE + broadcast for child-1
mock.ExpectExec("UPDATE workspaces SET status = 'paused'").
WithArgs("ws-worker-1").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs(models.StatusPaused, "ws-worker-1").
WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectExec("INSERT INTO structure_events").
WillReturnResult(sqlmock.NewResult(0, 1))
// UPDATE + broadcast for child-2
mock.ExpectExec("UPDATE workspaces SET status = 'paused'").
WithArgs("ws-worker-2").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs(models.StatusPaused, "ws-worker-2").
WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectExec("INSERT INTO structure_events").
WillReturnResult(sqlmock.NewResult(0, 1))

View File

@ -28,7 +28,7 @@ func TestExtended_WorkspaceDelete(t *testing.T) {
// #73: batch UPDATE happens BEFORE any container teardown.
// Uses ANY($1::uuid[]) even with a single ID for consistency.
mock.ExpectExec("UPDATE workspaces SET status = 'removed'").
mock.ExpectExec("UPDATE workspaces SET status =").
WillReturnResult(sqlmock.NewResult(1, 1))
// Batch canvas layout delete (same id set).

View File

@ -209,8 +209,8 @@ func TestHeartbeatHandler_Degraded(t *testing.T) {
WillReturnRows(sqlmock.NewRows([]string{"status"}).AddRow("online"))
// Expect status transition to degraded
mock.ExpectExec("UPDATE workspaces SET status = 'degraded'").
WithArgs("ws-123").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs(models.StatusDegraded, "ws-123").
WillReturnResult(sqlmock.NewResult(0, 1))
// Expect RecordAndBroadcast INSERT for WORKSPACE_DEGRADED
@ -257,8 +257,8 @@ func TestHeartbeatHandler_Recovery(t *testing.T) {
WillReturnRows(sqlmock.NewRows([]string{"status"}).AddRow("degraded"))
// Expect status transition back to online
mock.ExpectExec("UPDATE workspaces SET status = 'online'").
WithArgs("ws-123").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs(models.StatusOnline, "ws-123").
WillReturnResult(sqlmock.NewResult(0, 1))
// Expect RecordAndBroadcast INSERT for WORKSPACE_ONLINE

View File

@ -22,6 +22,7 @@ import (
"testing"
sqlmock "github.com/DATA-DOG/go-sqlmock"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
"github.com/gin-gonic/gin"
)
@ -51,7 +52,7 @@ func TestHibernateWorkspace_OnlineWorkspace_Success(t *testing.T) {
// Step 1: atomic claim UPDATE succeeds.
mock.ExpectExec(`UPDATE workspaces`).
WithArgs(wsID).
WithArgs(wsID, models.StatusHibernating).
WillReturnResult(sqlmock.NewResult(0, 1))
// Post-claim SELECT for name/tier.
@ -60,8 +61,8 @@ func TestHibernateWorkspace_OnlineWorkspace_Success(t *testing.T) {
WillReturnRows(sqlmock.NewRows([]string{"name", "tier"}).AddRow("Idle Agent", 1))
// Step 3: final UPDATE to 'hibernated'.
mock.ExpectExec(`UPDATE workspaces SET status = 'hibernated'`).
WithArgs(wsID).
mock.ExpectExec(`UPDATE workspaces SET status =`).
WithArgs(models.StatusHibernated, wsID).
WillReturnResult(sqlmock.NewResult(0, 1))
// Broadcaster inserts a structure_events row.
@ -98,7 +99,7 @@ func TestHibernateWorkspace_NotEligible_NoOp(t *testing.T) {
// Atomic claim finds nothing matching WHERE (workspace offline, paused, etc.).
mock.ExpectExec(`UPDATE workspaces`).
WithArgs(wsID).
WithArgs(wsID, models.StatusHibernating).
WillReturnResult(sqlmock.NewResult(0, 0))
// Set a Redis key to confirm it is NOT cleared by early return.
@ -129,7 +130,7 @@ func TestHibernateWorkspace_DBUpdateFails_NoCrash(t *testing.T) {
// Step 1: atomic claim succeeds.
mock.ExpectExec(`UPDATE workspaces`).
WithArgs(wsID).
WithArgs(wsID, models.StatusHibernating).
WillReturnResult(sqlmock.NewResult(0, 1))
// Post-claim SELECT.
@ -138,8 +139,8 @@ func TestHibernateWorkspace_DBUpdateFails_NoCrash(t *testing.T) {
WillReturnRows(sqlmock.NewRows([]string{"name", "tier"}).AddRow("Flaky Agent", 2))
// Step 3: final UPDATE fails.
mock.ExpectExec(`UPDATE workspaces SET status = 'hibernated'`).
WithArgs(wsID).
mock.ExpectExec(`UPDATE workspaces SET status =`).
WithArgs(models.StatusHibernated, wsID).
WillReturnError(fmt.Errorf("db: connection refused"))
// Must not panic — test will catch a panic via t.Fatal.
@ -203,7 +204,7 @@ func TestHibernateHandler_Online_Returns200(t *testing.T) {
// HibernateWorkspace() step 1: atomic claim.
mock.ExpectExec(`UPDATE workspaces`).
WithArgs(wsID).
WithArgs(wsID, models.StatusHibernating).
WillReturnResult(sqlmock.NewResult(0, 1))
// Post-claim SELECT for name/tier.
@ -212,8 +213,8 @@ func TestHibernateHandler_Online_Returns200(t *testing.T) {
WillReturnRows(sqlmock.NewRows([]string{"name", "tier", "active_tasks"}).AddRow("Online Bot", 1, 0))
// Step 3: final UPDATE.
mock.ExpectExec(`UPDATE workspaces SET status = 'hibernated'`).
WithArgs(wsID).
mock.ExpectExec(`UPDATE workspaces SET status =`).
WithArgs(models.StatusHibernated, wsID).
WillReturnResult(sqlmock.NewResult(0, 1))
// Broadcaster INSERT.
@ -318,7 +319,7 @@ func TestHibernateHandler_ActiveTasks_ForceTrue_Returns200(t *testing.T) {
// HibernateWorkspace claim
mock.ExpectExec(`UPDATE workspaces`).
WithArgs(wsID).
WithArgs(wsID, models.StatusHibernating).
WillReturnResult(sqlmock.NewResult(0, 1))
// Post-claim SELECT
@ -327,8 +328,8 @@ func TestHibernateHandler_ActiveTasks_ForceTrue_Returns200(t *testing.T) {
WillReturnRows(sqlmock.NewRows([]string{"name", "tier"}).AddRow("Force Bot", 1))
// Final UPDATE to hibernated
mock.ExpectExec(`UPDATE workspaces SET status = 'hibernated'`).
WithArgs(wsID).
mock.ExpectExec(`UPDATE workspaces SET status =`).
WithArgs(models.StatusHibernated, wsID).
WillReturnResult(sqlmock.NewResult(0, 1))
// Broadcaster

View File

@ -7,6 +7,7 @@ import (
"testing"
sqlmock "github.com/DATA-DOG/go-sqlmock"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
"github.com/gin-gonic/gin"
)
@ -148,8 +149,8 @@ func TestHeartbeat_NativeStatusMgmt_WedgedStillRespected(t *testing.T) {
WillReturnRows(sqlmock.NewRows([]string{"status"}).AddRow("online"))
// Wedged degrade UPDATE — must still happen even with native_status_mgmt
mock.ExpectExec("UPDATE workspaces SET status = 'degraded'").
WithArgs("ws-wedged").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs(models.StatusDegraded, "ws-wedged").
WillReturnResult(sqlmock.NewResult(0, 1))
// WORKSPACE_DEGRADED broadcast still fires

View File

@ -169,7 +169,7 @@ func (h *OrgHandler) createWorkspaceTree(ws OrgWorkspace, parentID *string, absX
// Handle external workspaces
if ws.External {
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = 'online', url = $1 WHERE id = $2`, ws.URL, id); err != nil {
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, url = $2 WHERE id = $3`, models.StatusOnline, ws.URL, id); err != nil {
log.Printf("Org import: external workspace status update failed for %s: %v", ws.Name, err)
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", id, map[string]interface{}{

View File

@ -645,8 +645,8 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea
// degraded card without the operator scraping container logs.
if payload.RuntimeState == "wedged" && currentStatus == "online" {
_, err := db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = 'degraded', updated_at = now() WHERE id = $1 AND status = 'online'`,
payload.WorkspaceID)
`UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2 AND status = 'online'`,
models.StatusDegraded, payload.WorkspaceID)
if err != nil {
log.Printf("Heartbeat: failed to mark %s degraded (wedged): %v", payload.WorkspaceID, err)
}
@ -668,7 +668,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea
nativeStatus := runtimeOverrides.HasCapability(payload.WorkspaceID, "status_mgmt")
if !nativeStatus && currentStatus == "online" && payload.ErrorRate >= 0.5 {
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = 'degraded', updated_at = now() WHERE id = $1`, payload.WorkspaceID); err != nil {
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2`, models.StatusDegraded, payload.WorkspaceID); err != nil {
log.Printf("Heartbeat: failed to mark %s degraded: %v", payload.WorkspaceID, err)
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_DEGRADED", payload.WorkspaceID, map[string]interface{}{
@ -687,7 +687,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea
// Skipped under native_status_mgmt for the same reason as the
// degrade branch above: the adapter owns the transition.
if !nativeStatus && currentStatus == "degraded" && payload.ErrorRate < 0.1 && payload.RuntimeState == "" {
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = 'online', updated_at = now() WHERE id = $1`, payload.WorkspaceID); err != nil {
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2`, models.StatusOnline, payload.WorkspaceID); err != nil {
log.Printf("Heartbeat: failed to recover %s to online: %v", payload.WorkspaceID, err)
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", payload.WorkspaceID, map[string]interface{}{})
@ -697,7 +697,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea
// #73 guard: `AND status = 'offline'` makes the flip conditional in a single statement,
// so a Delete that races with this recovery can't flip 'removed' back to 'online'.
if currentStatus == "offline" {
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = 'online', updated_at = now() WHERE id = $1 AND status = 'offline'`, payload.WorkspaceID); err != nil {
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2 AND status = 'offline'`, models.StatusOnline, payload.WorkspaceID); err != nil {
log.Printf("Heartbeat: failed to recover %s from offline: %v", payload.WorkspaceID, err)
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", payload.WorkspaceID, map[string]interface{}{})
@ -710,7 +710,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea
// transition is the only mechanism that moves newly-started workspaces out of
// the phantom-idle state. (#1784)
if currentStatus == "provisioning" {
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = 'online', updated_at = now() WHERE id = $1 AND status = 'provisioning'`, payload.WorkspaceID); err != nil {
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2 AND status = 'provisioning'`, models.StatusOnline, payload.WorkspaceID); err != nil {
log.Printf("Heartbeat: failed to transition %s from provisioning to online: %v", payload.WorkspaceID, err)
} else {
log.Printf("Heartbeat: transitioned %s from provisioning to online (heartbeat received)", payload.WorkspaceID)

View File

@ -10,6 +10,7 @@ import (
"testing"
"github.com/DATA-DOG/go-sqlmock"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
"github.com/gin-gonic/gin"
)
@ -116,8 +117,8 @@ func TestHeartbeatHandler_OfflineToOnline(t *testing.T) {
WillReturnRows(sqlmock.NewRows([]string{"status"}).AddRow("offline"))
// Expect status transition back to online
mock.ExpectExec("UPDATE workspaces SET status = 'online'").
WithArgs("ws-offline").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs(models.StatusOnline, "ws-offline").
WillReturnResult(sqlmock.NewResult(0, 1))
// Expect RecordAndBroadcast INSERT for WORKSPACE_ONLINE
@ -166,8 +167,8 @@ func TestHeartbeatHandler_ProvisioningToOnline(t *testing.T) {
WillReturnRows(sqlmock.NewRows([]string{"status"}).AddRow("provisioning"))
// Expect status transition to online (#1784)
mock.ExpectExec("UPDATE workspaces SET status = 'online'").
WithArgs("ws-provisioning").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs(models.StatusOnline, "ws-provisioning").
WillReturnResult(sqlmock.NewResult(0, 1))
// Expect RecordAndBroadcast INSERT for WORKSPACE_ONLINE
@ -340,8 +341,8 @@ func TestHeartbeatHandler_RuntimeWedged_FlipsOnlineToDegraded(t *testing.T) {
// The wedge-handling branch fires the degraded UPDATE with the
// `AND status = 'online'` guard (race-safe against concurrent
// removal). Match the SQL with the guard included.
mock.ExpectExec("UPDATE workspaces SET status = 'degraded'.*status = 'online'").
WithArgs("ws-wedged").
mock.ExpectExec("UPDATE workspaces SET status =.*status = 'online'").
WithArgs(models.StatusDegraded, "ws-wedged").
WillReturnResult(sqlmock.NewResult(0, 1))
// RecordAndBroadcast for WORKSPACE_DEGRADED
@ -436,8 +437,8 @@ func TestHeartbeatHandler_DegradedToOnline_AfterWedgeClears(t *testing.T) {
WillReturnRows(sqlmock.NewRows([]string{"status"}).AddRow("degraded"))
// Recovery UPDATE fires (degraded → online).
mock.ExpectExec("UPDATE workspaces SET status = 'online'").
WithArgs("ws-recovered").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs(models.StatusOnline, "ws-recovered").
WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectExec("INSERT INTO structure_events").

View File

@ -201,7 +201,7 @@ func (h *TeamHandler) Collapse(c *gin.Context) {
// Mark as removed
if _, err := db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = 'removed', updated_at = now() WHERE id = $1`, childID); err != nil {
`UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2`, models.StatusRemoved, childID); err != nil {
log.Printf("Team collapse: failed to remove workspace %s: %v", childID, err)
}
if _, err := db.DB.ExecContext(ctx,

View File

@ -76,7 +76,7 @@ func TestTeamCollapse_WithChildren(t *testing.T) {
AddRow("child-2", "Worker B"))
// UPDATE + DELETE + broadcast for child-1
mock.ExpectExec("UPDATE workspaces SET status = 'removed'").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs("child-1").
WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectExec("DELETE FROM canvas_layouts").
@ -86,7 +86,7 @@ func TestTeamCollapse_WithChildren(t *testing.T) {
WillReturnResult(sqlmock.NewResult(0, 1))
// UPDATE + DELETE + broadcast for child-2
mock.ExpectExec("UPDATE workspaces SET status = 'removed'").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs("child-2").
WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectExec("DELETE FROM canvas_layouts").

View File

@ -336,7 +336,7 @@ func (h *WorkspaceHandler) Create(c *gin.Context) {
if payload.External || payload.Runtime == "external" {
var connectionToken string
if payload.URL != "" {
db.DB.ExecContext(ctx, `UPDATE workspaces SET url = $1, status = 'online', runtime = 'external', updated_at = now() WHERE id = $2`, payload.URL, id)
db.DB.ExecContext(ctx, `UPDATE workspaces SET url = $1, status = $2, runtime = 'external', updated_at = now() WHERE id = $3`, payload.URL, models.StatusOnline, id)
if err := db.CacheURL(ctx, id, payload.URL); err != nil {
log.Printf("External workspace: failed to cache URL for %s: %v", id, err)
}
@ -348,7 +348,7 @@ func (h *WorkspaceHandler) Create(c *gin.Context) {
// in awaiting_agent. First POST /registry/register call
// from the external agent (with this token + its URL)
// flips the row to online.
db.DB.ExecContext(ctx, `UPDATE workspaces SET status = 'awaiting_agent', runtime = 'external', updated_at = now() WHERE id = $1`, id)
db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, runtime = 'external', updated_at = now() WHERE id = $2`, models.StatusAwaitingAgent, id)
tok, tokErr := wsauth.IssueToken(ctx, db.DB, id)
if tokErr != nil {
log.Printf("External workspace %s: token issuance failed: %v", id, tokErr)
@ -460,7 +460,7 @@ func (h *WorkspaceHandler) Create(c *gin.Context) {
ON CONFLICT (workspace_id) DO UPDATE SET data = $2::jsonb
`, id, cfgJSON)
db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = 'failed', last_sample_error = 'Docker not available — workspace containers require a Docker daemon or external provisioning.', updated_at = now() WHERE id = $1`, id)
`UPDATE workspaces SET status = $1, last_sample_error = 'Docker not available — workspace containers require a Docker daemon or external provisioning.', updated_at = now() WHERE id = $2`, models.StatusFailed, id)
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISION_FAILED", id, map[string]interface{}{
"error": "Docker not available on this platform instance",
})

View File

@ -16,6 +16,7 @@ import (
"time"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/wsauth"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
@ -359,8 +360,8 @@ func (h *WorkspaceHandler) Delete(c *gin.Context) {
// existing `status NOT IN ('removed', ...)` guards.
allIDs := append([]string{id}, descendantIDs...)
if _, err := db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = 'removed', updated_at = now() WHERE id = ANY($1::uuid[])`,
pq.Array(allIDs)); err != nil {
`UPDATE workspaces SET status = $1, updated_at = now() WHERE id = ANY($2::uuid[])`,
models.StatusRemoved, pq.Array(allIDs)); err != nil {
log.Printf("Delete status update error for %s: %v", id, err)
}
if _, err := db.DB.ExecContext(ctx,

View File

@ -214,8 +214,8 @@ func (h *WorkspaceHandler) markProvisionFailed(ctx context.Context, workspaceID,
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISION_FAILED", workspaceID, extra)
if _, dbErr := db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = 'failed', last_sample_error = $2, updated_at = now() WHERE id = $1`,
workspaceID, msg); dbErr != nil {
`UPDATE workspaces SET status = $3, last_sample_error = $2, updated_at = now() WHERE id = $1`,
workspaceID, msg, models.StatusFailed); dbErr != nil {
// Non-fatal: the broadcast already fired, the operator sees the
// failure event in the canvas. The DB row stays at whatever
// status it had — provisioning event log is the source of truth.
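
Worth noting about the placeholder renumbering in this hunk: pq placeholders
bind by number, not by order of appearance in the SQL text, so the new status
rides along as $3 without disturbing the existing (workspaceID, msg) argument
order:

// args[0] binds $1, args[1] binds $2, args[2] binds $3; the
// positions in the SQL string are irrelevant.
db.DB.ExecContext(ctx,
	`UPDATE workspaces SET status = $3, last_sample_error = $2, updated_at = now() WHERE id = $1`,
	workspaceID, msg, models.StatusFailed)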

View File

@ -1143,8 +1143,8 @@ func TestProvisionWorkspace_NoInternalErrorsInBroadcast(t *testing.T) {
// path skipped last_sample_error; the shared helper now always
// persists it so users see the failure in the UI without having
// to grep server logs.
mock.ExpectExec(`UPDATE workspaces SET status = 'failed'`).
WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg()).
mock.ExpectExec(`UPDATE workspaces SET status =`).
WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg()).
WillReturnResult(sqlmock.NewResult(0, 1))
cap := &captureBroadcaster{}
@ -1241,8 +1241,8 @@ func TestProvisionWorkspaceCP_NoInternalErrorsInBroadcast(t *testing.T) {
// On cpProv.Start failure, provisionWorkspaceCP also marks the
// workspace failed. Match-anything on args so the test isn't
// coupled to the exact UPDATE column order.
mock.ExpectExec(`UPDATE workspaces SET status = 'failed'`).
WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg()).
mock.ExpectExec(`UPDATE workspaces SET status =`).
WithArgs(sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg()).
WillReturnResult(sqlmock.NewResult(0, 1))
cap := &captureBroadcaster{}

View File

@ -136,7 +136,7 @@ func (h *WorkspaceHandler) Restart(c *gin.Context) {
// Reset to provisioning
db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = 'provisioning', url = '', updated_at = now() WHERE id = $1`, id)
`UPDATE workspaces SET status = $1, url = '', updated_at = now() WHERE id = $2`, models.StatusProvisioning, id)
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", id, map[string]interface{}{
"name": wsName,
"tier": tier,
@ -269,10 +269,10 @@ func (h *WorkspaceHandler) HibernateWorkspace(ctx context.Context, workspaceID s
// active_tasks = 0 predicate ensures we never interrupt a running task.
result, err := db.DB.ExecContext(ctx, `
UPDATE workspaces
SET status = 'hibernating', updated_at = now()
SET status = $2, updated_at = now()
WHERE id = $1
AND status IN ('online', 'degraded')
AND active_tasks = 0`, workspaceID)
AND active_tasks = 0`, workspaceID, models.StatusHibernating)
if err != nil {
log.Printf("Hibernate: atomic claim failed for %s: %v", workspaceID, err)
return
@ -306,8 +306,8 @@ func (h *WorkspaceHandler) HibernateWorkspace(ctx context.Context, workspaceID s
// ── Step 3: Mark fully hibernated ─────────────────────────────────────────
if _, err = db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = 'hibernated', url = '', updated_at = now() WHERE id = $1`,
workspaceID); err != nil {
`UPDATE workspaces SET status = $1, url = '', updated_at = now() WHERE id = $2`,
models.StatusHibernated, workspaceID); err != nil {
log.Printf("Hibernate: failed to mark hibernated for %s: %v", workspaceID, err)
return
}
@ -453,7 +453,7 @@ func (h *WorkspaceHandler) runRestartCycle(workspaceID string) {
h.stopForRestart(ctx, workspaceID)
db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = 'provisioning', url = '', updated_at = now() WHERE id = $1`, workspaceID)
`UPDATE workspaces SET status = $1, url = '', updated_at = now() WHERE id = $2`, models.StatusProvisioning, workspaceID)
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", workspaceID, map[string]interface{}{
"name": wsName, "tier": tier, "runtime": dbRuntime,
})
@ -534,7 +534,7 @@ func (h *WorkspaceHandler) Pause(c *gin.Context) {
h.provisioner.Stop(ctx, ws.id)
}
db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = 'paused', url = '', updated_at = now() WHERE id = $1`, ws.id)
`UPDATE workspaces SET status = $1, url = '', updated_at = now() WHERE id = $2`, models.StatusPaused, ws.id)
db.ClearWorkspaceKeys(ctx, ws.id)
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PAUSED", ws.id, map[string]interface{}{
"name": ws.name,
@ -604,7 +604,7 @@ func (h *WorkspaceHandler) Resume(c *gin.Context) {
// Re-provision all
for _, ws := range toResume {
db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = 'provisioning', updated_at = now() WHERE id = $1`, ws.id)
`UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2`, models.StatusProvisioning, ws.id)
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", ws.id, map[string]interface{}{
"name": ws.name, "tier": ws.tier, "runtime": ws.runtime,
})

View File

@ -12,6 +12,7 @@ import (
"testing"
sqlmock "github.com/DATA-DOG/go-sqlmock"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
"github.com/gin-gonic/gin"
)
@ -223,7 +224,7 @@ func TestPauseHandler_SuccessNoChildren(t *testing.T) {
WithArgs("ws-pause-ok").
WillReturnRows(sqlmock.NewRows([]string{"id", "name"}))
mock.ExpectExec("UPDATE workspaces SET status = 'paused'").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs("ws-pause-ok").
WillReturnResult(sqlmock.NewResult(0, 1))
@ -357,7 +358,7 @@ func TestHibernateWorkspace_ActiveTasksNotHibernated(t *testing.T) {
// The atomic claim UPDATE returns 0 rows because active_tasks > 0 fails the WHERE.
mock.ExpectExec(`UPDATE workspaces`).
WithArgs("ws-active").
WithArgs("ws-active", models.StatusHibernating).
WillReturnResult(sqlmock.NewResult(0, 0)) // rowsAffected = 0
handler.HibernateWorkspace(context.Background(), "ws-active")
@ -388,7 +389,7 @@ func TestHibernateWorkspace_AlreadyHibernatingNotHibernated(t *testing.T) {
// Another goroutine already transitioned the workspace to 'hibernating',
// so this UPDATE finds nothing matching the WHERE clause.
mock.ExpectExec(`UPDATE workspaces`).
WithArgs("ws-already").
WithArgs("ws-already", models.StatusHibernating).
WillReturnResult(sqlmock.NewResult(0, 0))
handler.HibernateWorkspace(context.Background(), "ws-already")
@ -417,7 +418,7 @@ func TestHibernateWorkspace_SuccessPath(t *testing.T) {
// Step 1: atomic claim succeeds
mock.ExpectExec(`UPDATE workspaces`).
WithArgs("ws-ok").
WithArgs("ws-ok", models.StatusHibernating).
WillReturnResult(sqlmock.NewResult(0, 1)) // rowsAffected = 1
// Name/tier fetch after claim
@ -426,8 +427,8 @@ func TestHibernateWorkspace_SuccessPath(t *testing.T) {
WillReturnRows(sqlmock.NewRows([]string{"name", "tier"}).AddRow("My Agent", 1))
// Step 3: final hibernated UPDATE
mock.ExpectExec(`UPDATE workspaces SET status = 'hibernated'`).
WithArgs("ws-ok").
mock.ExpectExec(`UPDATE workspaces SET status =`).
WithArgs(models.StatusHibernated, "ws-ok").
WillReturnResult(sqlmock.NewResult(0, 1))
// broadcaster INSERT
@ -468,20 +469,20 @@ func TestHibernateWorkspace_ConcurrentOnlyOneStop(t *testing.T) {
// ── Caller A wins the race ────────────────────────────────────────────────
mock.ExpectExec(`UPDATE workspaces`).
WithArgs("ws-race").
WithArgs("ws-race", models.StatusHibernating).
WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectQuery(`SELECT name, tier FROM workspaces WHERE id`).
WithArgs("ws-race").
WillReturnRows(sqlmock.NewRows([]string{"name", "tier"}).AddRow("Race Agent", 2))
mock.ExpectExec(`UPDATE workspaces SET status = 'hibernated'`).
WithArgs("ws-race").
mock.ExpectExec(`UPDATE workspaces SET status =`).
WithArgs(models.StatusHibernated, "ws-race").
WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectExec(`INSERT INTO structure_events`).
WillReturnResult(sqlmock.NewResult(0, 1))
// ── Caller B loses — workspace is already 'hibernating' ───────────────────
mock.ExpectExec(`UPDATE workspaces`).
WithArgs("ws-race").
WithArgs("ws-race", models.StatusHibernating).
WillReturnResult(sqlmock.NewResult(0, 0))
// Execute sequentially (sqlmock is not safe for concurrent goroutines);
@ -517,7 +518,7 @@ func TestHibernateWorkspace_DBErrorOnClaim(t *testing.T) {
}
mock.ExpectExec(`UPDATE workspaces`).
WithArgs("ws-dberr").
WithArgs("ws-dberr", models.StatusHibernating).
WillReturnError(sql.ErrConnDone)
handler.HibernateWorkspace(context.Background(), "ws-dberr")

View File

@ -568,7 +568,7 @@ func TestWorkspaceDelete_CascadeWithChildren(t *testing.T) {
// #73: single batch UPDATE covering [self + descendants] BEFORE stopping
// containers (prevents heartbeat/restart resurrection races).
mock.ExpectExec("UPDATE workspaces SET status = 'removed'").
mock.ExpectExec("UPDATE workspaces SET status =").
WillReturnResult(sqlmock.NewResult(2, 2))
// Batch canvas_layouts DELETE for the same id set.
mock.ExpectExec("DELETE FROM canvas_layouts WHERE workspace_id = ANY").
@ -631,7 +631,7 @@ func TestWorkspaceDelete_DisablesSchedules(t *testing.T) {
WillReturnRows(sqlmock.NewRows([]string{"id", "name"}))
// Mark workspace as removed
mock.ExpectExec("UPDATE workspaces SET status = 'removed'").
mock.ExpectExec("UPDATE workspaces SET status =").
WillReturnResult(sqlmock.NewResult(0, 1))
// Canvas layouts cleanup
mock.ExpectExec("DELETE FROM canvas_layouts WHERE workspace_id = ANY").
@ -689,7 +689,7 @@ func TestWorkspaceDelete_CascadeDisablesDescendantSchedules(t *testing.T) {
AddRow(grandchildID))
// Mark all 3 as removed
mock.ExpectExec("UPDATE workspaces SET status = 'removed'").
mock.ExpectExec("UPDATE workspaces SET status =").
WillReturnResult(sqlmock.NewResult(0, 3))
// Canvas layouts
mock.ExpectExec("DELETE FROM canvas_layouts WHERE workspace_id = ANY").
@ -753,7 +753,7 @@ func TestWorkspaceDelete_ScheduleDisableOnlyTargetsDeletedWorkspace(t *testing.T
WillReturnRows(sqlmock.NewRows([]string{"id", "name"}))
// Mark only workspace A as removed
mock.ExpectExec("UPDATE workspaces SET status = 'removed'").
mock.ExpectExec("UPDATE workspaces SET status =").
WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectExec("DELETE FROM canvas_layouts WHERE workspace_id = ANY").
WillReturnResult(sqlmock.NewResult(0, 0))

View File

@ -0,0 +1,68 @@
package models
// Workspace status — typed constants that mirror the `workspace_status`
// Postgres enum (migrations 043 + 046). Every UPDATE/INSERT against
// `workspaces.status` MUST use one of these constants; raw string
// literals are forbidden by convention, and the drift gate in
// internal/db/workspace_status_enum_drift_test.go keeps this file and
// the migrations in sync.
//
// Why typed: before 2026-04-30 the migrations shipped the enum without
// `awaiting_agent` and `hibernating` even though application code wrote
// those values. Every UPDATE silently failed with `invalid input value
// for enum workspace_status: ...` for five days because:
//
// - Status values were ad-hoc string literals scattered across
// ~15 raw SQL strings in 8 files. Typos (e.g. 'hibernating' vs
// 'hibernated') and missing-from-enum cases were invisible to
// the compiler.
// - sqlmock (the unit-test layer) matched SQL by regex, not against
// the live enum constraint.
// - Errors were dropped or log-and-continued at every call site.
//
// Typed constants close the first leg by making the bug uncompilable:
// a typo at a call site becomes "undefined: ..." at build time instead
// of a silent runtime failure, and the drift gate ties any new constant
// to a matching migration in the same PR.
// WorkspaceStatus is the defined string type for values written into
// `workspaces.status`. Its underlying type is string, so it flows
// through database/sql args and JSON marshalling unchanged.
type WorkspaceStatus string
// Recognised values. The set MUST be a subset of the workspace_status
// Postgres enum (migrations 043 + 046). The drift gate in
// internal/db/workspace_status_enum_drift_test.go enforces this.
const (
StatusProvisioning WorkspaceStatus = "provisioning"
StatusOnline WorkspaceStatus = "online"
StatusOffline WorkspaceStatus = "offline"
StatusDegraded WorkspaceStatus = "degraded"
StatusFailed WorkspaceStatus = "failed"
StatusRemoved WorkspaceStatus = "removed"
StatusPaused WorkspaceStatus = "paused"
StatusHibernated WorkspaceStatus = "hibernated"
StatusHibernating WorkspaceStatus = "hibernating"
StatusAwaitingAgent WorkspaceStatus = "awaiting_agent"
)
// AllWorkspaceStatuses is the source-of-truth list the drift gate
// parses. Keep it in sync with the const block above. It is a hand-
// maintained var because Go cannot enumerate the constants of a type
// at runtime; the gate compensates by parsing the const block's AST
// and cross-checking it against this slice in both directions.
var AllWorkspaceStatuses = []WorkspaceStatus{
StatusProvisioning,
StatusOnline,
StatusOffline,
StatusDegraded,
StatusFailed,
StatusRemoved,
StatusPaused,
StatusHibernated,
StatusHibernating,
StatusAwaitingAgent,
}
// String returns the underlying enum string, sparing callers an
// explicit string(s) conversion where a plain string is required.
func (s WorkspaceStatus) String() string { return string(s) }
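
As a concrete walk-through of the add-a-status recipe from the drift gate's
header comment, suppose a hypothetical 'draining' status were introduced
(name and migration number illustrative):

// models/workspace_status.go
const StatusDraining WorkspaceStatus = "draining"
// ...and append StatusDraining to AllWorkspaceStatuses.

// migrations/0XX_workspace_status_draining.up.sql (shaped like 046):
// ALTER TYPE workspace_status ADD VALUE IF NOT EXISTS 'draining';

Omit the const and call sites fail to compile; omit the slice entry and the
gate's const-vs-slice cross-check fails; omit the migration and the gate's
enum-inclusion check fails. All three must land in the same PR.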

View File

@ -8,6 +8,7 @@ import (
"time"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
)
// ContainerChecker checks if a workspace container is running via Docker API.
@ -99,8 +100,9 @@ func sweepOnlineWorkspaces(ctx context.Context, checker ContainerChecker, onOffl
log.Printf("Health sweep: container for %s is gone — marking offline", id)
_, err = db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = 'offline', updated_at = now()
WHERE id = $1 AND status NOT IN ('removed', 'provisioning')`, id)
`UPDATE workspaces SET status = $1, updated_at = now()
WHERE id = $2 AND status NOT IN ('removed', 'provisioning')`,
models.StatusOffline, id)
if err != nil {
log.Printf("Health sweep: failed to mark %s offline: %v", id, err)
continue
@ -164,8 +166,9 @@ func sweepStaleRemoteWorkspaces(ctx context.Context, onOffline OfflineHandler) {
log.Printf("Health sweep (remote): %s heartbeat stale (>%s) — marking awaiting_agent", id, staleAfter)
_, err = db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = 'awaiting_agent', updated_at = now()
WHERE id = $1 AND status NOT IN ('removed', 'provisioning', 'paused')`, id)
`UPDATE workspaces SET status = $1, updated_at = now()
WHERE id = $2 AND status NOT IN ('removed', 'provisioning', 'paused')`,
models.StatusAwaitingAgent, id)
if err != nil {
log.Printf("Health sweep (remote): failed to mark %s awaiting_agent: %v", id, err)
continue

View File

@ -8,6 +8,7 @@ import (
"github.com/DATA-DOG/go-sqlmock"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
)
@ -62,8 +63,8 @@ func TestSweepOnlineWorkspaces_DeadContainer(t *testing.T) {
// Mock: update to offline (Docker sweep keeps 'offline' status —
// 'awaiting_agent' is the external-runtime path).
mock.ExpectExec("UPDATE workspaces SET status = 'offline'").
WithArgs("ws-dead-123").
mock.ExpectExec("UPDATE workspaces SET status =").
WithArgs(models.StatusOffline, "ws-dead-123").
WillReturnResult(sqlmock.NewResult(0, 1))
checker := &mockChecker{running: map[string]bool{
@ -165,11 +166,11 @@ func TestSweepStaleRemoteWorkspaces_MarksStaleAwaitingAgent(t *testing.T) {
WillReturnRows(sqlmock.NewRows([]string{"id"}).
AddRow("ws-stale-1").
AddRow("ws-stale-2"))
mock.ExpectExec(`UPDATE workspaces SET status = 'awaiting_agent'`).
WithArgs("ws-stale-1").
mock.ExpectExec(`UPDATE workspaces SET status =`).
WithArgs(models.StatusAwaitingAgent, "ws-stale-1").
WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectExec(`UPDATE workspaces SET status = 'awaiting_agent'`).
WithArgs("ws-stale-2").
mock.ExpectExec(`UPDATE workspaces SET status =`).
WithArgs(models.StatusAwaitingAgent, "ws-stale-2").
WillReturnResult(sqlmock.NewResult(0, 1))
var offlineCalls []string
@ -210,8 +211,8 @@ func TestSweepStaleRemoteWorkspaces_NilCallbackNoPanic(t *testing.T) {
mock.ExpectQuery(`FROM workspaces`).
WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow("ws-x"))
mock.ExpectExec(`UPDATE workspaces SET status = 'awaiting_agent'`).
WithArgs("ws-x").
mock.ExpectExec(`UPDATE workspaces SET status =`).
WithArgs(models.StatusAwaitingAgent, "ws-x").
WillReturnResult(sqlmock.NewResult(0, 1))
// Must not panic with nil callback

View File

@ -6,6 +6,7 @@ import (
"strings"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
)
// OfflineHandler is called when a workspace's liveness key expires.
@ -54,12 +55,16 @@ func StartLivenessMonitor(ctx context.Context, onOffline OfflineHandler) {
// non-external case stays cheap (no extra round-trip)
// and there's no TOCTOU between the runtime read and the
// status write.
// CASE branches use placeholders so the typed constants drive
// the values — keeps the atomicity of the single UPDATE while
// pinning external→awaiting_agent and other→offline at compile
// time. $2 = external arm, $3 = non-external arm.
_, err := db.DB.ExecContext(ctx, `
UPDATE workspaces
SET status = CASE WHEN runtime = 'external' THEN 'awaiting_agent' ELSE 'offline' END,
SET status = CASE WHEN runtime = 'external' THEN $2 ELSE $3 END,
updated_at = now()
WHERE id = $1 AND status NOT IN ('removed', 'paused', 'hibernated')
`, workspaceID)
`, workspaceID, models.StatusAwaitingAgent, models.StatusOffline)
if err != nil {
log.Printf("Liveness: failed to mark %s offline: %v", workspaceID, err)
continue

View File

@ -7,6 +7,7 @@ import (
"github.com/DATA-DOG/go-sqlmock"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
"github.com/alicebob/miniredis/v2"
"github.com/redis/go-redis/v9"
)
@ -84,8 +85,8 @@ func TestStartLivenessMonitor_KeyExpiryTriggersOffline(t *testing.T) {
// CASE-expression-driven on runtime: external → 'awaiting_agent',
// other → 'offline'. sqlmock matches on regex so the SET clause
// just needs to mention the conditional.
mock.ExpectExec(`UPDATE workspaces\s+SET status = CASE WHEN runtime = 'external' THEN 'awaiting_agent' ELSE 'offline' END`).
WithArgs("ws-expire-test").
mock.ExpectExec(`UPDATE workspaces\s+SET status = CASE WHEN runtime = 'external' THEN \$2 ELSE \$3 END`).
WithArgs("ws-expire-test", models.StatusAwaitingAgent, models.StatusOffline).
WillReturnResult(sqlmock.NewResult(0, 1))
go StartLivenessMonitor(ctx, onOffline)
@ -148,8 +149,8 @@ func TestStartLivenessMonitor_NilCallback(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
mock.ExpectExec("UPDATE workspaces SET status = 'offline'").
WithArgs("ws-nocallback").
mock.ExpectExec(`UPDATE workspaces\s+SET status = CASE WHEN runtime = 'external' THEN \$2 ELSE \$3 END`).
WithArgs("ws-nocallback", models.StatusAwaitingAgent, models.StatusOffline).
WillReturnResult(sqlmock.NewResult(0, 1))
go StartLivenessMonitor(ctx, nil)

View File

@ -8,6 +8,7 @@ import (
"time"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
)
// ProvisionTimeoutEmitter is the narrow broadcaster dependency the sweeper
@ -148,13 +149,13 @@ func sweepStuckProvisioning(ctx context.Context, emitter ProvisionTimeoutEmitter
msg := "provisioning timed out — container started but never called /registry/register. Check container logs and network connectivity to the platform."
res, err := db.DB.ExecContext(ctx, `
UPDATE workspaces
SET status = 'failed',
SET status = $4,
last_sample_error = $2,
updated_at = now()
WHERE id = $1
AND status = 'provisioning'
AND updated_at < now() - ($3 || ' seconds')::interval
`, c.id, msg, timeoutSec)
`, c.id, msg, timeoutSec, models.StatusFailed)
if err != nil {
log.Printf("Provision-timeout sweep: failed to flip %s to failed: %v", c.id, err)
continue

View File

@ -8,6 +8,7 @@ import (
"time"
"github.com/DATA-DOG/go-sqlmock"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
)
// fakeEmitter records every RecordAndBroadcast call so tests can assert
@ -61,7 +62,7 @@ func TestSweepStuckProvisioning_FlipsOverdue(t *testing.T) {
WillReturnRows(candidateRows([3]any{"ws-stuck", "claude-code", 700}))
mock.ExpectExec(`UPDATE workspaces`).
WithArgs("ws-stuck", sqlmock.AnyArg(), sqlmock.AnyArg()).
WithArgs("ws-stuck", sqlmock.AnyArg(), sqlmock.AnyArg(), models.StatusFailed).
WillReturnResult(sqlmock.NewResult(0, 1))
emit := &fakeEmitter{}
@ -116,7 +117,7 @@ func TestSweepStuckProvisioning_HermesPastDeadline(t *testing.T) {
mock.ExpectQuery(`SELECT id, COALESCE\(runtime, ''\), EXTRACT`).
WillReturnRows(candidateRows([3]any{"ws-hermes-stuck", "hermes", 1860}))
mock.ExpectExec(`UPDATE workspaces`).
WithArgs("ws-hermes-stuck", sqlmock.AnyArg(), sqlmock.AnyArg()).
WithArgs("ws-hermes-stuck", sqlmock.AnyArg(), sqlmock.AnyArg(), models.StatusFailed).
WillReturnResult(sqlmock.NewResult(0, 1))
emit := &fakeEmitter{}
@ -146,7 +147,7 @@ func TestSweepStuckProvisioning_RaceSafe(t *testing.T) {
WillReturnRows(candidateRows([3]any{"ws-raced", "claude-code", 700}))
mock.ExpectExec(`UPDATE workspaces`).
WithArgs("ws-raced", sqlmock.AnyArg(), sqlmock.AnyArg()).
WithArgs("ws-raced", sqlmock.AnyArg(), sqlmock.AnyArg(), models.StatusFailed).
WillReturnResult(sqlmock.NewResult(0, 0)) // 0 rows — raced
emit := &fakeEmitter{}
@ -193,10 +194,10 @@ func TestSweepStuckProvisioning_MultipleStuck(t *testing.T) {
))
mock.ExpectExec(`UPDATE workspaces`).
WithArgs("ws-claude-code", sqlmock.AnyArg(), sqlmock.AnyArg()).
WithArgs("ws-claude-code", sqlmock.AnyArg(), sqlmock.AnyArg(), models.StatusFailed).
WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectExec(`UPDATE workspaces`).
WithArgs("ws-hermes", sqlmock.AnyArg(), sqlmock.AnyArg()).
WithArgs("ws-hermes", sqlmock.AnyArg(), sqlmock.AnyArg(), models.StatusFailed).
WillReturnResult(sqlmock.NewResult(0, 1))
emit := &fakeEmitter{}
@ -216,7 +217,7 @@ func TestSweepStuckProvisioning_BroadcastFailureDoesNotCrash(t *testing.T) {
mock.ExpectQuery(`SELECT id, COALESCE\(runtime, ''\), EXTRACT`).
WillReturnRows(candidateRows([3]any{"ws-stuck", "claude-code", 700}))
mock.ExpectExec(`UPDATE workspaces`).
WithArgs("ws-stuck", sqlmock.AnyArg(), sqlmock.AnyArg()).
WithArgs("ws-stuck", sqlmock.AnyArg(), sqlmock.AnyArg(), models.StatusFailed).
WillReturnResult(sqlmock.NewResult(0, 1))
emit := &fakeEmitter{fail: true}