Merge pull request 'fix(workspace-server): SSOT-route container check + 422 on external runtimes (closes #10)' (#12) from fix/issue10-runtime-aware-plugin-install into main
Some checks failed
Auto-sync main → staging / sync-staging (push) Failing after 13s
Block internal-flavored paths / Block forbidden paths (push) Successful in 9s
CI / Detect changes (push) Successful in 11s
E2E API Smoke Test / detect-changes (push) Successful in 10s
E2E Staging Canvas (Playwright) / detect-changes (push) Successful in 11s
Handlers Postgres Integration / detect-changes (push) Successful in 11s
Harness Replays / detect-changes (push) Successful in 13s
Runtime PR-Built Compatibility / detect-changes (push) Successful in 14s
Secret scan / Scan diff for credential-shaped strings (push) Successful in 11s
CI / Shellcheck (E2E scripts) (push) Successful in 5s
CI / Python Lint & Test (push) Successful in 7s
CodeQL / Analyze (${{ matrix.language }}) (go) (push) Failing after 1m3s
publish-workspace-server-image / build-and-push (push) Failing after 54s
Runtime PR-Built Compatibility / PR-built wheel + import smoke (push) Successful in 6s
E2E Staging Canvas (Playwright) / Canvas tabs E2E (push) Successful in 41s
CodeQL / Analyze (${{ matrix.language }}) (javascript-typescript) (push) Failing after 1m34s
CI / Canvas (Next.js) (push) Successful in 58s
CodeQL / Analyze (${{ matrix.language }}) (python) (push) Failing after 1m39s
CI / Canvas Deploy Reminder (push) Has been skipped
Harness Replays / Harness Replays (push) Failing after 46s
Handlers Postgres Integration / Handlers Postgres Integration (push) Failing after 1m14s
CI / Platform (Go) (push) Successful in 4m46s
E2E API Smoke Test / E2E API Smoke Test (push) Failing after 6m14s
Railway pin audit (drift detection) / Audit Railway env vars for drift-prone pins (push) Failing after 11s
Runtime Pin Compatibility / PyPI-latest install + import smoke (push) Successful in 9m58s
branch-protection drift check / Branch protection drift (push) Failing after 6s
Canary — staging SaaS smoke (every 30 min) / Canary smoke (push) Failing after 6s

This commit is contained in:
claude-ceo-assistant 2026-05-07 11:27:52 +00:00
commit 6fac24e3de
5 changed files with 448 additions and 12 deletions

View File

@ -4,6 +4,7 @@ import (
"bytes"
"context"
"io"
"log"
"os"
"path/filepath"
"strings"
@ -177,16 +178,42 @@ func strDefault(m map[string]interface{}, key, fallback string) string {
return fallback
}
// findRunningContainer returns the live container name for workspaceID, or ""
// when the container is genuinely not running OR the daemon errored
// transiently. Routed through provisioner.RunningContainerName as the SSOT
// (molecule-core#10) so this handler agrees with healthsweep on the same
// inputs. Transient daemon errors are logged distinctly so triage doesn't
// confuse a flaky daemon with a stopped container.
func (h *PluginsHandler) findRunningContainer(ctx context.Context, workspaceID string) string {
if h.docker == nil {
name, err := provisioner.RunningContainerName(ctx, h.docker, workspaceID)
if err != nil {
log.Printf("plugins: docker inspect transient error for %s: %v (treating as not-running for this request)", workspaceID, err)
return ""
}
name := provisioner.ContainerName(workspaceID)
info, err := h.docker.ContainerInspect(ctx, name)
if err == nil && info.State.Running {
return name
}
return ""
// isExternalRuntime reports whether workspaceID's runtime is the
// `external` (remote-pull) shape introduced in Phase 30. Such workspaces
// run no local container, so push-install via docker exec (`POST
// /plugins`) does not apply — they fetch plugins through the download
// endpoint instead. When the lookup is unwired or returns an error this
// fails open (returns false, allowing the install attempt); that is safe
// because findRunningContainer still requires a real container downstream.
//
// Background — molecule-core#10: without this guard, external workspaces
// hit findRunningContainer's NotFound path and got a misleading 503
// "container not running" instead of a clear "use the pull endpoint".
func (h *PluginsHandler) isExternalRuntime(workspaceID string) bool {
	if h.runtimeLookup == nil {
		return false
	}
	rt, err := h.runtimeLookup(workspaceID)
	return err == nil && rt == "external"
}
func (h *PluginsHandler) execAsRoot(ctx context.Context, containerName string, cmd []string) (string, error) {

View File

@ -0,0 +1,176 @@
package handlers
import (
"go/ast"
"go/parser"
"go/token"
"strings"
"testing"
)
// TestFindRunningContainer_RoutesThroughProvisionerSSOT is a behavior-based
// AST gate: it pins the invariant that PluginsHandler.findRunningContainer
// MUST go through provisioner.RunningContainerName for its is-running check,
// instead of carrying its own copy of cli.ContainerInspect logic.
//
// Background — molecule-core#10: a parallel impl of "is the workspace's
// container running" used to live in plugins.go. It drifted from the
// canonical impl in healthsweep (which goes through Provisioner.IsRunning
// → RunningContainerName) on edge cases like "transient daemon error" —
// the duplicate would 503 with a misleading message while healthsweep
// correctly stayed defensive. Consolidating onto RunningContainerName as
// the SSOT prevents any future copy from re-introducing that drift.
//
// Mutation invariant: if a future PR replaces the provisioner call with
// `h.docker.ContainerInspect(...)` directly, this test fails. That's the
// signal to either (a) extend RunningContainerName's contract OR (b)
// document why this call site needs to differ. Either way: the drift
// gets a reviewer's attention instead of shipping silently.
func TestFindRunningContainer_RoutesThroughProvisionerSSOT(t *testing.T) {
fset := token.NewFileSet()
file, err := parser.ParseFile(fset, "plugins.go", nil, parser.ParseComments)
if err != nil {
t.Fatalf("parse plugins.go: %v", err)
}
var fn *ast.FuncDecl
ast.Inspect(file, func(n ast.Node) bool {
f, ok := n.(*ast.FuncDecl)
if !ok || f.Name.Name != "findRunningContainer" {
return true
}
// Confirm receiver is *PluginsHandler so we don't pick up an unrelated
// helper of the same name. ast.Recv is a FieldList — receivers carry
// at most one field.
if f.Recv == nil || len(f.Recv.List) == 0 {
return true
}
fn = f
return false
})
if fn == nil {
t.Fatal("findRunningContainer not found in plugins.go — was it renamed? update this test or the SSOT routing assumption")
}
var (
callsRunningContainerName bool
callsContainerInspectRaw bool
)
ast.Inspect(fn.Body, func(n ast.Node) bool {
call, ok := n.(*ast.CallExpr)
if !ok {
return true
}
sel, ok := call.Fun.(*ast.SelectorExpr)
if !ok {
return true
}
// Pkg.Func form: provisioner.RunningContainerName(...)
if pkgIdent, ok := sel.X.(*ast.Ident); ok {
if pkgIdent.Name == "provisioner" && sel.Sel.Name == "RunningContainerName" {
callsRunningContainerName = true
}
}
// Receiver-then-method form: h.docker.ContainerInspect(...) /
// p.cli.ContainerInspect(...) — anything ending in
// .ContainerInspect that's NOT routed through provisioner.
if sel.Sel.Name == "ContainerInspect" {
callsContainerInspectRaw = true
}
return true
})
if !callsRunningContainerName {
t.Errorf(
"findRunningContainer must call provisioner.RunningContainerName for the SSOT inspect — see molecule-core#10. Found no such call.",
)
}
if callsContainerInspectRaw {
t.Errorf(
"findRunningContainer carries a direct ContainerInspect call. This is the parallel-impl drift molecule-core#10 fixed. " +
"Either route through provisioner.RunningContainerName OR — if a new use case truly needs a different inspect — extend RunningContainerName's contract first and update this gate to allow the specific delta.",
)
}
}
// TestProvisionerIsRunning_RoutesThroughRunningContainerName is the mirror
// of the gate above, aimed at the OTHER consumer of the SSOT:
// Provisioner.IsRunning (healthsweep's entry point). Should a refactor ever
// give IsRunning its own ContainerInspect again, the two consumers would
// quietly diverge on edge-case behavior — this test keeps them yoked.
func TestProvisionerIsRunning_RoutesThroughRunningContainerName(t *testing.T) {
	fset := token.NewFileSet()
	parsed, err := parser.ParseFile(fset, "../provisioner/provisioner.go", nil, parser.ParseComments)
	if err != nil {
		t.Fatalf("parse provisioner.go: %v", err)
	}
	var target *ast.FuncDecl
	ast.Inspect(parsed, func(n ast.Node) bool {
		decl, ok := n.(*ast.FuncDecl)
		if !ok || decl.Name.Name != "IsRunning" || decl.Recv == nil {
			return true
		}
		// Only *Provisioner's IsRunning is in scope here. CPProvisioner
		// also has an IsRunning (HTTP to the controlplane) and is
		// deliberately outside this gate.
		if !receiverIs(decl, "Provisioner") {
			return true
		}
		target = decl
		return false
	})
	if target == nil {
		t.Fatal("Provisioner.IsRunning not found — was it renamed? update this test")
	}
	sawSSOT := false
	sawRawInspect := false
	ast.Inspect(target.Body, func(n ast.Node) bool {
		call, ok := n.(*ast.CallExpr)
		if !ok {
			return true
		}
		switch fun := call.Fun.(type) {
		case *ast.Ident:
			// Same-package call shape: bare RunningContainerName(...).
			if fun.Name == "RunningContainerName" {
				sawSSOT = true
			}
		case *ast.SelectorExpr:
			// Selector shapes: pkg.Func (provisioner.RunningContainerName)
			// or recv.Method (p.cli.ContainerInspect).
			if fun.Sel.Name == "RunningContainerName" {
				sawSSOT = true
			} else if fun.Sel.Name == "ContainerInspect" {
				sawRawInspect = true
			}
		}
		return true
	})
	if !sawSSOT {
		t.Errorf("Provisioner.IsRunning must call RunningContainerName for the SSOT inspect — see molecule-core#10")
	}
	if sawRawInspect {
		t.Errorf("Provisioner.IsRunning carries a direct ContainerInspect call; route through RunningContainerName instead")
	}
}
// receiverIs reports whether fn's receiver is `*<typeName>` or `<typeName>`.
func receiverIs(fn *ast.FuncDecl, typeName string) bool {
if fn.Recv == nil || len(fn.Recv.List) == 0 {
return false
}
expr := fn.Recv.List[0].Type
if star, ok := expr.(*ast.StarExpr); ok {
expr = star.X
}
id, ok := expr.(*ast.Ident)
return ok && strings.EqualFold(id.Name, typeName)
}

View File

@ -32,6 +32,18 @@ import (
// inside the workspace at startup.
func (h *PluginsHandler) Install(c *gin.Context) {
workspaceID := c.Param("id")
// External-runtime guard (molecule-core#10): push-install via docker
// exec is meaningless for `runtime='external'` workspaces — they have
// no local container. Reject early with a hint pointing at the
// pull-mode endpoint, instead of falling through to a misleading
// "container not running" 503 from findRunningContainer.
if h.isExternalRuntime(workspaceID) {
c.JSON(http.StatusUnprocessableEntity, gin.H{
"error": "plugin install via push is not supported for external runtimes",
"hint": "external workspaces pull plugins via GET /workspaces/:id/plugins/:name/download",
})
return
}
// Cap the JSON body so a pathological POST can't exhaust parser memory.
bodyMax := envx.Int64("PLUGIN_INSTALL_BODY_MAX_BYTES", defaultInstallBodyMaxBytes)
c.Request.Body = http.MaxBytesReader(c.Writer, c.Request.Body, bodyMax)
@ -93,6 +105,16 @@ func (h *PluginsHandler) Uninstall(c *gin.Context) {
pluginName := c.Param("name")
ctx := c.Request.Context()
// Mirror Install's external-runtime guard (molecule-core#10) so the
// two endpoints reject the same shape with the same message.
if h.isExternalRuntime(workspaceID) {
c.JSON(http.StatusUnprocessableEntity, gin.H{
"error": "plugin uninstall via docker exec is not supported for external runtimes",
"hint": "external workspaces manage their own plugin directory; remove it locally",
})
return
}
if err := validatePluginName(pluginName); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid plugin name"})
return

View File

@ -0,0 +1,176 @@
package handlers
import (
"bytes"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/gin-gonic/gin"
)
// TestPluginInstall_ExternalRuntime_Returns422 — molecule-core#10.
// Install on a `runtime='external'` workspace must NOT fall through to
// findRunningContainer (which would 503 with a misleading "container not
// running"). It must return 422 plus a hint naming the pull-mode
// download endpoint.
func TestPluginInstall_ExternalRuntime_Returns422(t *testing.T) {
	const wsID = "ba1789b0-4d21-4f4f-a878-fa226bf77cf5"
	h := NewPluginsHandler(t.TempDir(), nil, nil).
		WithRuntimeLookup(func(string) (string, error) { return "external", nil })

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	ctx.Params = gin.Params{{Key: "id", Value: wsID}}
	req := httptest.NewRequest(
		"POST",
		"/workspaces/"+wsID+"/plugins",
		bytes.NewBufferString(`{"source":"local://my-plugin"}`),
	)
	req.Header.Set("Content-Type", "application/json")
	ctx.Request = req

	h.Install(ctx)

	if rec.Code != http.StatusUnprocessableEntity {
		t.Errorf("expected 422 (Unprocessable Entity) for runtime='external', got %d: %s", rec.Code, rec.Body.String())
	}
	body := rec.Body.String()
	if !strings.Contains(body, "external runtimes") {
		t.Errorf("expected error body to mention 'external runtimes', got: %s", body)
	}
	if !strings.Contains(body, "download") {
		t.Errorf("expected error body to point at the download endpoint, got: %s", body)
	}
}
// TestPluginUninstall_ExternalRuntime_Returns422 — symmetric guard on the
// uninstall path (DELETE /workspaces/:id/plugins/:name). External
// workspaces manage their own plugin directory locally; the platform
// can't docker-exec into them.
func TestPluginUninstall_ExternalRuntime_Returns422(t *testing.T) {
	const wsID = "ba1789b0-4d21-4f4f-a878-fa226bf77cf5"
	h := NewPluginsHandler(t.TempDir(), nil, nil).
		WithRuntimeLookup(func(string) (string, error) { return "external", nil })

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	ctx.Params = gin.Params{
		{Key: "id", Value: wsID},
		{Key: "name", Value: "my-plugin"},
	}
	ctx.Request = httptest.NewRequest("DELETE", "/workspaces/"+wsID+"/plugins/my-plugin", nil)

	h.Uninstall(ctx)

	if rec.Code != http.StatusUnprocessableEntity {
		t.Errorf("expected 422 for runtime='external', got %d: %s", rec.Code, rec.Body.String())
	}
	if body := rec.Body.String(); !strings.Contains(body, "external runtimes") {
		t.Errorf("expected error body to mention 'external runtimes', got: %s", body)
	}
}
// TestPluginInstall_ContainerBackedRuntime_FallsThroughGuard — the runtime
// guard MUST NOT short-circuit container-backed runtimes. With
// `runtime='claude-code'` the install proceeds past the guard; lacking a
// real plugin source it then fails downstream (404 from the local
// resolver, since no plugin is staged) — the correct error to surface.
//
// This is the mutation-test partner: deleting the `runtime == "external"`
// check would still pass TestPluginInstall_ExternalRuntime (because Install
// would 404 instead of 422 — but the test asserts 422), and would still
// pass this test (because both pre-fix and post-fix produce 404 here).
// What this case pins is "non-external still falls through," catching
// any over-eager guard that rejects all runtimes.
func TestPluginInstall_ContainerBackedRuntime_FallsThroughGuard(t *testing.T) {
	const wsID = "c7c28c0b-4ea5-4e75-9728-3ba860081708"
	h := NewPluginsHandler(t.TempDir(), nil, nil).
		WithRuntimeLookup(func(string) (string, error) { return "claude-code", nil })

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	ctx.Params = gin.Params{{Key: "id", Value: wsID}}
	req := httptest.NewRequest(
		"POST",
		"/workspaces/"+wsID+"/plugins",
		bytes.NewBufferString(`{"source":"local://nonexistent-plugin"}`),
	)
	req.Header.Set("Content-Type", "application/json")
	ctx.Request = req

	h.Install(ctx)

	if rec.Code == http.StatusUnprocessableEntity {
		t.Errorf("runtime='claude-code' must fall through the external guard; got 422: %s", rec.Body.String())
	}
	// The local resolver will fail to find the plugin → 404. Anything
	// other than 422 (which would mean we mis-classified) is fine.
	if rec.Code != http.StatusNotFound {
		t.Errorf("expected 404 (plugin not found in registry), got %d: %s", rec.Code, rec.Body.String())
	}
}
// TestPluginInstall_NoRuntimeLookup_FailsOpen — with the runtime lookup
// left unwired (test fixtures, niche deploy shapes) the guard MUST default
// to letting the install attempt proceed. This is not a bypass: the
// downstream findRunningContainer step still requires a real container.
// It simply preserves backwards-compat with deployments that never wired
// the lookup.
func TestPluginInstall_NoRuntimeLookup_FailsOpen(t *testing.T) {
	h := NewPluginsHandler(t.TempDir(), nil, nil) // NO WithRuntimeLookup

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	ctx.Params = gin.Params{{Key: "id", Value: "ws-no-lookup"}}
	req := httptest.NewRequest(
		"POST",
		"/workspaces/ws-no-lookup/plugins",
		bytes.NewBufferString(`{"source":"local://nonexistent"}`),
	)
	req.Header.Set("Content-Type", "application/json")
	ctx.Request = req

	h.Install(ctx)

	if rec.Code == http.StatusUnprocessableEntity {
		t.Errorf("nil runtimeLookup must fall through (fail-open); got 422: %s", rec.Body.String())
	}
}
// TestPluginInstall_RuntimeLookupErrors_FailsOpen — the same fail-open
// policy covers transient DB errors from the lookup: a momentary Postgres
// hiccup must not flip every plugin install into a 422.
func TestPluginInstall_RuntimeLookupErrors_FailsOpen(t *testing.T) {
	h := NewPluginsHandler(t.TempDir(), nil, nil).
		WithRuntimeLookup(func(string) (string, error) { return "", errFakeDB })

	rec := httptest.NewRecorder()
	ctx, _ := gin.CreateTestContext(rec)
	ctx.Params = gin.Params{{Key: "id", Value: "ws-db-flake"}}
	req := httptest.NewRequest(
		"POST",
		"/workspaces/ws-db-flake/plugins",
		bytes.NewBufferString(`{"source":"local://nonexistent"}`),
	)
	req.Header.Set("Content-Type", "application/json")
	ctx.Request = req

	h.Install(ctx)

	if rec.Code == http.StatusUnprocessableEntity {
		t.Errorf("runtimeLookup error must fall through (fail-open); got 422: %s", rec.Body.String())
	}
}
// errFakeDB is a sentinel for the fail-open lookup-error case.
var errFakeDB = &fakeError{text: "synthetic db error"}

// fakeError is a minimal error implementation so the tests above don't
// depend on a real driver error value.
type fakeError struct{ text string }

// Error satisfies the error interface.
func (f *fakeError) Error() string { return f.text }

View File

@ -1073,18 +1073,53 @@ func (p *Provisioner) IsRunning(ctx context.Context, workspaceID string) (bool,
if p == nil || p.cli == nil {
return false, ErrNoBackend
}
name := ContainerName(workspaceID)
info, err := p.cli.ContainerInspect(ctx, name)
name, err := RunningContainerName(ctx, p.cli, workspaceID)
if err != nil {
if isContainerNotFound(err) {
return false, nil
}
// Transient daemon error: caller treats !running as dead + restarts.
// Returning true + the underlying error preserves the error for
// metrics/logging without triggering the destructive path.
return true, err
}
return info.State.Running, nil
return name != "", nil
}
// RunningContainerName resolves workspaceID to its container name iff that
// container exists AND is in the Running state. It is the single source of
// truth for "which live container do I exec into for this workspace?",
// shared by Provisioner.IsRunning (healthsweep) and the plugins handler.
//
// Three distinct outcomes let every caller pick its own policy:
//
//   - ("ws-<id>", nil): running — safe to exec into.
//   - ("", nil):        definitive "not running": the container does not
//                       exist, or exists but is stopped (NotFound,
//                       Exited, Created, Restarting, …).
//   - ("", err):        transient daemon error (timeout, socket EOF, ctx
//                       cancel). Do NOT read this as "not running" — the
//                       daemon may simply be flaky under load; each call
//                       site decides whether to fail soft or hard.
//
// Background — molecule-core#10: the plugins handler used to keep a
// private copy of this inspect (`findRunningContainer`) that collapsed
// transient errors into the same "" as a genuinely-stopped container,
// hiding daemon flakes behind misleading 503 "container not running"
// responses and letting the two impls drift. This is the SSOT.
func RunningContainerName(ctx context.Context, cli *client.Client, workspaceID string) (string, error) {
	if cli == nil {
		return "", ErrNoBackend
	}
	name := ContainerName(workspaceID)
	info, err := cli.ContainerInspect(ctx, name)
	switch {
	case err == nil && info.State.Running:
		return name, nil
	case err == nil || isContainerNotFound(err):
		// Exists-but-stopped, or doesn't exist at all: definitively not
		// running.
		return "", nil
	default:
		// Transient daemon error — surface it rather than guessing.
		return "", err
	}
}
// isContainerNotFound returns true when the Docker client indicates the