Compare commits


No commits in common. "main" and "feat/plugin-version-subscription" have entirely different histories.

34 changed files with 333 additions and 1718 deletions

View File

@@ -1,118 +0,0 @@
#!/usr/bin/env bash
# audit-force-merge — detect a §SOP-6 force-merge after PR close, emit
# `incident.force_merge` to stdout as structured JSON.
#
# Vector's docker_logs source picks up runner stdout; the JSON gets
# shipped to Loki on molecule-canonical-obs, indexable by event_type.
# Query example:
#
# {host="operator"} |= "event_type" |= "incident.force_merge" | json
#
# A force-merge is detected when a PR closed with merged=true had at
# least one of the repo's required-status-check contexts in a state
# other than "success" at the PR's head SHA, which is where branch
# protection evaluated them (see step 3 below). That's exactly what
# the Gitea force_merge:true API call lets through, so it's a faithful
# detector of the override path.
#
# Triggers on `pull_request_target: closed` (loaded from base branch
# per §SOP-6 security model). No-op when merged=false.
#
# Required env (set by the workflow):
# GITEA_TOKEN, GITEA_HOST, REPO, PR_NUMBER, REQUIRED_CHECKS
#
# REQUIRED_CHECKS is a newline-separated list of status-check context
# names that branch protection requires. Declared in the workflow YAML
# rather than fetched from /branch_protections (which needs admin
# scope — sop-tier-bot has read-only). Trade dynamism for simplicity:
# when the required-check set changes, update both branch protection
# AND this env. Keeping them in sync is less complexity than granting
# the audit bot admin perms on every repo.
set -euo pipefail
: "${GITEA_TOKEN:?required}"
: "${GITEA_HOST:?required}"
: "${REPO:?required}"
: "${PR_NUMBER:?required}"
: "${REQUIRED_CHECKS:?required (newline-separated context names)}"
OWNER="${REPO%%/*}"
NAME="${REPO##*/}"
API="https://${GITEA_HOST}/api/v1"
AUTH="Authorization: token ${GITEA_TOKEN}"
# 1. Fetch the PR. If not merged, no-op.
PR=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/pulls/${PR_NUMBER}")
MERGED=$(echo "$PR" | jq -r '.merged // false')
if [ "$MERGED" != "true" ]; then
echo "::notice::PR #${PR_NUMBER} closed without merge — no audit emission."
exit 0
fi
MERGE_SHA=$(echo "$PR" | jq -r '.merge_commit_sha // empty')
MERGED_BY=$(echo "$PR" | jq -r '.merged_by.login // "unknown"')
TITLE=$(echo "$PR" | jq -r '.title // ""')
BASE_BRANCH=$(echo "$PR" | jq -r '.base.ref // "main"')
HEAD_SHA=$(echo "$PR" | jq -r '.head.sha // empty')
if [ -z "$MERGE_SHA" ]; then
echo "::warning::PR #${PR_NUMBER} merged=true but no merge_commit_sha — cannot evaluate force-merge."
exit 0
fi
# 2. Required status checks declared in the workflow env.
REQUIRED="$REQUIRED_CHECKS"
if [ -z "${REQUIRED//[[:space:]]/}" ]; then
echo "::notice::REQUIRED_CHECKS empty — force-merge not applicable."
exit 0
fi
# 3. Status-check state at the PR HEAD (where checks ran). The merge
# commit doesn't get its own checks; we evaluate the PR's last
# commit, which is what branch protection compared against.
STATUS=$(curl -sS -H "$AUTH" \
"${API}/repos/${OWNER}/${NAME}/commits/${HEAD_SHA}/status")
declare -A CHECK_STATE
while IFS=$'\t' read -r ctx state; do
[ -n "$ctx" ] && CHECK_STATE[$ctx]="$state"
done < <(echo "$STATUS" | jq -r '.statuses // [] | .[] | "\(.context)\t\(.status)"')
# 4. For each required check, was it green at merge? YAML block scalars
# (`|`) leave a trailing newline; skip blank/whitespace-only lines.
FAILED_CHECKS=()
while IFS= read -r req; do
trimmed="${req#"${req%%[![:space:]]*}"}" # ltrim
trimmed="${trimmed%"${trimmed##*[![:space:]]}"}" # rtrim
[ -z "$trimmed" ] && continue
state="${CHECK_STATE[$trimmed]:-missing}"
if [ "$state" != "success" ]; then
FAILED_CHECKS+=("${trimmed}=${state}")
fi
done <<< "$REQUIRED"
if [ "${#FAILED_CHECKS[@]}" -eq 0 ]; then
echo "::notice::PR #${PR_NUMBER} merged with all required checks green — not a force-merge."
exit 0
fi
# 5. Emit structured audit event.
NOW=$(date -u +%Y-%m-%dT%H:%M:%SZ)
FAILED_JSON=$(printf '%s\n' "${FAILED_CHECKS[@]}" | jq -R . | jq -s .)
# Print as a single-line JSON so Vector's parse_json transform can pick
# it up cleanly from docker_logs.
jq -nc \
--arg event_type "incident.force_merge" \
--arg ts "$NOW" \
--arg repo "$REPO" \
--argjson pr "$PR_NUMBER" \
--arg title "$TITLE" \
--arg base "$BASE_BRANCH" \
--arg merged_by "$MERGED_BY" \
--arg merge_sha "$MERGE_SHA" \
--argjson failed_checks "$FAILED_JSON" \
'{event_type: $event_type, ts: $ts, repo: $repo, pr: $pr, title: $title,
base_branch: $base, merged_by: $merged_by, merge_sha: $merge_sha,
failed_checks: $failed_checks}'
echo "::warning::FORCE-MERGE detected on PR #${PR_NUMBER} by ${MERGED_BY}: ${#FAILED_CHECKS[@]} required check(s) not green at merge time."

View File

@@ -1,149 +0,0 @@
#!/usr/bin/env bash
# sop-tier-check — verify a Gitea PR satisfies the §SOP-6 approval gate.
#
# Reads the PR's tier label, walks approving reviewers, and checks each
# approver's Gitea team membership against the tier's eligible-team set.
# Marks pass only when at least one non-author approver is in an eligible
# team.
#
# Invoked from `.gitea/workflows/sop-tier-check.yml`. The workflow sets
# the env vars below; this script does no IO outside of stdout/stderr +
# the Gitea API.
#
# Required env:
# GITEA_TOKEN — bot PAT with read:organization,read:user,
# read:issue,read:repository scopes
# GITEA_HOST — e.g. git.moleculesai.app
# REPO — owner/name (from github.repository)
# PR_NUMBER — int (from github.event.pull_request.number)
# PR_AUTHOR — login (from github.event.pull_request.user.login)
#
# Optional:
# SOP_DEBUG=1 — print per-API-call diagnostic lines (HTTP codes,
# raw response bodies). Default: off.
#
# Stale-status caveat: Gitea Actions does not always re-fire workflows
# on `labeled` / `pull_request_review:submitted` events. If the
# sop-tier-check status is stale (e.g. red after labels/approvals were
# added), push an empty commit to the PR branch to force a synchronize
# event, OR re-request reviews. Tracked: internal#46.
set -euo pipefail
debug() {
if [ "${SOP_DEBUG:-}" = "1" ]; then
echo " [debug] $*" >&2
fi
}
# Validate env
: "${GITEA_TOKEN:?GITEA_TOKEN required}"
: "${GITEA_HOST:?GITEA_HOST required}"
: "${REPO:?REPO required (owner/name)}"
: "${PR_NUMBER:?PR_NUMBER required}"
: "${PR_AUTHOR:?PR_AUTHOR required}"
OWNER="${REPO%%/*}"
NAME="${REPO##*/}"
API="https://${GITEA_HOST}/api/v1"
AUTH="Authorization: token ${GITEA_TOKEN}"
echo "::notice::tier-check start: repo=$OWNER/$NAME pr=$PR_NUMBER author=$PR_AUTHOR"
# Sanity: token resolves to a user
WHOAMI=$(curl -sS -H "$AUTH" "${API}/user" | jq -r '.login // ""')
if [ -z "$WHOAMI" ]; then
echo "::error::GITEA_TOKEN cannot resolve a user via /api/v1/user — check the token scope and that the secret is wired correctly."
exit 1
fi
echo "::notice::token resolves to user: $WHOAMI"
# 1. Read tier label
LABELS=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/issues/${PR_NUMBER}/labels" | jq -r '.[].name')
TIER=""
for L in $LABELS; do
case "$L" in
tier:low|tier:medium|tier:high)
if [ -n "$TIER" ]; then
echo "::error::Multiple tier labels: $TIER + $L. Apply exactly one."
exit 1
fi
TIER="$L"
;;
esac
done
if [ -z "$TIER" ]; then
echo "::error::PR has no tier:low|tier:medium|tier:high label. Apply one before merge."
exit 1
fi
debug "tier=$TIER"
# 2. Tier → eligible teams
case "$TIER" in
tier:low) ELIGIBLE="engineers managers ceo" ;;
tier:medium) ELIGIBLE="managers ceo" ;;
tier:high) ELIGIBLE="ceo" ;;
esac
debug "eligible_teams=$ELIGIBLE"
# Resolve team-name → team-id once. /orgs/{org}/teams/{slug}/... endpoints
# don't exist on Gitea 1.22; we have to use /teams/{id}.
ORG_TEAMS_FILE=$(mktemp)
trap 'rm -f "$ORG_TEAMS_FILE"' EXIT
HTTP_CODE=$(curl -sS -o "$ORG_TEAMS_FILE" -w '%{http_code}' -H "$AUTH" \
"${API}/orgs/${OWNER}/teams")
debug "teams-list HTTP=$HTTP_CODE size=$(wc -c <"$ORG_TEAMS_FILE")"
if [ "${SOP_DEBUG:-}" = "1" ]; then
echo " [debug] teams-list body (first 300 chars):" >&2
head -c 300 "$ORG_TEAMS_FILE" >&2; echo >&2
fi
if [ "$HTTP_CODE" != "200" ]; then
echo "::error::GET /orgs/${OWNER}/teams returned HTTP $HTTP_CODE — token likely lacks read:org scope. Add a SOP_TIER_CHECK_TOKEN secret with read:organization scope at the org level."
exit 1
fi
declare -A TEAM_ID
for T in $ELIGIBLE; do
ID=$(jq -r --arg t "$T" '.[] | select(.name==$t) | .id' <"$ORG_TEAMS_FILE" | head -1)
if [ -z "$ID" ] || [ "$ID" = "null" ]; then
VISIBLE=$(jq -r '.[]?.name? // empty' <"$ORG_TEAMS_FILE" 2>/dev/null | tr '\n' ' ')
echo "::error::Team \"$T\" not found in org $OWNER. Teams visible: $VISIBLE"
exit 1
fi
TEAM_ID[$T]="$ID"
debug "team-id: $T$ID"
done
# 3. Read approving reviewers
REVIEWS=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/pulls/${PR_NUMBER}/reviews")
APPROVERS=$(echo "$REVIEWS" | jq -r '[.[] | select(.state=="APPROVED") | .user.login] | unique | .[]')
if [ -z "$APPROVERS" ]; then
echo "::error::No approving reviews. Tier $TIER requires approval from {$ELIGIBLE} (non-author)."
exit 1
fi
debug "approvers: $(echo "$APPROVERS" | tr '\n' ' ')"
# 4. For each approver: check non-author + team membership (by id)
OK=""
for U in $APPROVERS; do
if [ "$U" = "$PR_AUTHOR" ]; then
debug "skip self-review by $U"
continue
fi
for T in $ELIGIBLE; do
ID="${TEAM_ID[$T]}"
CODE=$(curl -sS -o /dev/null -w '%{http_code}' -H "$AUTH" \
"${API}/teams/${ID}/members/${U}")
debug "probe: $U in team $T (id=$ID) → HTTP $CODE"
if [ "$CODE" = "200" ] || [ "$CODE" = "204" ]; then
echo "::notice::approver $U is in team $T (eligible for $TIER)"
OK="yes"
break
fi
done
[ -n "$OK" ] && break
done
if [ -z "$OK" ]; then
echo "::error::Tier $TIER requires approval from a non-author member of {$ELIGIBLE}. Got approvers: $APPROVERS — none of them satisfied team membership. Set SOP_DEBUG=1 to see per-probe HTTP codes."
exit 1
fi
echo "::notice::sop-tier-check passed: $TIER, approver in {$ELIGIBLE}"

View File

@@ -1,58 +0,0 @@
# audit-force-merge — emit `incident.force_merge` to runner stdout when
# a PR is merged with required-status-checks not green. Vector picks
# the JSON line off docker_logs and ships to Loki on
# molecule-canonical-obs (per `reference_obs_stack_phase1`); query as:
#
# {host="operator"} |= "event_type" |= "incident.force_merge" | json
#
# Closes the §SOP-6 audit gap (the doc says force-merges write to
# `structure_events`, but that table lives in the platform DB, not
# Gitea-side; Loki is the practical equivalent for Gitea Actions
# events). When the credential / observability stack converges later,
# this can sync into structure_events from Loki via a backfill job —
# the structured JSON shape is forward-compatible.
#
# Logic in `.gitea/scripts/audit-force-merge.sh` per the same script-
# extract pattern as sop-tier-check.
name: audit-force-merge
# pull_request_target loads from the base branch — same security model
# as sop-tier-check. Without this, an attacker could rewrite the
# workflow on a PR and skip the audit emission for their own
# force-merge. See `.gitea/workflows/sop-tier-check.yml` for the full
# rationale.
on:
pull_request_target:
types: [closed]
jobs:
audit:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
# Skip when PR is closed without merge — saves a runner.
if: github.event.pull_request.merged == true
steps:
- name: Check out base branch (for the script)
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event.pull_request.base.sha }}
- name: Detect force-merge + emit audit event
env:
# Same org-level secret the sop-tier-check workflow uses.
GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}
GITEA_HOST: git.moleculesai.app
REPO: ${{ github.repository }}
PR_NUMBER: ${{ github.event.pull_request.number }}
# Required-status-check contexts to evaluate at merge time.
# Newline-separated. Mirror this against branch protection
# (settings → branches → protected branch → required checks).
# Declared here rather than fetched from /branch_protections
# because that endpoint requires admin write — sop-tier-bot is
# read-only by design (least-privilege).
REQUIRED_CHECKS: |
sop-tier-check / tier-check (pull_request)
Secret scan / Scan diff for credential-shaped strings (pull_request)
run: bash .gitea/scripts/audit-force-merge.sh
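
To see which contexts would be compared against REQUIRED_CHECKS, the same status endpoint the script calls can be queried by hand (a sketch; the repo path is illustrative and HEAD_SHA is the PR's head commit):

curl -sS -H "Authorization: token $GITEA_TOKEN" \
  "https://git.moleculesai.app/api/v1/repos/molecule-ai/molecule-core/commits/${HEAD_SHA}/status" \
  | jq -r '.statuses // [] | .[] | "\(.context)\t\(.status)"'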

View File

@@ -1,191 +0,0 @@
name: Secret scan
# Hard CI gate. Refuses any PR / push whose diff additions contain a
# recognisable credential. Defense-in-depth for the #2090-class incident
# (2026-04-24): GitHub's hosted Copilot Coding Agent leaked a ghs_*
# installation token into tenant-proxy/package.json via `npm init`
# slurping the URL from a token-embedded origin remote. We can't fix
# upstream's clone hygiene, so we gate here.
#
# Same regex set as the runtime's bundled pre-commit hook
# (molecule-ai-workspace-runtime: molecule_runtime/scripts/pre-commit-checks.sh).
# Keep the two sides aligned when adding patterns.
#
# Ported from .github/workflows/secret-scan.yml so the gate actually
# fires on Gitea Actions. Differences from the GitHub version:
# - drops `merge_group` event (Gitea has no merge queue)
# - drops `workflow_call` (no cross-repo reusable invocation on Gitea)
# - SELF path updated to .gitea/workflows/secret-scan.yml
# The job name + step name are identical to the GitHub workflow so the
# status-check context (`Secret scan / Scan diff for credential-shaped
# strings (pull_request)`) matches branch protection on molecule-core/main.
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches: [main, staging]
jobs:
scan:
name: Scan diff for credential-shaped strings
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 2 # need previous commit to diff against on push events
# For pull_request events the diff base may be many commits behind
# HEAD and absent from the shallow clone. Fetch it explicitly.
- name: Fetch PR base SHA (pull_request events only)
if: github.event_name == 'pull_request'
run: git fetch --depth=1 origin ${{ github.event.pull_request.base.sha }}
- name: Refuse if credential-shaped strings appear in diff additions
env:
# Plumb event-specific SHAs through env so the script doesn't
# need conditional `${{ ... }}` interpolation per event type.
# github.event.before/after only exist on push events;
# pull_request has pull_request.base.sha / pull_request.head.sha.
PR_BASE_SHA: ${{ github.event.pull_request.base.sha }}
PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
PUSH_BEFORE: ${{ github.event.before }}
PUSH_AFTER: ${{ github.event.after }}
run: |
# Pattern set covers GitHub family (the actual #2090 vector),
# Anthropic / OpenAI / Slack / AWS. Anchored on prefixes with low
# false-positive rates against agent-generated content. Mirror of
# molecule-ai-workspace-runtime/molecule_runtime/scripts/pre-commit-checks.sh
# — keep aligned.
SECRET_PATTERNS=(
'ghp_[A-Za-z0-9]{36,}' # GitHub PAT (classic)
'ghs_[A-Za-z0-9]{36,}' # GitHub App installation token
'gho_[A-Za-z0-9]{36,}' # GitHub OAuth user-to-server
'ghu_[A-Za-z0-9]{36,}' # GitHub OAuth user
'ghr_[A-Za-z0-9]{36,}' # GitHub OAuth refresh
'github_pat_[A-Za-z0-9_]{82,}' # GitHub fine-grained PAT
'sk-ant-[A-Za-z0-9_-]{40,}' # Anthropic API key
'sk-proj-[A-Za-z0-9_-]{40,}' # OpenAI project key
'sk-svcacct-[A-Za-z0-9_-]{40,}' # OpenAI service-account key
'sk-cp-[A-Za-z0-9_-]{60,}' # MiniMax API key (F1088 vector — caught only after the fact)
'xox[baprs]-[A-Za-z0-9-]{20,}' # Slack tokens
'AKIA[0-9A-Z]{16}' # AWS access key ID
'ASIA[0-9A-Z]{16}' # AWS STS temp access key ID
)
# Determine the diff base. Each event type stores its SHAs in
# a different place — see the env block above.
case "${{ github.event_name }}" in
pull_request)
BASE="$PR_BASE_SHA"
HEAD="$PR_HEAD_SHA"
;;
*)
BASE="$PUSH_BEFORE"
HEAD="$PUSH_AFTER"
;;
esac
# On push events with shallow clones, BASE may be present in
# the event payload but absent from the local object DB
# (fetch-depth=2 doesn't always reach the previous commit
# across true merges). Try fetching it on demand. If the
# fetch fails — e.g. the SHA was force-overwritten — we fall
# through to the empty-BASE branch below, which scans the
# entire tree as if every file were new. Correct, just slow.
if [ -n "$BASE" ] && ! echo "$BASE" | grep -qE '^0+$'; then
if ! git cat-file -e "$BASE" 2>/dev/null; then
git fetch --depth=1 origin "$BASE" 2>/dev/null || true
fi
fi
# Files added or modified in this change.
if [ -z "$BASE" ] || echo "$BASE" | grep -qE '^0+$' || ! git cat-file -e "$BASE" 2>/dev/null; then
# New branch / no previous SHA / BASE unreachable — check the
# entire tree as added content. Slower, but correct on first
# push.
CHANGED=$(git ls-tree -r --name-only HEAD)
DIFF_RANGE=""
else
CHANGED=$(git diff --name-only --diff-filter=AM "$BASE" "$HEAD")
DIFF_RANGE="$BASE $HEAD"
fi
if [ -z "$CHANGED" ]; then
echo "No changed files to inspect."
exit 0
fi
# Self-exclude: this workflow file legitimately contains the
# pattern strings as regex literals. Without an exclude it would
# block its own merge. Both the .github/ original and this
# .gitea/ port are excluded so a sync between them stays clean.
SELF_GITHUB=".github/workflows/secret-scan.yml"
SELF_GITEA=".gitea/workflows/secret-scan.yml"
OFFENDING=""
# `while IFS= read -r` (not `for f in $CHANGED`) so filenames
# containing whitespace don't word-split silently — a path
# with a space would otherwise produce two iterations on
# tokens that aren't real filenames, breaking the
# self-exclude + diff lookup.
while IFS= read -r f; do
[ -z "$f" ] && continue
[ "$f" = "$SELF_GITHUB" ] && continue
[ "$f" = "$SELF_GITEA" ] && continue
if [ -n "$DIFF_RANGE" ]; then
ADDED=$(git diff --no-color --unified=0 "$BASE" "$HEAD" -- "$f" 2>/dev/null | grep -E '^\+[^+]' || true)
else
# No diff range (new branch first push) — scan the full file
# contents as if every line were new.
ADDED=$(cat "$f" 2>/dev/null || true)
fi
[ -z "$ADDED" ] && continue
for pattern in "${SECRET_PATTERNS[@]}"; do
if echo "$ADDED" | grep -qE "$pattern"; then
OFFENDING="${OFFENDING}${f} (matched: ${pattern})\n"
break
fi
done
done <<< "$CHANGED"
if [ -n "$OFFENDING" ]; then
echo "::error::Credential-shaped strings detected in diff additions:"
# `printf '%b' "$OFFENDING"` interprets backslash escapes
# (the literal `\n` we appended above becomes a newline)
# WITHOUT treating OFFENDING as a format string. Plain
# `printf "$OFFENDING"` is a format-string sink: a filename
# containing `%` would be interpreted as a conversion
# specifier, corrupting the error message (or printing
# `%(missing)` artifacts).
printf '%b' "$OFFENDING"
echo ""
echo "The actual matched values are NOT echoed here, deliberately —"
echo "round-tripping a leaked credential into CI logs widens the blast"
echo "radius (logs are searchable + retained)."
echo ""
echo "Recovery:"
echo " 1. Remove the secret from the file. Replace with an env var"
echo " reference (e.g. \${{ secrets.GITHUB_TOKEN }} in workflows,"
echo " process.env.X in code)."
echo " 2. If the credential was already pushed (this PR's commit"
echo " history reaches a public ref), treat it as compromised —"
echo " ROTATE it immediately, do not just remove it. The token"
echo " remains valid in git history forever and may be in any"
echo " log/cache that consumed this branch."
echo " 3. Force-push the cleaned commit (or stack a revert) and"
echo " re-run CI."
echo ""
echo "If the match is a false positive (test fixture, docs example,"
echo "or this workflow's own regex literals): use a clearly-fake"
echo "placeholder like ghs_EXAMPLE_DO_NOT_USE that doesn't satisfy"
echo "the length suffix, OR add the file path to the SELF exclude"
echo "list in this workflow with a short reason."
echo ""
echo "Mirror of the regex set lives in the runtime's bundled"
echo "pre-commit hook (molecule-ai-workspace-runtime:"
echo "molecule_runtime/scripts/pre-commit-checks.sh) — keep aligned."
exit 1
fi
echo "✓ No credential-shaped strings in this change."

View File

@@ -1,81 +0,0 @@
# sop-tier-check — canonical Gitea Actions workflow for §SOP-6 enforcement.
#
# Logic lives in `.gitea/scripts/sop-tier-check.sh` (extracted 2026-05-09
# from the previous inline-bash version). The script is the single source
# of truth; this workflow file just sets env + invokes it.
#
# Copy BOTH files (`.gitea/workflows/sop-tier-check.yml` +
# `.gitea/scripts/sop-tier-check.sh`) into any repo that wants the
# §SOP-6 PR gate enforced. Pair with branch protection on the protected
# branch:
# required_status_checks: ["sop-tier-check / tier-check (pull_request)"]
# required_approving_reviews: 1
# approving_review_teams: ["ceo", "managers", "engineers"]
#
# Tier → eligible-team mapping (mirror of dev-sop §SOP-6):
# tier:low → engineers, managers, ceo
# tier:medium → managers, ceo
# tier:high → ceo
#
# Force-merge: Owners-team override remains available out-of-band via
# the Gitea merge API; force-merge writes `incident.force_merge` to
# `structure_events` per §Persistent structured logging gate (Phase 3).
#
# Set `SOP_DEBUG: '1'` in the env block to enable per-API-call diagnostic
# lines — useful when diagnosing token-scope or team-id-resolution
# issues. Default off.
name: sop-tier-check
# SECURITY: triggers MUST use `pull_request_target`, not `pull_request`.
# `pull_request_target` loads the workflow definition from the BASE
# branch (i.e. `main`), not the PR's HEAD. With `pull_request`, anyone
# with write access to a feature branch could rewrite this file in
# their PR to dump SOP_TIER_CHECK_TOKEN (org-read scope) to logs and
# exfiltrate it. Verified 2026-05-09 against Gitea 1.22.6 —
# `pull_request_target` (added in Gitea 1.21 via go-gitea/gitea#25229)
# is the documented mitigation.
#
# This workflow does NOT call `actions/checkout` of PR HEAD code, so no
# untrusted code is ever executed in the runner — we only HTTP-call the
# Gitea API. If a future change adds a checkout step, it MUST pin to
# `${{ github.event.pull_request.base.sha }}` (NOT `head.sha`) to keep
# the trust boundary.
on:
pull_request_target:
types: [opened, edited, synchronize, reopened, labeled, unlabeled]
pull_request_review:
types: [submitted, dismissed, edited]
jobs:
tier-check:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
steps:
- name: Check out base branch (for the script)
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
# Pin to base.sha — pull_request_target's protection only
# works if we never check out PR HEAD. Same SHA the workflow
# itself was loaded from.
ref: ${{ github.event.pull_request.base.sha }}
- name: Verify tier label + reviewer team membership
env:
# SOP_TIER_CHECK_TOKEN is the org-level secret for the
# sop-tier-bot PAT (read:organization,read:user,read:issue,
# read:repository). Stored at the org level
# (/api/v1/orgs/molecule-ai/actions/secrets) so per-repo
# configuration is unnecessary — every repo in the org
# picks it up automatically.
# Falls back to GITHUB_TOKEN with a clear error if missing.
GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}
GITEA_HOST: git.moleculesai.app
REPO: ${{ github.repository }}
PR_NUMBER: ${{ github.event.pull_request.number }}
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
# Set to '1' for diagnostic per-API-call output. Off by default
# so production logs aren't noisy.
SOP_DEBUG: '0'
run: bash .gitea/scripts/sop-tier-check.sh
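
The gate's core is one membership probe per (approver, team) pair; by hand it reduces to a single curl (a sketch; the team id and username are invented):

curl -sS -o /dev/null -w '%{http_code}\n' -H "Authorization: token $GITEA_TOKEN" \
  "https://git.moleculesai.app/api/v1/teams/7/members/bob"
# 200/204 → member (eligible); 404 → not a member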

View File

@@ -20,19 +20,6 @@ on:
   #   a few minutes under load — that's fine for a canary.
   - cron: '*/30 * * * *'
   workflow_dispatch:
-    inputs:
-      keep_on_failure:
-        description: >-
-          Skip teardown when the canary fails (debugging only). The
-          tenant org + EC2 + CF tunnel + DNS stay alive so an operator
-          can SSM into the workspace EC2 and capture docker logs of the
-          failing claude-code container. REMEMBER to manually delete
-          via DELETE /cp/admin/tenants/<slug> when done so the org
-          doesn't accumulate cost. Only honored on workflow_dispatch;
-          cron runs always tear down (we don't want unattended cron
-          to leak resources).
-        type: boolean
-        default: false
 
 # Serialise with the full-SaaS workflow so they don't contend for the
 # same org-create quota on staging. Different group key from
@@ -93,14 +80,6 @@ jobs:
      #   is "Token Plan only" but cheap-per-token and fast.
      E2E_MODEL_SLUG: MiniMax-M2.7-highspeed
      E2E_RUN_ID: "canary-${{ github.run_id }}"
-     # Debug-only: when an operator dispatches with keep_on_failure=true,
-     # the canary script's E2E_KEEP_ORG=1 path skips teardown so the
-     # tenant org + EC2 stay alive for SSM-based log capture. Cron runs
-     # never set this (the input only exists on workflow_dispatch) so
-     # unattended cron always tears down. See molecule-core#129
-     # failure mode #1 — capturing the actual exception requires
-     # docker logs from the live container.
-     E2E_KEEP_ORG: ${{ github.event.inputs.keep_on_failure == 'true' && '1' || '0' }}
 
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
@@ -158,28 +137,27 @@ jobs:
        id: canary
        run: bash tests/e2e/test_staging_full_saas.sh
 
-     # Alerting: open a sticky issue on the FIRST failure; comment on
-     # subsequent failures; auto-close on next green. Comment-on-existing
-     # de-duplicates so a single open issue accumulates the streak —
-     # ops sees one issue with N comments rather than N issues.
+     # Alerting: open an issue only after THREE consecutive failures so
+     # transient flakes (Cloudflare DNS hiccup, AWS API blip) don't spam
+     # the issue list. If an issue is already open, we still comment on
+     # every failure so ops sees the streak. Auto-close on next green.
      #
-     # Why no consecutive-failures threshold (e.g., wait 3 runs before
-     # filing): the prior threshold check used
-     # `github.rest.actions.listWorkflowRuns()` which Gitea 1.22.6 does
-     # not expose (returns 404). On Gitea Actions the threshold call
-     # ALWAYS failed, breaking the entire alerting step and going days
-     # silent on real regressions (38h+ chronic red on 2026-05-07/08
-     # before this fix; tracked in molecule-core#129). Filing on first
-     # failure is also better UX — we want to know about the first red,
-     # not wait 90 min for it to "count." Real flakes get one issue +
-     # a quick close-on-green; persistent reds accumulate comments.
+     # Threshold rationale: canary fires every 30 min, so 3 failures =
+     # ~90 min of consecutive red — well past any single-run flake but
+     # still tight enough that a real outage gets surfaced before the
+     # next deploy window.
      - name: Open issue on failure
        if: failure()
        uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
+       env:
+         # Inject the workflow path explicitly — context.workflow is
+         # the *name*, not the file path the actions API needs.
+         WORKFLOW_PATH: '.github/workflows/canary-staging.yml'
+         CONSECUTIVE_THRESHOLD: '3'
        with:
          script: |
            const title = '🔴 Canary failing: staging SaaS smoke';
-           const runURL = `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
+           const runURL = `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
 
            // Find an existing open canary issue (stable title match).
            // If one exists, this isn't a "first failure" — comment and exit.
@@ -199,12 +177,32 @@ jobs:
              return;
            }
 
-           // No open issue yet — file one on this first failure. The
-           // comment-on-existing branch above means subsequent failures
-           // accumulate as comments on this same issue, so we don't
-           // spam new issues per run.
+           // No open issue yet — check the last N-1 runs' conclusions.
+           // We open the issue only if the last (THRESHOLD-1) runs ALSO
+           // failed (so this is the 3rd consecutive red).
+           const threshold = parseInt(process.env.CONSECUTIVE_THRESHOLD, 10);
+           const { data: runs } = await github.rest.actions.listWorkflowRuns({
+             owner: context.repo.owner, repo: context.repo.repo,
+             workflow_id: process.env.WORKFLOW_PATH,
+             status: 'completed',
+             per_page: threshold,
+             // Skip the current in-progress run; it isn't 'completed' yet.
+           });
+           // listWorkflowRuns returns recent first. We need (threshold-1)
+           // prior failures (current run is the threshold-th).
+           const priorFailures = (runs.workflow_runs || [])
+             .slice(0, threshold - 1)
+             .filter(r => r.id !== context.runId)
+             .filter(r => r.conclusion === 'failure')
+             .length;
+           if (priorFailures < threshold - 1) {
+             core.info(`Below threshold: ${priorFailures + 1}/${threshold} consecutive failures — not filing yet`);
+             return;
+           }
+
            const body =
-             `Canary run failed at ${new Date().toISOString()}.\n\n` +
+             `Canary run failed at ${new Date().toISOString()}, ` +
+             `${threshold} consecutive runs red.\n\n` +
              `Run: ${runURL}\n\n` +
              `This issue auto-closes on the next green canary run. ` +
              `Consecutive failures add a comment here rather than a new issue.`;
@@ -213,7 +211,7 @@ jobs:
              title, body,
              labels: ['canary-staging', 'bug'],
            });
-           core.info('Opened canary failure issue (first red)');
+           core.info(`Opened canary failure issue (${threshold} consecutive reds)`);
 
      - name: Auto-close canary issue on success
        if: success()
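
The same three-consecutive-reds test can be run by hand with the GitHub CLI (a sketch; assumes gh is authenticated against this repo and the workflow filename above):

gh run list --workflow=canary-staging.yml --limit 3 --json conclusion \
  | jq -e 'map(select(.conclusion == "failure")) | length >= 3' >/dev/null \
  && echo "3 consecutive reds: at or past the issue-filing threshold"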

View File

@@ -119,17 +119,6 @@ jobs:
      #   symptom, different root cause: staging still has the in-image
      #   clone path, hits the auth error directly).
      #
-     #   2026-05-08 sub-finding (#192): the clone step ALSO fails when
-     #   any referenced workspace-template repo is private and the
-     #   AUTO_SYNC_TOKEN bearer (devops-engineer persona) lacks read
-     #   access. Root cause: 5 of 9 workspace-template repos
-     #   (openclaw, codex, crewai, deepagents, gemini-cli) had been
-     #   marked private with no team grant. Resolution: flipped them
-     #   to public per `feedback_oss_first_repo_visibility_default`
-     #   (the OSS surface should be public). Layer-3 (customer-private +
-     #   marketplace third-party repos) tracked separately in
-     #   internal#102.
-     #
      # Token shape matches publish-workspace-server-image.yml: AUTO_SYNC_TOKEN
      # is the devops-engineer persona PAT, NOT the founder PAT (per
      # `feedback_per_agent_gitea_identity_default`). clone-manifest.sh

View File

@@ -1,10 +0,0 @@
# Excluded from `docker build` context. Without this, the COPY . . step in
# canvas/Dockerfile clobbers the freshly-installed node_modules with the
# host's (potentially broken / wrong-arch) copy — the @tailwindcss/oxide
# native binary disagreed and broke `next build`.
node_modules
.next
.git
*.log
.env*
!.env.example

View File

@@ -1,11 +1,7 @@
 FROM node:22-alpine AS builder
 WORKDIR /app
 COPY package.json package-lock.json* ./
-# `npm ci` (not `install`) for lockfile-exact reproducibility.
-# `--include=optional` ensures the platform-specific @tailwindcss/oxide
-# native binary lands — without it, postcss fails with "Cannot read
-# properties of undefined (reading 'All')" at build time.
-RUN npm ci --include=optional
+RUN npm install
 COPY . .
 ARG NEXT_PUBLIC_PLATFORM_URL=http://localhost:8080
 ARG NEXT_PUBLIC_WS_URL=ws://localhost:8080/ws

View File

@@ -17,24 +17,6 @@ import { dirname, join } from "node:path";
 // update one heuristic. Production is unaffected: `output: "standalone"`
 // bakes resolved env into the build, and the marker file isn't shipped.
 loadMonorepoEnv();
-// Boot-time matched-pair guard for ADMIN_TOKEN / NEXT_PUBLIC_ADMIN_TOKEN.
-// When ADMIN_TOKEN is set on the workspace-server (server-side bearer
-// gate, wsauth_middleware.go ~L245), the canvas MUST send the matching
-// NEXT_PUBLIC_ADMIN_TOKEN as `Authorization: Bearer ...` on every API
-// call. If only one is set, every workspace API call 401s silently —
-// the canvas hydrates with empty data and the user sees a broken page
-// with no console hint about the auth-config mismatch.
-//
-// Pre-fix the matched-pair contract was descriptive only (a comment in
-// .env): future devs/agents could re-misconfigure with one of the two
-// unset and silently 401. Closes the post-PR-#174 self-review gap.
-//
-// Warn-only (not exit) — production canvas Docker images bake these
-// vars into the build at image-build time, and a missed pair there
-// would still emit the warning at runtime via the standalone server's
-// startup. Killing the process on misconfiguration would turn a
-// recoverable auth issue into a hard crashloop.
-checkAdminTokenPair();
 
 const nextConfig: NextConfig = {
   output: "standalone",
@@ -75,43 +57,6 @@ function loadMonorepoEnv() {
   );
 }
 
-// Boot-time matched-pair guard. Runs after .env has been loaded so the
-// check sees the post-load state. The two env vars must be set or
-// unset together; one-without-the-other is the silent-401 footgun.
-//
-// Treats empty string ("") as unset. An explicitly-empty `KEY=` in
-// .env counts as set-to-empty in `process.env`, but for auth purposes
-// an empty bearer token is equivalent to no token — so both
-// `ADMIN_TOKEN=` and an unset ADMIN_TOKEN are equivalent relative to
-// the matched-pair invariant.
-//
-// Returns void; side effect is the console.error warning. Kept as a
-// separate function (exported) so a future test can reset env, call
-// this, and assert on captured stderr.
-export function checkAdminTokenPair(): void {
-  const serverSet = !!process.env.ADMIN_TOKEN;
-  const clientSet = !!process.env.NEXT_PUBLIC_ADMIN_TOKEN;
-  if (serverSet === clientSet) return;
-  // Distinct messages so the operator can tell which half is missing
-  // — the fix is symmetric (set the other one) but the diagnostic
-  // mentions which side is currently set so they don't have to grep.
-  if (serverSet && !clientSet) {
-    // eslint-disable-next-line no-console
-    console.error(
-      "[next.config] ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not — " +
-        "canvas will 401 against workspace-server because the bearer header " +
-        "is never attached. Set both to the same value, or unset both.",
-    );
-  } else {
-    // eslint-disable-next-line no-console
-    console.error(
-      "[next.config] NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not — " +
-        "workspace-server will reject the bearer because no AdminAuth gate " +
-        "is configured. Set both to the same value, or unset both.",
-    );
-  }
-}
 
 function findMonorepoRoot(start: string): string | null {
   let dir = start;
   for (let i = 0; i < 6; i++) {
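
The guard being deleted here boils down to a two-line invariant; a shell-level equivalent (a sketch, e.g. for a CI step) is:

s="${ADMIN_TOKEN:+set}"; c="${NEXT_PUBLIC_ADMIN_TOKEN:+set}"  # :+ treats "" as unset
[ "$s" = "$c" ] || echo "warn: ADMIN_TOKEN / NEXT_PUBLIC_ADMIN_TOKEN must be set (or unset) together" >&2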

View File

@@ -9,7 +9,6 @@
 //   AttachmentLightbox).
 
 import { useState, useEffect, useRef } from "react";
-import { platformAuthHeaders } from "@/lib/api";
 import type { ChatAttachment } from "./types";
 import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
 import { AttachmentChip } from "./AttachmentViews";
@@ -44,8 +43,13 @@ export function AttachmentAudio({ workspaceId, attachment, onDownload, tone }: P
     void (async () => {
       try {
         const href = resolveAttachmentHref(workspaceId, attachment.uri);
+        const headers: Record<string, string> = {};
+        const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
+        if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
+        const slug = getTenantSlug();
+        if (slug) headers["X-Molecule-Org-Slug"] = slug;
         const res = await fetch(href, {
-          headers: platformAuthHeaders(),
+          headers,
           credentials: "include",
           signal: AbortSignal.timeout(60_000),
         });
@@ -112,5 +116,9 @@ export function AttachmentAudio({ workspaceId, attachment, onDownload, tone }: P
   );
 }
 
-// Local getTenantSlug() removed — auth-header construction now goes
-// through platformAuthHeaders() from @/lib/api (#178).
+function getTenantSlug(): string | null {
+  if (typeof window === "undefined") return null;
+  const host = window.location.hostname;
+  const m = host.match(/^([^.]+)\.moleculesai\.app$/);
+  return m ? m[1] : null;
+}

View File

@@ -35,7 +35,6 @@
 //   downscale via canvas, but defer that to v2.
 
 import { useState, useEffect, useRef } from "react";
-import { platformAuthHeaders } from "@/lib/api";
 import type { ChatAttachment } from "./types";
 import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
 import { AttachmentLightbox } from "./AttachmentLightbox";
@@ -76,14 +75,22 @@ export function AttachmentImage({ workspaceId, attachment, onDownload, tone }: P
     }
 
     // Platform-auth path: identical to downloadChatFile but we keep
-    // the blob (don't trigger a Save-As). Auth headers come from the
-    // shared `platformAuthHeaders()` helper — one source of truth for
-    // every authenticated raw fetch in the canvas (#178).
+    // the blob (don't trigger a Save-As). Use the same headers it does
+    // by going through it indirectly — no, downloadChatFile triggers a
+    // Save-As. Need a separate fetch.
     void (async () => {
       try {
         const href = resolveAttachmentHref(workspaceId, attachment.uri);
+        const headers: Record<string, string> = {};
+        // Read the same env var downloadChatFile reads — single source
+        // of truth would be cleaner; refactor opportunity for PR-2 if
+        // we add the same path to AttachmentVideo.
+        const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
+        if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
+        const slug = getTenantSlug();
+        if (slug) headers["X-Molecule-Org-Slug"] = slug;
         const res = await fetch(href, {
-          headers: platformAuthHeaders(),
+          headers,
           credentials: "include",
           signal: AbortSignal.timeout(30_000),
         });
@@ -177,7 +184,15 @@ export function AttachmentImage({ workspaceId, attachment, onDownload, tone }: P
   );
 }
 
-// Local getTenantSlug() removed — auth-header construction now goes
-// through platformAuthHeaders() from @/lib/api which uses the canonical
-// getTenantSlug() from @/lib/tenant. This eliminates the duplicate
-// hostname-regex + the duplicate bearer-token-attach pattern (#178).
+// Internal helper — duplicated from uploads.ts (it's not exported
+// there). Kept local so this component doesn't reach into private
+// surface; if AttachmentVideo / AttachmentPDF in PR-2/PR-3 also need
+// it, lift to an exported helper at that point (the third-caller
+// rule).
+function getTenantSlug(): string | null {
+  if (typeof window === "undefined") return null;
+  const host = window.location.hostname;
+  // Tenant subdomain shape: <slug>.moleculesai.app
+  const m = host.match(/^([^.]+)\.moleculesai\.app$/);
+  return m ? m[1] : null;
+}
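
The hostname-to-slug mapping these helpers implement can be sanity-checked from a shell with the same anchored pattern (the hostname is the one the tests further below use):

host="reno-stars.moleculesai.app"
[[ "$host" =~ ^([^.]+)\.moleculesai\.app$ ]] && echo "${BASH_REMATCH[1]}"  # prints: reno-stars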

View File

@@ -33,7 +33,6 @@
 //   timeout, swap to chip. Implemented as a 3-second watchdog.
 
 import { useState, useEffect, useRef } from "react";
-import { platformAuthHeaders } from "@/lib/api";
 import type { ChatAttachment } from "./types";
 import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
 import { AttachmentLightbox } from "./AttachmentLightbox";
@@ -70,8 +69,13 @@ export function AttachmentPDF({ workspaceId, attachment, onDownload, tone }: Pro
     void (async () => {
       try {
         const href = resolveAttachmentHref(workspaceId, attachment.uri);
+        const headers: Record<string, string> = {};
+        const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
+        if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
+        const slug = getTenantSlug();
+        if (slug) headers["X-Molecule-Org-Slug"] = slug;
         const res = await fetch(href, {
-          headers: platformAuthHeaders(),
+          headers,
           credentials: "include",
           signal: AbortSignal.timeout(60_000),
         });
@@ -185,5 +189,9 @@ function PdfGlyph() {
   );
 }
 
-// Local getTenantSlug() removed — auth-header construction now goes
-// through platformAuthHeaders() from @/lib/api (#178).
+function getTenantSlug(): string | null {
+  if (typeof window === "undefined") return null;
+  const host = window.location.hostname;
+  const m = host.match(/^([^.]+)\.moleculesai\.app$/);
+  return m ? m[1] : null;
+}

View File

@@ -26,7 +26,6 @@
 //   to download the full file.
 
 import { useState, useEffect } from "react";
-import { platformAuthHeaders } from "@/lib/api";
 import type { ChatAttachment } from "./types";
 import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
 import { AttachmentChip } from "./AttachmentViews";
@@ -58,13 +57,13 @@ export function AttachmentTextPreview({ workspaceId, attachment, onDownload, ton
     void (async () => {
       try {
         const href = resolveAttachmentHref(workspaceId, attachment.uri);
-        // Only attach platform auth headers for in-platform URIs —
-        // off-platform URLs (HTTP/HTTPS attachments) MUST NOT receive
-        // our bearer token (it would leak the admin token to a third
-        // party). The branch is preserved with the new shared helper.
-        const headers: Record<string, string> = isPlatformAttachment(attachment.uri)
-          ? platformAuthHeaders()
-          : {};
+        const headers: Record<string, string> = {};
+        if (isPlatformAttachment(attachment.uri)) {
+          const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
+          if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
+          const slug = getTenantSlug();
+          if (slug) headers["X-Molecule-Org-Slug"] = slug;
+        }
         const res = await fetch(href, {
           headers,
           credentials: "include",
@@ -183,5 +182,9 @@ export function AttachmentTextPreview({ workspaceId, attachment, onDownload, ton
   );
 }
 
-// Local getTenantSlug() removed — auth-header construction now goes
-// through platformAuthHeaders() from @/lib/api (#178).
+function getTenantSlug(): string | null {
+  if (typeof window === "undefined") return null;
+  const host = window.location.hostname;
+  const m = host.match(/^([^.]+)\.moleculesai\.app$/);
+  return m ? m[1] : null;
+}

View File

@@ -25,7 +25,6 @@
 //   fetch via service worker. v2 if measured-needed.
 
 import { useState, useEffect, useRef } from "react";
-import { platformAuthHeaders } from "@/lib/api";
 import type { ChatAttachment } from "./types";
 import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
 import { AttachmentChip } from "./AttachmentViews";
@@ -62,8 +61,13 @@ export function AttachmentVideo({ workspaceId, attachment, onDownload, tone }: P
     void (async () => {
       try {
         const href = resolveAttachmentHref(workspaceId, attachment.uri);
+        const headers: Record<string, string> = {};
+        const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
+        if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
+        const slug = getTenantSlug();
+        if (slug) headers["X-Molecule-Org-Slug"] = slug;
         const res = await fetch(href, {
-          headers: platformAuthHeaders(),
+          headers,
           credentials: "include",
           // Videos are larger than images on average; give the request
           // more headroom. The server's per-request body cap (50MB) is
@@ -143,5 +147,11 @@ export function AttachmentVideo({ workspaceId, attachment, onDownload, tone }: P
   );
 }
 
-// Local getTenantSlug() removed — auth-header construction now goes
-// through platformAuthHeaders() from @/lib/api (#178).
+// Internal helper — same shape as AttachmentImage's. Lifted to a
+// shared util in PR-2.5 if a third caller needs it (PDF, audio).
+function getTenantSlug(): string | null {
+  if (typeof window === "undefined") return null;
+  const host = window.location.hostname;
+  const m = host.match(/^([^.]+)\.moleculesai\.app$/);
+  return m ? m[1] : null;
+}

View File

@@ -1,16 +1,12 @@
-import { PLATFORM_URL, platformAuthHeaders } from "@/lib/api";
+import { PLATFORM_URL } from "@/lib/api";
+import { getTenantSlug } from "@/lib/tenant";
 import type { ChatAttachment } from "./types";
 
 /** Chat attachments are intentionally uploaded via a direct fetch()
  * instead of the `api.post` helper — `api.post` JSON-stringifies the
- * body, which would 500 on a Blob. Auth headers (tenant slug, admin
- * token, credentials) come from `platformAuthHeaders()` — the same
- * helper `request()` uses, so a missing bearer surfaces as a single
- * fix site instead of N copies. We deliberately do NOT set
- * Content-Type so the browser writes the multipart boundary into the
- * header; setting it manually would yield a multipart body the server
- * can't parse. See lib/api.ts platformAuthHeaders() for the full
- * rationale on why this pair must stay matched. */
+ * body, which would 500 on a Blob. Mirrors the header plumbing
+ * (tenant slug, admin token, credentials) so SaaS + self-hosted
+ * callers work the same way. */
 export async function uploadChatFiles(
   workspaceId: string,
   files: File[],
@@ -20,12 +16,18 @@ export async function uploadChatFiles(
   const form = new FormData();
   for (const f of files) form.append("files", f, f.name);
 
+  const headers: Record<string, string> = {};
+  const slug = getTenantSlug();
+  if (slug) headers["X-Molecule-Org-Slug"] = slug;
+  const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
+  if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
+
   // Uploads legitimately take a while on cold cache (tar write +
   // docker cp into the container). 60s is comfortable for the 25MB/
   // 50MB caps the server enforces.
   const res = await fetch(`${PLATFORM_URL}/workspaces/${workspaceId}/chat/uploads`, {
     method: "POST",
-    headers: platformAuthHeaders(),
+    headers,
     body: form,
     credentials: "include",
     signal: AbortSignal.timeout(60_000),
@@ -141,8 +143,14 @@ export async function downloadChatFile(
     return;
   }
 
+  const headers: Record<string, string> = {};
+  const slug = getTenantSlug();
+  if (slug) headers["X-Molecule-Org-Slug"] = slug;
+  const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
+  if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
+
   const res = await fetch(href, {
-    headers: platformAuthHeaders(),
+    headers,
     credentials: "include",
     signal: AbortSignal.timeout(60_000),
   });
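
As a cross-check of the header plumbing, the request uploadChatFiles() makes is equivalent to this curl (a sketch; workspace id, slug, token, and file are all invented). Neither curl -F nor the browser fetch sets Content-Type explicitly, so the multipart boundary is written correctly:

curl -X POST "$PLATFORM_URL/workspaces/ws-123/chat/uploads" \
  -H "X-Molecule-Org-Slug: reno-stars" \
  -H "Authorization: Bearer $NEXT_PUBLIC_ADMIN_TOKEN" \
  -F "files=@photo.png"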

View File

@@ -1,130 +0,0 @@
// @vitest-environment node
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
// Tests for the boot-time matched-pair guard added to next.config.ts.
//
// Why this lives in src/lib/__tests__ even though the function is in
// canvas/next.config.ts:
// - next.config.ts runs as ESM-but-also-CJS depending on which
// consumer loads it (Next.js dev server vs Next.js build); we
// want the test to be a plain ESM module Vitest already handles.
// - Importing from "../../../next.config" pulls in the rest of the
// file (loadMonorepoEnv, the default export, etc.) which has
// side effects on module load (it runs loadMonorepoEnv()
// immediately). To keep the test hermetic we don't import — we
// duplicate the function under test.
//
// Sourcing the function from a shared module would be cleaner, but
// next.config.ts is required to be a single self-contained file by
// Next.js's loader on some host configurations. Pin invariant: the
// duplicated function below MUST stay byte-identical to the one in
// next.config.ts. If you change one, change the other and bump this
// comment.
function checkAdminTokenPair(): void {
const serverSet = !!process.env.ADMIN_TOKEN;
const clientSet = !!process.env.NEXT_PUBLIC_ADMIN_TOKEN;
if (serverSet === clientSet) return;
if (serverSet && !clientSet) {
// eslint-disable-next-line no-console
console.error(
"[next.config] ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not — " +
"canvas will 401 against workspace-server because the bearer header " +
"is never attached. Set both to the same value, or unset both.",
);
} else {
// eslint-disable-next-line no-console
console.error(
"[next.config] NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not — " +
"workspace-server will reject the bearer because no AdminAuth gate " +
"is configured. Set both to the same value, or unset both.",
);
}
}
describe("checkAdminTokenPair", () => {
// Snapshot env so individual tests can stomp on it without leaking.
// Rebuild from snapshot in afterEach so the next test sees a known
// baseline regardless of mutation pattern.
let originalEnv: Record<string, string | undefined>;
let errorSpy: ReturnType<typeof vi.spyOn>;
beforeEach(() => {
originalEnv = {
ADMIN_TOKEN: process.env.ADMIN_TOKEN,
NEXT_PUBLIC_ADMIN_TOKEN: process.env.NEXT_PUBLIC_ADMIN_TOKEN,
};
delete process.env.ADMIN_TOKEN;
delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
errorSpy = vi.spyOn(console, "error").mockImplementation(() => {});
});
afterEach(() => {
if (originalEnv.ADMIN_TOKEN === undefined) delete process.env.ADMIN_TOKEN;
else process.env.ADMIN_TOKEN = originalEnv.ADMIN_TOKEN;
if (originalEnv.NEXT_PUBLIC_ADMIN_TOKEN === undefined) delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
else process.env.NEXT_PUBLIC_ADMIN_TOKEN = originalEnv.NEXT_PUBLIC_ADMIN_TOKEN;
errorSpy.mockRestore();
});
it("emits no warning when both are unset", () => {
checkAdminTokenPair();
expect(errorSpy).not.toHaveBeenCalled();
});
it("emits no warning when both are set (matched pair, the happy path)", () => {
process.env.ADMIN_TOKEN = "local-dev-admin";
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "local-dev-admin";
checkAdminTokenPair();
expect(errorSpy).not.toHaveBeenCalled();
});
it("warns when ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not", () => {
process.env.ADMIN_TOKEN = "local-dev-admin";
checkAdminTokenPair();
expect(errorSpy).toHaveBeenCalledTimes(1);
// Exact-string assertion — substring would also pass when the
// function's branch logic is broken (e.g. emits both messages, or
// emits the wrong one). Pin the exact message that operators will
// see in their dev console so regressions are visible.
expect(errorSpy).toHaveBeenCalledWith(
"[next.config] ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not — " +
"canvas will 401 against workspace-server because the bearer header " +
"is never attached. Set both to the same value, or unset both.",
);
});
it("warns when NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not", () => {
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "local-dev-admin";
checkAdminTokenPair();
expect(errorSpy).toHaveBeenCalledTimes(1);
expect(errorSpy).toHaveBeenCalledWith(
"[next.config] NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not — " +
"workspace-server will reject the bearer because no AdminAuth gate " +
"is configured. Set both to the same value, or unset both.",
);
});
// Empty string in process.env is the JS-side representation of `KEY=`
// (no value) in a .env file. Treating "" as unset makes the pair
// invariant symmetric: `KEY=` and `unset KEY` produce the same
// verdict. Without this branch, an operator who comments out the
// value but leaves the line would get a false-positive warning.
it("treats empty string as unset (so KEY= and unset KEY are equivalent)", () => {
process.env.ADMIN_TOKEN = "";
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "";
checkAdminTokenPair();
expect(errorSpy).not.toHaveBeenCalled();
});
it("warns when ADMIN_TOKEN is set and NEXT_PUBLIC_ADMIN_TOKEN is empty string", () => {
process.env.ADMIN_TOKEN = "local-dev-admin";
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "";
checkAdminTokenPair();
expect(errorSpy).toHaveBeenCalledTimes(1);
// First branch — server set, client unset.
expect(errorSpy).toHaveBeenCalledWith(
expect.stringContaining("ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not"),
);
});
});

View File

@@ -1,97 +0,0 @@
// @vitest-environment jsdom
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
// Tests for platformAuthHeaders — the shared helper extracted in #178
// to consolidate the bearer-token-attach + tenant-slug-attach pattern
// that was previously duplicated across 7 raw-fetch callsites in the
// canvas (uploads + 5 Attachment* components + the api.ts request()
// function).
//
// What we pin here:
// - Returns a fresh object each call (so callers can mutate without
// leaking into each other).
// - Empty result on a non-tenant host with no admin token (the
// localhost / self-hosted shape).
// - Bearer attached when NEXT_PUBLIC_ADMIN_TOKEN is set.
// - X-Molecule-Org-Slug attached when window.location.hostname is a
// tenant subdomain (<slug>.moleculesai.app).
// - Both attached when both apply (the production SaaS shape).
//
// Why jsdom: getTenantSlug() reads window.location.hostname. Node-only
// environment yields no window and getTenantSlug returns null
// unconditionally — wouldn't exercise the slug branch.
import { platformAuthHeaders } from "../api";
describe("platformAuthHeaders", () => {
let originalAdminToken: string | undefined;
beforeEach(() => {
originalAdminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
});
afterEach(() => {
if (originalAdminToken === undefined) delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
else process.env.NEXT_PUBLIC_ADMIN_TOKEN = originalAdminToken;
// jsdom resets hostname between tests via the @vitest-environment
// pragma's per-test isolation. No explicit reset needed.
});
it("returns an empty object on a non-tenant host with no admin token", () => {
// jsdom default hostname is "localhost" — not a tenant slug, so
// getTenantSlug() returns null and no X-Molecule-Org-Slug is added.
const headers = platformAuthHeaders();
expect(headers).toEqual({});
});
it("attaches Authorization when NEXT_PUBLIC_ADMIN_TOKEN is set", () => {
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "local-dev-admin";
const headers = platformAuthHeaders();
expect(headers).toEqual({ Authorization: "Bearer local-dev-admin" });
});
it("does NOT attach Authorization when NEXT_PUBLIC_ADMIN_TOKEN is empty string", () => {
// Empty-string env is the JS-side shape of `KEY=` in .env.
// Treating it as unset matches the matched-pair guard in
// next.config.ts (admin-token-pair.test.ts) — symmetric semantics.
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "";
const headers = platformAuthHeaders();
expect(headers).toEqual({});
});
it("attaches X-Molecule-Org-Slug on a tenant subdomain", () => {
Object.defineProperty(window, "location", {
value: { hostname: "reno-stars.moleculesai.app" },
writable: true,
});
const headers = platformAuthHeaders();
expect(headers).toEqual({ "X-Molecule-Org-Slug": "reno-stars" });
});
it("attaches both when both apply (production SaaS shape)", () => {
Object.defineProperty(window, "location", {
value: { hostname: "reno-stars.moleculesai.app" },
writable: true,
});
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "tenant-bearer";
const headers = platformAuthHeaders();
// Pin exact-equality on the full shape — substring/contains
// assertions would also pass for an extra-header bug.
expect(headers).toEqual({
"X-Molecule-Org-Slug": "reno-stars",
Authorization: "Bearer tenant-bearer",
});
});
it("returns a fresh object each call (callers can mutate safely)", () => {
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "tok";
const a = platformAuthHeaders();
const b = platformAuthHeaders();
expect(a).not.toBe(b); // distinct refs
expect(a).toEqual(b); // same content
a["Content-Type"] = "application/json";
// Mutation on `a` does not leak into `b`.
expect(b["Content-Type"]).toBeUndefined();
});
});

View File

@ -21,45 +21,6 @@ export interface RequestOptions {
  timeoutMs?: number;
}
/**
* Build the platform auth header set used by every authenticated fetch
* from the canvas. Returns a fresh object so callers can mutate (e.g.
* append `Content-Type` for JSON requests, omit it for FormData).
*
* SaaS cross-origin shape:
* - `X-Molecule-Org-Slug` derived from `window.location.hostname`
* by `getTenantSlug()`. Control plane uses it for fly-replay
 *   routing. Empty on localhost / non-tenant hosts — safe to omit.
 * - `Authorization: Bearer <token>` — `NEXT_PUBLIC_ADMIN_TOKEN` baked
* into the canvas build (see canvas/Dockerfile L8/L11). Required by
* the workspace-server when `ADMIN_TOKEN` is set on the server side
* (Tier-2b AdminAuth gate, wsauth_middleware.go ~L245). Empty when
 *   no admin token was provisioned — the Tier-1 session-cookie path
* handles that case via `credentials:"include"`.
*
* Why a shared helper: the two-line "read env, attach bearer; read
* slug, attach header" pattern was duplicated across `request()` and
* 7 raw-fetch callsites (chat uploads/download + 5 Attachment*
* components) before this consolidation. A new poller or raw fetch
* that forgets one of the two headers silently 401s against
 * workspace-server when ADMIN_TOKEN is set — the exact bug shape
* called out in #178 / closes the post-#176 self-review gap.
*
* Callers that want JSON Content-Type should spread this and add it
* themselves; FormData callers should NOT add Content-Type (the
* browser sets the multipart boundary). Centralizing the auth pair
* but leaving Content-Type up to the caller is the minimum viable
* shared shape.
*/
export function platformAuthHeaders(): Record<string, string> {
const headers: Record<string, string> = {};
const slug = getTenantSlug();
if (slug) headers["X-Molecule-Org-Slug"] = slug;
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
return headers;
}
async function request<T>(
  method: string,
  path: string,
@ -67,16 +28,17 @@ async function request<T>(
  retryCount = 0,
  options?: RequestOptions,
): Promise<T> {
-  // JSON-bodied request — Content-Type is JSON. Auth pair comes from
-  // the shared helper; see its doc comment for the SaaS-shape rationale.
-  const headers: Record<string, string> = {
-    "Content-Type": "application/json",
-    ...platformAuthHeaders(),
-  };
-  // Re-read slug locally for the 401 handler below — `headers` already
-  // has it, but the 401 branch needs the bare value to gate the
-  // session-probe + redirect logic on tenant context.
+  // SaaS cross-origin shape:
+  // - X-Molecule-Org-Slug: derived from window.location.hostname by
+  //   getTenantSlug(). Control plane uses it for fly-replay routing.
+  //   Empty on localhost / non-tenant hosts — safe to omit.
+  // - credentials:"include": sends the session cookie cross-origin.
+  //   Cookie's Domain=.moleculesai.app attribute + cp's CORS allow this.
+  const headers: Record<string, string> = { "Content-Type": "application/json" };
   const slug = getTenantSlug();
+  if (slug) headers["X-Molecule-Org-Slug"] = slug;
+  const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
+  if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
   const res = await fetch(`${PLATFORM_URL}${path}`, {
     method,
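
A caller-side sketch of the convention the removed doc comment describes: JSON callers spread the helper and add `Content-Type` themselves, FormData callers attach only the auth pair. Illustrative only; the import path and function names here are assumed, not callsites from the repo.

// Sketch of the caller convention, under the doc comment's rules.
import { platformAuthHeaders } from "./api"; // path assumed

declare const PLATFORM_URL: string; // assumed module-level constant

async function postJson(path: string, body: unknown): Promise<Response> {
  return fetch(`${PLATFORM_URL}${path}`, {
    method: "POST",
    // JSON caller: spread the auth pair, add Content-Type itself.
    headers: { "Content-Type": "application/json", ...platformAuthHeaders() },
    body: JSON.stringify(body),
    credentials: "include", // Tier-1 session-cookie path
  });
}

async function postUpload(path: string, file: File): Promise<Response> {
  const form = new FormData();
  form.append("file", file);
  // FormData caller: auth pair only. No Content-Type, so the browser
  // can set the multipart boundary itself.
  return fetch(`${PLATFORM_URL}${path}`, {
    method: "POST",
    headers: platformAuthHeaders(),
    body: form,
    credentials: "include",
  });
}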

View File

@ -13,7 +13,6 @@ services:
       - pgdata:/var/lib/postgresql/data
     networks:
       - molecule-monorepo-net
-    restart: unless-stopped
     healthcheck:
       test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-dev}"]
       interval: 2s
@ -51,7 +50,6 @@ services:
       - redisdata:/data
     networks:
       - molecule-monorepo-net
-    restart: unless-stopped
     healthcheck:
       test: ["CMD", "redis-cli", "ping"]
       interval: 2s
@ -128,10 +126,6 @@ services:
       REDIS_URL: redis://redis:6379
       PORT: "${PLATFORM_PORT:-8080}"
       PLATFORM_URL: "http://platform:${PLATFORM_PORT:-8080}"
-      # Container network namespace is already isolated; "all interfaces"
-      # inside the container = the bridge interface only. The fail-open
-      # default (127.0.0.1) would block host-to-container access.
-      BIND_ADDR: "${BIND_ADDR:-0.0.0.0}"
       # Default MOLECULE_ENV=development so the WorkspaceAuth / AdminAuth
       # middleware fail-open path activates when ADMIN_TOKEN is unset —
       # otherwise the canvas (which runs without a bearer in pure local
@ -201,28 +195,12 @@ services:
       # App private key — read-only bind-mount. The host-side path is
       # gitignored per .gitignore rules (/.secrets/ + *.pem).
       - ./.secrets/github-app.pem:/secrets/github-app.pem:ro
-      # Per-role persona credentials (molecule-core#242 local surface).
-      # Sourced at workspace creation time by org_import.go::loadPersonaEnvFile
-      # when a workspace.yaml carries `role: <name>`. The host-side dir is
-      # populated by the operator-host bootstrap kit (28 dev-tree personas);
-      # /etc/molecule-bootstrap/personas is the in-container path the
-      # platform expects (matches the prod tenant-EC2 path so the same code
-      # works in both modes).
-      #
-      # Read-only mount — workspace-server only reads, never writes here.
-      # If the host dir is empty/missing the platform's loadPersonaEnvFile
-      # silently no-ops per its existing semantics, so this mount is safe
-      # even on a fresh machine that hasn't run the bootstrap kit yet.
-      - ${MOLECULE_PERSONA_ROOT_HOST:-${HOME}/.molecule-ai/personas}:/etc/molecule-bootstrap/personas:ro
     ports:
       - "${PLATFORM_PUBLISH_PORT:-8080}:${PLATFORM_PORT:-8080}"
     networks:
       - molecule-monorepo-net
-    restart: unless-stopped
     healthcheck:
-      # Plain GET — `--spider` would issue HEAD, which returns 404 because
-      # /health is registered as GET only.
-      test: ["CMD-SHELL", "wget -qO /dev/null --tries=1 http://localhost:${PLATFORM_PORT:-8080}/health || exit 1"]
+      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:${PLATFORM_PORT:-8080}/health || exit 1"]
       interval: 5s
       timeout: 5s
       retries: 10
@ -260,7 +238,7 @@ services:
     networks:
       - molecule-monorepo-net
     healthcheck:
-      test: ["CMD-SHELL", "wget -qO /dev/null --tries=1 http://127.0.0.1:${CANVAS_PORT:-3000} || exit 1"]
+      test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://127.0.0.1:${CANVAS_PORT:-3000} || exit 1"]
       interval: 10s
       timeout: 5s
       retries: 10

View File

@ -1,5 +1,5 @@
 {
-  "_comment": "OSS surface registry — every repo listed here MUST be public on git.moleculesai.app. Layer-3 customer/private templates are NOT registered here; they are handled at provision-time via the per-tenant credential resolver (see internal#102 RFC). 'main' refs are pinned to tags before broad rollout.",
+  "_comment": "Pin refs to release tags for reproducible builds. 'main' is OK while all repos are internal.",
   "version": 1,
   "plugins": [
     {"name": "browser-automation", "repo": "molecule-ai/molecule-ai-plugin-browser-automation", "ref": "main"},
@ -40,6 +40,7 @@
     {"name": "free-beats-all", "repo": "molecule-ai/molecule-ai-org-template-free-beats-all", "ref": "main"},
     {"name": "medo-smoke", "repo": "molecule-ai/molecule-ai-org-template-medo-smoke", "ref": "main"},
     {"name": "molecule-worker-gemini", "repo": "molecule-ai/molecule-ai-org-template-molecule-worker-gemini", "ref": "main"},
+    {"name": "reno-stars", "repo": "molecule-ai/molecule-ai-org-template-reno-stars", "ref": "main"},
     {"name": "ux-ab-lab", "repo": "molecule-ai/molecule-ai-org-template-ux-ab-lab", "ref": "main"},
     {"name": "mock-bigorg", "repo": "molecule-ai/molecule-ai-org-template-mock-bigorg", "ref": "main"}
   ]

View File

@ -8,24 +8,27 @@
 # Requires: git, jq (lighter than python3 — ~2MB vs ~50MB in Alpine)
 #
 # Auth (optional):
-# Post-2026-05-08 (#192): every repo in manifest.json is public on
-# git.moleculesai.app. Anonymous clone works for the entire registered
-# set. The OSS-surface contract is recorded in manifest.json's _comment
-# — Layer-3 customer/private templates (e.g. reno-stars) are NOT in the
-# manifest; they are handled at provision-time via the per-tenant
-# credential resolver (internal#102 RFC).
+# When MOLECULE_GITEA_TOKEN is set, embed it as the basic-auth password so
+# private Gitea repos clone successfully. When unset, clone anonymously
+# (works only for repos that are public on git.moleculesai.app).
 #
-# MOLECULE_GITEA_TOKEN is therefore optional today. Kept supported for
-# two reasons: (a) historical CI configs that still inject
-# AUTO_SYNC_TOKEN remain harmless, (b) reserved for the case where a
-# private internal-only template is later registered via a ci-readonly
-# team grant — review must explicitly sign off on that, since it
-# violates the public-OSS-surface contract.
+# This is the path the publish-workspace-server-image.yml workflow uses:
+# it injects AUTO_SYNC_TOKEN (devops-engineer persona PAT, repo:read on
+# the molecule-ai org) so the in-CI pre-clone step succeeds for ALL
+# manifest entries — including the 5 private workspace-template-* repos
+# (codex, crewai, deepagents, gemini-cli, langgraph) and all 7
+# org-template-* repos.
 #
-# The token (when set) never enters the Docker image: this script runs
-# in the trusted CI context BEFORE `docker buildx build`, populates
+# The token never enters the Docker image: this script runs in the
+# trusted CI context BEFORE `docker buildx build`, populates
 # .tenant-bundle-deps/, then `Dockerfile.tenant` COPYs from there with
 # the .git directories already stripped (see line ~67 below).
+#
+# For backward compatibility — and so a fresh clone works without
+# secrets when (eventually) the workspace-template-* repos flip public —
+# the unset path remains a plain anonymous HTTPS clone. That path will
+# FAIL with "could not read Username" on private repos today; CI MUST
+# set MOLECULE_GITEA_TOKEN.
 set -euo pipefail

View File

@ -1,5 +1,2 @@
 # The compiled binary, not the cmd/server package.
 /server
-# air live-reload build cache (Dockerfile.dev + docker-compose.dev.yml).
-/tmp/

View File

@ -15,14 +15,8 @@
 FROM golang:1.25-alpine
-# air + git (for go mod) + ca-certs (for TLS) + tzdata (for time-zone DB)
-# + docker-cli + docker-cli-buildx so the platform binary can shell out to
-# /var/run/docker.sock (bind-mounted from host) for local-build provisioning.
-# docker-cli alone is insufficient: alpine's docker-cli enables BuildKit by
-# default but ships without buildx, producing
-# `ERROR: BuildKit is enabled but the buildx component is missing or broken`
-# on every `docker build`. docker-cli-buildx provides the buildx subcommand.
-RUN apk add --no-cache git ca-certificates tzdata wget docker-cli docker-cli-buildx \
+# air + git (for go mod) + ca-certs (for TLS) + tzdata (for time-zone DB).
+RUN apk add --no-cache git ca-certificates tzdata wget \
     && go install github.com/air-verse/air@latest
 WORKDIR /app/workspace-server
@ -37,7 +31,7 @@ RUN go mod download
 # block) so the Dockerfile doesn't need to COPY it. air watches the
 # bind-mounted dir for changes.
-ENV CGO_ENABLED=0
+ENV CGO_ENABLED=1
 ENV GOFLAGS="-buildvcs=false"
 # Run air with the .air.toml in the bind-mounted source dir.

View File

@ -26,14 +26,6 @@ func TestExtended_WorkspaceDelete(t *testing.T) {
 		WithArgs(wsDelID).
 		WillReturnRows(sqlmock.NewRows([]string{"id", "name"}))
-	// CascadeDelete walks descendants unconditionally (the 0-children
-	// optimization in the old inline path was dropped during the
-	// CascadeDelete extraction — descendant CTE returns 0 rows here,
-	// same end state, one extra cheap query).
-	mock.ExpectQuery("WITH RECURSIVE descendants").
-		WithArgs(wsDelID).
-		WillReturnRows(sqlmock.NewRows([]string{"id"}))
 	// #73: batch UPDATE happens BEFORE any container teardown.
 	// Uses ANY($1::uuid[]) even with a single ID for consistency.
 	mock.ExpectExec("UPDATE workspaces SET status =").

View File

@ -13,15 +13,12 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
-	"time"
 	"github.com/Molecule-AI/molecule-monorepo/platform/internal/channels"
-	"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
 	"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
 	"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
 	"github.com/Molecule-AI/molecule-monorepo/platform/internal/provisioner"
 	"github.com/gin-gonic/gin"
-	"github.com/lib/pq"
 	"gopkg.in/yaml.v3"
 )
@ -425,16 +422,6 @@ type OrgWorkspace struct {
 	Tier     int    `yaml:"tier" json:"tier"`
 	Template string `yaml:"template" json:"template"`
 	FilesDir string `yaml:"files_dir" json:"files_dir"`
-	// Spawning gates whether this workspace (AND its descendants) gets
-	// provisioned during /org/import. Pointer so we can distinguish
-	// "explicitly set to false" from "unset" (default = spawn). Use case:
-	// the dev-tree org template declares the full team structure but a
-	// developer's local machine only has RAM for a subset; setting
-	// spawning: false on a leaf or a sub-tree root skips that branch
-	// entirely without editing the canonical template structure.
-	// Counted in countWorkspaces same as actual; subtree-skip happens
-	// at provision time in createWorkspaceTree.
-	Spawning *bool `yaml:"spawning,omitempty" json:"spawning,omitempty"`
 	// SystemPrompt is an inline override. Normally each role's system-prompt.md
 	// lives at `<files_dir>/system-prompt.md` and is copied via the files_dir
 	// template-copy step; inline overrides that path for ad-hoc workspaces.
@ -571,19 +558,6 @@ func (h *OrgHandler) Import(c *gin.Context) {
 	var body struct {
 		Dir      string      `json:"dir"`      // org template directory name
 		Template OrgTemplate `json:"template"` // or inline template
-		// Mode controls cleanup behavior of pre-existing workspaces:
-		//   "" / "merge" — additive (default; current behavior).
-		//                  Existing workspaces matched by
-		//                  (parent_id, name) are skipped; nothing
-		//                  outside the new tree is touched.
-		//   "reconcile"  — additive + cleanup. After import, any
-		//                  online workspace whose name matches an
-		//                  imported workspace's name but whose id
-		//                  isn't in the import result set is
-		//                  cascade-deleted. Catches "previous
-		//                  import survived a re-import" zombies
-		//                  (the 20:13→21:17 dev-tree case).
-		Mode string `json:"mode"`
 	}
 	if err := c.ShouldBindJSON(&body); err != nil {
 		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
 		return
@ -629,19 +603,6 @@ func (h *OrgHandler) Import(c *gin.Context) {
 		return
 	}
-	// Emit started AFTER the YAML is loaded so payload.name carries the
-	// resolved template name (was: empty when caller passed `dir` instead
-	// of inline `template`). Pre-parse error paths above return without
-	// emitting — semantically "we couldn't even start an import" — so
-	// every started event is guaranteed a paired completed/failed below
-	// (no orphan started rows in structure_events).
-	importStart := time.Now()
-	emitOrgEvent(c.Request.Context(), "org.import.started", map[string]any{
-		"name": tmpl.Name,
-		"dir":  body.Dir,
-		"mode": body.Mode,
-	})
 	// Required-env preflight — refuses import when any required_env is
 	// missing from global_secrets. No bypass: the prior `force: true`
 	// escape hatch was removed (issue #2290) because it was the silent
@ -747,171 +708,18 @@ func (h *OrgHandler) Import(c *gin.Context) {
 		}
 	}
-	// Reconcile mode: prune workspaces present from a previous import that
-	// share a name with the new tree but are NOT in the new result set.
-	// Catches the additive-import bug where re-running /org/import with a
-	// changed tree shape (different parent_id for the same role name) leaves
-	// the prior workspace online — visible to the canvas, consuming
-	// containers, and looking like a duplicate. Default mode "" / "merge"
-	// preserves the old additive behavior.
-	reconcileRemovedCount := 0
-	reconcileSkipped := 0
-	reconcileErrs := []string{}
-	if body.Mode == "reconcile" && createErr == nil {
-		ctx := c.Request.Context()
-		importedNames := []string{}
-		walkOrgWorkspaceNames(tmpl.Workspaces, &importedNames)
-		importedIDs := make([]string, 0, len(results))
-		for _, r := range results {
-			if id, ok := r["id"].(string); ok && id != "" {
-				importedIDs = append(importedIDs, id)
-			}
-		}
-		// Empty-set guards: if the import didn't produce any names or any
-		// IDs, skip — querying with empty arrays would either match
-		// nothing (harmless) or, worse, match every workspace if a future
-		// query rewrite drops the IN clause. Belt-and-suspenders.
-		if len(importedNames) > 0 && len(importedIDs) > 0 {
-			rows, err := db.DB.QueryContext(ctx, `
-				SELECT id FROM workspaces
-				WHERE name = ANY($1::text[])
-				  AND id != ALL($2::uuid[])
-				  AND status != 'removed'
-			`, pq.Array(importedNames), pq.Array(importedIDs))
-			if err != nil {
-				log.Printf("Org import reconcile: orphan query failed: %v", err)
-				reconcileErrs = append(reconcileErrs, fmt.Sprintf("orphan query: %v", err))
-			} else {
-				orphanIDs := []string{}
-				for rows.Next() {
-					var orphanID string
-					if rows.Scan(&orphanID) == nil {
-						orphanIDs = append(orphanIDs, orphanID)
-					}
-				}
-				rows.Close()
-				for _, oid := range orphanIDs {
-					descendantIDs, stopErrs, err := h.workspace.CascadeDelete(ctx, oid)
-					if err != nil {
-						log.Printf("Org import reconcile: CascadeDelete(%s) failed: %v", oid, err)
-						reconcileErrs = append(reconcileErrs, fmt.Sprintf("delete %s: %v", oid, err))
-						reconcileSkipped++
-						continue
-					}
-					reconcileRemovedCount += 1 + len(descendantIDs)
-					if len(stopErrs) > 0 {
-						log.Printf("Org import reconcile: %s had %d stop errors (orphan sweeper will retry)", oid, len(stopErrs))
-					}
-				}
-				log.Printf("Org import reconcile: %d orphans removed (%d cascade descendants), %d skipped", len(orphanIDs), reconcileRemovedCount-len(orphanIDs), reconcileSkipped)
-			}
-		}
-	}
 	status := http.StatusCreated
 	resp := gin.H{
 		"org":        tmpl.Name,
 		"workspaces": results,
 		"count":      len(results),
 	}
-	if body.Mode == "reconcile" {
-		resp["mode"] = "reconcile"
-		resp["reconcile_removed_count"] = reconcileRemovedCount
-		if len(reconcileErrs) > 0 {
-			resp["reconcile_errors"] = reconcileErrs
-		}
-	}
 	if createErr != nil {
 		status = http.StatusMultiStatus
 		resp["error"] = createErr.Error()
 	}
-	// results contains both freshly-created AND lookupExistingChild skips
-	// (entries with "skipped":true). Splitting the count here so the audit
-	// row reflects "what changed" vs "what was already there" — telemetry
-	// readers shouldn't need to grep stdout to tell an idempotent re-run
-	// apart from a fresh-create.
-	createdCount, skippedCount := 0, 0
-	for _, r := range results {
-		if skipped, _ := r["skipped"].(bool); skipped {
-			skippedCount++
-		} else {
-			createdCount++
-		}
-	}
-	log.Printf("Org import: %s — %d created, %d skipped, %d reconciled",
-		tmpl.Name, createdCount, skippedCount, reconcileRemovedCount)
-	emitOrgEvent(c.Request.Context(), "org.import.completed", map[string]any{
-		"name":                    tmpl.Name,
-		"dir":                     body.Dir,
-		"mode":                    body.Mode,
-		"created_count":           createdCount,
-		"skipped_count":           skippedCount,
-		"reconcile_removed_count": reconcileRemovedCount,
-		"reconcile_errors":        len(reconcileErrs),
-		"duration_ms":             time.Since(importStart).Milliseconds(),
-		"create_error":            errString(createErr),
-	})
+	log.Printf("Org import: %s — %d workspaces created", tmpl.Name, len(results))
 	c.JSON(status, resp)
 }
// walkOrgWorkspaceNames collects every Name in the tree (in any order) into
// names. Used by reconcile to detect orphan workspaces — workspaces with the
// same role name as a freshly-imported one but a different id, surviving from
// a prior import.
func walkOrgWorkspaceNames(workspaces []OrgWorkspace, names *[]string) {
for _, w := range workspaces {
// spawning:false subtrees are still part of the imported tree
// from a logical-tree perspective — DON'T skip the recursion,
// or reconcile would orphan the rest of the subtree on every
// re-import where spawning is toggled. Names of skipped
// workspaces remain registered so reconcile won't double-create
// them when spawning flips back to true.
if w.Name != "" {
*names = append(*names, w.Name)
}
walkOrgWorkspaceNames(w.Children, names)
}
}
// emitOrgEvent records an org-lifecycle event in structure_events so the
// import history is queryable independent of stdout log retention. Errors
// are logged and swallowed — never block the request path on telemetry.
//
// Event-type taxonomy (extend by appending; never rename):
//
// org.import.started — handler entered, request body parsed
// org.import.completed — handler exiting (success or partial)
// org.import.failed — handler exiting with an unrecoverable error
//
// payload fields are documented at each call site.
func emitOrgEvent(ctx context.Context, eventType string, payload map[string]any) {
if payload == nil {
payload = map[string]any{}
}
payloadJSON, err := json.Marshal(payload)
if err != nil {
log.Printf("emitOrgEvent: marshal %s payload failed: %v", eventType, err)
return
}
if _, err := db.DB.ExecContext(ctx, `
INSERT INTO structure_events (event_type, payload, created_at)
VALUES ($1, $2, now())
`, eventType, payloadJSON); err != nil {
log.Printf("emitOrgEvent: insert %s failed: %v", eventType, err)
}
}
// errString returns "" for a nil error, err.Error() otherwise. Lets us put
// nullable error strings in event payloads without checking for nil at every
// call site.
func errString(err error) string {
if err == nil {
return ""
}
return err.Error()
}

View File

@ -42,20 +42,6 @@ import (
 // straight into the parent's child-coordinate space without doing a
 // canvas-wide absolute-position walk.
 func (h *OrgHandler) createWorkspaceTree(ws OrgWorkspace, parentID *string, absX, absY, relX, relY float64, defaults OrgDefaults, orgBaseDir string, results *[]map[string]interface{}, provisionSem chan struct{}) error {
-	// spawning: false guard — skip this workspace AND all descendants.
-	// Pointer-typed so we distinguish "explicitly false" from "unset"
-	// (unset = default to spawn). The guard sits BEFORE any side effect
-	// (no DB row, no docker provision, no children recursion) so a
-	// false-spawning subtree is genuinely a no-op except for the log line.
-	// Use case: dev-tree org template ships the full role taxonomy but a
-	// developer's machine only has RAM for a subset; a per-workspace
-	// `spawning: false` lets them narrow without editing the parent
-	// template's structure.
-	if ws.Spawning != nil && !*ws.Spawning {
-		log.Printf("Org import: skipping workspace %q (spawning=false; %d descendant workspace(s) in subtree also skipped)", ws.Name, countWorkspaces(ws.Children))
-		return nil
-	}
 	// Apply defaults
 	runtime := ws.Runtime
 	if runtime == "" {
@ -467,25 +453,8 @@ func (h *OrgHandler) createWorkspaceTree(ws OrgWorkspace, parentID *string, absX
 	envVars := map[string]string{}
 	// 0. Persona env (lowest precedence; injects the role's Gitea identity:
 	//    GITEA_USER, GITEA_TOKEN, GITEA_TOKEN_SCOPES, GITEA_USER_EMAIL,
-	//    GITEA_SSH_KEY_PATH, plus MODEL_PROVIDER/MODEL and the LLM auth
-	//    token like CLAUDE_CODE_OAUTH_TOKEN or MINIMAX_API_KEY).
-	//    Workspace and org .env can override.
-	//
-	// Use ws.FilesDir as the persona-dir lookup key, NOT ws.Role. In the
-	// dev-tree org.yaml shape, `role:` carries the multi-line descriptive
-	// text the agent reads from its prompt ("Engineering planning and
-	// team coordination — leads Core Platform, Controlplane, ..."), while
-	// `files_dir:` holds the short slug (`core-lead`, `dev-lead`, etc.)
-	// matching `~/.molecule-ai/personas/<files_dir>/env`
-	// (bind-mounted to `/etc/molecule-bootstrap/personas/<files_dir>/env`).
-	//
-	// Pre-fix, this passed `ws.Role` whose multi-word content failed
-	// isSafeRoleName silently, so every imported workspace booted with
-	// zero persona-env rows in workspace_secrets — no ANTHROPIC /
-	// CLAUDE_CODE auth in the container env. The claude_agent_sdk
-	// then wedged on `query.initialize()` with a 60s control-request
-	// timeout (caught 2026-05-08 right after dev-only org/import).
-	loadPersonaEnvFile(ws.FilesDir, envVars)
+	//    GITEA_SSH_KEY_PATH). Workspace and org .env can override.
+	loadPersonaEnvFile(ws.Role, envVars)
 	if orgBaseDir != "" {
 		// 1. Org root .env (shared defaults)
 		parseEnvFile(filepath.Join(orgBaseDir, ".env"), envVars)

View File

@ -1,158 +0,0 @@
package handlers
import (
"context"
"sort"
"testing"
"github.com/DATA-DOG/go-sqlmock"
)
// Tests for the reconcile-mode + audit-event additions to OrgHandler.Import.
//
// Background: /org/import was purely additive — re-running with a tree that
// renamed/reparented a role left the prior workspace online (different
// parent_id from the new one, so lookupExistingChild's parent-scoped dedupe
// missed it). The 2026-05-08 dev-tree case left 8 orphans surviving a
// re-import. mode="reconcile" closes the gap; emitOrgEvent makes "what
// happened at 20:13?" queryable instead of stdout-grep archaeology.
func TestWalkOrgWorkspaceNames_FlatTree(t *testing.T) {
tree := []OrgWorkspace{
{Name: "Dev Lead"},
{Name: "Release Manager"},
}
var names []string
walkOrgWorkspaceNames(tree, &names)
sort.Strings(names)
want := []string{"Dev Lead", "Release Manager"}
if !equalStrings(names, want) {
t.Errorf("flat tree: got %v, want %v", names, want)
}
}
func TestWalkOrgWorkspaceNames_NestedTree(t *testing.T) {
tree := []OrgWorkspace{
{
Name: "Dev Lead",
Children: []OrgWorkspace{
{Name: "Core Platform Lead", Children: []OrgWorkspace{{Name: "Core-BE"}}},
{Name: "SDK Lead"},
},
},
}
var names []string
walkOrgWorkspaceNames(tree, &names)
sort.Strings(names)
want := []string{"Core Platform Lead", "Core-BE", "Dev Lead", "SDK Lead"}
if !equalStrings(names, want) {
t.Errorf("nested tree: got %v, want %v", names, want)
}
}
// Pins the contract that spawning:false subtrees still contribute their names
// to the reconcile working set. If the walker started skipping them, a
// re-import that toggled spawning would orphan whichever workspaces had been
// previously imported with spawning:true — the inverse of the bug being
// fixed. Spawning gates *provisioning*, not *reconcile membership*.
func TestWalkOrgWorkspaceNames_SpawningFalseStillCounted(t *testing.T) {
f := false
tree := []OrgWorkspace{
{Name: "Dev Lead", Children: []OrgWorkspace{
{Name: "Skipped Lead", Spawning: &f, Children: []OrgWorkspace{
{Name: "Skipped Child"},
}},
}},
}
var names []string
walkOrgWorkspaceNames(tree, &names)
sort.Strings(names)
want := []string{"Dev Lead", "Skipped Child", "Skipped Lead"}
if !equalStrings(names, want) {
t.Errorf("spawning:false subtree: got %v, want %v", names, want)
}
}
func TestWalkOrgWorkspaceNames_EmptyNamesSkipped(t *testing.T) {
tree := []OrgWorkspace{
{Name: "Dev Lead"},
{Name: ""}, // YAML default / placeholder
{Name: "Release Manager"},
}
var names []string
walkOrgWorkspaceNames(tree, &names)
sort.Strings(names)
want := []string{"Dev Lead", "Release Manager"}
if !equalStrings(names, want) {
t.Errorf("empty-name skip: got %v, want %v", names, want)
}
}
// emitOrgEvent must INSERT into structure_events with event_type + JSON
// payload. Verifies the SQL shape pinning so a future schema rename
// (e.g., switching to audit_events) breaks the test loudly instead of
// silently dropping telemetry.
func TestEmitOrgEvent_InsertsToStructureEvents(t *testing.T) {
mock := setupTestDB(t)
mock.ExpectExec(`INSERT INTO structure_events`).
WithArgs("org.import.started", sqlmock.AnyArg()).
WillReturnResult(sqlmock.NewResult(1, 1))
emitOrgEvent(context.Background(), "org.import.started", map[string]any{
"name": "test-org",
"mode": "reconcile",
})
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("sqlmock expectations: %v", err)
}
}
// Insert failures are log-and-swallow — telemetry MUST NOT block the
// caller path. If this regresses (e.g., a future patch returns the err),
// org-import requests would fail with HTTP 500 every time a structure_events
// INSERT hiccups, which is strictly worse than losing the row.
func TestEmitOrgEvent_DBErrorIsSwallowed(t *testing.T) {
mock := setupTestDB(t)
mock.ExpectExec(`INSERT INTO structure_events`).
WithArgs("org.import.failed", sqlmock.AnyArg()).
WillReturnError(errSentinelTest)
// Must not panic; must not propagate. The function returns nothing,
// so the contract is "doesn't crash."
emitOrgEvent(context.Background(), "org.import.failed", map[string]any{
"err": "preflight failed",
})
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("sqlmock expectations: %v", err)
}
}
func TestErrString(t *testing.T) {
if got := errString(nil); got != "" {
t.Errorf("nil error: got %q, want empty", got)
}
if got := errString(errSentinelTest); got != "sentinel" {
t.Errorf("sentinel error: got %q, want \"sentinel\"", got)
}
}
// errSentinelTest is a marker error used for swallow-error assertions.
var errSentinelTest = sentinelErrTest{}
type sentinelErrTest struct{}
func (sentinelErrTest) Error() string { return "sentinel" }
func equalStrings(a, b []string) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if a[i] != b[i] {
return false
}
}
return true
}

View File

@ -323,19 +323,161 @@ func (h *WorkspaceHandler) Delete(c *gin.Context) {
 		return
 	}
-	// Delegate the cascade to CascadeDelete so the HTTP path and the
-	// OrgImport reconcile path share one teardown sequence (#73 race
-	// guard, container stop, volume removal, token revocation, schedule
-	// disable, broadcast). The HTTP-specific bits — direct-children 409
-	// gate above, ?purge=true hard-delete below, response shaping —
-	// stay in this handler.
-	descendantIDs, stopErrs, err := h.CascadeDelete(ctx, id)
-	if err != nil {
-		log.Printf("Delete: CascadeDelete(%s) failed: %v", id, err)
-		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
-		return
-	}
+	// Cascade delete: collect ALL descendants (not just direct children) via
+	// recursive CTE, then stop each container and remove each volume.
+	// Previous bug: only direct children's containers were stopped, leaving
+	// grandchildren as orphan running containers after a cascade delete.
+	descendantIDs := []string{}
+	if len(children) > 0 {
+		descRows, err := db.DB.QueryContext(ctx, `
+			WITH RECURSIVE descendants AS (
+				SELECT id FROM workspaces WHERE parent_id = $1 AND status != 'removed'
+				UNION ALL
+				SELECT w.id FROM workspaces w JOIN descendants d ON w.parent_id = d.id WHERE w.status != 'removed'
+			)
+			SELECT id FROM descendants
+		`, id)
+		if err != nil {
+			log.Printf("Delete: descendant query error for %s: %v", id, err)
+		} else {
+			for descRows.Next() {
+				var descID string
+				if descRows.Scan(&descID) == nil {
+					descendantIDs = append(descendantIDs, descID)
+				}
+			}
+			descRows.Close()
+		}
+	}
// #73 fix: mark rows 'removed' in the DB FIRST, BEFORE stopping containers
// or removing volumes. Previously the sequence was stop → update-status,
// which left a gap where:
// - the container's last pre-teardown heartbeat could resurrect the row
// via the register-handler UPSERT (now also guarded in #73)
// - the liveness monitor could observe 'online' status + expired Redis
// TTL and trigger RestartByID, recreating a container we're trying
// to destroy
// Marking 'removed' first makes both of those paths no-op via their
// existing `status NOT IN ('removed', ...)` guards.
	allIDs := append([]string{id}, descendantIDs...)
if _, err := db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = $1, updated_at = now() WHERE id = ANY($2::uuid[])`,
models.StatusRemoved, pq.Array(allIDs)); err != nil {
log.Printf("Delete status update error for %s: %v", id, err)
}
if _, err := db.DB.ExecContext(ctx,
`DELETE FROM canvas_layouts WHERE workspace_id = ANY($1::uuid[])`,
pq.Array(allIDs)); err != nil {
log.Printf("Delete canvas_layouts error for %s: %v", id, err)
}
// Revoke all auth tokens for the deleted workspaces. Once the workspace is
// gone its tokens are meaningless; leaving them alive would keep
// HasAnyLiveTokenGlobal = true even after the platform is otherwise empty,
// which prevents AdminAuth from returning to fail-open and breaks the E2E
// test's count-zero assertion (and local re-run cleanup).
if _, err := db.DB.ExecContext(ctx,
`UPDATE workspace_auth_tokens SET revoked_at = now()
WHERE workspace_id = ANY($1::uuid[]) AND revoked_at IS NULL`,
pq.Array(allIDs)); err != nil {
log.Printf("Delete token revocation error for %s: %v", id, err)
}
// #1027: cascade-disable all schedules for the deleted workspaces so
// the scheduler never fires a cron into a removed container.
if _, err := db.DB.ExecContext(ctx,
`UPDATE workspace_schedules SET enabled = false, updated_at = now()
WHERE workspace_id = ANY($1::uuid[]) AND enabled = true`,
pq.Array(allIDs)); err != nil {
log.Printf("Delete schedule disable error for %s: %v", id, err)
}
// Now stop containers + remove volumes for all descendants (any depth).
// Any concurrent heartbeat / registration / liveness-triggered restart
// will see status='removed' and bail out early.
//
// Combines two concerns:
//
// 1. Detach cleanup from the request ctx via WithoutCancel + a 30s
// timeout, so when the canvas's `api.del` resolves on our 200
// (and gin cancels c.Request.Context()), in-flight Docker
// stop/remove calls don't get cancelled mid-operation. The
// previous shape leaked containers every time the canvas hung
// up promptly: Stop returned "context canceled", the container
// stayed up, and the next RemoveVolume failed with
// "volume in use". 30s is generous for Docker daemon round-
// trips (typical: <2s) and bounds a stuck daemon.
//
// 2. #1843: aggregate Stop() failures into stopErrs so the
// post-deletion block surfaces them as 500. On the CP/EC2
// backend, Stop() calls control plane's DELETE endpoint to
// terminate the EC2; if that errors (transient 5xx, network),
// the EC2 stays running with no DB row to track it (the
// "orphan EC2 on a 0-customer account" scenario). Loud-fail
// instead of silent-leak — clients retry, Stop's instance_id
// lookup is idempotent against status='removed'. RemoveVolume
// errors stay log-and-continue (local cleanup, not infra-leak).
cleanupCtx, cleanupCancel := context.WithTimeout(
context.WithoutCancel(ctx), 30*time.Second)
defer cleanupCancel()
var stopErrs []error
stopAndRemove := func(wsID string) {
// Stop the workload first via the backend dispatcher (CP for
// SaaS, Docker for self-hosted). Pre-2026-05-05 this gate was
// `if h.provisioner == nil { return }` — early-returning on
// every SaaS tenant left the EC2 running with no DB row to
// track it (issue #2814; the comment below claimed "loud-fail
// instead of silent-leak" but the early-return made it the
// silent path on SaaS).
//
// Check Stop's error before any volume cleanup — the previous
// code discarded it and immediately tried RemoveVolume, which
// always fails with "volume in use" when Stop didn't actually
// kill the container. The orphan sweeper
// (registry/orphan_sweeper.go) catches what we skip here on
// the next reconcile pass.
if err := h.StopWorkspaceAuto(cleanupCtx, wsID); err != nil {
log.Printf("Delete %s stop failed: %v — leaving cleanup for orphan sweeper", wsID, err)
stopErrs = append(stopErrs, fmt.Errorf("stop %s: %w", wsID, err))
return
}
// Volume cleanup is Docker-only — CP-managed workspaces have
// no host-bind volumes to remove. Skip silently when no Docker
// provisioner is wired (the SaaS path already terminated the
// EC2 above; nothing left to do).
if h.provisioner != nil {
if err := h.provisioner.RemoveVolume(cleanupCtx, wsID); err != nil {
log.Printf("Delete %s volume removal warning: %v", wsID, err)
}
}
}
for _, descID := range descendantIDs {
stopAndRemove(descID)
db.ClearWorkspaceKeys(cleanupCtx, descID)
// #2269: drop the per-workspace restartState entry so it
// doesn't accumulate across the platform's lifetime. The
// LoadOrStore that creates the entry (workspace_restart.go)
// has no companion remove path; without this Delete, every
// short-lived workspace leaks ~16 bytes forever.
restartStates.Delete(descID)
// Detach broadcaster ctx for the same reason as the cleanup
// above — RecordAndBroadcast does an INSERT INTO
// structure_events + Redis Publish. If the canvas hangs up,
// a request-ctx-bound INSERT can be cancelled mid-write,
// leaving other WS clients ignorant of the cascade. The DB
// row is already 'removed' so it's recoverable, but the
// inconsistency is avoidable.
h.broadcaster.RecordAndBroadcast(cleanupCtx, string(events.EventWorkspaceRemoved), descID, map[string]interface{}{})
}
stopAndRemove(id)
db.ClearWorkspaceKeys(cleanupCtx, id)
restartStates.Delete(id) // #2269: same as descendants above
h.broadcaster.RecordAndBroadcast(cleanupCtx, string(events.EventWorkspaceRemoved), id, map[string]interface{}{
"cascade_deleted": len(descendantIDs),
})
	// If any Stop call failed, surface 500 so the client retries. The DB
	// row is already 'removed' (idempotent), and Stop's instance_id
@ -401,104 +543,6 @@ func (h *WorkspaceHandler) Delete(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{"status": "removed", "cascade_deleted": len(descendantIDs)})
}
// CascadeDelete performs the cascade-removal sequence used by the HTTP
// DELETE handler and by OrgImport's reconcile mode: walk descendants, mark
// self+descendants 'removed' first (#73 race guard), stop containers / EC2s,
// remove volumes, revoke tokens, disable schedules, broadcast events.
//
// Idempotent against already-removed rows (the descendant CTE and all UPDATE
// guards skip status='removed'). Returns the descendant id list so the HTTP
// caller can drive the optional `?purge=true` hard-delete path against the
// same set the cascade just touched, plus any per-workspace stop errors so
// callers can surface a retryable failure instead of a silent-leak.
//
// Caller is responsible for the children-confirmation gate (the HTTP handler
// returns 409 when children exist + ?confirm=true is missing); this helper
// always cascades.
func (h *WorkspaceHandler) CascadeDelete(ctx context.Context, id string) ([]string, []error, error) {
if err := validateWorkspaceID(id); err != nil {
return nil, nil, err
}
descendantIDs := []string{}
descRows, err := db.DB.QueryContext(ctx, `
WITH RECURSIVE descendants AS (
SELECT id FROM workspaces WHERE parent_id = $1 AND status != 'removed'
UNION ALL
SELECT w.id FROM workspaces w JOIN descendants d ON w.parent_id = d.id WHERE w.status != 'removed'
)
SELECT id FROM descendants
`, id)
if err != nil {
return nil, nil, fmt.Errorf("descendant query: %w", err)
}
for descRows.Next() {
var descID string
if descRows.Scan(&descID) == nil {
descendantIDs = append(descendantIDs, descID)
}
}
descRows.Close()
allIDs := append([]string{id}, descendantIDs...)
if _, err := db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = $1, updated_at = now() WHERE id = ANY($2::uuid[])`,
models.StatusRemoved, pq.Array(allIDs)); err != nil {
log.Printf("CascadeDelete status update for %s: %v", id, err)
}
if _, err := db.DB.ExecContext(ctx,
`DELETE FROM canvas_layouts WHERE workspace_id = ANY($1::uuid[])`,
pq.Array(allIDs)); err != nil {
log.Printf("CascadeDelete canvas_layouts for %s: %v", id, err)
}
if _, err := db.DB.ExecContext(ctx,
`UPDATE workspace_auth_tokens SET revoked_at = now()
WHERE workspace_id = ANY($1::uuid[]) AND revoked_at IS NULL`,
pq.Array(allIDs)); err != nil {
log.Printf("CascadeDelete token revocation for %s: %v", id, err)
}
if _, err := db.DB.ExecContext(ctx,
`UPDATE workspace_schedules SET enabled = false, updated_at = now()
WHERE workspace_id = ANY($1::uuid[]) AND enabled = true`,
pq.Array(allIDs)); err != nil {
log.Printf("CascadeDelete schedule disable for %s: %v", id, err)
}
cleanupCtx, cleanupCancel := context.WithTimeout(
context.WithoutCancel(ctx), 30*time.Second)
defer cleanupCancel()
var stopErrs []error
stopAndRemove := func(wsID string) {
if err := h.StopWorkspaceAuto(cleanupCtx, wsID); err != nil {
log.Printf("CascadeDelete %s stop failed: %v — leaving cleanup for orphan sweeper", wsID, err)
stopErrs = append(stopErrs, fmt.Errorf("stop %s: %w", wsID, err))
return
}
if h.provisioner != nil {
if err := h.provisioner.RemoveVolume(cleanupCtx, wsID); err != nil {
log.Printf("CascadeDelete %s volume removal warning: %v", wsID, err)
}
}
}
for _, descID := range descendantIDs {
stopAndRemove(descID)
db.ClearWorkspaceKeys(cleanupCtx, descID)
restartStates.Delete(descID)
h.broadcaster.RecordAndBroadcast(cleanupCtx, string(events.EventWorkspaceRemoved), descID, map[string]interface{}{})
}
stopAndRemove(id)
db.ClearWorkspaceKeys(cleanupCtx, id)
restartStates.Delete(id)
h.broadcaster.RecordAndBroadcast(cleanupCtx, string(events.EventWorkspaceRemoved), id, map[string]interface{}{
"cascade_deleted": len(descendantIDs),
})
return descendantIDs, stopErrs, nil
}
// validateWorkspaceID returns an error when id is not a valid UUID.
// #687: prevents 500s from Postgres when a garbage string (e.g. ../../etc/passwd)
// is passed as the :id path parameter.

View File

@ -715,30 +715,14 @@ func deriveProviderFromModelSlug(model string) string {
 // payload.Model at boot), this is a no-op — no harm in the switch
 // being empty for those cases.
 func applyRuntimeModelEnv(envVars map[string]string, runtime, model string) {
-	// Resolution order (priority high → low):
-	//  1. payload.Model (caller passed the canvas-picked model id verbatim)
-	//  2. envVars["MODEL"] (workspace_secret persisted by /org/import via
-	//     the persona env file — MODEL=MiniMax-M2.7-highspeed etc.)
-	//  3. envVars["MODEL_PROVIDER"] (legacy: this secret was historically a
-	//     *model id* set by canvas Save+Restart's PUT /model; on the
-	//     post-2026-05-08 persona-env convention it's a *provider slug*
-	//     (e.g. "minimax") which is NOT a valid model id, so this fallback
-	//     only fires when MODEL is absent.)
-	//
-	// Pre-fix bug: this function unconditionally OVERWROTE envVars["MODEL"]
-	// with the MODEL_PROVIDER slug (when payload.Model was empty), wiping
-	// the operator's explicit per-persona MODEL secret on every restart.
-	// Symptom: a workspace whose persona env said
-	// MODEL=MiniMax-M2.7-highspeed booted fine on first /org/import (the
-	// envVars map was populated direct from the env file), then on the
-	// next Restart the workspace_secrets-derived MODEL got clobbered by
-	// MODEL_PROVIDER="minimax" — the literal slug, not a valid model id —
-	// and the workspace template's adapter routed to providers[0]
-	// (anthropic-oauth) and wedged at SDK initialize. Caught 2026-05-08
-	// during Phase 4 verification of template-claude-code PR #9.
-	if model == "" {
-		model = envVars["MODEL"]
-	}
+	// Fall back to the MODEL_PROVIDER workspace secret when the caller
+	// didn't pass one explicitly. This is the path that "Save+Restart"
+	// hits — Restart builds its payload from the workspaces row (no model
+	// column there) so payload.Model is always empty, but the user's
+	// canvas selection was stored as MODEL_PROVIDER via PUT /model and
+	// is already loaded into envVars here. Without this fallback hermes
+	// silently boots with the template default and errors "No LLM
+	// provider configured" even though the user picked a valid model.
 	if model == "" {
 		model = envVars["MODEL_PROVIDER"]
 	}

View File

@ -724,68 +724,3 @@ func TestApplyRuntimeModelEnv_SetsUniversalMODELForAllRuntimes(t *testing.T) {
		})
	}
}
// TestApplyRuntimeModelEnv_PersonaEnvMODELSecretPreserved locks in the
// 2026-05-08 fix that prevents the MODEL_PROVIDER-as-slug fallback from
// silently overwriting a per-persona MODEL workspace_secret on restart.
//
// Pre-fix bug recurrence guard: when the persona env file (loaded into
// workspace_secrets at /org/import time) declares both MODEL=<id> and
// MODEL_PROVIDER=<slug>, the restart path used to overwrite envVars["MODEL"]
// with the MODEL_PROVIDER slug because applyRuntimeModelEnv'\''s
// payload.Model fallback consulted MODEL_PROVIDER first. Symptom: dev-tree
// workspaces booted fine on first /org/import, then on next restart the
// model id became literal "minimax" and the workspace template'\''s adapter
// failed to match any registry prefix, fell through to anthropic-oauth,
// and wedged at SDK initialize. Caught during Phase 4 verification of
// template-claude-code PR #9.
func TestApplyRuntimeModelEnv_PersonaEnvMODELSecretPreserved(t *testing.T) {
cases := []struct {
name string
envMODEL string
envMP string
wantMODEL string
}{
{
name: "MODEL secret wins over MODEL_PROVIDER slug (persona-env shape on restart)",
envMODEL: "MiniMax-M2.7-highspeed",
envMP: "minimax",
wantMODEL: "MiniMax-M2.7-highspeed",
},
{
name: "MODEL secret wins even when same as MODEL_PROVIDER",
envMODEL: "opus",
envMP: "claude-code",
wantMODEL: "opus",
},
{
name: "MODEL absent → fall back to MODEL_PROVIDER (legacy canvas Save+Restart shape)",
envMODEL: "",
envMP: "MiniMax-M2.7",
wantMODEL: "MiniMax-M2.7",
},
{
name: "Both absent → no MODEL set",
envMODEL: "",
envMP: "",
wantMODEL: "",
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
envVars := map[string]string{}
if tc.envMODEL != "" {
envVars["MODEL"] = tc.envMODEL
}
if tc.envMP != "" {
envVars["MODEL_PROVIDER"] = tc.envMP
}
// payload.Model is empty (the restart case)
applyRuntimeModelEnv(envVars, "claude-code", "")
if got := envVars["MODEL"]; got != tc.wantMODEL {
t.Errorf("MODEL = %q, want %q (envMODEL=%q envMP=%q)",
got, tc.wantMODEL, tc.envMODEL, tc.envMP)
}
})
}
}

View File

@ -813,12 +813,6 @@ func TestWorkspaceDelete_DisablesSchedules(t *testing.T) {
 		WithArgs(wsID).
 		WillReturnRows(sqlmock.NewRows([]string{"id", "name"}))
-	// CascadeDelete walks descendants unconditionally — 0-children case
-	// returns 0 rows here.
-	mock.ExpectQuery("WITH RECURSIVE descendants").
-		WithArgs(wsID).
-		WillReturnRows(sqlmock.NewRows([]string{"id"}))
 	// Mark workspace as removed
 	mock.ExpectExec("UPDATE workspaces SET status =").
 		WillReturnResult(sqlmock.NewResult(0, 1))
@ -941,12 +935,6 @@ func TestWorkspaceDelete_ScheduleDisableOnlyTargetsDeletedWorkspace(t *testing.T
 		WithArgs(wsA).
 		WillReturnRows(sqlmock.NewRows([]string{"id", "name"}))
-	// CascadeDelete walks descendants unconditionally — 0-children case
-	// returns 0 rows here.
-	mock.ExpectQuery("WITH RECURSIVE descendants").
-		WithArgs(wsA).
-		WillReturnRows(sqlmock.NewRows([]string{"id"}))
 	// Mark only workspace A as removed
 	mock.ExpectExec("UPDATE workspaces SET status =").
 		WillReturnResult(sqlmock.NewResult(0, 1))

View File

@ -1,2 +0,0 @@
DROP INDEX IF EXISTS workspaces_update_tier_canary;
ALTER TABLE workspaces DROP COLUMN IF EXISTS update_tier;

View File

@ -1,26 +0,0 @@
-- workspaces.update_tier — canary vs production filter for plugin updates
-- (core#115). Composes with the version-subscription DB foundation
-- (core#113, merged) and the upcoming drift+queue+apply endpoint
-- (core#123).
--
-- Tiers:
-- 'production' (default) — fan-out target; only updated AFTER canary soak
-- 'canary' — early-adopter target; updates land here first
--
-- Default 'production' so existing customers (Reno-Stars + any future
-- live tenant) are default-safe. Synthetic dogfooding workspaces opt
-- INTO 'canary' explicitly.
--
-- The column is just metadata at this layer; the actual filter logic
-- ('apply this update only to canary tier first') lives in the future
-- POST /admin/plugin-updates/:id/apply endpoint (core#123).
ALTER TABLE workspaces
ADD COLUMN IF NOT EXISTS update_tier TEXT NOT NULL DEFAULT 'production'
CHECK (update_tier IN ('canary', 'production'));
-- Partial index for the apply endpoint's canary-tier scan: only
-- index canary rows since the apply path queries them most often
-- and the production set is the much larger default.
CREATE INDEX IF NOT EXISTS workspaces_update_tier_canary
ON workspaces(update_tier) WHERE update_tier = 'canary';
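
The canary-first scan itself ships later with core#123, so nothing in this diff consumes the column yet. As a sketch of the intended read path under that stated design, a hypothetical apply step might select its first wave like this (table and column names come from the migration above; the `pg` client and everything else is assumed):

// Hypothetical first-wave selection for a canary-gated plugin update.
import { Pool } from "pg";

const pool = new Pool(); // connection config from standard PG* env vars

async function canaryWorkspaceIDs(): Promise<string[]> {
  // The WHERE clause matches the partial index's predicate
  // (update_tier = 'canary'), so the planner can use
  // workspaces_update_tier_canary instead of scanning the much
  // larger default-'production' set.
  const { rows } = await pool.query<{ id: string }>(
    `SELECT id FROM workspaces WHERE update_tier = 'canary'`,
  );
  return rows.map((r) => r.id);
}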