Compare commits
No commits in common. "main" and "runtime-v1.0.0" have entirely different histories.
main
...
runtime-v1
@ -1,118 +0,0 @@
|
||||
#!/usr/bin/env bash
# audit-force-merge — detect a §SOP-6 force-merge after PR close, emit
# `incident.force_merge` to stdout as structured JSON.
#
# Vector's docker_logs source picks up runner stdout; the JSON gets
# shipped to Loki on molecule-canonical-obs, indexable by event_type.
# Query example:
#
#   {host="operator"} |= "event_type" |= "incident.force_merge" | json
#
# A force-merge is detected when a PR closed-with-merged=true had at
# least one of the repo's required-status-check contexts in a state
# other than "success" at the merge commit's SHA. That's exactly what
# the Gitea force_merge:true API call lets through, so it's a faithful
# detector of the override path.
#
# Triggers on `pull_request_target: closed` (loaded from base branch
# per §SOP-6 security model). No-op when merged=false.
#
# Required env (set by the workflow):
#   GITEA_TOKEN, GITEA_HOST, REPO, PR_NUMBER, REQUIRED_CHECKS
#
# REQUIRED_CHECKS is a newline-separated list of status-check context
# names that branch protection requires. Declared in the workflow YAML
# rather than fetched from /branch_protections (which needs admin
# scope — sop-tier-bot has read-only). Trade dynamism for simplicity:
# when the required-check set changes, update both branch protection
# AND this env. Keeping them in sync is less complexity than granting
# the audit bot admin perms on every repo.

set -euo pipefail

: "${GITEA_TOKEN:?required}"
: "${GITEA_HOST:?required}"
: "${REPO:?required}"
: "${PR_NUMBER:?required}"
: "${REQUIRED_CHECKS:?required (newline-separated context names)}"

OWNER="${REPO%%/*}"
NAME="${REPO##*/}"
API="https://${GITEA_HOST}/api/v1"
AUTH="Authorization: token ${GITEA_TOKEN}"

# 1. Fetch the PR. If not merged, no-op.
PR=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/pulls/${PR_NUMBER}")
MERGED=$(jq -r '.merged // false' <<<"$PR")
if [ "$MERGED" != "true" ]; then
  echo "::notice::PR #${PR_NUMBER} closed without merge — no audit emission."
  exit 0
fi

MERGE_SHA=$(jq -r '.merge_commit_sha // empty' <<<"$PR")
MERGED_BY=$(jq -r '.merged_by.login // "unknown"' <<<"$PR")
TITLE=$(jq -r '.title // ""' <<<"$PR")
BASE_BRANCH=$(jq -r '.base.ref // "main"' <<<"$PR")
HEAD_SHA=$(jq -r '.head.sha // empty' <<<"$PR")

if [ -z "$MERGE_SHA" ]; then
  echo "::warning::PR #${PR_NUMBER} merged=true but no merge_commit_sha — cannot evaluate force-merge."
  exit 0
fi

# FIX: bail out early when head.sha is absent. Previously an empty
# HEAD_SHA fell through to `GET /commits//status` (guaranteed 404),
# which yields an empty status map and a SPURIOUS force-merge event
# (every required check would read as "missing").
if [ -z "$HEAD_SHA" ]; then
  echo "::warning::PR #${PR_NUMBER} has no head.sha — cannot evaluate force-merge."
  exit 0
fi

# 2. Required status checks declared in the workflow env.
REQUIRED="$REQUIRED_CHECKS"
if [ -z "${REQUIRED//[[:space:]]/}" ]; then
  echo "::notice::REQUIRED_CHECKS empty — force-merge not applicable."
  exit 0
fi

# 3. Status-check state at the PR HEAD (where checks ran). The merge
#    commit doesn't get its own checks; we evaluate the PR's last
#    commit, which is what branch protection compared against.
STATUS=$(curl -sS -H "$AUTH" \
  "${API}/repos/${OWNER}/${NAME}/commits/${HEAD_SHA}/status")
declare -A CHECK_STATE
# `if` rather than `[ -n ] && ...` so a blank trailing line can't leave
# the loop body with a non-zero status and trip `set -e`.
while IFS=$'\t' read -r ctx state; do
  if [ -n "$ctx" ]; then
    CHECK_STATE["$ctx"]="$state"
  fi
done < <(jq -r '.statuses // [] | .[] | "\(.context)\t\(.status)"' <<<"$STATUS")

# 4. For each required check, was it green at merge? YAML block scalars
#    (`|`) leave a trailing newline; skip blank/whitespace-only lines.
FAILED_CHECKS=()
while IFS= read -r req; do
  trimmed="${req#"${req%%[![:space:]]*}"}"   # ltrim
  trimmed="${trimmed%"${trimmed##*[![:space:]]}"}" # rtrim
  [ -z "$trimmed" ] && continue
  state="${CHECK_STATE["$trimmed"]:-missing}"
  if [ "$state" != "success" ]; then
    FAILED_CHECKS+=("${trimmed}=${state}")
  fi
done <<< "$REQUIRED"

if [ "${#FAILED_CHECKS[@]}" -eq 0 ]; then
  echo "::notice::PR #${PR_NUMBER} merged with all required checks green — not a force-merge."
  exit 0
fi

# 5. Emit structured audit event.
NOW=$(date -u +%Y-%m-%dT%H:%M:%SZ)
FAILED_JSON=$(printf '%s\n' "${FAILED_CHECKS[@]}" | jq -R . | jq -s .)

# Print as a single-line JSON so Vector's parse_json transform can pick
# it up cleanly from docker_logs.
jq -nc \
  --arg event_type "incident.force_merge" \
  --arg ts "$NOW" \
  --arg repo "$REPO" \
  --argjson pr "$PR_NUMBER" \
  --arg title "$TITLE" \
  --arg base "$BASE_BRANCH" \
  --arg merged_by "$MERGED_BY" \
  --arg merge_sha "$MERGE_SHA" \
  --argjson failed_checks "$FAILED_JSON" \
  '{event_type: $event_type, ts: $ts, repo: $repo, pr: $pr, title: $title,
    base_branch: $base, merged_by: $merged_by, merge_sha: $merge_sha,
    failed_checks: $failed_checks}'

echo "::warning::FORCE-MERGE detected on PR #${PR_NUMBER} by ${MERGED_BY}: ${#FAILED_CHECKS[@]} required check(s) not green at merge time."
|
||||
@ -1,149 +0,0 @@
|
||||
#!/usr/bin/env bash
# sop-tier-check — verify a Gitea PR satisfies the §SOP-6 approval gate.
#
# Reads the PR's tier label, walks approving reviewers, and checks each
# approver's Gitea team membership against the tier's eligible-team set.
# Marks pass only when at least one non-author approver is in an eligible
# team.
#
# Invoked from `.gitea/workflows/sop-tier-check.yml`. The workflow sets
# the env vars below; this script does no IO outside of stdout/stderr +
# the Gitea API.
#
# Required env:
#   GITEA_TOKEN — bot PAT with read:organization,read:user,
#                 read:issue,read:repository scopes
#   GITEA_HOST  — e.g. git.moleculesai.app
#   REPO        — owner/name (from github.repository)
#   PR_NUMBER   — int (from github.event.pull_request.number)
#   PR_AUTHOR   — login (from github.event.pull_request.user.login)
#
# Optional:
#   SOP_DEBUG=1 — print per-API-call diagnostic lines (HTTP codes,
#                 raw response bodies). Default: off.
#
# Stale-status caveat: Gitea Actions does not always re-fire workflows
# on `labeled` / `pull_request_review:submitted` events. If the
# sop-tier-check status is stale (e.g. red after labels/approvals were
# added), push an empty commit to the PR branch to force a synchronize
# event, OR re-request reviews. Tracked: internal#46.

set -euo pipefail

debug() {
  if [ "${SOP_DEBUG:-}" = "1" ]; then
    echo " [debug] $*" >&2
  fi
}

# Validate env
: "${GITEA_TOKEN:?GITEA_TOKEN required}"
: "${GITEA_HOST:?GITEA_HOST required}"
: "${REPO:?REPO required (owner/name)}"
: "${PR_NUMBER:?PR_NUMBER required}"
: "${PR_AUTHOR:?PR_AUTHOR required}"

OWNER="${REPO%%/*}"
NAME="${REPO##*/}"
API="https://${GITEA_HOST}/api/v1"
AUTH="Authorization: token ${GITEA_TOKEN}"
echo "::notice::tier-check start: repo=$OWNER/$NAME pr=$PR_NUMBER author=$PR_AUTHOR"

# Sanity: token resolves to a user
WHOAMI=$(curl -sS -H "$AUTH" "${API}/user" | jq -r '.login // ""')
if [ -z "$WHOAMI" ]; then
  echo "::error::GITEA_TOKEN cannot resolve a user via /api/v1/user — check the token scope and that the secret is wired correctly."
  exit 1
fi
echo "::notice::token resolves to user: $WHOAMI"

# 1. Read tier label.
# FIX: iterate with `read -r` instead of `for L in $LABELS` — the old
# form word-split label names containing whitespace into bogus tokens.
LABELS=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/issues/${PR_NUMBER}/labels" | jq -r '.[].name')
TIER=""
while IFS= read -r L; do
  case "$L" in
    tier:low|tier:medium|tier:high)
      if [ -n "$TIER" ]; then
        echo "::error::Multiple tier labels: $TIER + $L. Apply exactly one."
        exit 1
      fi
      TIER="$L"
      ;;
  esac
done <<< "$LABELS"
if [ -z "$TIER" ]; then
  echo "::error::PR has no tier:low|tier:medium|tier:high label. Apply one before merge."
  exit 1
fi
debug "tier=$TIER"

# 2. Tier → eligible teams (mirror of dev-sop §SOP-6).
case "$TIER" in
  tier:low)    ELIGIBLE="engineers managers ceo" ;;
  tier:medium) ELIGIBLE="managers ceo" ;;
  tier:high)   ELIGIBLE="ceo" ;;
esac
debug "eligible_teams=$ELIGIBLE"

# Resolve team-name → team-id once. /orgs/{org}/teams/{slug}/... endpoints
# don't exist on Gitea 1.22; we have to use /teams/{id}.
ORG_TEAMS_FILE=$(mktemp)
trap 'rm -f "$ORG_TEAMS_FILE"' EXIT
HTTP_CODE=$(curl -sS -o "$ORG_TEAMS_FILE" -w '%{http_code}' -H "$AUTH" \
  "${API}/orgs/${OWNER}/teams")
debug "teams-list HTTP=$HTTP_CODE size=$(wc -c <"$ORG_TEAMS_FILE")"
if [ "${SOP_DEBUG:-}" = "1" ]; then
  echo " [debug] teams-list body (first 300 chars):" >&2
  head -c 300 "$ORG_TEAMS_FILE" >&2; echo >&2
fi
if [ "$HTTP_CODE" != "200" ]; then
  echo "::error::GET /orgs/${OWNER}/teams returned HTTP $HTTP_CODE — token likely lacks read:org scope. Add a SOP_TIER_CHECK_TOKEN secret with read:organization scope at the org level."
  exit 1
fi
declare -A TEAM_ID
# Team names are single tokens by convention, so plain word-splitting of
# $ELIGIBLE is intentional here.
for T in $ELIGIBLE; do
  # FIX: jq first() replaces `... | head -1`, which could SIGPIPE jq
  # under `set -o pipefail`.
  ID=$(jq -r --arg t "$T" 'first(.[] | select(.name==$t) | .id) // empty' <"$ORG_TEAMS_FILE")
  if [ -z "$ID" ] || [ "$ID" = "null" ]; then
    VISIBLE=$(jq -r '.[]?.name? // empty' <"$ORG_TEAMS_FILE" 2>/dev/null | tr '\n' ' ')
    echo "::error::Team \"$T\" not found in org $OWNER. Teams visible: $VISIBLE"
    exit 1
  fi
  TEAM_ID["$T"]="$ID"
  debug "team-id: $T → $ID"
done

# 3. Read approving reviewers
REVIEWS=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/pulls/${PR_NUMBER}/reviews")
APPROVERS=$(echo "$REVIEWS" | jq -r '[.[] | select(.state=="APPROVED") | .user.login] | unique | .[]')
if [ -z "$APPROVERS" ]; then
  echo "::error::No approving reviews. Tier $TIER requires approval from {$ELIGIBLE} (non-author)."
  exit 1
fi
debug "approvers: $(echo "$APPROVERS" | tr '\n' ' ')"

# 4. For each approver: check non-author + team membership (by id).
#    Logins cannot contain whitespace, so word-splitting $APPROVERS is safe.
OK=""
for U in $APPROVERS; do
  if [ "$U" = "$PR_AUTHOR" ]; then
    debug "skip self-review by $U"
    continue
  fi
  for T in $ELIGIBLE; do
    ID="${TEAM_ID["$T"]}"
    CODE=$(curl -sS -o /dev/null -w '%{http_code}' -H "$AUTH" \
      "${API}/teams/${ID}/members/${U}")
    debug "probe: $U in team $T (id=$ID) → HTTP $CODE"
    if [ "$CODE" = "200" ] || [ "$CODE" = "204" ]; then
      echo "::notice::approver $U is in team $T (eligible for $TIER)"
      OK="yes"
      break
    fi
  done
  [ -n "$OK" ] && break
done

if [ -z "$OK" ]; then
  echo "::error::Tier $TIER requires approval from a non-author member of {$ELIGIBLE}. Got approvers: $APPROVERS — none of them satisfied team membership. Set SOP_DEBUG=1 to see per-probe HTTP codes."
  exit 1
fi
echo "::notice::sop-tier-check passed: $TIER, approver in {$ELIGIBLE}"
|
||||
@ -1,58 +0,0 @@
|
||||
# audit-force-merge — emit `incident.force_merge` to runner stdout when
# a PR is merged with required-status-checks not green. Vector picks
# the JSON line off docker_logs and ships to Loki on
# molecule-canonical-obs (per `reference_obs_stack_phase1`); query as:
#
#   {host="operator"} |= "event_type" |= "incident.force_merge" | json
#
# Closes the §SOP-6 audit gap (the doc says force-merges write to
# `structure_events`, but that table lives in the platform DB, not
# Gitea-side; Loki is the practical equivalent for Gitea Actions
# events). When the credential / observability stack converges later,
# this can sync into structure_events from Loki via a backfill job —
# the structured JSON shape is forward-compatible.
#
# Logic in `.gitea/scripts/audit-force-merge.sh` per the same script-
# extract pattern as sop-tier-check.

name: audit-force-merge

# pull_request_target loads from the base branch — same security model
# as sop-tier-check. Without this, an attacker could rewrite the
# workflow on a PR and skip the audit emission for their own
# force-merge. See `.gitea/workflows/sop-tier-check.yml` for the full
# rationale.
on:
  pull_request_target:
    types: [closed]

jobs:
  audit:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
    # Skip when PR is closed without merge — saves a runner.
    if: github.event.pull_request.merged == true
    steps:
      - name: Check out base branch (for the script)
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          # base.sha keeps the pull_request_target trust boundary: the
          # script we run was never writable from the PR branch.
          ref: ${{ github.event.pull_request.base.sha }}
      - name: Detect force-merge + emit audit event
        env:
          # Same org-level secret the sop-tier-check workflow uses.
          GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}
          GITEA_HOST: git.moleculesai.app
          REPO: ${{ github.repository }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          # Required-status-check contexts to evaluate at merge time.
          # Newline-separated. Mirror this against branch protection
          # (settings → branches → protected branch → required checks).
          # Declared here rather than fetched from /branch_protections
          # because that endpoint requires admin write — sop-tier-bot is
          # read-only by design (least-privilege).
          REQUIRED_CHECKS: |
            sop-tier-check / tier-check (pull_request)
            Secret scan / Scan diff for credential-shaped strings (pull_request)
        run: bash .gitea/scripts/audit-force-merge.sh
|
||||
@ -1,191 +0,0 @@
|
||||
name: Secret scan

# Hard CI gate. Refuses any PR / push whose diff additions contain a
# recognisable credential. Defense-in-depth for the #2090-class incident
# (2026-04-24): GitHub's hosted Copilot Coding Agent leaked a ghs_*
# installation token into tenant-proxy/package.json via `npm init`
# slurping the URL from a token-embedded origin remote. We can't fix
# upstream's clone hygiene, so we gate here.
#
# Same regex set as the runtime's bundled pre-commit hook
# (molecule-ai-workspace-runtime: molecule_runtime/scripts/pre-commit-checks.sh).
# Keep the two sides aligned when adding patterns.
#
# Ported from .github/workflows/secret-scan.yml so the gate actually
# fires on Gitea Actions. Differences from the GitHub version:
#   - drops `merge_group` event (Gitea has no merge queue)
#   - drops `workflow_call` (no cross-repo reusable invocation on Gitea)
#   - SELF path updated to .gitea/workflows/secret-scan.yml
# The job name + step name are identical to the GitHub workflow so the
# status-check context (`Secret scan / Scan diff for credential-shaped
# strings (pull_request)`) matches branch protection on molecule-core/main.

on:
  pull_request:
    types: [opened, synchronize, reopened]
  push:
    branches: [main, staging]

jobs:
  scan:
    name: Scan diff for credential-shaped strings
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          fetch-depth: 2 # need previous commit to diff against on push events

      # For pull_request events the diff base may be many commits behind
      # HEAD and absent from the shallow clone. Fetch it explicitly.
      - name: Fetch PR base SHA (pull_request events only)
        if: github.event_name == 'pull_request'
        run: git fetch --depth=1 origin ${{ github.event.pull_request.base.sha }}

      - name: Refuse if credential-shaped strings appear in diff additions
        env:
          # Plumb event-specific SHAs through env so the script doesn't
          # need conditional `${{ ... }}` interpolation per event type.
          # github.event.before/after only exist on push events;
          # pull_request has pull_request.base.sha / pull_request.head.sha.
          PR_BASE_SHA: ${{ github.event.pull_request.base.sha }}
          PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
          PUSH_BEFORE: ${{ github.event.before }}
          PUSH_AFTER: ${{ github.event.after }}
        run: |
          # Pattern set covers GitHub family (the actual #2090 vector),
          # Anthropic / OpenAI / Slack / AWS. Anchored on prefixes with low
          # false-positive rates against agent-generated content. Mirror of
          # molecule-ai-workspace-runtime/molecule_runtime/scripts/pre-commit-checks.sh
          # — keep aligned.
          SECRET_PATTERNS=(
            'ghp_[A-Za-z0-9]{36,}'          # GitHub PAT (classic)
            'ghs_[A-Za-z0-9]{36,}'          # GitHub App installation token
            'gho_[A-Za-z0-9]{36,}'          # GitHub OAuth user-to-server
            'ghu_[A-Za-z0-9]{36,}'          # GitHub OAuth user
            'ghr_[A-Za-z0-9]{36,}'          # GitHub OAuth refresh
            'github_pat_[A-Za-z0-9_]{82,}'  # GitHub fine-grained PAT
            'sk-ant-[A-Za-z0-9_-]{40,}'     # Anthropic API key
            'sk-proj-[A-Za-z0-9_-]{40,}'    # OpenAI project key
            'sk-svcacct-[A-Za-z0-9_-]{40,}' # OpenAI service-account key
            'sk-cp-[A-Za-z0-9_-]{60,}'      # MiniMax API key (F1088 vector — caught only after the fact)
            'xox[baprs]-[A-Za-z0-9-]{20,}'  # Slack tokens
            'AKIA[0-9A-Z]{16}'              # AWS access key ID
            'ASIA[0-9A-Z]{16}'              # AWS STS temp access key ID
          )

          # Determine the diff base. Each event type stores its SHAs in
          # a different place — see the env block above.
          case "${{ github.event_name }}" in
            pull_request)
              BASE="$PR_BASE_SHA"
              HEAD="$PR_HEAD_SHA"
              ;;
            *)
              BASE="$PUSH_BEFORE"
              HEAD="$PUSH_AFTER"
              ;;
          esac

          # On push events with shallow clones, BASE may be present in
          # the event payload but absent from the local object DB
          # (fetch-depth=2 doesn't always reach the previous commit
          # across true merges). Try fetching it on demand. If the
          # fetch fails — e.g. the SHA was force-overwritten — we fall
          # through to the empty-BASE branch below, which scans the
          # entire tree as if every file were new. Correct, just slow.
          if [ -n "$BASE" ] && ! echo "$BASE" | grep -qE '^0+$'; then
            if ! git cat-file -e "$BASE" 2>/dev/null; then
              git fetch --depth=1 origin "$BASE" 2>/dev/null || true
            fi
          fi

          # Files added or modified in this change.
          if [ -z "$BASE" ] || echo "$BASE" | grep -qE '^0+$' || ! git cat-file -e "$BASE" 2>/dev/null; then
            # New branch / no previous SHA / BASE unreachable — check the
            # entire tree as added content. Slower, but correct on first
            # push.
            CHANGED=$(git ls-tree -r --name-only HEAD)
            DIFF_RANGE=""
          else
            CHANGED=$(git diff --name-only --diff-filter=AM "$BASE" "$HEAD")
            DIFF_RANGE="$BASE $HEAD"
          fi

          if [ -z "$CHANGED" ]; then
            echo "No changed files to inspect."
            exit 0
          fi

          # Self-exclude: this workflow file legitimately contains the
          # pattern strings as regex literals. Without an exclude it would
          # block its own merge. Both the .github/ original and this
          # .gitea/ port are excluded so a sync between them stays clean.
          SELF_GITHUB=".github/workflows/secret-scan.yml"
          SELF_GITEA=".gitea/workflows/secret-scan.yml"

          OFFENDING=""
          # `while IFS= read -r` (not `for f in $CHANGED`) so filenames
          # containing whitespace don't word-split silently — a path
          # with a space would otherwise produce two iterations on
          # tokens that aren't real filenames, breaking the
          # self-exclude + diff lookup.
          while IFS= read -r f; do
            [ -z "$f" ] && continue
            [ "$f" = "$SELF_GITHUB" ] && continue
            [ "$f" = "$SELF_GITEA" ] && continue
            if [ -n "$DIFF_RANGE" ]; then
              # FIX: take every '+' line and strip only the '+++ b/file'
              # header. The previous filter `grep -E '^\+[^+]'` silently
              # dropped any added line whose CONTENT starts with '+'
              # (rendered as '++…' in a unified diff), so a credential on
              # such a line bypassed the gate.
              ADDED=$(git diff --no-color --unified=0 "$BASE" "$HEAD" -- "$f" 2>/dev/null | grep -E '^\+' | grep -vE '^\+\+\+' || true)
            else
              # No diff range (new branch first push) — scan the full file
              # contents as if every line were new.
              ADDED=$(cat "$f" 2>/dev/null || true)
            fi
            [ -z "$ADDED" ] && continue
            for pattern in "${SECRET_PATTERNS[@]}"; do
              if echo "$ADDED" | grep -qE "$pattern"; then
                OFFENDING="${OFFENDING}${f} (matched: ${pattern})\n"
                break
              fi
            done
          done <<< "$CHANGED"

          if [ -n "$OFFENDING" ]; then
            echo "::error::Credential-shaped strings detected in diff additions:"
            # `printf '%b' "$OFFENDING"` interprets backslash escapes
            # (the literal `\n` we appended above becomes a newline)
            # WITHOUT treating OFFENDING as a format string. Plain
            # `printf "$OFFENDING"` is a format-string sink: a filename
            # containing `%` would be interpreted as a conversion
            # specifier, corrupting the error message (or printing
            # `%(missing)` artifacts).
            printf '%b' "$OFFENDING"
            echo ""
            echo "The actual matched values are NOT echoed here, deliberately —"
            echo "round-tripping a leaked credential into CI logs widens the blast"
            echo "radius (logs are searchable + retained)."
            echo ""
            echo "Recovery:"
            echo "  1. Remove the secret from the file. Replace with an env var"
            echo "     reference (e.g. \${{ secrets.GITHUB_TOKEN }} in workflows,"
            echo "     process.env.X in code)."
            echo "  2. If the credential was already pushed (this PR's commit"
            echo "     history reaches a public ref), treat it as compromised —"
            echo "     ROTATE it immediately, do not just remove it. The token"
            echo "     remains valid in git history forever and may be in any"
            echo "     log/cache that consumed this branch."
            echo "  3. Force-push the cleaned commit (or stack a revert) and"
            echo "     re-run CI."
            echo ""
            echo "If the match is a false positive (test fixture, docs example,"
            echo "or this workflow's own regex literals): use a clearly-fake"
            echo "placeholder like ghs_EXAMPLE_DO_NOT_USE that doesn't satisfy"
            echo "the length suffix, OR add the file path to the SELF exclude"
            echo "list in this workflow with a short reason."
            echo ""
            echo "Mirror of the regex set lives in the runtime's bundled"
            echo "pre-commit hook (molecule-ai-workspace-runtime:"
            echo "molecule_runtime/scripts/pre-commit-checks.sh) — keep aligned."
            exit 1
          fi

          echo "✓ No credential-shaped strings in this change."
|
||||
@ -1,81 +0,0 @@
|
||||
# sop-tier-check — canonical Gitea Actions workflow for §SOP-6 enforcement.
#
# Logic lives in `.gitea/scripts/sop-tier-check.sh` (extracted 2026-05-09
# from the previous inline-bash version). The script is the single source
# of truth; this workflow file just sets env + invokes it.
#
# Copy BOTH files (`.gitea/workflows/sop-tier-check.yml` +
# `.gitea/scripts/sop-tier-check.sh`) into any repo that wants the
# §SOP-6 PR gate enforced. Pair with branch protection on the protected
# branch:
#   required_status_checks: ["sop-tier-check / tier-check (pull_request)"]
#   required_approving_reviews: 1
#   approving_review_teams: ["ceo", "managers", "engineers"]
#
# Tier → eligible-team mapping (mirror of dev-sop §SOP-6):
#   tier:low    → engineers, managers, ceo
#   tier:medium → managers, ceo
#   tier:high   → ceo
#
# Force-merge: Owners-team override remains available out-of-band via
# the Gitea merge API; force-merge writes `incident.force_merge` to
# `structure_events` per §Persistent structured logging gate (Phase 3).
#
# Set `SOP_DEBUG: '1'` in the env block to enable per-API-call diagnostic
# lines — useful when diagnosing token-scope or team-id-resolution
# issues. Default off.

name: sop-tier-check

# SECURITY: triggers MUST use `pull_request_target`, not `pull_request`.
# `pull_request_target` loads the workflow definition from the BASE
# branch (i.e. `main`), not the PR's HEAD. With `pull_request`, anyone
# with write access to a feature branch could rewrite this file in
# their PR to dump SOP_TIER_CHECK_TOKEN (org-read scope) to logs and
# exfiltrate it. Verified 2026-05-09 against Gitea 1.22.6 —
# `pull_request_target` (added in Gitea 1.21 via go-gitea/gitea#25229)
# is the documented mitigation.
#
# This workflow does NOT call `actions/checkout` of PR HEAD code, so no
# untrusted code is ever executed in the runner — we only HTTP-call the
# Gitea API. If a future change adds a checkout step, it MUST pin to
# `${{ github.event.pull_request.base.sha }}` (NOT `head.sha`) to keep
# the trust boundary.
on:
  pull_request_target:
    types: [opened, edited, synchronize, reopened, labeled, unlabeled]
  pull_request_review:
    types: [submitted, dismissed, edited]

jobs:
  tier-check:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
    steps:
      - name: Check out base branch (for the script)
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          # Pin to base.sha — pull_request_target's protection only
          # works if we never check out PR HEAD. Same SHA the workflow
          # itself was loaded from.
          ref: ${{ github.event.pull_request.base.sha }}
      - name: Verify tier label + reviewer team membership
        env:
          # SOP_TIER_CHECK_TOKEN is the org-level secret for the
          # sop-tier-bot PAT (read:organization,read:user,read:issue,
          # read:repository). Stored at the org level
          # (/api/v1/orgs/molecule-ai/actions/secrets) so per-repo
          # configuration is unnecessary — every repo in the org
          # picks it up automatically.
          # Falls back to GITHUB_TOKEN with a clear error if missing.
          GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}
          GITEA_HOST: git.moleculesai.app
          REPO: ${{ github.repository }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
          # Set to '1' for diagnostic per-API-call output. Off by default
          # so production logs aren't noisy.
          SOP_DEBUG: '0'
        run: bash .gitea/scripts/sop-tier-check.sh
|
||||
82
.github/workflows/canary-staging.yml
vendored
82
.github/workflows/canary-staging.yml
vendored
@ -20,19 +20,6 @@ on:
|
||||
# a few minutes under load — that's fine for a canary.
|
||||
- cron: '*/30 * * * *'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
keep_on_failure:
|
||||
description: >-
|
||||
Skip teardown when the canary fails (debugging only). The
|
||||
tenant org + EC2 + CF tunnel + DNS stay alive so an operator
|
||||
can SSM into the workspace EC2 and capture docker logs of the
|
||||
failing claude-code container. REMEMBER to manually delete
|
||||
via DELETE /cp/admin/tenants/<slug> when done so the org
|
||||
doesn't accumulate cost. Only honored on workflow_dispatch;
|
||||
cron runs always tear down (we don't want unattended cron
|
||||
to leak resources).
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
# Serialise with the full-SaaS workflow so they don't contend for the
|
||||
# same org-create quota on staging. Different group key from
|
||||
@ -93,14 +80,6 @@ jobs:
|
||||
# is "Token Plan only" but cheap-per-token and fast.
|
||||
E2E_MODEL_SLUG: MiniMax-M2.7-highspeed
|
||||
E2E_RUN_ID: "canary-${{ github.run_id }}"
|
||||
# Debug-only: when an operator dispatches with keep_on_failure=true,
|
||||
# the canary script's E2E_KEEP_ORG=1 path skips teardown so the
|
||||
# tenant org + EC2 stay alive for SSM-based log capture. Cron runs
|
||||
# never set this (the input only exists on workflow_dispatch) so
|
||||
# unattended cron always tears down. See molecule-core#129
|
||||
# failure mode #1 — capturing the actual exception requires
|
||||
# docker logs from the live container.
|
||||
E2E_KEEP_ORG: ${{ github.event.inputs.keep_on_failure == 'true' && '1' || '0' }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
@ -158,28 +137,27 @@ jobs:
|
||||
id: canary
|
||||
run: bash tests/e2e/test_staging_full_saas.sh
|
||||
|
||||
# Alerting: open a sticky issue on the FIRST failure; comment on
|
||||
# subsequent failures; auto-close on next green. Comment-on-existing
|
||||
# de-duplicates so a single open issue accumulates the streak —
|
||||
# ops sees one issue with N comments rather than N issues.
|
||||
# Alerting: open an issue only after THREE consecutive failures so
|
||||
# transient flakes (Cloudflare DNS hiccup, AWS API blip) don't spam
|
||||
# the issue list. If an issue is already open, we still comment on
|
||||
# every failure so ops sees the streak. Auto-close on next green.
|
||||
#
|
||||
# Why no consecutive-failures threshold (e.g., wait 3 runs before
|
||||
# filing): the prior threshold check used
|
||||
# `github.rest.actions.listWorkflowRuns()` which Gitea 1.22.6 does
|
||||
# not expose (returns 404). On Gitea Actions the threshold call
|
||||
# ALWAYS failed, breaking the entire alerting step and going days
|
||||
# silent on real regressions (38h+ chronic red on 2026-05-07/08
|
||||
# before this fix; tracked in molecule-core#129). Filing on first
|
||||
# failure is also better UX — we want to know about the first red,
|
||||
# not wait 90 min for it to "count." Real flakes get one issue +
|
||||
# a quick close-on-green; persistent reds accumulate comments.
|
||||
# Threshold rationale: canary fires every 30 min, so 3 failures =
|
||||
# ~90 min of consecutive red — well past any single-run flake but
|
||||
# still tight enough that a real outage gets surfaced before the
|
||||
# next deploy window.
|
||||
- name: Open issue on failure
|
||||
if: failure()
|
||||
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
|
||||
env:
|
||||
# Inject the workflow path explicitly — context.workflow is
|
||||
# the *name*, not the file path the actions API needs.
|
||||
WORKFLOW_PATH: '.github/workflows/canary-staging.yml'
|
||||
CONSECUTIVE_THRESHOLD: '3'
|
||||
with:
|
||||
script: |
|
||||
const title = '🔴 Canary failing: staging SaaS smoke';
|
||||
const runURL = `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
|
||||
const runURL = `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
|
||||
|
||||
// Find an existing open canary issue (stable title match).
|
||||
// If one exists, this isn't a "first failure" — comment and exit.
|
||||
@ -199,12 +177,32 @@ jobs:
|
||||
return;
|
||||
}
|
||||
|
||||
// No open issue yet — file one on this first failure. The
|
||||
// comment-on-existing branch above means subsequent failures
|
||||
// accumulate as comments on this same issue, so we don't
|
||||
// spam new issues per run.
|
||||
// No open issue yet — check the last N-1 runs' conclusions.
|
||||
// We open the issue only if the last (THRESHOLD-1) runs ALSO
|
||||
// failed (so this is the 3rd consecutive red).
|
||||
const threshold = parseInt(process.env.CONSECUTIVE_THRESHOLD, 10);
|
||||
const { data: runs } = await github.rest.actions.listWorkflowRuns({
|
||||
owner: context.repo.owner, repo: context.repo.repo,
|
||||
workflow_id: process.env.WORKFLOW_PATH,
|
||||
status: 'completed',
|
||||
per_page: threshold,
|
||||
// Skip the current in-progress run; it isn't 'completed' yet.
|
||||
});
|
||||
// listWorkflowRuns returns recent first. We need (threshold-1)
|
||||
// prior failures (current run is the threshold-th).
|
||||
const priorFailures = (runs.workflow_runs || [])
|
||||
.slice(0, threshold - 1)
|
||||
.filter(r => r.id !== context.runId)
|
||||
.filter(r => r.conclusion === 'failure')
|
||||
.length;
|
||||
if (priorFailures < threshold - 1) {
|
||||
core.info(`Below threshold: ${priorFailures + 1}/${threshold} consecutive failures — not filing yet`);
|
||||
return;
|
||||
}
|
||||
|
||||
const body =
|
||||
`Canary run failed at ${new Date().toISOString()}.\n\n` +
|
||||
`Canary run failed at ${new Date().toISOString()}, ` +
|
||||
`${threshold} consecutive runs red.\n\n` +
|
||||
`Run: ${runURL}\n\n` +
|
||||
`This issue auto-closes on the next green canary run. ` +
|
||||
`Consecutive failures add a comment here rather than a new issue.`;
|
||||
@ -213,7 +211,7 @@ jobs:
|
||||
title, body,
|
||||
labels: ['canary-staging', 'bug'],
|
||||
});
|
||||
core.info('Opened canary failure issue (first red)');
|
||||
core.info(`Opened canary failure issue (${threshold} consecutive reds)`);
|
||||
|
||||
- name: Auto-close canary issue on success
|
||||
if: success()
|
||||
|
||||
8
.github/workflows/e2e-api.yml
vendored
8
.github/workflows/e2e-api.yml
vendored
@ -51,7 +51,7 @@ name: E2E API Smoke Test
|
||||
# * Pre-pull `alpine:latest` so the platform-server's provisioner
|
||||
# (`internal/handlers/container_files.go`) can stand up its
|
||||
# ephemeral token-write helper without a daemon.io round-trip.
|
||||
# * Create `molecule-core-net` bridge network if missing so the
|
||||
# * Create `molecule-monorepo-net` bridge network if missing so the
|
||||
# provisioner's container.HostConfig {NetworkMode: ...} attach
|
||||
# succeeds.
|
||||
# Item #1 (timeouts) — evidence on recent runs (77/3191, ae/4270, 0e/
|
||||
@ -163,12 +163,12 @@ jobs:
|
||||
# when the image is already present.
|
||||
docker pull alpine:latest >/dev/null
|
||||
# Provisioner attaches workspace containers to
|
||||
# molecule-core-net (workspace-server/internal/provisioner/
|
||||
# molecule-monorepo-net (workspace-server/internal/provisioner/
|
||||
# provisioner.go::DefaultNetwork). The bridge already exists on
|
||||
# the operator host's docker daemon — `network create` is
|
||||
# idempotent via `|| true`.
|
||||
docker network create molecule-core-net >/dev/null 2>&1 || true
|
||||
echo "alpine:latest pre-pulled; molecule-core-net ensured."
|
||||
docker network create molecule-monorepo-net >/dev/null 2>&1 || true
|
||||
echo "alpine:latest pre-pulled; molecule-monorepo-net ensured."
|
||||
- name: Start Postgres (docker)
|
||||
if: needs.detect-changes.outputs.api == 'true'
|
||||
run: |
|
||||
|
||||
@ -34,7 +34,7 @@ name: Handlers Postgres Integration
|
||||
# So we sidestep `services:` entirely. The job container still uses
|
||||
# host-net (inherited from runner config; required for cache server
|
||||
# discovery on the bridge IP 172.18.0.17:42631). We launch a sibling
|
||||
# postgres on the existing `molecule-core-net` bridge with a
|
||||
# postgres on the existing `molecule-monorepo-net` bridge with a
|
||||
# UNIQUE name per run — `pg-handlers-${RUN_ID}-${RUN_ATTEMPT}` — and
|
||||
# read its bridge IP via `docker inspect`. A host-net job container
|
||||
# can reach a bridge-net container directly via the bridge IP (verified
|
||||
@ -44,7 +44,7 @@ name: Handlers Postgres Integration
|
||||
# + No host-port collision; N parallel runs share the bridge cleanly
|
||||
# + `if: always()` cleanup runs even on test-step failure
|
||||
# - One more step in the workflow (+~3 lines)
|
||||
# - Requires `molecule-core-net` to exist on the operator host
|
||||
# - Requires `molecule-monorepo-net` to exist on the operator host
|
||||
# (it does; declared in docker-compose.yml + docker-compose.infra.yml)
|
||||
#
|
||||
# Class B Hongming-owned CICD red sweep, 2026-05-08.
|
||||
@ -96,7 +96,7 @@ jobs:
|
||||
PG_NAME: pg-handlers-${{ github.run_id }}-${{ github.run_attempt }}
|
||||
# Bridge network already exists on the operator host (declared
|
||||
# in docker-compose.yml + docker-compose.infra.yml).
|
||||
PG_NETWORK: molecule-core-net
|
||||
PG_NETWORK: molecule-monorepo-net
|
||||
defaults:
|
||||
run:
|
||||
working-directory: workspace-server
|
||||
|
||||
11
.github/workflows/harness-replays.yml
vendored
11
.github/workflows/harness-replays.yml
vendored
@ -119,17 +119,6 @@ jobs:
|
||||
# symptom, different root cause: staging still has the in-image
|
||||
# clone path, hits the auth error directly).
|
||||
#
|
||||
# 2026-05-08 sub-finding (#192): the clone step ALSO fails when
|
||||
# any referenced workspace-template repo is private and the
|
||||
# AUTO_SYNC_TOKEN bearer (devops-engineer persona) lacks read
|
||||
# access. Root cause: 5 of 9 workspace-template repos
|
||||
# (openclaw, codex, crewai, deepagents, gemini-cli) had been
|
||||
# marked private with no team grant. Resolution: flipped them
|
||||
# to public per `feedback_oss_first_repo_visibility_default`
|
||||
# (the OSS surface should be public). Layer-3 (customer-private +
|
||||
# marketplace third-party repos) tracked separately in
|
||||
# internal#102.
|
||||
#
|
||||
# Token shape matches publish-workspace-server-image.yml: AUTO_SYNC_TOKEN
|
||||
# is the devops-engineer persona PAT, NOT the founder PAT (per
|
||||
# `feedback_per_agent_gitea_identity_default`). clone-manifest.sh
|
||||
|
||||
@ -284,7 +284,7 @@ cp .env.example .env
|
||||
./infra/scripts/setup.sh
|
||||
# Boots Postgres (:5432), Redis (:6379), Langfuse (:3001),
|
||||
# and Temporal (:7233 gRPC, :8233 UI) on the shared
|
||||
# `molecule-core-net` Docker network. Temporal runs with
|
||||
# `molecule-monorepo-net` Docker network. Temporal runs with
|
||||
# no auth on localhost — dev-only; production must gate it.
|
||||
#
|
||||
# Also populates the template/plugin registry by cloning every repo
|
||||
|
||||
@ -283,7 +283,7 @@ cp .env.example .env
|
||||
./infra/scripts/setup.sh
|
||||
# 启动 Postgres (:5432)、Redis (:6379)、Langfuse (:3001)
|
||||
# 以及 Temporal (:7233 gRPC, :8233 UI),全部挂在共享的
|
||||
# `molecule-core-net` Docker 网络上。Temporal 默认无鉴权,
|
||||
# `molecule-monorepo-net` Docker 网络上。Temporal 默认无鉴权,
|
||||
# 仅用于本地开发;生产环境必须加 mTLS / API Key。
|
||||
#
|
||||
# 同时会根据 manifest.json 拉取所有模板/插件仓库到
|
||||
|
||||
@ -1,10 +0,0 @@
|
||||
# Excluded from `docker build` context. Without this, the COPY . . step in
|
||||
# canvas/Dockerfile clobbers the freshly-installed node_modules with the
|
||||
# host's (potentially broken / wrong-arch) copy — the @tailwindcss/oxide
|
||||
# native binary disagreed and broke `next build`.
|
||||
node_modules
|
||||
.next
|
||||
.git
|
||||
*.log
|
||||
.env*
|
||||
!.env.example
|
||||
@ -1,11 +1,7 @@
|
||||
FROM node:22-alpine AS builder
|
||||
WORKDIR /app
|
||||
COPY package.json package-lock.json* ./
|
||||
# `npm ci` (not `install`) for lockfile-exact reproducibility.
|
||||
# `--include=optional` ensures the platform-specific @tailwindcss/oxide
|
||||
# native binary lands — without it, postcss fails with "Cannot read
|
||||
# properties of undefined (reading 'All')" at build time.
|
||||
RUN npm ci --include=optional
|
||||
RUN npm install
|
||||
COPY . .
|
||||
ARG NEXT_PUBLIC_PLATFORM_URL=http://localhost:8080
|
||||
ARG NEXT_PUBLIC_WS_URL=ws://localhost:8080/ws
|
||||
|
||||
@ -17,24 +17,6 @@ import { dirname, join } from "node:path";
|
||||
// update one heuristic. Production is unaffected: `output: "standalone"`
|
||||
// bakes resolved env into the build, and the marker file isn't shipped.
|
||||
loadMonorepoEnv();
|
||||
// Boot-time matched-pair guard for ADMIN_TOKEN / NEXT_PUBLIC_ADMIN_TOKEN.
|
||||
// When ADMIN_TOKEN is set on the workspace-server (server-side bearer
|
||||
// gate, wsauth_middleware.go ~L245), the canvas MUST send the matching
|
||||
// NEXT_PUBLIC_ADMIN_TOKEN as `Authorization: Bearer ...` on every API
|
||||
// call. If only one is set, every workspace API call 401s silently —
|
||||
// the canvas hydrates with empty data and the user sees a broken page
|
||||
// with no console hint about the auth-config mismatch.
|
||||
//
|
||||
// Pre-fix the matched-pair contract was descriptive only (a comment in
|
||||
// .env): future devs/agents could re-misconfigure with one of the two
|
||||
// unset and silently 401. Closes the post-PR-#174 self-review gap.
|
||||
//
|
||||
// Warn-only (not exit) — production canvas Docker images bake these
|
||||
// vars into the build at image-build time, and a missed pair there
|
||||
// would still emit the warning at runtime via the standalone server's
|
||||
// startup. Killing the process on misconfiguration would turn a
|
||||
// recoverable auth issue into a hard crashloop.
|
||||
checkAdminTokenPair();
|
||||
|
||||
const nextConfig: NextConfig = {
|
||||
output: "standalone",
|
||||
@ -75,43 +57,6 @@ function loadMonorepoEnv() {
|
||||
);
|
||||
}
|
||||
|
||||
// Boot-time matched-pair guard. Runs after .env has been loaded so the
|
||||
// check sees the post-load state. The two env vars must be set or
|
||||
// unset together; one-without-the-other is the silent-401 footgun.
|
||||
//
|
||||
// Treats empty string ("") as unset. An explicitly-empty `KEY=` in
|
||||
// .env counts as set-to-empty in `process.env`, but for auth purposes
|
||||
// an empty bearer token is equivalent to no token — so both
|
||||
// `ADMIN_TOKEN=` and an unset ADMIN_TOKEN are equivalent relative to
|
||||
// the matched-pair invariant.
|
||||
//
|
||||
// Returns void; side effect is the console.error warning. Kept as a
|
||||
// separate function (exported) so a future test can reset env, call
|
||||
// this, and assert on captured stderr.
|
||||
export function checkAdminTokenPair(): void {
|
||||
const serverSet = !!process.env.ADMIN_TOKEN;
|
||||
const clientSet = !!process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (serverSet === clientSet) return;
|
||||
// Distinct messages so the operator can tell which half is missing
|
||||
// — the fix is symmetric (set the other one) but the diagnostic
|
||||
// mentions which side is currently set so they don't have to grep.
|
||||
if (serverSet && !clientSet) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
"[next.config] ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not — " +
|
||||
"canvas will 401 against workspace-server because the bearer header " +
|
||||
"is never attached. Set both to the same value, or unset both.",
|
||||
);
|
||||
} else {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
"[next.config] NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not — " +
|
||||
"workspace-server will reject the bearer because no AdminAuth gate " +
|
||||
"is configured. Set both to the same value, or unset both.",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
function findMonorepoRoot(start: string): string | null {
|
||||
let dir = start;
|
||||
for (let i = 0; i < 6; i++) {
|
||||
|
||||
@ -9,7 +9,6 @@
|
||||
// AttachmentLightbox).
|
||||
|
||||
import { useState, useEffect, useRef } from "react";
|
||||
import { platformAuthHeaders } from "@/lib/api";
|
||||
import type { ChatAttachment } from "./types";
|
||||
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
|
||||
import { AttachmentChip } from "./AttachmentViews";
|
||||
@ -44,8 +43,13 @@ export function AttachmentAudio({ workspaceId, attachment, onDownload, tone }: P
|
||||
void (async () => {
|
||||
try {
|
||||
const href = resolveAttachmentHref(workspaceId, attachment.uri);
|
||||
const headers: Record<string, string> = {};
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const res = await fetch(href, {
|
||||
headers: platformAuthHeaders(),
|
||||
headers,
|
||||
credentials: "include",
|
||||
signal: AbortSignal.timeout(60_000),
|
||||
});
|
||||
@ -112,5 +116,9 @@ export function AttachmentAudio({ workspaceId, attachment, onDownload, tone }: P
|
||||
);
|
||||
}
|
||||
|
||||
// Local getTenantSlug() removed — auth-header construction now goes
|
||||
// through platformAuthHeaders() from @/lib/api (#178).
|
||||
function getTenantSlug(): string | null {
|
||||
if (typeof window === "undefined") return null;
|
||||
const host = window.location.hostname;
|
||||
const m = host.match(/^([^.]+)\.moleculesai\.app$/);
|
||||
return m ? m[1] : null;
|
||||
}
|
||||
|
||||
@ -35,7 +35,6 @@
|
||||
// downscale via canvas, but defer that to v2.
|
||||
|
||||
import { useState, useEffect, useRef } from "react";
|
||||
import { platformAuthHeaders } from "@/lib/api";
|
||||
import type { ChatAttachment } from "./types";
|
||||
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
|
||||
import { AttachmentLightbox } from "./AttachmentLightbox";
|
||||
@ -76,14 +75,22 @@ export function AttachmentImage({ workspaceId, attachment, onDownload, tone }: P
|
||||
}
|
||||
|
||||
// Platform-auth path: identical to downloadChatFile but we keep
|
||||
// the blob (don't trigger a Save-As). Auth headers come from the
|
||||
// shared `platformAuthHeaders()` helper — one source of truth for
|
||||
// every authenticated raw fetch in the canvas (#178).
|
||||
// the blob (don't trigger a Save-As). Use the same headers it does
|
||||
// by going through it indirectly — no, downloadChatFile triggers a
|
||||
// Save-As. Need a separate fetch.
|
||||
void (async () => {
|
||||
try {
|
||||
const href = resolveAttachmentHref(workspaceId, attachment.uri);
|
||||
const headers: Record<string, string> = {};
|
||||
// Read the same env var downloadChatFile reads — single source
|
||||
// of truth would be cleaner; refactor opportunity for PR-2 if
|
||||
// we add the same path to AttachmentVideo.
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const res = await fetch(href, {
|
||||
headers: platformAuthHeaders(),
|
||||
headers,
|
||||
credentials: "include",
|
||||
signal: AbortSignal.timeout(30_000),
|
||||
});
|
||||
@ -177,7 +184,15 @@ export function AttachmentImage({ workspaceId, attachment, onDownload, tone }: P
|
||||
);
|
||||
}
|
||||
|
||||
// Local getTenantSlug() removed — auth-header construction now goes
|
||||
// through platformAuthHeaders() from @/lib/api which uses the canonical
|
||||
// getTenantSlug() from @/lib/tenant. This eliminates the duplicate
|
||||
// hostname-regex + the duplicate bearer-token-attach pattern (#178).
|
||||
// Internal helper — duplicated from uploads.ts (it's not exported
|
||||
// there). Kept local so this component doesn't reach into private
|
||||
// surface; if AttachmentVideo / AttachmentPDF in PR-2/PR-3 also need
|
||||
// it, lift to an exported helper at that point (the third-caller
|
||||
// rule).
|
||||
function getTenantSlug(): string | null {
|
||||
if (typeof window === "undefined") return null;
|
||||
const host = window.location.hostname;
|
||||
// Tenant subdomain shape: <slug>.moleculesai.app
|
||||
const m = host.match(/^([^.]+)\.moleculesai\.app$/);
|
||||
return m ? m[1] : null;
|
||||
}
|
||||
|
||||
@ -33,7 +33,6 @@
|
||||
// timeout, swap to chip. Implemented as a 3-second watchdog.
|
||||
|
||||
import { useState, useEffect, useRef } from "react";
|
||||
import { platformAuthHeaders } from "@/lib/api";
|
||||
import type { ChatAttachment } from "./types";
|
||||
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
|
||||
import { AttachmentLightbox } from "./AttachmentLightbox";
|
||||
@ -70,8 +69,13 @@ export function AttachmentPDF({ workspaceId, attachment, onDownload, tone }: Pro
|
||||
void (async () => {
|
||||
try {
|
||||
const href = resolveAttachmentHref(workspaceId, attachment.uri);
|
||||
const headers: Record<string, string> = {};
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const res = await fetch(href, {
|
||||
headers: platformAuthHeaders(),
|
||||
headers,
|
||||
credentials: "include",
|
||||
signal: AbortSignal.timeout(60_000),
|
||||
});
|
||||
@ -185,5 +189,9 @@ function PdfGlyph() {
|
||||
);
|
||||
}
|
||||
|
||||
// Local getTenantSlug() removed — auth-header construction now goes
|
||||
// through platformAuthHeaders() from @/lib/api (#178).
|
||||
function getTenantSlug(): string | null {
|
||||
if (typeof window === "undefined") return null;
|
||||
const host = window.location.hostname;
|
||||
const m = host.match(/^([^.]+)\.moleculesai\.app$/);
|
||||
return m ? m[1] : null;
|
||||
}
|
||||
|
||||
@ -26,7 +26,6 @@
|
||||
// to download the full file.
|
||||
|
||||
import { useState, useEffect } from "react";
|
||||
import { platformAuthHeaders } from "@/lib/api";
|
||||
import type { ChatAttachment } from "./types";
|
||||
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
|
||||
import { AttachmentChip } from "./AttachmentViews";
|
||||
@ -58,13 +57,13 @@ export function AttachmentTextPreview({ workspaceId, attachment, onDownload, ton
|
||||
void (async () => {
|
||||
try {
|
||||
const href = resolveAttachmentHref(workspaceId, attachment.uri);
|
||||
// Only attach platform auth headers for in-platform URIs —
|
||||
// off-platform URLs (HTTP/HTTPS attachments) MUST NOT receive
|
||||
// our bearer token (it would leak the admin token to a third
|
||||
// party). The branch is preserved with the new shared helper.
|
||||
const headers: Record<string, string> = isPlatformAttachment(attachment.uri)
|
||||
? platformAuthHeaders()
|
||||
: {};
|
||||
const headers: Record<string, string> = {};
|
||||
if (isPlatformAttachment(attachment.uri)) {
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
}
|
||||
const res = await fetch(href, {
|
||||
headers,
|
||||
credentials: "include",
|
||||
@ -183,5 +182,9 @@ export function AttachmentTextPreview({ workspaceId, attachment, onDownload, ton
|
||||
);
|
||||
}
|
||||
|
||||
// Local getTenantSlug() removed — auth-header construction now goes
|
||||
// through platformAuthHeaders() from @/lib/api (#178).
|
||||
function getTenantSlug(): string | null {
|
||||
if (typeof window === "undefined") return null;
|
||||
const host = window.location.hostname;
|
||||
const m = host.match(/^([^.]+)\.moleculesai\.app$/);
|
||||
return m ? m[1] : null;
|
||||
}
|
||||
|
||||
@ -25,7 +25,6 @@
|
||||
// fetch via service worker. v2 if measured-needed.
|
||||
|
||||
import { useState, useEffect, useRef } from "react";
|
||||
import { platformAuthHeaders } from "@/lib/api";
|
||||
import type { ChatAttachment } from "./types";
|
||||
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
|
||||
import { AttachmentChip } from "./AttachmentViews";
|
||||
@ -62,8 +61,13 @@ export function AttachmentVideo({ workspaceId, attachment, onDownload, tone }: P
|
||||
void (async () => {
|
||||
try {
|
||||
const href = resolveAttachmentHref(workspaceId, attachment.uri);
|
||||
const headers: Record<string, string> = {};
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const res = await fetch(href, {
|
||||
headers: platformAuthHeaders(),
|
||||
headers,
|
||||
credentials: "include",
|
||||
// Videos are larger than images on average; give the request
|
||||
// more headroom. The server's per-request body cap (50MB) is
|
||||
@ -143,5 +147,11 @@ export function AttachmentVideo({ workspaceId, attachment, onDownload, tone }: P
|
||||
);
|
||||
}
|
||||
|
||||
// Local getTenantSlug() removed — auth-header construction now goes
|
||||
// through platformAuthHeaders() from @/lib/api (#178).
|
||||
// Internal helper — same shape as AttachmentImage's. Lifted to a
|
||||
// shared util in PR-2.5 if a third caller needs it (PDF, audio).
|
||||
function getTenantSlug(): string | null {
|
||||
if (typeof window === "undefined") return null;
|
||||
const host = window.location.hostname;
|
||||
const m = host.match(/^([^.]+)\.moleculesai\.app$/);
|
||||
return m ? m[1] : null;
|
||||
}
|
||||
|
||||
@ -64,54 +64,6 @@ describe("extractRequestText", () => {
|
||||
};
|
||||
expect(extractRequestText(body)).toBe("");
|
||||
});
|
||||
|
||||
// Regression: delegation.go stores request_body as {"task": "...", "delegation_id": "..."}.
|
||||
// extractRequestText was checking only the A2A params.message.parts path, so
|
||||
// outbound delegation messages were rendered as blank bubbles.
|
||||
// Fix: check body.task first (delegation format), then fall back to A2A.
|
||||
it("extracts text from body.task (delegation format)", () => {
|
||||
const body = {
|
||||
task: "Deploy the staging environment for this sprint's release",
|
||||
delegation_id: "delg_01jx8q4n3k",
|
||||
};
|
||||
expect(extractRequestText(body)).toBe(
|
||||
"Deploy the staging environment for this sprint's release"
|
||||
);
|
||||
});
|
||||
|
||||
it("prefers body.task over A2A params when both present", () => {
|
||||
const body = {
|
||||
task: "Delegation text wins",
|
||||
params: {
|
||||
message: {
|
||||
parts: [{ kind: "text", text: "A2A text" }],
|
||||
},
|
||||
},
|
||||
};
|
||||
// body.task is checked first; delegation wins for delegation activities.
|
||||
expect(extractRequestText(body)).toBe("Delegation text wins");
|
||||
});
|
||||
|
||||
it("falls back to A2A format when body.task is absent", () => {
|
||||
const body = {
|
||||
params: {
|
||||
message: {
|
||||
parts: [{ kind: "text", text: "A2A fallback" }],
|
||||
},
|
||||
},
|
||||
};
|
||||
expect(extractRequestText(body)).toBe("A2A fallback");
|
||||
});
|
||||
|
||||
it("returns empty string when body.task is empty string", () => {
|
||||
const body = { task: "" };
|
||||
expect(extractRequestText(body)).toBe("");
|
||||
});
|
||||
|
||||
it("returns empty string when body.task is not a string", () => {
|
||||
const body = { task: 42 };
|
||||
expect(extractRequestText(body)).toBe("");
|
||||
});
|
||||
});
|
||||
|
||||
describe("extractResponseText", () => {
|
||||
|
||||
@ -114,15 +114,9 @@ function basename(uri: string): string {
|
||||
return slash >= 0 ? cleaned.slice(slash + 1) : cleaned || "file";
|
||||
}
|
||||
|
||||
/** Extract user message text from an activity log request_body.
|
||||
*
|
||||
* Delegation activities from delegation.go store the task text directly
|
||||
* at `body.task` as a plain string: {"task": "...", "delegation_id": "..."}.
|
||||
* Check this first before falling back to the A2A JSON-RPC format
|
||||
* (`body.params.message.parts[].text`). */
|
||||
/** Extract user message text from an activity log request_body */
|
||||
export function extractRequestText(body: Record<string, unknown> | null): string {
|
||||
if (!body) return "";
|
||||
if (typeof body.task === "string" && body.task) return body.task;
|
||||
const params = body.params as Record<string, unknown> | undefined;
|
||||
const msg = params?.message as Record<string, unknown> | undefined;
|
||||
const parts = msg?.parts as Array<Record<string, unknown>> | undefined;
|
||||
|
||||
@ -1,16 +1,12 @@
|
||||
import { PLATFORM_URL, platformAuthHeaders } from "@/lib/api";
|
||||
import { PLATFORM_URL } from "@/lib/api";
|
||||
import { getTenantSlug } from "@/lib/tenant";
|
||||
import type { ChatAttachment } from "./types";
|
||||
|
||||
/** Chat attachments are intentionally uploaded via a direct fetch()
|
||||
* instead of the `api.post` helper — `api.post` JSON-stringifies the
|
||||
* body, which would 500 on a Blob. Auth headers (tenant slug, admin
|
||||
* token, credentials) come from `platformAuthHeaders()` — the same
|
||||
* helper `request()` uses, so a missing bearer surfaces as a single
|
||||
* fix site instead of N copies. We deliberately do NOT set
|
||||
* Content-Type so the browser writes the multipart boundary into the
|
||||
* header; setting it manually would yield a multipart body the server
|
||||
* can't parse. See lib/api.ts platformAuthHeaders() for the full
|
||||
* rationale on why this pair must stay matched. */
|
||||
* body, which would 500 on a Blob. Mirrors the header plumbing
|
||||
* (tenant slug, admin token, credentials) so SaaS + self-hosted
|
||||
* callers work the same way. */
|
||||
export async function uploadChatFiles(
|
||||
workspaceId: string,
|
||||
files: File[],
|
||||
@ -20,12 +16,18 @@ export async function uploadChatFiles(
|
||||
const form = new FormData();
|
||||
for (const f of files) form.append("files", f, f.name);
|
||||
|
||||
const headers: Record<string, string> = {};
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
|
||||
// Uploads legitimately take a while on cold cache (tar write +
|
||||
// docker cp into the container). 60s is comfortable for the 25MB/
|
||||
// 50MB caps the server enforces.
|
||||
const res = await fetch(`${PLATFORM_URL}/workspaces/${workspaceId}/chat/uploads`, {
|
||||
method: "POST",
|
||||
headers: platformAuthHeaders(),
|
||||
headers,
|
||||
body: form,
|
||||
credentials: "include",
|
||||
signal: AbortSignal.timeout(60_000),
|
||||
@ -141,8 +143,14 @@ export async function downloadChatFile(
|
||||
return;
|
||||
}
|
||||
|
||||
const headers: Record<string, string> = {};
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
|
||||
const res = await fetch(href, {
|
||||
headers: platformAuthHeaders(),
|
||||
headers,
|
||||
credentials: "include",
|
||||
signal: AbortSignal.timeout(60_000),
|
||||
});
|
||||
|
||||
@ -1,130 +0,0 @@
|
||||
// @vitest-environment node
|
||||
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
|
||||
|
||||
// Tests for the boot-time matched-pair guard added to next.config.ts.
|
||||
//
|
||||
// Why this lives in src/lib/__tests__ even though the function is in
|
||||
// canvas/next.config.ts:
|
||||
// - next.config.ts runs as ESM-but-also-CJS depending on which
|
||||
// consumer loads it (Next.js dev server vs Next.js build); we
|
||||
// want the test to be a plain ESM module Vitest already handles.
|
||||
// - Importing from "../../../next.config" pulls in the rest of the
|
||||
// file (loadMonorepoEnv, the default export, etc.) which has
|
||||
// side effects on module load (it runs loadMonorepoEnv()
|
||||
// immediately). To keep the test hermetic we don't import — we
|
||||
// duplicate the function under test.
|
||||
//
|
||||
// Sourcing the function from a shared module would be cleaner, but
|
||||
// next.config.ts is required to be a single self-contained file by
|
||||
// Next.js's loader on some host configurations. Pin invariant: the
|
||||
// duplicated function below MUST stay byte-identical to the one in
|
||||
// next.config.ts. If you change one, change the other and bump this
|
||||
// comment.
|
||||
|
||||
function checkAdminTokenPair(): void {
|
||||
const serverSet = !!process.env.ADMIN_TOKEN;
|
||||
const clientSet = !!process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (serverSet === clientSet) return;
|
||||
if (serverSet && !clientSet) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
"[next.config] ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not — " +
|
||||
"canvas will 401 against workspace-server because the bearer header " +
|
||||
"is never attached. Set both to the same value, or unset both.",
|
||||
);
|
||||
} else {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
"[next.config] NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not — " +
|
||||
"workspace-server will reject the bearer because no AdminAuth gate " +
|
||||
"is configured. Set both to the same value, or unset both.",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
describe("checkAdminTokenPair", () => {
|
||||
// Snapshot env so individual tests can stomp on it without leaking.
|
||||
// Rebuild from snapshot in afterEach so the next test sees a known
|
||||
// baseline regardless of mutation pattern.
|
||||
let originalEnv: Record<string, string | undefined>;
|
||||
let errorSpy: ReturnType<typeof vi.spyOn>;
|
||||
|
||||
beforeEach(() => {
|
||||
originalEnv = {
|
||||
ADMIN_TOKEN: process.env.ADMIN_TOKEN,
|
||||
NEXT_PUBLIC_ADMIN_TOKEN: process.env.NEXT_PUBLIC_ADMIN_TOKEN,
|
||||
};
|
||||
delete process.env.ADMIN_TOKEN;
|
||||
delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
errorSpy = vi.spyOn(console, "error").mockImplementation(() => {});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (originalEnv.ADMIN_TOKEN === undefined) delete process.env.ADMIN_TOKEN;
|
||||
else process.env.ADMIN_TOKEN = originalEnv.ADMIN_TOKEN;
|
||||
if (originalEnv.NEXT_PUBLIC_ADMIN_TOKEN === undefined) delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
else process.env.NEXT_PUBLIC_ADMIN_TOKEN = originalEnv.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
errorSpy.mockRestore();
|
||||
});
|
||||
|
||||
it("emits no warning when both are unset", () => {
|
||||
checkAdminTokenPair();
|
||||
expect(errorSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("emits no warning when both are set (matched pair, the happy path)", () => {
|
||||
process.env.ADMIN_TOKEN = "local-dev-admin";
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "local-dev-admin";
|
||||
checkAdminTokenPair();
|
||||
expect(errorSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("warns when ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not", () => {
|
||||
process.env.ADMIN_TOKEN = "local-dev-admin";
|
||||
checkAdminTokenPair();
|
||||
expect(errorSpy).toHaveBeenCalledTimes(1);
|
||||
// Exact-string assertion — substring would also pass when the
|
||||
// function's branch logic is broken (e.g. emits both messages, or
|
||||
// emits the wrong one). Pin the exact message that operators will
|
||||
// see in their dev console so regressions are visible.
|
||||
expect(errorSpy).toHaveBeenCalledWith(
|
||||
"[next.config] ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not — " +
|
||||
"canvas will 401 against workspace-server because the bearer header " +
|
||||
"is never attached. Set both to the same value, or unset both.",
|
||||
);
|
||||
});
|
||||
|
||||
it("warns when NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not", () => {
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "local-dev-admin";
|
||||
checkAdminTokenPair();
|
||||
expect(errorSpy).toHaveBeenCalledTimes(1);
|
||||
expect(errorSpy).toHaveBeenCalledWith(
|
||||
"[next.config] NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not — " +
|
||||
"workspace-server will reject the bearer because no AdminAuth gate " +
|
||||
"is configured. Set both to the same value, or unset both.",
|
||||
);
|
||||
});
|
||||
|
||||
// Empty string in process.env is the JS-side representation of `KEY=`
|
||||
// (no value) in a .env file. Treating "" as unset makes the pair
|
||||
// invariant symmetric: `KEY=` and `unset KEY` produce the same
|
||||
// verdict. Without this branch, an operator who comments out the
|
||||
// value but leaves the line would get a false-positive warning.
|
||||
it("treats empty string as unset (so KEY= and unset KEY are equivalent)", () => {
|
||||
process.env.ADMIN_TOKEN = "";
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "";
|
||||
checkAdminTokenPair();
|
||||
expect(errorSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("warns when ADMIN_TOKEN is set and NEXT_PUBLIC_ADMIN_TOKEN is empty string", () => {
|
||||
process.env.ADMIN_TOKEN = "local-dev-admin";
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "";
|
||||
checkAdminTokenPair();
|
||||
expect(errorSpy).toHaveBeenCalledTimes(1);
|
||||
// First branch — server set, client unset.
|
||||
expect(errorSpy).toHaveBeenCalledWith(
|
||||
expect.stringContaining("ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not"),
|
||||
);
|
||||
});
|
||||
});
|
||||
@ -1,97 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
|
||||
|
||||
// Tests for platformAuthHeaders — the shared helper extracted in #178
|
||||
// to consolidate the bearer-token-attach + tenant-slug-attach pattern
|
||||
// that was previously duplicated across 7 raw-fetch callsites in the
|
||||
// canvas (uploads + 5 Attachment* components + the api.ts request()
|
||||
// function).
|
||||
//
|
||||
// What we pin here:
|
||||
// - Returns a fresh object each call (so callers can mutate without
|
||||
// leaking into each other).
|
||||
// - Empty result on a non-tenant host with no admin token (the
|
||||
// localhost / self-hosted shape).
|
||||
// - Bearer attached when NEXT_PUBLIC_ADMIN_TOKEN is set.
|
||||
// - X-Molecule-Org-Slug attached when window.location.hostname is a
|
||||
// tenant subdomain (<slug>.moleculesai.app).
|
||||
// - Both attached when both apply (the production SaaS shape).
|
||||
//
|
||||
// Why jsdom: getTenantSlug() reads window.location.hostname. Node-only
|
||||
// environment yields no window and getTenantSlug returns null
|
||||
// unconditionally — wouldn't exercise the slug branch.
|
||||
|
||||
import { platformAuthHeaders } from "../api";
|
||||
|
||||
describe("platformAuthHeaders", () => {
|
||||
let originalAdminToken: string | undefined;
|
||||
|
||||
beforeEach(() => {
|
||||
originalAdminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (originalAdminToken === undefined) delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
else process.env.NEXT_PUBLIC_ADMIN_TOKEN = originalAdminToken;
|
||||
// jsdom resets hostname between tests via the @vitest-environment
|
||||
// pragma's per-test isolation. No explicit reset needed.
|
||||
});
|
||||
|
||||
it("returns an empty object on a non-tenant host with no admin token", () => {
|
||||
// jsdom default hostname is "localhost" — not a tenant slug, so
|
||||
// getTenantSlug() returns null and no X-Molecule-Org-Slug is added.
|
||||
const headers = platformAuthHeaders();
|
||||
expect(headers).toEqual({});
|
||||
});
|
||||
|
||||
it("attaches Authorization when NEXT_PUBLIC_ADMIN_TOKEN is set", () => {
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "local-dev-admin";
|
||||
const headers = platformAuthHeaders();
|
||||
expect(headers).toEqual({ Authorization: "Bearer local-dev-admin" });
|
||||
});
|
||||
|
||||
it("does NOT attach Authorization when NEXT_PUBLIC_ADMIN_TOKEN is empty string", () => {
|
||||
// Empty-string env is the JS-side shape of `KEY=` in .env.
|
||||
// Treating it as unset matches the matched-pair guard in
|
||||
// next.config.ts (admin-token-pair.test.ts) — symmetric semantics.
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "";
|
||||
const headers = platformAuthHeaders();
|
||||
expect(headers).toEqual({});
|
||||
});
|
||||
|
||||
it("attaches X-Molecule-Org-Slug on a tenant subdomain", () => {
|
||||
Object.defineProperty(window, "location", {
|
||||
value: { hostname: "reno-stars.moleculesai.app" },
|
||||
writable: true,
|
||||
});
|
||||
const headers = platformAuthHeaders();
|
||||
expect(headers).toEqual({ "X-Molecule-Org-Slug": "reno-stars" });
|
||||
});
|
||||
|
||||
it("attaches both when both apply (production SaaS shape)", () => {
|
||||
Object.defineProperty(window, "location", {
|
||||
value: { hostname: "reno-stars.moleculesai.app" },
|
||||
writable: true,
|
||||
});
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "tenant-bearer";
|
||||
const headers = platformAuthHeaders();
|
||||
// Pin exact-equality on the full shape — substring/contains
|
||||
// assertions would also pass for an extra-header bug.
|
||||
expect(headers).toEqual({
|
||||
"X-Molecule-Org-Slug": "reno-stars",
|
||||
Authorization: "Bearer tenant-bearer",
|
||||
});
|
||||
});
|
||||
|
||||
it("returns a fresh object each call (callers can mutate safely)", () => {
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "tok";
|
||||
const a = platformAuthHeaders();
|
||||
const b = platformAuthHeaders();
|
||||
expect(a).not.toBe(b); // distinct refs
|
||||
expect(a).toEqual(b); // same content
|
||||
a["Content-Type"] = "application/json";
|
||||
// Mutation on `a` does not leak into `b`.
|
||||
expect(b["Content-Type"]).toBeUndefined();
|
||||
});
|
||||
});
|
||||
@ -21,45 +21,6 @@ export interface RequestOptions {
|
||||
timeoutMs?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the platform auth header set used by every authenticated fetch
|
||||
* from the canvas. Returns a fresh object so callers can mutate (e.g.
|
||||
* append `Content-Type` for JSON requests, omit it for FormData).
|
||||
*
|
||||
* SaaS cross-origin shape:
|
||||
* - `X-Molecule-Org-Slug` — derived from `window.location.hostname`
|
||||
* by `getTenantSlug()`. Control plane uses it for fly-replay
|
||||
* routing. Empty on localhost / non-tenant hosts — safe to omit.
|
||||
* - `Authorization: Bearer <token>` — `NEXT_PUBLIC_ADMIN_TOKEN` baked
|
||||
* into the canvas build (see canvas/Dockerfile L8/L11). Required by
|
||||
* the workspace-server when `ADMIN_TOKEN` is set on the server side
|
||||
* (Tier-2b AdminAuth gate, wsauth_middleware.go ~L245). Empty when
|
||||
* no admin token was provisioned — the Tier-1 session-cookie path
|
||||
* handles that case via `credentials:"include"`.
|
||||
*
|
||||
* Why a shared helper: the two-line "read env, attach bearer; read
|
||||
* slug, attach header" pattern was duplicated across `request()` and
|
||||
* 7 raw-fetch callsites (chat uploads/download + 5 Attachment*
|
||||
* components) before this consolidation. A new poller or raw fetch
|
||||
* that forgets one of the two headers silently 401s against
|
||||
* workspace-server when ADMIN_TOKEN is set — the exact bug shape
|
||||
* called out in #178 / closes the post-#176 self-review gap.
|
||||
*
|
||||
* Callers that want JSON Content-Type should spread this and add it
|
||||
* themselves; FormData callers should NOT add Content-Type (the
|
||||
* browser sets the multipart boundary). Centralizing the auth pair
|
||||
* but leaving Content-Type up to the caller is the minimum viable
|
||||
* shared shape.
|
||||
*/
|
||||
export function platformAuthHeaders(): Record<string, string> {
|
||||
const headers: Record<string, string> = {};
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
return headers;
|
||||
}
|
||||
|
||||
async function request<T>(
|
||||
method: string,
|
||||
path: string,
|
||||
@ -67,16 +28,17 @@ async function request<T>(
|
||||
retryCount = 0,
|
||||
options?: RequestOptions,
|
||||
): Promise<T> {
|
||||
// JSON-bodied request — Content-Type is JSON. Auth pair comes from
|
||||
// the shared helper; see its doc comment for the SaaS-shape rationale.
|
||||
const headers: Record<string, string> = {
|
||||
"Content-Type": "application/json",
|
||||
...platformAuthHeaders(),
|
||||
};
|
||||
// Re-read slug locally for the 401 handler below — `headers` already
|
||||
// has it, but the 401 branch needs the bare value to gate the
|
||||
// session-probe + redirect logic on tenant context.
|
||||
// SaaS cross-origin shape:
|
||||
// - X-Molecule-Org-Slug: derived from window.location.hostname by
|
||||
// getTenantSlug(). Control plane uses it for fly-replay routing.
|
||||
// Empty on localhost / non-tenant hosts — safe to omit.
|
||||
// - credentials:"include": sends the session cookie cross-origin.
|
||||
// Cookie's Domain=.moleculesai.app attribute + cp's CORS allow this.
|
||||
const headers: Record<string, string> = { "Content-Type": "application/json" };
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
|
||||
const res = await fetch(`${PLATFORM_URL}${path}`, {
|
||||
method,
|
||||
|
||||
@ -7,22 +7,6 @@ export default defineConfig({
|
||||
test: {
|
||||
environment: 'node',
|
||||
exclude: ['e2e/**', 'node_modules/**', '**/dist/**'],
|
||||
// Issue #22 / vitest pool investigation:
|
||||
//
|
||||
// The forks pool spawns one Node.js worker per concurrent slot.
|
||||
// Each jsdom-environment worker bootstraps a full DOM (~30-50 MB resident
|
||||
// set) at cold-start. With the default maxWorkers derived from CPU
|
||||
// count, multiple jsdom workers can start simultaneously, exhausting
|
||||
// memory on the 2-CPU Gitea Actions runner and causing pool workers to
|
||||
// fail to respond with "[vitest-pool]: Timeout starting … runner."
|
||||
//
|
||||
// Fix: cap maxWorkers at 1 so only one worker is alive at any time.
|
||||
// Tests still run in parallel within that single worker's process (via
|
||||
// node's EventLoop) — this is the same parallelism as the `threads`
|
||||
// pool but without the per-worker jsdom cold-start overhead. 51 test
|
||||
// files that previously took 50–70 s with 5 failures now run
|
||||
// sequentially through one worker, eliminating the memory spike.
|
||||
maxWorkers: 1,
|
||||
// CI-conditional test timeout (issue #96).
|
||||
//
|
||||
// Vitest's 5000ms default is too tight for the first test in any
|
||||
|
||||
@ -119,7 +119,7 @@ services:
|
||||
|
||||
networks:
|
||||
default:
|
||||
name: molecule-core-net
|
||||
name: molecule-monorepo-net
|
||||
external: true
|
||||
|
||||
volumes:
|
||||
|
||||
@ -1,7 +1,3 @@
|
||||
# Include infra services (Temporal, Langfuse) so `docker compose up` starts the full stack.
|
||||
include:
|
||||
- docker-compose.infra.yml
|
||||
|
||||
services:
|
||||
# --- Infrastructure ---
|
||||
postgres:
|
||||
@ -16,8 +12,7 @@ services:
|
||||
volumes:
|
||||
- pgdata:/var/lib/postgresql/data
|
||||
networks:
|
||||
- molecule-core-net
|
||||
restart: unless-stopped
|
||||
- molecule-monorepo-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-dev}"]
|
||||
interval: 2s
|
||||
@ -44,7 +39,7 @@ services:
|
||||
psql -h postgres -U "$${POSTGRES_USER}" -d postgres -c "CREATE DATABASE langfuse"
|
||||
fi
|
||||
networks:
|
||||
- molecule-core-net
|
||||
- molecule-monorepo-net
|
||||
|
||||
redis:
|
||||
image: redis:7-alpine
|
||||
@ -54,8 +49,7 @@ services:
|
||||
volumes:
|
||||
- redisdata:/data
|
||||
networks:
|
||||
- molecule-core-net
|
||||
restart: unless-stopped
|
||||
- molecule-monorepo-net
|
||||
healthcheck:
|
||||
test: ["CMD", "redis-cli", "ping"]
|
||||
interval: 2s
|
||||
@ -72,7 +66,7 @@ services:
|
||||
volumes:
|
||||
- clickhousedata:/var/lib/clickhouse
|
||||
networks:
|
||||
- molecule-core-net
|
||||
- molecule-monorepo-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://127.0.0.1:8123/ping || exit 1"]
|
||||
interval: 5s
|
||||
@ -101,7 +95,7 @@ services:
|
||||
ports:
|
||||
- "3001:3000"
|
||||
networks:
|
||||
- molecule-core-net
|
||||
- molecule-monorepo-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/api/public/health || exit 1"]
|
||||
interval: 10s
|
||||
@ -132,10 +126,6 @@ services:
|
||||
REDIS_URL: redis://redis:6379
|
||||
PORT: "${PLATFORM_PORT:-8080}"
|
||||
PLATFORM_URL: "http://platform:${PLATFORM_PORT:-8080}"
|
||||
# Container network namespace is already isolated; "all interfaces"
|
||||
# inside the container = the bridge interface only. The fail-open
|
||||
# default (127.0.0.1) would block host-to-container access.
|
||||
BIND_ADDR: "${BIND_ADDR:-0.0.0.0}"
|
||||
# Default MOLECULE_ENV=development so the WorkspaceAuth / AdminAuth
|
||||
# middleware fail-open path activates when ADMIN_TOKEN is unset —
|
||||
# otherwise the canvas (which runs without a bearer in pure local
|
||||
@ -205,28 +195,12 @@ services:
|
||||
# App private key — read-only bind-mount. The host-side path is
|
||||
# gitignored per .gitignore rules (/.secrets/ + *.pem).
|
||||
- ./.secrets/github-app.pem:/secrets/github-app.pem:ro
|
||||
# Per-role persona credentials (molecule-core#242 local surface).
|
||||
# Sourced at workspace creation time by org_import.go::loadPersonaEnvFile
|
||||
# when a workspace.yaml carries `role: <name>`. The host-side dir is
|
||||
# populated by the operator-host bootstrap kit (28 dev-tree personas);
|
||||
# /etc/molecule-bootstrap/personas is the in-container path the
|
||||
# platform expects (matches the prod tenant-EC2 path so the same code
|
||||
# works in both modes).
|
||||
#
|
||||
# Read-only mount — workspace-server only reads, never writes here.
|
||||
# If the host dir is empty/missing the platform's loadPersonaEnvFile
|
||||
# silently no-ops per its existing semantics, so this mount is safe
|
||||
# even on a fresh machine that hasn't run the bootstrap kit yet.
|
||||
- ${MOLECULE_PERSONA_ROOT_HOST:-${HOME}/.molecule-ai/personas}:/etc/molecule-bootstrap/personas:ro
|
||||
ports:
|
||||
- "${PLATFORM_PUBLISH_PORT:-8080}:${PLATFORM_PORT:-8080}"
|
||||
networks:
|
||||
- molecule-core-net
|
||||
restart: unless-stopped
|
||||
- molecule-monorepo-net
|
||||
healthcheck:
|
||||
# Plain GET — `--spider` would issue HEAD, which returns 404 because
|
||||
# /health is registered as GET only.
|
||||
test: ["CMD-SHELL", "wget -qO /dev/null --tries=1 http://localhost:${PLATFORM_PORT:-8080}/health || exit 1"]
|
||||
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:${PLATFORM_PORT:-8080}/health || exit 1"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
@ -262,9 +236,9 @@ services:
|
||||
ports:
|
||||
- "${CANVAS_PUBLISH_PORT:-3000}:${CANVAS_PORT:-3000}"
|
||||
networks:
|
||||
- molecule-core-net
|
||||
- molecule-monorepo-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget -qO /dev/null --tries=1 http://127.0.0.1:${CANVAS_PORT:-3000} || exit 1"]
|
||||
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://127.0.0.1:${CANVAS_PORT:-3000} || exit 1"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
@ -295,7 +269,7 @@ services:
|
||||
OPENROUTER_API_KEY: ${OPENROUTER_API_KEY:-}
|
||||
LITELLM_MASTER_KEY: ${LITELLM_MASTER_KEY:-sk-molecule}
|
||||
networks:
|
||||
- molecule-core-net
|
||||
- molecule-monorepo-net
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:4000/health || exit 1"]
|
||||
@ -320,7 +294,7 @@ services:
|
||||
volumes:
|
||||
- ollamadata:/root/.ollama
|
||||
networks:
|
||||
- molecule-core-net
|
||||
- molecule-monorepo-net
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "ollama list || exit 1"]
|
||||
@ -330,8 +304,8 @@ services:
|
||||
start_period: 20s
|
||||
|
||||
networks:
|
||||
molecule-core-net:
|
||||
name: molecule-core-net
|
||||
molecule-monorepo-net:
|
||||
name: molecule-monorepo-net
|
||||
|
||||
volumes:
|
||||
pgdata:
|
||||
|
||||
@ -67,7 +67,7 @@ On-demand fits naturally with how agents work — an agent only needs to know ab
|
||||
|
||||
This is acceptable for MVP because:
|
||||
- All workspaces are provisioned by the same platform on trusted infrastructure
|
||||
- Docker network isolation (`molecule-core-net`) limits who can reach workspace endpoints
|
||||
- Docker network isolation (`molecule-monorepo-net`) limits who can reach workspace endpoints
|
||||
- The tool is self-hosted — the operator controls the network
|
||||
|
||||
**Known gap:** Once workspace A caches workspace B's URL, nothing stops A from calling B directly even after the hierarchy changes and A is no longer supposed to reach B. The cached URL remains valid until the container is restarted or the URL changes.
|
||||
|
||||
@ -124,7 +124,7 @@ Six runtime adapters ship production-ready on `main`: LangGraph, DeepAgents, Cla
|
||||
| Platform ↔ Redis | TCP | Ephemeral state (liveness TTL), caching, pub/sub |
|
||||
| Workspace ↔ Workspace | HTTP (A2A JSON-RPC 2.0) | Direct peer-to-peer, **platform not in data path** |
|
||||
| Workspace → Langfuse | HTTP | Automatic OpenTelemetry tracing |
|
||||
| Docker Network | `molecule-core-net` | Internal-only by default, no exposed DB/Redis ports |
|
||||
| Docker Network | `molecule-monorepo-net` | Internal-only by default, no exposed DB/Redis ports |
|
||||
|
||||
### Core Components
|
||||
|
||||
@ -465,7 +465,7 @@ Unknown tier values default to T2 for safety. Applied via `provisioner.ApplyTier
|
||||
|
||||
### Docker Networking
|
||||
|
||||
- All containers join `molecule-core-net` private network
|
||||
- All containers join `molecule-monorepo-net` private network
|
||||
- Container naming: `ws-{workspace_id[:12]}`
|
||||
- Ephemeral host port binding: `127.0.0.1:0→8000/tcp`
|
||||
|
||||
|
||||
@ -19,7 +19,7 @@ The provisioner is the platform component that deploys workspace containers and
|
||||
|
||||
## Docker Networking (Tier 1-3, Tier 4 uses host)
|
||||
|
||||
All workspace containers join the `molecule-core-net` Docker network. Containers are named `ws-{id[:12]}` (first 12 chars of workspace UUID). Two exported helpers in `provisioner` package provide the canonical naming:
|
||||
All workspace containers join the `molecule-monorepo-net` Docker network. Containers are named `ws-{id[:12]}` (first 12 chars of workspace UUID). Two exported helpers in `provisioner` package provide the canonical naming:
|
||||
|
||||
- `provisioner.ContainerName(workspaceID)` → `ws-{id[:12]}`
|
||||
- `provisioner.InternalURL(workspaceID)` → `http://ws-{id[:12]}:8000`
|
||||
@ -38,7 +38,7 @@ This URL is pre-stored in both Postgres and Redis before the agent registers. Wh
|
||||
|
||||
**Why not use Docker-internal URLs?** In local dev, the platform runs on the host (not in Docker), so it cannot resolve Docker container hostnames. The ephemeral port mapping lets the A2A proxy reach agents via localhost. In production (platform in Docker), the Docker-internal URL (`http://ws-{id}:8000`) would work directly.
|
||||
|
||||
**Workspace-to-workspace discovery:** When a workspace discovers another workspace (via `X-Workspace-ID` header on `GET /registry/discover/:id`), the platform returns the Docker-internal URL (`http://ws-{first12chars}:8000`) so containers can reach each other directly on `molecule-core-net`. The internal URL is cached in Redis at provision time and also synthesized as a fallback if the cache misses (only for online/degraded workspaces).
|
||||
**Workspace-to-workspace discovery:** When a workspace discovers another workspace (via `X-Workspace-ID` header on `GET /registry/discover/:id`), the platform returns the Docker-internal URL (`http://ws-{first12chars}:8000`) so containers can reach each other directly on `molecule-monorepo-net`. The internal URL is cached in Redis at provision time and also synthesized as a fallback if the cache misses (only for online/degraded workspaces).
|
||||
|
||||
For external HTTPS access (multi-host mode), Nginx on the host handles TLS termination and proxies to the container.
|
||||
|
||||
|
||||
@ -1,119 +0,0 @@
|
||||
# Canvas Architecture Audit — VERIFIED
|
||||
|
||||
> **Status:** VERIFIED — Cross-referenced against molecule-core/canvas/src/ (2026-05-09)
|
||||
> **Author:** Core-FE (draft), Core-UIUX (verification)
|
||||
> **Updated:** 2026-05-09 with architecture structure + known issues
|
||||
|
||||
## Canvas Stack (Verified)
|
||||
|
||||
| Technology | Version | Purpose |
|
||||
|-----------|--------|---------|
|
||||
| React Flow | `@xyflow/react` v12 | Node/edge rendering |
|
||||
| Framework | Next.js 14 App Router | Routing, SSR |
|
||||
| Styling | Tailwind v4 | CSS with custom properties |
|
||||
| State | Zustand | Client state management |
|
||||
|
||||
## Directory Structure (Verified)
|
||||
|
||||
```
|
||||
canvas/src/
|
||||
├── components/
|
||||
│ ├── Canvas.tsx # Viewport management, ReactFlow wrapper
|
||||
│ ├── Toolbar.tsx # Add node/edge controls
|
||||
│ ├── ContextMenu.tsx # Right-click menu
|
||||
│ ├── SidePanel.tsx # Properties panel
|
||||
│ ├── WorkspaceNode.tsx # Node rendering
|
||||
│ ├── A2AEdge.tsx # Edge rendering
|
||||
│ └── [tests]/ # Accessibility + component tests
|
||||
├── stores/
|
||||
│ └── secrets-store.ts # ⚠️ getGrouped() performance issue
|
||||
├── hooks/
|
||||
│ ├── useSocketEvent.ts
|
||||
│ ├── useTemplateDeploy.tsx
|
||||
│ └── useWorkspaceName.ts
|
||||
└── lib/
|
||||
├── api.ts
|
||||
├── auth.ts
|
||||
├── canvas-actions.ts
|
||||
├── design-tokens.ts # STATUS_CONFIG, TIER_CONFIG
|
||||
├── theme.ts
|
||||
│   └── theme-provider.tsx # ThemeProvider, useTheme()
```
|
||||
|
||||
## Known Issues
|
||||
|
||||
### 🔴 HIGH: secrets-store.ts Performance
|
||||
**File:** `canvas/src/stores/secrets-store.ts`
|
||||
**Issue:** `getGrouped()` selector creates new objects every call (Object.fromEntries + arrays). Not memoized.
|
||||
**Impact:** Causes unnecessary re-renders on frequent selector access.
|
||||
**Fix needed:** Memoize the selector or use a proper Zustand selector pattern.
|
||||
|
||||
### 🟡 MEDIUM: Pre-commit Hook Verification
|
||||
**Issue:** Pre-commit hook checks 'use client' on hook-using components but unclear if it actually fails on violations.
|
||||
**Action:** Verify the hook is enforcing the rule correctly.
|
||||
|
||||
## Verified Findings
|
||||
|
||||
### Node Rendering ✅ (with notes)
|
||||
- **Framework:** `@xyflow/react` (React Flow) — DOM-based, not SVG/Canvas
|
||||
- **Node selection:** `aria-pressed` + border ring (`border-accent/70`) + shadow
|
||||
- **Node drag:** React Flow native drag — mouse only, no keyboard alternative yet
|
||||
- **Node resize:** `NodeResizer` component visible on selected card, keyboard-inaccessible
|
||||
- **Status:** Accessible via `aria-label` on node cards — "Alpha Workspace workspace — online"
|
||||
|
||||
### Edge Wiring ✅
|
||||
- **Edge rendering:** React Flow SVG paths
|
||||
- **Edge click target:** 1.5px stroke (CSS `stroke-width: 1.5 !important` in globals.css)
|
||||
- **Edge creation:** React Flow drag-from-handle
|
||||
- **Edge anchors:** Visible on hover (`hover:!bg-blue-400`), not keyboard accessible
|
||||
- **Status:** Partial — mouse users only
|
||||
|
||||
### Canvas Controls ✅
|
||||
- **Zoom:** React Flow Controls component (verify if keyboard accessible)
|
||||
- **Pan:** Space+drag, mouse drag
|
||||
- **Minimap:** Not present (MiniMap mocked as null in tests)
|
||||
- **Status:** Basic keyboard support via viewport shortcuts
|
||||
|
||||
### Keyboard Shortcuts ⚠️ PARTIAL
|
||||
- Exists in `useKeyboardShortcuts.ts` but no `aria-describedby` on trigger buttons
|
||||
- No dedicated keyboard shortcut help dialog
|
||||
- **Gap:** Users can't discover shortcuts visually
|
||||
|
||||
### Focus Management ✅ (strong)
|
||||
- Skip link → `#canvas-main` ✅
|
||||
- `aria-label` on ReactFlow container ✅
|
||||
- Focus trap in modals via Radix ✅
|
||||
- Focus ring: `focus-visible:ring-2 focus-visible:ring-blue-500 focus-visible:ring-offset-2 focus-visible:ring-offset-zinc-950`
|
||||
|
||||
### Accessibility Tree ⚠️ PARTIAL
|
||||
- Canvas is in accessibility tree (React Flow DOM nodes)
|
||||
- Node state changes not announced to screen readers (no `aria-live` region)
|
||||
- Context menus announced via `role="menu"` ✅
|
||||
|
||||
### Context Menus ✅ (strong)
|
||||
- `role="menu"`, `role="menuitem"`, `role="separator"` ✅
|
||||
- `aria-label` with workspace name ✅
|
||||
- ArrowUp/Down navigation with wrap-around ✅
|
||||
- Escape + Tab close menu ✅
|
||||
- Auto-focus first item on open ✅
|
||||
|
||||
### Drag and Drop ⚠️ PARTIAL
|
||||
- **Mouse drag:** React Flow native
|
||||
- **Drop target:** Visual indicator (`bg-emerald-950/40 border-emerald-400/60`) ✅
|
||||
- **Keyboard alternative:** None — nodes repositioned only via mouse drag
|
||||
- **Status:** Mouse-only. Keyboard users cannot rearrange nodes.
|
||||
|
||||
---
|
||||
|
||||
## Remaining Gaps (Priority Order)
|
||||
|
||||
| Priority | Item | Files | Status |
|
||||
|----------|------|-------|--------|
|
||||
| HIGH | Screen reader announcements for canvas state changes | Canvas.tsx | Not started |
|
||||
| MEDIUM | Keyboard shortcut help dialog | useKeyboardShortcuts.ts | Not started |
|
||||
| MEDIUM | Keyboard-accessible node drag | WorkspaceNode.tsx, useDragHandlers.ts | Not started |
|
||||
| LOW | Edge anchor keyboard accessibility | A2AEdge.tsx | Not started |
|
||||
| LOW | Node resize keyboard accessibility | WorkspaceNode.tsx (NodeResizer) | Not started |
|
||||
|
||||
---
|
||||
|
||||
*Verified 2026-05-09 by Core-UIUX against molecule-core/canvas/src/*
|
||||
@ -1,424 +0,0 @@
|
||||
# Canvas Design System v1 — VERIFIED
|
||||
|
||||
> **Status:** VERIFIED — Cross-referenced against molecule-core/canvas/src/ (2026-05-09)
|
||||
> **Authors:** Core-FE (draft), Core-UIUX (verification + updates)
|
||||
> **Source files verified:**
|
||||
> - `canvas/src/app/globals.css`
|
||||
> - `canvas/src/styles/theme-tokens.css`
|
||||
> - `canvas/src/lib/design-tokens.ts`
|
||||
> - `canvas/src/components/Tooltip.tsx`
|
||||
> - `canvas/src/components/ContextMenu.tsx`
|
||||
> - `canvas/src/components/Canvas.tsx`
|
||||
> - `canvas/src/components/__tests__/Canvas.a11y.test.tsx`
|
||||
> - `canvas/src/components/__tests__/ContextMenu.keyboard.test.tsx`
|
||||
> - `canvas/src/components/__tests__/MissingKeysModal.a11y.test.tsx`
|
||||
> - `canvas/src/components/__tests__/ConversationTraceModal.a11y.test.tsx`
|
||||
|
||||
---
|
||||
|
||||
## 1. Color Palette — Three-Mode Theme System
|
||||
|
||||
Canvas supports **three themes**: System (follows OS), Light, Dark. Controlled via `ThemeProvider` in `theme-provider.tsx` with preference persisted in `mol_theme` cookie.
|
||||
|
||||
**Key principle: Use semantic tokens, NOT raw zinc values for surfaces.**
|
||||
|
||||
### 1.1 Theme-Mutable Tokens (use these for surfaces)
|
||||
|
||||
Defined in `globals.css` via Tailwind v4 `@theme` block. Automatically flip between light/dark.
|
||||
|
||||
**Light theme (warm paper):**
|
||||
|
||||
| Token | Tailwind Class | Hex | Usage |
|
||||
|-------|--------------|-----|-------|
|
||||
| `--color-surface` | `bg-surface` | `#fafaf7` | Page background |
|
||||
| `--color-surface-elevated` | `bg-surface-elevated` | `#ffffff` | Elevated cards, modals |
|
||||
| `--color-surface-sunken` | `bg-surface-sunken` | `#f3f1ec` | Input fields, recessed areas |
|
||||
| `--color-surface-card` | `bg-surface-card` | `#efece4` | Node cards, chips |
|
||||
| `--color-line` | `border-line` | `#e6e2d8` | Dividers, borders |
|
||||
| `--color-line-soft` | `border-line-soft` | `#efece4` | Subtle dividers |
|
||||
| `--color-ink` | `text-ink` | `#15181c` | Primary text |
|
||||
| `--color-ink-mid` | `text-ink-mid` | `#5a5e66` | Secondary text |
|
||||
| `--color-ink-soft` | `text-ink-soft` | `#8b8e95` | Tertiary text, placeholders |
|
||||
| `--color-accent` | `text-accent` | `#3b5bdb` | Links, primary actions |
|
||||
| `--color-accent-strong` | `text-accent-strong` | `#1a2f99` | Emphasized accent |
|
||||
| `--color-warm` | `text-warm` | `#c0532b` | Warnings |
|
||||
| `--color-good` | `text-good` | `#2f7a4d` | Success states |
|
||||
| `--color-bad` | `text-bad` | `#b94e4a` | Error states |
|
||||
|
||||
**Dark theme:**
|
||||
|
||||
| Token | Hex | Usage |
|
||||
|-------|-----|-------|
|
||||
| `--color-surface` | `#0e1014` | Page background |
|
||||
| `--color-surface-elevated` | `#15181c` | Elevated cards |
|
||||
| `--color-surface-sunken` | `#0a0b0e` | Input fields |
|
||||
| `--color-surface-card` | `#1a1d23` | Node cards |
|
||||
| `--color-line` | `#2a2f3a` | Dividers |
|
||||
| `--color-ink` | `#f4f1e9` | Primary text |
|
||||
| `--color-ink-mid` | `#c8c2b4` | Secondary text |
|
||||
| `--color-ink-soft` | `#8d92a0` | Tertiary text |
|
||||
| `--color-accent` | `#6883e8` | Links (brighter for AA contrast) |
|
||||
| `--color-accent-strong` | `#8aa1ee` | Emphasized accent |
|
||||
| `--color-warm` | `#d96f48` | Warnings |
|
||||
| `--color-good` | `#4ca06e` | Success |
|
||||
| `--color-bad` | `#d27773` | Errors |
|
||||
|
||||
### 1.2 Always-Dark Tokens (terminal surfaces)
|
||||
|
||||
Terminals, console modal, log streams **stay dark** in all themes — readable green-on-black doesn't translate to light.
|
||||
|
||||
| Token | Tailwind Class | Hex | Usage |
|
||||
|-------|--------------|-----|-------|
|
||||
| `--color-bg` | `bg-bg` | `rgb(9 9 11)` / zinc-950 | Terminal background |
|
||||
| `--color-bg-elev` | `bg-bg-elev` | `rgb(24 24 27)` / zinc-900 | Elevated terminal surfaces |
|
||||
| `--color-bg-card` | `bg-bg-card` | `rgb(39 39 42)` / zinc-800 | Terminal cards |
|
||||
| `--color-line-strong` | `border-line-strong` | `rgb(63 63 70)` / zinc-700 | Strong borders |
|
||||
| `--color-ink-mute` | `text-ink-mute` | `rgb(161 161 170)` / zinc-400 | Muted text |
|
||||
| `--color-ink-dim` | `text-ink-dim` | `rgb(113 113 122)` / zinc-500 | Dim text |
|
||||
|
||||
### 1.3 Raw Zinc Usage Rules
|
||||
|
||||
**Use raw zinc for:**
|
||||
- Borders: `border-zinc-700`, `border-zinc-800`
|
||||
- Disabled states: `text-zinc-600`, `bg-zinc-800`
|
||||
- Code highlighting: `bg-zinc-900`, `text-zinc-300`
|
||||
- Terminal surfaces: `bg-zinc-950` (always-dark)
|
||||
|
||||
**NEVER use for surfaces:**
|
||||
- `bg-zinc-900` or `bg-zinc-950` as page/card backgrounds — use `bg-surface`
|
||||
- `text-zinc-50` or `text-zinc-100` as primary text — use `text-ink`
|
||||
- `bg-white`, `bg-gray-50/100` for surfaces — use semantic tokens
|
||||
|
||||
### 1.4 Accessibility Contrast
|
||||
|
||||
| Pair | Ratio | WCAG |
|
||||
|------|-------|------|
|
||||
| `text-ink` on `bg-surface` (light) | ~14.5:1 | AAA |
|
||||
| `text-ink` on `bg-surface` (dark) | ~15.8:1 | AAA |
|
||||
| `text-ink-mid` on `bg-surface` (light) | ~5.2:1 | AA |
|
||||
| `text-ink-mid` on `bg-surface` (dark) | ~5.9:1 | AA |
|
||||
| `text-accent` on `bg-surface` (light) | ~4.8:1 | AA |
|
||||
| `text-accent` on `bg-surface` (dark) | ~4.6:1 | AA |
|
||||
|
||||
---
|
||||
|
||||
## 2. Typography Scale
|
||||
|
||||
**Actual font stack** (from `globals.css`):
|
||||
```
|
||||
-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", sans-serif
|
||||
```
|
||||
No custom fonts loaded — uses OS-native system stack.
|
||||
|
||||
| Size Token | Tailwind | Usage |
|
||||
|------------|----------|-------|
|
||||
| `text-[10px]` | 10px | Micro badges, tier labels |
|
||||
| `text-[11px]` | 11px | Tooltip text |
|
||||
| `text-xs` / `text-[12px]` | 12px | Badges, timestamps |
|
||||
| `text-sm` / `text-[13px]` | 13–14px | Secondary labels, node titles |
|
||||
| `text-base` / `text-[16px]` | 16px | Body text |
|
||||
| `text-lg` | 18px | Section headers |
|
||||
| `text-xl` | 20px | Modal titles |
|
||||
|
||||
**Line height:** `leading-tight` (1.25) for headings, `leading-relaxed` (1.625) for body/tooltips.
|
||||
|
||||
---
|
||||
|
||||
## 3. Animation / Motion Tokens
|
||||
|
||||
**Defined in `canvas/src/styles/theme-tokens.css`** — use these, don't hardcode ms values.
|
||||
|
||||
| Token | Value | Usage |
|
||||
|-------|-------|-------|
|
||||
| `--mol-duration-fast` | 150ms | Hover states, button feedback |
|
||||
| `--mol-duration-base` | 300ms | Standard transitions |
|
||||
| `--mol-duration-spawn` | 350ms | Node spawn animation |
|
||||
| `--mol-duration-root-complete` | 700ms | Org-deploy root glow |
|
||||
| `--mol-duration-fit-view` | 800ms | Canvas fit-viewport |
|
||||
|
||||
| Token | Value | Usage |
|
||||
|-------|-------|-------|
|
||||
| `--mol-easing-standard` | `cubic-bezier(0.2, 0, 0, 1)` | Default ease |
|
||||
| `--mol-easing-bounce-out` | `cubic-bezier(0.2, 0.8, 0.2, 1.05)` | Node spawn bounce |
|
||||
| `--mol-easing-emphasize` | `cubic-bezier(0.3, 0, 0, 1)` | Modal/drawer enter |
|
||||
|
||||
**CSS usage:**
|
||||
```css
|
||||
/* Good — reference the token */
|
||||
transition: all var(--mol-duration-fast) ease;
|
||||
|
||||
/* Bad — hardcoded value */
|
||||
transition: all 150ms ease;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 4. Component Patterns (Verified)
|
||||
|
||||
### 4.1 Buttons
|
||||
|
||||
```tsx
|
||||
// Primary — accent background, ink text
|
||||
<button className="bg-accent hover:bg-accent/90 active:scale-95
|
||||
text-ink px-4 py-2 rounded-md text-sm font-medium
|
||||
focus-visible:ring-2 focus-visible:ring-blue-500
|
||||
focus-visible:ring-offset-2 focus-visible:ring-offset-zinc-900
|
||||
disabled:opacity-50 disabled:cursor-not-allowed">
|
||||
Primary
|
||||
</button>
|
||||
|
||||
// Secondary — surface-card background, border-line
|
||||
<button className="bg-surface-card hover:bg-surface-elevated border border-line
|
||||
text-ink px-4 py-2 rounded-md text-sm font-medium
|
||||
focus-visible:ring-2 focus-visible:ring-blue-500
|
||||
focus-visible:ring-offset-2 focus-visible:ring-offset-zinc-900">
|
||||
Secondary
|
||||
</button>
|
||||
|
||||
// Ghost — no background, hover surface
|
||||
<button className="hover:bg-surface-card text-ink-mid hover:text-ink
|
||||
px-4 py-2 rounded-md text-sm font-medium">
|
||||
Ghost
|
||||
</button>
|
||||
|
||||
// Danger — bad color, requires confirmation dialog
|
||||
<button className="bg-bad hover:bg-bad/90 text-white px-4 py-2
|
||||
rounded-md text-sm font-medium">
|
||||
Delete
|
||||
</button>
|
||||
```
|
||||
|
||||
**States:** default, hover, active (`scale-95`), focus (`ring-2 ring-blue-500 ring-offset-2 ring-offset-zinc-900`), disabled (`opacity-50 cursor-not-allowed`).
|
||||
|
||||
### 4.2 Inputs
|
||||
|
||||
```tsx
|
||||
// Text input — use semantic tokens for surfaces
|
||||
<input
|
||||
className="bg-surface-sunken border border-line text-ink
|
||||
placeholder:text-ink-soft px-3 py-2 rounded-md text-sm
|
||||
focus:outline-none focus:ring-2 focus:ring-blue-500
|
||||
focus:border-transparent
|
||||
disabled:opacity-50 disabled:cursor-not-allowed"
|
||||
placeholder="Enter workspace name"
|
||||
/>
|
||||
|
||||
// Error state
|
||||
<input
|
||||
className="border-bad focus:ring-bad"
|
||||
aria-invalid="true"
|
||||
aria-describedby="error-message"
|
||||
/>
|
||||
```
|
||||
|
||||
**Label:** `text-sm font-medium text-ink mb-1`
|
||||
**Error:** `text-xs text-bad mt-1`
|
||||
|
||||
### 4.3 Cards
|
||||
|
||||
```tsx
|
||||
// Workspace node card (from WorkspaceNode.tsx)
|
||||
<div className="bg-surface-sunken/90 border border-line/80
|
||||
rounded-xl p-3.5 py-2.5
|
||||
hover:border-zinc-500/60 shadow-lg shadow-black/30
|
||||
focus-visible:ring-2 focus-visible:ring-accent/70
|
||||
focus-visible:ring-offset-1 focus-visible:ring-offset-zinc-950">
|
||||
```
|
||||
|
||||
### 4.4 Modals (Radix Dialog)
|
||||
|
||||
```tsx
|
||||
// Backdrop
|
||||
<div className="fixed inset-0 bg-black/70 backdrop-blur-sm z-50"
|
||||
aria-hidden="true" />
|
||||
|
||||
// Dialog — use surface-card + border-line
|
||||
<div className="bg-surface-card border border-line rounded-xl
|
||||
shadow-2xl p-6 max-w-md w-full mx-4">
|
||||
{/* Modal content */}
|
||||
</div>
|
||||
```
|
||||
|
||||
Note: Uses `--color-surface-sunken` for sunken areas (node cards). Cards use `bg-surface-card`.
|
||||
|
||||
**Important:** Use `@radix-ui/react-dialog` — it provides WCAG 2.1 compliance automatically (focus trap, Escape key, aria-modal, aria-labelledby).
|
||||
|
||||
### 4.5 Tooltips
|
||||
|
||||
**Verified implementation** (`canvas/src/components/Tooltip.tsx`):
|
||||
|
||||
```tsx
|
||||
// Trigger wraps children
|
||||
<span aria-describedby="tooltip-id">
|
||||
{children}
|
||||
</span>
|
||||
|
||||
// Tooltip portal (shows on hover + focus, 400ms delay)
|
||||
<div id="tooltip-id"
|
||||
role="tooltip"
|
||||
className="fixed z-[9999] max-w-[400px] max-h-[300px] overflow-y-auto
|
||||
px-3 py-2 bg-surface-card border border-line
|
||||
rounded-lg shadow-2xl shadow-black/60 pointer-events-none">
|
||||
<div className="text-[11px] text-ink whitespace-pre-wrap break-words leading-relaxed">
|
||||
{text}
|
||||
</div>
|
||||
</div>
|
||||
```
|
||||
|
||||
**WCAG 1.4.13 compliance:** Escape key dismisses tooltip without moving pointer/focus.
|
||||
|
||||
### 4.6 Theme Switching
|
||||
|
||||
Use `useTheme()` hook from `theme-provider.tsx`:
|
||||
|
||||
```tsx
|
||||
import { useTheme } from "@/lib/theme-provider";
|
||||
|
||||
function ThemeToggle() {
|
||||
const { theme, resolvedTheme, setTheme } = useTheme();
|
||||
return (
|
||||
<select
|
||||
value={theme}
|
||||
onChange={(e) => setTheme(e.target.value as ThemePreference)}
|
||||
>
|
||||
<option value="system">System</option>
|
||||
<option value="light">Light</option>
|
||||
<option value="dark">Dark</option>
|
||||
</select>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
**Theme types:**
|
||||
```ts
|
||||
type ThemePreference = "system" | "light" | "dark";
|
||||
type ResolvedTheme = "light" | "dark";
|
||||
```
|
||||
|
||||
**Cookie:** `mol_theme` with `Domain=.moleculesai.app` — persists across surfaces.
|
||||
|
||||
---
|
||||
|
||||
## 5. Accessibility Rules (WCAG 2.1 AA) — VERIFIED
|
||||
|
||||
### 5.1 Focus Management ✅ VERIFIED
|
||||
- All interactive elements have `focus-visible:ring-2 focus-visible:ring-blue-500 focus-visible:ring-offset-2 focus-visible:ring-offset-zinc-950`
|
||||
- No `outline-none` without equivalent focus ring
|
||||
- Radix Dialog traps focus automatically
|
||||
|
||||
### 5.2 Semantic HTML ✅ VERIFIED
|
||||
- Buttons use `<button>` — verified in WorkspaceNode.tsx, ContextMenu.tsx
|
||||
- Form inputs have associated `<label>` patterns
|
||||
- Radix Dialog provides role="dialog" + aria-modal
|
||||
|
||||
### 5.3 ARIA ✅ VERIFIED
|
||||
- Icon-only buttons: `aria-label` with descriptive text (not "X")
|
||||
- Example: `aria-label="Extract ${name} from team"` in WorkspaceNode.tsx
|
||||
- Live regions: `aria-live="polite"` on Toast component
|
||||
- Modals: Radix provides `role="dialog"`, `aria-modal="true"`, `aria-labelledby`
|
||||
- Error messages: `aria-invalid="true"`, `aria-describedby` linking to error text
|
||||
- Tooltips: `role="tooltip"` + `aria-describedby` on trigger
|
||||
|
||||
### 5.4 Keyboard Navigation ✅ VERIFIED
|
||||
- ContextMenu: ArrowUp/Down wraps, Enter/Space selects, Escape closes, Tab closes
|
||||
- Modals: Escape closes (Radix), focus returns to trigger
|
||||
- `prefers-reduced-motion` ✅ (verified in globals.css)
|
||||
|
||||
### 5.5 Color Independence ✅
|
||||
- Status indicators use text labels + icons, not color alone
|
||||
- `STATUS_CONFIG` has text labels: "Online", "Offline", "Failed", etc.
|
||||
|
||||
---
|
||||
|
||||
## 6. React Flow Canvas Specifics
|
||||
|
||||
Canvas uses `@xyflow/react` (React Flow).
|
||||
|
||||
### Canvas Container ✅ VERIFIED
|
||||
```tsx
|
||||
// Canvas.tsx wraps ReactFlow with:
|
||||
<ReactFlow
|
||||
aria-label="Molecule AI workspace canvas"
|
||||
// ...
|
||||
/>
|
||||
```
|
||||
|
||||
### Node Accessibility ✅ VERIFIED
|
||||
- `role="button"` on workspace node cards
|
||||
- `tabIndex={0}` for keyboard focus
|
||||
- `aria-pressed` for selection state
|
||||
- `aria-label` with workspace name + status
|
||||
|
||||
### Skip Link ✅ VERIFIED
|
||||
```tsx
|
||||
<a href="#canvas-main">Skip to canvas</a>
|
||||
<main id="canvas-main" role="main">
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 7. Enforcement Checklist
|
||||
|
||||
### Color Token Rules
|
||||
- [x] No `bg-white` / `bg-zinc-50` for surfaces — use `bg-surface`
|
||||
- [x] No `text-zinc-50` / `text-zinc-100` for surfaces — use `text-ink`
|
||||
- [x] No `bg-zinc-900` / `bg-zinc-950` for surfaces — use `bg-surface` or `bg-surface-card`
|
||||
- [x] Raw zinc OK for: borders, disabled states, code, terminal surfaces
|
||||
|
||||
### Accessibility Rules
|
||||
- [x] All buttons have focus rings (verified in tests)
|
||||
- [x] All modals use Radix Dialog (verified)
|
||||
- [x] All tooltips use `role="tooltip"` + `aria-describedby` (verified)
|
||||
- [x] No `outline-none` without focus ring (verified)
|
||||
- [x] All inputs have visible labels (verified pattern)
|
||||
- [x] Contrast ratios at 4.5:1 minimum (verified above)
|
||||
- [x] `prefers-reduced-motion` suppresses all animations (verified in globals.css)
|
||||
- [x] Context menu has keyboard navigation (verified in ContextMenu.keyboard.test.tsx)
|
||||
- [x] Theme switching works: System/Light/Dark modes verified
|
||||
|
||||
---
|
||||
|
||||
## 8. Canvas Architecture (Verified)
|
||||
|
||||
**Stack:**
|
||||
- `@xyflow/react` v12 (React Flow) — node/edge rendering
|
||||
- Next.js 14 App Router
|
||||
- Tailwind v4 with CSS custom properties
|
||||
- Zustand for state management
|
||||
|
||||
**Directory Structure:**
|
||||
```
|
||||
canvas/src/
|
||||
├── components/ # Canvas.tsx, Toolbar.tsx, ContextMenu.tsx, SidePanel.tsx, WorkspaceNode.tsx, A2AEdge.tsx
|
||||
├── stores/ # secrets-store.ts (only store)
|
||||
├── hooks/ # useSocketEvent.ts, useTemplateDeploy.tsx, useWorkspaceName.ts
|
||||
├── lib/ # api.ts, auth.ts, canvas-actions.ts, design-tokens.ts, theme.ts, theme-provider.tsx
|
||||
└── app/ # Next.js App Router
|
||||
```
|
||||
|
||||
## 9. Known Issues (Technical Debt)
|
||||
|
||||
### Performance Issues
|
||||
- **secrets-store.ts getGrouped() selector** — Creates new objects every call (Object.fromEntries + arrays) — not memoized. Causes performance issues with frequent re-renders. Needs selector optimization.
|
||||
|
||||
### Code Quality
|
||||
- Check for `any` types in canvas/ directory
|
||||
- Verify pre-commit hook actually fails on 'use client' violations (unverified)
|
||||
- Verify all Zustand selectors avoid object creation (see getGrouped issue above)
|
||||
- Check 'use client' directive on hook-using components
|
||||
|
||||
### Testing
|
||||
- Add axe-core integration for automated accessibility testing
|
||||
- Visual regression tests — no screenshot tests exist yet (KI-006)
|
||||
- Target >80% test coverage on changed files
|
||||
|
||||
## 10. Remaining Open Items
|
||||
|
||||
### Accessibility Gaps
|
||||
1. **Screen reader announcements** — Node/edge changes not announced. Need `aria-live="polite"` region.
|
||||
2. **Keyboard shortcut help dialog** — No dedicated dialog. Shortcuts exist in `useKeyboardShortcuts.ts` but no `aria-describedby` hints on buttons.
|
||||
3. **Edge anchor accessibility** — React Flow handles purely visual. Need ARIA annotations for screen readers.
|
||||
4. **Drag-and-drop keyboard alternative** — Mouse only. Need keyboard equivalent for node rearrangement.
|
||||
|
||||
### Performance
|
||||
5. **secrets-store.ts getGrouped()** — Not memoized, creates new objects every call.
|
||||
@ -73,7 +73,7 @@ These are applied after CORS middleware on every response.
|
||||
|
||||
## 14. No Exposed Database Ports
|
||||
|
||||
Postgres and Redis must not expose host ports. They communicate exclusively over the internal Docker network (`molecule-core-net`). Use `docker compose exec` for direct access during development.
|
||||
Postgres and Redis must not expose host ports. They communicate exclusively over the internal Docker network (`molecule-monorepo-net`). Use `docker compose exec` for direct access during development.
|
||||
|
||||
## Related Docs
|
||||
|
||||
|
||||
@ -73,19 +73,19 @@ runner-wide setting, not per-job. Source: gitea/act_runner config docs
|
||||
|
||||
Flipping the global `container.network` to `bridge` would break every
|
||||
other workflow in the repo (cache server discovery,
|
||||
`molecule-core-net` peer access during integration tests, etc.) —
|
||||
`molecule-monorepo-net` peer access during integration tests, etc.) —
|
||||
unacceptable blast radius for a per-test bug.
|
||||
|
||||
## Fix shape
|
||||
|
||||
`handlers-postgres-integration.yml` no longer uses `services: postgres:`.
|
||||
It launches a sibling postgres container manually on the existing
|
||||
`molecule-core-net` bridge network with a per-run unique name:
|
||||
`molecule-monorepo-net` bridge network with a per-run unique name:
|
||||
|
||||
```yaml
|
||||
env:
|
||||
PG_NAME: pg-handlers-${{ github.run_id }}-${{ github.run_attempt }}
|
||||
PG_NETWORK: molecule-core-net
|
||||
PG_NETWORK: molecule-monorepo-net
|
||||
|
||||
steps:
|
||||
- name: Start sibling Postgres on bridge network
|
||||
@ -117,7 +117,7 @@ host-network runner config. Translate using this same pattern:
|
||||
1. Drop the `services:` block.
|
||||
2. Use `${{ github.run_id }}-${{ github.run_attempt }}` for unique
|
||||
container name.
|
||||
3. Launch on `molecule-core-net` (already trusted bridge in
|
||||
3. Launch on `molecule-monorepo-net` (already trusted bridge in
|
||||
`docker-compose.infra.yml`).
|
||||
4. Read back the bridge IP via `docker inspect` and export as a step env.
|
||||
5. `if: always()` cleanup step at the end.
|
||||
@ -131,7 +131,7 @@ in one place.
|
||||
- Issue #88 (closed by #92): localhost → 127.0.0.1 fix that unmasked
|
||||
this collision; the IPv6 fix is correct, port collision is the new
|
||||
layer.
|
||||
- Issue #94 created `molecule-core-net` + `alpine:latest` as
|
||||
- Issue #94 created `molecule-monorepo-net` + `alpine:latest` as
|
||||
prereqs.
|
||||
- Saved memory `feedback_act_runner_github_server_url` documents
|
||||
another act_runner-vs-GHA divergence (server URL).
|
||||
|
||||
@ -5,7 +5,7 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
ROOT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
|
||||
echo "==> Ensuring shared docker network exists..."
|
||||
docker network create molecule-core-net 2>/dev/null || true
|
||||
docker network create molecule-monorepo-net 2>/dev/null || true
|
||||
|
||||
# Populate the template / plugin registry.
|
||||
# workspace-configs-templates/, org-templates/, and plugins/ are intentionally
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
{
|
||||
"_comment": "OSS surface registry — every repo listed here MUST be public on git.moleculesai.app. Layer-3 customer/private templates are NOT registered here; they are handled at provision-time via the per-tenant credential resolver (see internal#102 RFC). 'main' refs are pinned to tags before broad rollout.",
|
||||
"_comment": "Pin refs to release tags for reproducible builds. 'main' is OK while all repos are internal.",
|
||||
"version": 1,
|
||||
"plugins": [
|
||||
{"name": "browser-automation", "repo": "molecule-ai/molecule-ai-plugin-browser-automation", "ref": "main"},
|
||||
@ -40,6 +40,7 @@
|
||||
{"name": "free-beats-all", "repo": "molecule-ai/molecule-ai-org-template-free-beats-all", "ref": "main"},
|
||||
{"name": "medo-smoke", "repo": "molecule-ai/molecule-ai-org-template-medo-smoke", "ref": "main"},
|
||||
{"name": "molecule-worker-gemini", "repo": "molecule-ai/molecule-ai-org-template-molecule-worker-gemini", "ref": "main"},
|
||||
{"name": "reno-stars", "repo": "molecule-ai/molecule-ai-org-template-reno-stars", "ref": "main"},
|
||||
{"name": "ux-ab-lab", "repo": "molecule-ai/molecule-ai-org-template-ux-ab-lab", "ref": "main"},
|
||||
{"name": "mock-bigorg", "repo": "molecule-ai/molecule-ai-org-template-mock-bigorg", "ref": "main"}
|
||||
]
|
||||
|
||||
@ -8,24 +8,27 @@
|
||||
# Requires: git, jq (lighter than python3 — ~2MB vs ~50MB in Alpine)
|
||||
#
|
||||
# Auth (optional):
|
||||
# Post-2026-05-08 (#192): every repo in manifest.json is public on
|
||||
# git.moleculesai.app. Anonymous clone works for the entire registered
|
||||
# set. The OSS-surface contract is recorded in manifest.json's _comment
|
||||
# — Layer-3 customer/private templates (e.g. reno-stars) are NOT in the
|
||||
# manifest; they are handled at provision-time via the per-tenant
|
||||
# credential resolver (internal#102 RFC).
|
||||
# When MOLECULE_GITEA_TOKEN is set, embed it as the basic-auth password so
|
||||
# private Gitea repos clone successfully. When unset, clone anonymously
|
||||
# (works only for repos that are public on git.moleculesai.app).
|
||||
#
|
||||
# MOLECULE_GITEA_TOKEN is therefore optional today. Kept supported for
|
||||
# two reasons: (a) historical CI configs that still inject
|
||||
# AUTO_SYNC_TOKEN remain harmless, (b) reserved for the case where a
|
||||
# private internal-only template is later registered via a ci-readonly
|
||||
# team grant — review must explicitly sign off on that, since it
|
||||
# violates the public-OSS-surface contract.
|
||||
# This is the path the publish-workspace-server-image.yml workflow uses:
|
||||
# it injects AUTO_SYNC_TOKEN (devops-engineer persona PAT, repo:read on
|
||||
# the molecule-ai org) so the in-CI pre-clone step succeeds for ALL
|
||||
# manifest entries — including the 5 private workspace-template-* repos
|
||||
# (codex, crewai, deepagents, gemini-cli, langgraph) and all 7
|
||||
# org-template-* repos.
|
||||
#
|
||||
# The token (when set) never enters the Docker image: this script runs
|
||||
# in the trusted CI context BEFORE `docker buildx build`, populates
|
||||
# The token never enters the Docker image: this script runs in the
|
||||
# trusted CI context BEFORE `docker buildx build`, populates
|
||||
# .tenant-bundle-deps/, then `Dockerfile.tenant` COPYs from there with
|
||||
# the .git directories already stripped (see line ~67 below).
|
||||
#
|
||||
# For backward compatibility — and so a fresh clone works without
|
||||
# secrets when (eventually) the workspace-template-* repos flip public —
|
||||
# the unset path remains a plain anonymous HTTPS clone. That path will
|
||||
# FAIL with "could not read Username" on private repos today; CI MUST
|
||||
# set MOLECULE_GITEA_TOKEN.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
|
||||
@ -24,7 +24,7 @@ echo "=== NUKE ==="
|
||||
docker compose -f "$ROOT/docker-compose.yml" down -v 2>/dev/null || true
|
||||
docker ps -a --format "{{.Names}}" | grep "^ws-" | xargs -r docker rm -f 2>/dev/null || true
|
||||
docker volume ls --format "{{.Name}}" | grep "^ws-" | xargs -r docker volume rm 2>/dev/null || true
|
||||
docker network rm molecule-core-net 2>/dev/null || true
|
||||
docker network rm molecule-monorepo-net 2>/dev/null || true
|
||||
echo " cleaned"
|
||||
|
||||
echo "=== POPULATE MANIFEST DIRS ==="
|
||||
|
||||
3
workspace-server/.gitignore
vendored
3
workspace-server/.gitignore
vendored
@ -1,5 +1,2 @@
|
||||
# The compiled binary, not the cmd/server package.
|
||||
/server
|
||||
|
||||
# air live-reload build cache (Dockerfile.dev + docker-compose.dev.yml).
|
||||
/tmp/
|
||||
|
||||
@ -15,14 +15,8 @@
|
||||
|
||||
FROM golang:1.25-alpine
|
||||
|
||||
# air + git (for go mod) + ca-certs (for TLS) + tzdata (for time-zone DB)
|
||||
# + docker-cli + docker-cli-buildx so the platform binary can shell out to
|
||||
# /var/run/docker.sock (bind-mounted from host) for local-build provisioning.
|
||||
# docker-cli alone is insufficient: alpine's docker-cli enables BuildKit by
|
||||
# default but ships without buildx, producing
|
||||
# `ERROR: BuildKit is enabled but the buildx component is missing or broken`
|
||||
# on every `docker build`. docker-cli-buildx provides the buildx subcommand.
|
||||
RUN apk add --no-cache git ca-certificates tzdata wget docker-cli docker-cli-buildx \
|
||||
# air + git (for go mod) + ca-certs (for TLS) + tzdata (for time-zone DB).
|
||||
RUN apk add --no-cache git ca-certificates tzdata wget \
|
||||
&& go install github.com/air-verse/air@latest
|
||||
|
||||
WORKDIR /app/workspace-server
|
||||
@ -37,7 +31,7 @@ RUN go mod download
|
||||
# block) so the Dockerfile doesn't need to COPY it. air watches the
|
||||
# bind-mounted dir for changes.
|
||||
|
||||
ENV CGO_ENABLED=0
|
||||
ENV CGO_ENABLED=1
|
||||
ENV GOFLAGS="-buildvcs=false"
|
||||
|
||||
# Run air with the .air.toml in the bind-mounted source dir.
|
||||
|
||||
@ -26,14 +26,6 @@ func TestExtended_WorkspaceDelete(t *testing.T) {
|
||||
WithArgs(wsDelID).
|
||||
WillReturnRows(sqlmock.NewRows([]string{"id", "name"}))
|
||||
|
||||
// CascadeDelete walks descendants unconditionally (the 0-children
|
||||
// optimization in the old inline path was dropped during the
|
||||
// CascadeDelete extraction — descendant CTE returns 0 rows here,
|
||||
// same end state, one extra cheap query).
|
||||
mock.ExpectQuery("WITH RECURSIVE descendants").
|
||||
WithArgs(wsDelID).
|
||||
WillReturnRows(sqlmock.NewRows([]string{"id"}))
|
||||
|
||||
// #73: batch UPDATE happens BEFORE any container teardown.
|
||||
// Uses ANY($1::uuid[]) even with a single ID for consistency.
|
||||
mock.ExpectExec("UPDATE workspaces SET status =").
|
||||
|
||||
@ -25,35 +25,6 @@ import (
|
||||
"github.com/Molecule-AI/molecule-monorepo/platform/internal/registry"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
// insertMCPDelegationRow writes a delegation activity row so the canvas
|
||||
// Agent Comms tab can show the task text for MCP-initiated delegations.
|
||||
// Mirrors insertDelegationRow (delegation.go) for the MCP tool path.
|
||||
func insertMCPDelegationRow(ctx context.Context, db *sql.DB, workspaceID, targetID, delegationID, task string) error {
|
||||
taskJSON, _ := json.Marshal(map[string]interface{}{
|
||||
"task": task,
|
||||
"delegation_id": delegationID,
|
||||
})
|
||||
_, err := db.ExecContext(ctx, `
|
||||
INSERT INTO activity_logs (workspace_id, activity_type, method, source_id, target_id, summary, request_body, status)
|
||||
VALUES ($1, 'delegation', 'delegate', $2, $3, $4, $5::jsonb, 'pending')
|
||||
`, workspaceID, workspaceID, targetID, "Delegating to "+targetID, string(taskJSON))
|
||||
return err
|
||||
}
|
||||
|
||||
// updateMCPDelegationStatus updates a delegation activity row's status.
|
||||
// Mirrors updateDelegationStatus (delegation.go) for the MCP tool path.
|
||||
func updateMCPDelegationStatus(ctx context.Context, db *sql.DB, workspaceID, delegationID, status, errorDetail string) {
|
||||
if _, err := db.ExecContext(ctx, `
|
||||
UPDATE activity_logs
|
||||
SET status = $1, error_detail = CASE WHEN $2 = '' THEN error_detail ELSE $2 END
|
||||
WHERE workspace_id = $3
|
||||
AND method = 'delegate'
|
||||
AND request_body->>'delegation_id' = $4
|
||||
`, status, errorDetail, workspaceID, delegationID); err != nil {
|
||||
log.Printf("MCP Delegation %s: status update failed: %v", delegationID, err)
|
||||
}
|
||||
}
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
// Tool implementations
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
@ -183,13 +154,6 @@ func (h *MCPHandler) toolDelegateTask(ctx context.Context, callerID string, args
|
||||
return "", fmt.Errorf("workspace %s is not authorised to communicate with %s", callerID, targetID)
|
||||
}
|
||||
|
||||
// Issue #158: write delegation row so canvas Agent Comms tab shows the task text.
|
||||
delegationID := uuid.New().String()
|
||||
if err := insertMCPDelegationRow(ctx, h.database, callerID, targetID, delegationID, task); err != nil {
|
||||
log.Printf("MCP delegate_task: failed to record delegation row: %v", err)
|
||||
// Non-fatal: still make the A2A call even if activity log write fails.
|
||||
}
|
||||
|
||||
agentURL, err := mcpResolveURL(ctx, h.database, targetID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@ -233,16 +197,10 @@ func (h *MCPHandler) toolDelegateTask(ctx context.Context, callerID string, args
|
||||
|
||||
resp, err := http.DefaultClient.Do(httpReq)
|
||||
if err != nil {
|
||||
updateMCPDelegationStatus(ctx, h.database, callerID, delegationID, "failed", err.Error())
|
||||
return "", fmt.Errorf("A2A call failed: %w", err)
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
// A 200/500 from the peer still means the call was dispatched — only
|
||||
// network errors are truly "failed". Status 'dispatched' is correct for
|
||||
// any HTTP response (peer's A2A layer handles the actual processing).
|
||||
updateMCPDelegationStatus(ctx, h.database, callerID, delegationID, "dispatched", "")
|
||||
|
||||
body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read response: %w", err)
|
||||
@ -265,16 +223,7 @@ func (h *MCPHandler) toolDelegateTaskAsync(ctx context.Context, callerID string,
|
||||
return "", fmt.Errorf("workspace %s is not authorised to communicate with %s", callerID, targetID)
|
||||
}
|
||||
|
||||
delegationID := uuid.New().String()
|
||||
|
||||
// Issue #158: write delegation row so canvas Agent Comms tab shows the task text.
|
||||
// Insert with 'dispatched' status since the goroutine won't update it.
|
||||
if err := insertMCPDelegationRow(ctx, h.database, callerID, targetID, delegationID, task); err != nil {
|
||||
log.Printf("MCP delegate_task_async: failed to record delegation row: %v", err)
|
||||
// Non-fatal: still fire the A2A call.
|
||||
} else {
|
||||
updateMCPDelegationStatus(ctx, h.database, callerID, delegationID, "dispatched", "")
|
||||
}
|
||||
taskID := uuid.New().String()
|
||||
|
||||
// Fire and forget in a detached goroutine. Use a background context so
|
||||
// the call is not cancelled when the HTTP request completes.
|
||||
@ -295,7 +244,7 @@ func (h *MCPHandler) toolDelegateTaskAsync(ctx context.Context, callerID string,
|
||||
|
||||
a2aBody, _ := json.Marshal(map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": delegationID,
|
||||
"id": taskID,
|
||||
"method": "message/send",
|
||||
"params": map[string]interface{}{
|
||||
"message": map[string]interface{}{
|
||||
@ -324,7 +273,7 @@ func (h *MCPHandler) toolDelegateTaskAsync(ctx context.Context, callerID string,
|
||||
_, _ = io.Copy(io.Discard, resp.Body)
|
||||
}()
|
||||
|
||||
return fmt.Sprintf(`{"task_id":%q,"status":"dispatched","target_id":%q}`, delegationID, targetID), nil
|
||||
return fmt.Sprintf(`{"task_id":%q,"status":"dispatched","target_id":%q}`, taskID, targetID), nil
|
||||
}
|
||||
|
||||
func (h *MCPHandler) toolCheckTaskStatus(ctx context.Context, callerID string, args map[string]interface{}) (string, error) {
|
||||
|
||||
@ -13,15 +13,12 @@ import (
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Molecule-AI/molecule-monorepo/platform/internal/channels"
|
||||
"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
|
||||
"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
|
||||
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
|
||||
"github.com/Molecule-AI/molecule-monorepo/platform/internal/provisioner"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/lib/pq"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
@ -425,16 +422,6 @@ type OrgWorkspace struct {
|
||||
Tier int `yaml:"tier" json:"tier"`
|
||||
Template string `yaml:"template" json:"template"`
|
||||
FilesDir string `yaml:"files_dir" json:"files_dir"`
|
||||
// Spawning gates whether this workspace (AND its descendants) gets
|
||||
// provisioned during /org/import. Pointer so we can distinguish
|
||||
// "explicitly set to false" from "unset" (default = spawn). Use case:
|
||||
// the dev-tree org template declares the full team structure but a
|
||||
// developer's local machine only has RAM for a subset; setting
|
||||
// spawning: false on a leaf or a sub-tree root skips that branch
|
||||
// entirely without editing the canonical template structure.
|
||||
// Counted in countWorkspaces same as actual; subtree-skip happens
|
||||
// at provision time in createWorkspaceTree.
|
||||
Spawning *bool `yaml:"spawning,omitempty" json:"spawning,omitempty"`
|
||||
// SystemPrompt is an inline override. Normally each role's system-prompt.md
|
||||
// lives at `<files_dir>/system-prompt.md` and is copied via the files_dir
|
||||
// template-copy step; inline overrides that path for ad-hoc workspaces.
|
||||
@ -571,19 +558,6 @@ func (h *OrgHandler) Import(c *gin.Context) {
|
||||
var body struct {
|
||||
Dir string `json:"dir"` // org template directory name
|
||||
Template OrgTemplate `json:"template"` // or inline template
|
||||
// Mode controls cleanup behavior of pre-existing workspaces:
|
||||
// "" / "merge" — additive (default; current behavior).
|
||||
// Existing workspaces matched by
|
||||
// (parent_id, name) are skipped; nothing
|
||||
// outside the new tree is touched.
|
||||
// "reconcile" — additive + cleanup. After import, any
|
||||
// online workspace whose name matches an
|
||||
// imported workspace's name but whose id
|
||||
// isn't in the import result set is
|
||||
// cascade-deleted. Catches "previous
|
||||
// import survived a re-import" zombies
|
||||
// (the 20:13→21:17 dev-tree case).
|
||||
Mode string `json:"mode"`
|
||||
}
|
||||
if err := c.ShouldBindJSON(&body); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
|
||||
@ -607,16 +581,7 @@ func (h *OrgHandler) Import(c *gin.Context) {
|
||||
orgFile := filepath.Join(orgBaseDir, "org.yaml")
|
||||
data, err := os.ReadFile(orgFile)
|
||||
if err != nil {
|
||||
// Audit 2026-05-09 (Core-Security): the prior message echoed
|
||||
// the user-supplied `body.Dir` verbatim. Path traversal is
|
||||
// already blocked by resolveInsideRoot above, but echoing
|
||||
// the raw input back lets a client probe for the existence
|
||||
// of relative paths inside h.orgDir (a 404 with the input
|
||||
// vs. a 400 from resolveInsideRoot is itself a signal).
|
||||
// Drop the input from the message; log full context server-
|
||||
// side via the resolved path for operator triage.
|
||||
log.Printf("OrgImport: failed to read %s (requested dir=%q): %v", orgFile, body.Dir, err)
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "org template not found"})
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": fmt.Sprintf("org template not found: %s", body.Dir)})
|
||||
return
|
||||
}
|
||||
// Expand !include directives before unmarshal. Splits org.yaml
|
||||
@ -638,19 +603,6 @@ func (h *OrgHandler) Import(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
// Emit started AFTER the YAML is loaded so payload.name carries the
|
||||
// resolved template name (was: empty when caller passed `dir` instead
|
||||
// of inline `template`). Pre-parse error paths above return without
|
||||
// emitting — semantically "we couldn't even start an import" — so
|
||||
// every started event is guaranteed a paired completed/failed below
|
||||
// (no orphan started rows in structure_events).
|
||||
importStart := time.Now()
|
||||
emitOrgEvent(c.Request.Context(), "org.import.started", map[string]any{
|
||||
"name": tmpl.Name,
|
||||
"dir": body.Dir,
|
||||
"mode": body.Mode,
|
||||
})
|
||||
|
||||
// Required-env preflight — refuses import when any required_env is
|
||||
// missing from global_secrets. No bypass: the prior `force: true`
|
||||
// escape hatch was removed (issue #2290) because it was the silent
|
||||
@ -756,171 +708,18 @@ func (h *OrgHandler) Import(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
// Reconcile mode: prune workspaces present from a previous import that
|
||||
// share a name with the new tree but are NOT in the new result set.
|
||||
// Catches the additive-import bug where re-running /org/import with a
|
||||
// changed tree shape (different parent_id for the same role name) leaves
|
||||
// the prior workspace online — visible to the canvas, consuming
|
||||
// containers, and looking like a duplicate. Default mode "" / "merge"
|
||||
// preserves the old additive behavior.
|
||||
reconcileRemovedCount := 0
|
||||
reconcileSkipped := 0
|
||||
reconcileErrs := []string{}
|
||||
if body.Mode == "reconcile" && createErr == nil {
|
||||
ctx := c.Request.Context()
|
||||
importedNames := []string{}
|
||||
walkOrgWorkspaceNames(tmpl.Workspaces, &importedNames)
|
||||
|
||||
importedIDs := make([]string, 0, len(results))
|
||||
for _, r := range results {
|
||||
if id, ok := r["id"].(string); ok && id != "" {
|
||||
importedIDs = append(importedIDs, id)
|
||||
}
|
||||
}
|
||||
|
||||
// Empty-set guards: if the import didn't produce any names or any
|
||||
// IDs, skip — querying with empty arrays would either match
|
||||
// nothing (harmless) or, worse, match every workspace if a future
|
||||
// query rewrite drops the IN clause. Belt-and-suspenders.
|
||||
if len(importedNames) > 0 && len(importedIDs) > 0 {
|
||||
rows, err := db.DB.QueryContext(ctx, `
|
||||
SELECT id FROM workspaces
|
||||
WHERE name = ANY($1::text[])
|
||||
AND id != ALL($2::uuid[])
|
||||
AND status != 'removed'
|
||||
`, pq.Array(importedNames), pq.Array(importedIDs))
|
||||
if err != nil {
|
||||
log.Printf("Org import reconcile: orphan query failed: %v", err)
|
||||
reconcileErrs = append(reconcileErrs, fmt.Sprintf("orphan query: %v", err))
|
||||
} else {
|
||||
orphanIDs := []string{}
|
||||
for rows.Next() {
|
||||
var orphanID string
|
||||
if rows.Scan(&orphanID) == nil {
|
||||
orphanIDs = append(orphanIDs, orphanID)
|
||||
}
|
||||
}
|
||||
rows.Close()
|
||||
|
||||
for _, oid := range orphanIDs {
|
||||
descendantIDs, stopErrs, err := h.workspace.CascadeDelete(ctx, oid)
|
||||
if err != nil {
|
||||
log.Printf("Org import reconcile: CascadeDelete(%s) failed: %v", oid, err)
|
||||
reconcileErrs = append(reconcileErrs, fmt.Sprintf("delete %s: %v", oid, err))
|
||||
reconcileSkipped++
|
||||
continue
|
||||
}
|
||||
reconcileRemovedCount += 1 + len(descendantIDs)
|
||||
if len(stopErrs) > 0 {
|
||||
log.Printf("Org import reconcile: %s had %d stop errors (orphan sweeper will retry)", oid, len(stopErrs))
|
||||
}
|
||||
}
|
||||
log.Printf("Org import reconcile: %d orphans removed (%d cascade descendants), %d skipped", len(orphanIDs), reconcileRemovedCount-len(orphanIDs), reconcileSkipped)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
status := http.StatusCreated
|
||||
resp := gin.H{
|
||||
"org": tmpl.Name,
|
||||
"workspaces": results,
|
||||
"count": len(results),
|
||||
}
|
||||
if body.Mode == "reconcile" {
|
||||
resp["mode"] = "reconcile"
|
||||
resp["reconcile_removed_count"] = reconcileRemovedCount
|
||||
if len(reconcileErrs) > 0 {
|
||||
resp["reconcile_errors"] = reconcileErrs
|
||||
}
|
||||
}
|
||||
if createErr != nil {
|
||||
status = http.StatusMultiStatus
|
||||
resp["error"] = createErr.Error()
|
||||
}
|
||||
|
||||
// results contains both freshly-created AND lookupExistingChild skips
|
||||
// (entries with "skipped":true). Splitting the count here so the audit
|
||||
// row reflects "what changed" vs "what was already there" — telemetry
|
||||
// readers shouldn't need to grep stdout to tell an idempotent re-run
|
||||
// apart from a fresh-create.
|
||||
createdCount, skippedCount := 0, 0
|
||||
for _, r := range results {
|
||||
if skipped, _ := r["skipped"].(bool); skipped {
|
||||
skippedCount++
|
||||
} else {
|
||||
createdCount++
|
||||
}
|
||||
}
|
||||
log.Printf("Org import: %s — %d created, %d skipped, %d reconciled",
|
||||
tmpl.Name, createdCount, skippedCount, reconcileRemovedCount)
|
||||
emitOrgEvent(c.Request.Context(), "org.import.completed", map[string]any{
|
||||
"name": tmpl.Name,
|
||||
"dir": body.Dir,
|
||||
"mode": body.Mode,
|
||||
"created_count": createdCount,
|
||||
"skipped_count": skippedCount,
|
||||
"reconcile_removed_count": reconcileRemovedCount,
|
||||
"reconcile_errors": len(reconcileErrs),
|
||||
"duration_ms": time.Since(importStart).Milliseconds(),
|
||||
"create_error": errString(createErr),
|
||||
})
|
||||
log.Printf("Org import: %s — %d workspaces created", tmpl.Name, len(results))
|
||||
c.JSON(status, resp)
|
||||
}
|
||||
|
||||
// walkOrgWorkspaceNames collects every Name in the tree (in any order) into
|
||||
// names. Used by reconcile to detect orphan workspaces — workspaces with the
|
||||
// same role name as a freshly-imported one but a different id, surviving from
|
||||
// a prior import.
|
||||
func walkOrgWorkspaceNames(workspaces []OrgWorkspace, names *[]string) {
|
||||
for _, w := range workspaces {
|
||||
// spawning:false subtrees are still part of the imported tree
|
||||
// from a logical-tree perspective — DON'T skip the recursion,
|
||||
// or reconcile would orphan the rest of the subtree on every
|
||||
// re-import where spawning is toggled. Names of skipped
|
||||
// workspaces remain registered so reconcile won't double-create
|
||||
// them when spawning flips back to true.
|
||||
if w.Name != "" {
|
||||
*names = append(*names, w.Name)
|
||||
}
|
||||
walkOrgWorkspaceNames(w.Children, names)
|
||||
}
|
||||
}
|
||||
|
||||
// emitOrgEvent records an org-lifecycle event in structure_events so the
|
||||
// import history is queryable independent of stdout log retention. Errors
|
||||
// are logged and swallowed — never block the request path on telemetry.
|
||||
//
|
||||
// Event-type taxonomy (extend by appending; never rename):
|
||||
//
|
||||
// org.import.started — handler entered, request body parsed
|
||||
// org.import.completed — handler exiting (success or partial)
|
||||
// org.import.failed — handler exiting with an unrecoverable error
|
||||
//
|
||||
// payload fields are documented at each call site.
|
||||
func emitOrgEvent(ctx context.Context, eventType string, payload map[string]any) {
|
||||
if payload == nil {
|
||||
payload = map[string]any{}
|
||||
}
|
||||
payloadJSON, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
log.Printf("emitOrgEvent: marshal %s payload failed: %v", eventType, err)
|
||||
return
|
||||
}
|
||||
if _, err := db.DB.ExecContext(ctx, `
|
||||
INSERT INTO structure_events (event_type, payload, created_at)
|
||||
VALUES ($1, $2, now())
|
||||
`, eventType, payloadJSON); err != nil {
|
||||
log.Printf("emitOrgEvent: insert %s failed: %v", eventType, err)
|
||||
}
|
||||
}
|
||||
|
||||
// errString returns "" for a nil error, err.Error() otherwise. Lets us put
|
||||
// nullable error strings in event payloads without checking for nil at every
|
||||
// call site.
|
||||
func errString(err error) string {
|
||||
if err == nil {
|
||||
return ""
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
|
||||
@ -42,20 +42,6 @@ import (
|
||||
// straight into the parent's child-coordinate space without doing a
|
||||
// canvas-wide absolute-position walk.
|
||||
func (h *OrgHandler) createWorkspaceTree(ws OrgWorkspace, parentID *string, absX, absY, relX, relY float64, defaults OrgDefaults, orgBaseDir string, results *[]map[string]interface{}, provisionSem chan struct{}) error {
|
||||
// spawning: false guard — skip this workspace AND all descendants.
|
||||
// Pointer-typed so we distinguish "explicitly false" from "unset"
|
||||
// (unset = default to spawn). The guard sits BEFORE any side effect
|
||||
// (no DB row, no docker provision, no children recursion) so a
|
||||
// false-spawning subtree is genuinely a no-op except for the log line.
|
||||
// Use case: dev-tree org template ships the full role taxonomy but a
|
||||
// developer's machine only has RAM for a subset; a per-workspace
|
||||
// `spawning: false` lets them narrow without editing the parent
|
||||
// template's structure.
|
||||
if ws.Spawning != nil && !*ws.Spawning {
|
||||
log.Printf("Org import: skipping workspace %q (spawning=false; %d descendant workspace(s) in subtree also skipped)", ws.Name, countWorkspaces(ws.Children))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Apply defaults
|
||||
runtime := ws.Runtime
|
||||
if runtime == "" {
|
||||
@ -467,25 +453,8 @@ func (h *OrgHandler) createWorkspaceTree(ws OrgWorkspace, parentID *string, absX
|
||||
envVars := map[string]string{}
|
||||
// 0. Persona env (lowest precedence; injects the role's Gitea identity:
|
||||
// GITEA_USER, GITEA_TOKEN, GITEA_TOKEN_SCOPES, GITEA_USER_EMAIL,
|
||||
// GITEA_SSH_KEY_PATH, plus MODEL_PROVIDER/MODEL and the LLM auth
|
||||
// token like CLAUDE_CODE_OAUTH_TOKEN or MINIMAX_API_KEY).
|
||||
// Workspace and org .env can override.
|
||||
//
|
||||
// Use ws.FilesDir as the persona-dir lookup key, NOT ws.Role. In the
|
||||
// dev-tree org.yaml shape, `role:` carries the multi-line descriptive
|
||||
// text the agent reads from its prompt ("Engineering planning and
|
||||
// team coordination — leads Core Platform, Controlplane, ..."), while
|
||||
// `files_dir:` holds the short slug (`core-lead`, `dev-lead`, etc.)
|
||||
// matching `~/.molecule-ai/personas/<files_dir>/env`
|
||||
// (bind-mounted to `/etc/molecule-bootstrap/personas/<files_dir>/env`).
|
||||
//
|
||||
// Pre-fix, this passed `ws.Role` whose multi-word content failed
|
||||
// isSafeRoleName silently, so every imported workspace booted with
|
||||
// zero persona-env rows in workspace_secrets — no ANTHROPIC /
|
||||
// CLAUDE_CODE auth in the container env. The claude_agent_sdk
|
||||
// then wedged on `query.initialize()` with a 60s control-request
|
||||
// timeout (caught 2026-05-08 right after dev-only org/import).
|
||||
loadPersonaEnvFile(ws.FilesDir, envVars)
|
||||
// GITEA_SSH_KEY_PATH). Workspace and org .env can override.
|
||||
loadPersonaEnvFile(ws.Role, envVars)
|
||||
if orgBaseDir != "" {
|
||||
// 1. Org root .env (shared defaults)
|
||||
parseEnvFile(filepath.Join(orgBaseDir, ".env"), envVars)
|
||||
|
||||
@ -1,158 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
)
|
||||
|
||||
// Tests for the reconcile-mode + audit-event additions to OrgHandler.Import.
|
||||
//
|
||||
// Background: /org/import was purely additive — re-running with a tree that
|
||||
// renamed/reparented a role left the prior workspace online (different
|
||||
// parent_id from the new one, so lookupExistingChild's parent-scoped dedupe
|
||||
// missed it). The 2026-05-08 dev-tree case left 8 orphans surviving a
|
||||
// re-import. mode="reconcile" closes the gap; emitOrgEvent makes "what
|
||||
// happened at 20:13?" queryable instead of stdout-grep archaeology.
|
||||
|
||||
func TestWalkOrgWorkspaceNames_FlatTree(t *testing.T) {
|
||||
tree := []OrgWorkspace{
|
||||
{Name: "Dev Lead"},
|
||||
{Name: "Release Manager"},
|
||||
}
|
||||
var names []string
|
||||
walkOrgWorkspaceNames(tree, &names)
|
||||
sort.Strings(names)
|
||||
want := []string{"Dev Lead", "Release Manager"}
|
||||
if !equalStrings(names, want) {
|
||||
t.Errorf("flat tree: got %v, want %v", names, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWalkOrgWorkspaceNames_NestedTree(t *testing.T) {
|
||||
tree := []OrgWorkspace{
|
||||
{
|
||||
Name: "Dev Lead",
|
||||
Children: []OrgWorkspace{
|
||||
{Name: "Core Platform Lead", Children: []OrgWorkspace{{Name: "Core-BE"}}},
|
||||
{Name: "SDK Lead"},
|
||||
},
|
||||
},
|
||||
}
|
||||
var names []string
|
||||
walkOrgWorkspaceNames(tree, &names)
|
||||
sort.Strings(names)
|
||||
want := []string{"Core Platform Lead", "Core-BE", "Dev Lead", "SDK Lead"}
|
||||
if !equalStrings(names, want) {
|
||||
t.Errorf("nested tree: got %v, want %v", names, want)
|
||||
}
|
||||
}
|
||||
|
||||
// Pins the contract that spawning:false subtrees still contribute their names
|
||||
// to the reconcile working set. If the walker started skipping them, a
|
||||
// re-import that toggled spawning would orphan whichever workspaces had been
|
||||
// previously imported with spawning:true — the inverse of the bug being
|
||||
// fixed. Spawning gates *provisioning*, not *reconcile membership*.
|
||||
func TestWalkOrgWorkspaceNames_SpawningFalseStillCounted(t *testing.T) {
|
||||
f := false
|
||||
tree := []OrgWorkspace{
|
||||
{Name: "Dev Lead", Children: []OrgWorkspace{
|
||||
{Name: "Skipped Lead", Spawning: &f, Children: []OrgWorkspace{
|
||||
{Name: "Skipped Child"},
|
||||
}},
|
||||
}},
|
||||
}
|
||||
var names []string
|
||||
walkOrgWorkspaceNames(tree, &names)
|
||||
sort.Strings(names)
|
||||
want := []string{"Dev Lead", "Skipped Child", "Skipped Lead"}
|
||||
if !equalStrings(names, want) {
|
||||
t.Errorf("spawning:false subtree: got %v, want %v", names, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWalkOrgWorkspaceNames_EmptyNamesSkipped(t *testing.T) {
|
||||
tree := []OrgWorkspace{
|
||||
{Name: "Dev Lead"},
|
||||
{Name: ""}, // YAML default / placeholder
|
||||
{Name: "Release Manager"},
|
||||
}
|
||||
var names []string
|
||||
walkOrgWorkspaceNames(tree, &names)
|
||||
sort.Strings(names)
|
||||
want := []string{"Dev Lead", "Release Manager"}
|
||||
if !equalStrings(names, want) {
|
||||
t.Errorf("empty-name skip: got %v, want %v", names, want)
|
||||
}
|
||||
}
|
||||
|
||||
// emitOrgEvent must INSERT into structure_events with event_type + JSON
|
||||
// payload. Verifies the SQL shape pinning so a future schema rename
|
||||
// (e.g., switching to audit_events) breaks the test loudly instead of
|
||||
// silently dropping telemetry.
|
||||
func TestEmitOrgEvent_InsertsToStructureEvents(t *testing.T) {
|
||||
mock := setupTestDB(t)
|
||||
mock.ExpectExec(`INSERT INTO structure_events`).
|
||||
WithArgs("org.import.started", sqlmock.AnyArg()).
|
||||
WillReturnResult(sqlmock.NewResult(1, 1))
|
||||
|
||||
emitOrgEvent(context.Background(), "org.import.started", map[string]any{
|
||||
"name": "test-org",
|
||||
"mode": "reconcile",
|
||||
})
|
||||
|
||||
if err := mock.ExpectationsWereMet(); err != nil {
|
||||
t.Errorf("sqlmock expectations: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Insert failures are log-and-swallow — telemetry MUST NOT block the
|
||||
// caller path. If this regresses (e.g., a future patch returns the err),
|
||||
// org-import requests would fail with HTTP 500 every time a structure_events
|
||||
// INSERT hiccups, which is strictly worse than losing the row.
|
||||
func TestEmitOrgEvent_DBErrorIsSwallowed(t *testing.T) {
|
||||
mock := setupTestDB(t)
|
||||
mock.ExpectExec(`INSERT INTO structure_events`).
|
||||
WithArgs("org.import.failed", sqlmock.AnyArg()).
|
||||
WillReturnError(errSentinelTest)
|
||||
|
||||
// Must not panic; must not propagate. The function returns nothing,
|
||||
// so the contract is "doesn't crash."
|
||||
emitOrgEvent(context.Background(), "org.import.failed", map[string]any{
|
||||
"err": "preflight failed",
|
||||
})
|
||||
|
||||
if err := mock.ExpectationsWereMet(); err != nil {
|
||||
t.Errorf("sqlmock expectations: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrString(t *testing.T) {
|
||||
if got := errString(nil); got != "" {
|
||||
t.Errorf("nil error: got %q, want empty", got)
|
||||
}
|
||||
if got := errString(errSentinelTest); got != "sentinel" {
|
||||
t.Errorf("sentinel error: got %q, want \"sentinel\"", got)
|
||||
}
|
||||
}
|
||||
|
||||
// sentinelErrTest is a zero-size error type whose message is always the
// fixed string "sentinel".
type sentinelErrTest struct{}

func (sentinelErrTest) Error() string { return "sentinel" }

// errSentinelTest is the shared marker instance used by swallow-error
// assertions.
var errSentinelTest = sentinelErrTest{}
|
||||
|
||||
// equalStrings reports whether a and b hold the same elements in the same
// order. nil and empty slices compare equal (both have length 0).
func equalStrings(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}
|
||||
@ -91,14 +91,6 @@ func (h *PluginsHandler) Install(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
// Record the install in workspace_plugins (core#113 — version-subscription
|
||||
// foundation). Best-effort: DB write failure is logged but doesn't fail
|
||||
// the install — the plugin IS in the container; surfacing a 500 here
|
||||
// would mislead the caller about the install state.
|
||||
if err := recordWorkspacePluginInstall(ctx, workspaceID, result.PluginName, result.Source.Raw(), req.Track); err != nil {
|
||||
log.Printf("Plugin install: failed to record %s for %s in workspace_plugins: %v (install succeeded; tracking row missing)", result.PluginName, workspaceID, err)
|
||||
}
|
||||
|
||||
log.Printf("Plugin install: %s via %s → workspace %s (restarting)", result.PluginName, result.Source.Scheme, workspaceID)
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"status": "installed",
|
||||
|
||||
@ -114,15 +114,6 @@ type installRequest struct {
|
||||
// When present, resolveAndStage verifies the fetched content matches
|
||||
// before allowing the install to proceed (SAFE-T1102 supply-chain hardening).
|
||||
SHA256 string `json:"sha256,omitempty"`
|
||||
// Track is the version-subscription mode for this install (core#113):
|
||||
// "none" — no auto-update tracking (default)
|
||||
// "tag:vX.Y.Z" — track a specific version tag
|
||||
// "tag:latest" — track latest tag, drift on every new tag
|
||||
// "sha:<full>" — pinned, no drift ever
|
||||
// The drift detector (separate component, follow-up) reads
|
||||
// workspace_plugins rows where tracked_ref != 'none' and queues
|
||||
// updates when upstream resolves to a different SHA.
|
||||
Track string `json:"track,omitempty"`
|
||||
}
|
||||
|
||||
// stageResult bundles the outputs of resolveAndStage for the caller.
|
||||
|
||||
@ -1,78 +0,0 @@
|
||||
package handlers
|
||||
|
||||
// plugins_tracking.go — workspace_plugins DB tracking for the
|
||||
// version-subscription model (core#113).
|
||||
//
|
||||
// Schema lives in migration 20260508160000_workspace_plugins_tracking.up.sql.
|
||||
// This file is the Go-side write surface used at install time to record
|
||||
// each plugin's install record. Drift detection / queue / apply are
|
||||
// follow-up scope (filed as a separate issue once this lands).
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
|
||||
)
|
||||
|
||||
// trackedRefValues enumerates the bare (unprefixed) strings accepted for
// the workspace_plugins.tracked_ref column. Prefixed forms ("tag:…" /
// "sha:…") are validated structurally in validateTrackedRef instead.
var trackedRefValues = map[string]bool{
	"none": true,
}

// validateTrackedRef canonicalizes a track string, or rejects malformed
// input with an error. Empty input defaults to "none".
//
// Accepted shapes:
//
//	""               — defaults to "none"
//	"none"           — no tracking
//	"tag:vX.Y.Z"     — track a specific tag
//	"tag:latest"     — track latest tag, drift on every new tag
//	"sha:<full-sha>" — pinned to commit SHA
func validateTrackedRef(s string) (string, error) {
	trimmed := strings.TrimSpace(s)
	switch {
	case trimmed == "":
		// Caller didn't ask for tracking at all.
		return "none", nil
	case trackedRefValues[trimmed]:
		return trimmed, nil
	}
	// Prefixed forms must carry a non-empty payload after the colon.
	for _, prefix := range []string{"tag:", "sha:"} {
		if strings.HasPrefix(trimmed, prefix) && len(trimmed) > len(prefix) {
			return trimmed, nil
		}
	}
	return "", fmt.Errorf("invalid track value %q: expected 'none' | 'tag:vX.Y.Z' | 'tag:latest' | 'sha:<full>'", trimmed)
}
|
||||
|
||||
// recordWorkspacePluginInstall upserts the workspace_plugins row for a
|
||||
// plugin install. ON CONFLICT (workspace_id, plugin_name) DO UPDATE so
|
||||
// reinstalling the same plugin name (with a possibly-different source or
|
||||
// track value) updates the existing row rather than failing.
|
||||
func recordWorkspacePluginInstall(
|
||||
ctx context.Context, workspaceID, pluginName, sourceRaw, track string,
|
||||
) error {
|
||||
if workspaceID == "" || pluginName == "" || sourceRaw == "" {
|
||||
return errors.New("recordWorkspacePluginInstall: missing required field")
|
||||
}
|
||||
canonicalTrack, err := validateTrackedRef(track)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = db.DB.ExecContext(ctx, `
|
||||
INSERT INTO workspace_plugins (workspace_id, plugin_name, source_raw, tracked_ref)
|
||||
VALUES ($1, $2, $3, $4)
|
||||
ON CONFLICT (workspace_id, plugin_name)
|
||||
DO UPDATE SET
|
||||
source_raw = EXCLUDED.source_raw,
|
||||
tracked_ref = EXCLUDED.tracked_ref,
|
||||
updated_at = NOW()
|
||||
`, workspaceID, pluginName, sourceRaw, canonicalTrack)
|
||||
return err
|
||||
}
|
||||
@ -1,54 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestValidateTrackedRef: pin the exact set of accepted track values
|
||||
// the install endpoint stores. Drift detector reads this column; any
|
||||
// value that slips through here without structural validation would
|
||||
// silently fail at drift-check time.
|
||||
func TestValidateTrackedRef(t *testing.T) {
|
||||
cases := []struct {
|
||||
in string
|
||||
want string
|
||||
err bool
|
||||
}{
|
||||
// Defaults
|
||||
{"", "none", false},
|
||||
{" ", "none", false},
|
||||
{"none", "none", false},
|
||||
|
||||
// Tag shape
|
||||
{"tag:v1.0.0", "tag:v1.0.0", false},
|
||||
{"tag:v0.4.0-gitea.1", "tag:v0.4.0-gitea.1", false},
|
||||
{"tag:latest", "tag:latest", false},
|
||||
|
||||
// SHA shape
|
||||
{"sha:abc123", "sha:abc123", false},
|
||||
{"sha:0123456789abcdef0123456789abcdef01234567", "sha:0123456789abcdef0123456789abcdef01234567", false},
|
||||
|
||||
// Reject malformed
|
||||
{"tag:", "", true}, // empty after prefix
|
||||
{"sha:", "", true}, // empty after prefix
|
||||
{"latest", "", true}, // bare 'latest' is ambiguous (tag? branch?)
|
||||
{"main", "", true}, // bare branch name not allowed
|
||||
{"v1.0.0", "", true}, // missing tag: prefix
|
||||
{"random", "", true}, // not in allowlist
|
||||
{"tag", "", true}, // prefix without separator
|
||||
}
|
||||
for _, tc := range cases {
|
||||
got, err := validateTrackedRef(tc.in)
|
||||
if tc.err {
|
||||
if err == nil {
|
||||
t.Errorf("validateTrackedRef(%q) = (%q, nil); want error", tc.in, got)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("validateTrackedRef(%q) error: %v", tc.in, err)
|
||||
continue
|
||||
}
|
||||
if got != tc.want {
|
||||
t.Errorf("validateTrackedRef(%q) = %q; want %q", tc.in, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -134,7 +134,7 @@ func (h *TranscriptHandler) Get(c *gin.Context) {
|
||||
// - block cloud metadata endpoints (IMDS, GCP, Azure)
|
||||
// - block link-local IPs (169.254/16 IPv4, fe80::/10 IPv6)
|
||||
// - loopback is allowed — local dev runs workspaces on 127.0.0.1
|
||||
// - Docker internal hostnames (host.docker.internal, *.molecule-core-net)
|
||||
// - Docker internal hostnames (host.docker.internal, *.molecule-monorepo-net)
|
||||
// are allowed; the whole threat model assumes the platform already
|
||||
// trusts peers on that network
|
||||
func validateWorkspaceURL(u *url.URL) error {
|
||||
|
||||
@ -323,25 +323,161 @@ func (h *WorkspaceHandler) Delete(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
// Delegate the cascade to CascadeDelete so the HTTP path and the
|
||||
// OrgImport reconcile path share one teardown sequence (#73 race
|
||||
// guard, container stop, volume removal, token revocation, schedule
|
||||
// disable, broadcast). The HTTP-specific bits — direct-children 409
|
||||
// gate above, ?purge=true hard-delete below, response shaping —
|
||||
// stay in this handler.
|
||||
descendantIDs, stopErrs, err := h.CascadeDelete(ctx, id)
|
||||
if err != nil {
|
||||
// Audit 2026-05-09 (Core-Security): raw `err.Error()` here was
|
||||
// exposed to HTTP clients verbatim, including wrapped lib/pq
|
||||
// driver strings that disclose schema column names + index
|
||||
// hints. Log full error server-side; return a sanitized message
|
||||
// to the client. Operators trace via the log line below using
|
||||
// the workspace id.
|
||||
log.Printf("Delete: CascadeDelete(%s) failed: %v", id, err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error processing delete request"})
|
||||
return
|
||||
// Cascade delete: collect ALL descendants (not just direct children) via
|
||||
// recursive CTE, then stop each container and remove each volume.
|
||||
// Previous bug: only direct children's containers were stopped, leaving
|
||||
// grandchildren as orphan running containers after a cascade delete.
|
||||
descendantIDs := []string{}
|
||||
if len(children) > 0 {
|
||||
descRows, err := db.DB.QueryContext(ctx, `
|
||||
WITH RECURSIVE descendants AS (
|
||||
SELECT id FROM workspaces WHERE parent_id = $1 AND status != 'removed'
|
||||
UNION ALL
|
||||
SELECT w.id FROM workspaces w JOIN descendants d ON w.parent_id = d.id WHERE w.status != 'removed'
|
||||
)
|
||||
SELECT id FROM descendants
|
||||
`, id)
|
||||
if err != nil {
|
||||
log.Printf("Delete: descendant query error for %s: %v", id, err)
|
||||
} else {
|
||||
for descRows.Next() {
|
||||
var descID string
|
||||
if descRows.Scan(&descID) == nil {
|
||||
descendantIDs = append(descendantIDs, descID)
|
||||
}
|
||||
}
|
||||
descRows.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// #73 fix: mark rows 'removed' in the DB FIRST, BEFORE stopping containers
|
||||
// or removing volumes. Previously the sequence was stop → update-status,
|
||||
// which left a gap where:
|
||||
// - the container's last pre-teardown heartbeat could resurrect the row
|
||||
// via the register-handler UPSERT (now also guarded in #73)
|
||||
// - the liveness monitor could observe 'online' status + expired Redis
|
||||
// TTL and trigger RestartByID, recreating a container we're trying
|
||||
// to destroy
|
||||
// Marking 'removed' first makes both of those paths no-op via their
|
||||
// existing `status NOT IN ('removed', ...)` guards.
|
||||
allIDs := append([]string{id}, descendantIDs...)
|
||||
if _, err := db.DB.ExecContext(ctx,
|
||||
`UPDATE workspaces SET status = $1, updated_at = now() WHERE id = ANY($2::uuid[])`,
|
||||
models.StatusRemoved, pq.Array(allIDs)); err != nil {
|
||||
log.Printf("Delete status update error for %s: %v", id, err)
|
||||
}
|
||||
if _, err := db.DB.ExecContext(ctx,
|
||||
`DELETE FROM canvas_layouts WHERE workspace_id = ANY($1::uuid[])`,
|
||||
pq.Array(allIDs)); err != nil {
|
||||
log.Printf("Delete canvas_layouts error for %s: %v", id, err)
|
||||
}
|
||||
// Revoke all auth tokens for the deleted workspaces. Once the workspace is
|
||||
// gone its tokens are meaningless; leaving them alive would keep
|
||||
// HasAnyLiveTokenGlobal = true even after the platform is otherwise empty,
|
||||
// which prevents AdminAuth from returning to fail-open and breaks the E2E
|
||||
// test's count-zero assertion (and local re-run cleanup).
|
||||
if _, err := db.DB.ExecContext(ctx,
|
||||
`UPDATE workspace_auth_tokens SET revoked_at = now()
|
||||
WHERE workspace_id = ANY($1::uuid[]) AND revoked_at IS NULL`,
|
||||
pq.Array(allIDs)); err != nil {
|
||||
log.Printf("Delete token revocation error for %s: %v", id, err)
|
||||
}
|
||||
// #1027: cascade-disable all schedules for the deleted workspaces so
|
||||
// the scheduler never fires a cron into a removed container.
|
||||
if _, err := db.DB.ExecContext(ctx,
|
||||
`UPDATE workspace_schedules SET enabled = false, updated_at = now()
|
||||
WHERE workspace_id = ANY($1::uuid[]) AND enabled = true`,
|
||||
pq.Array(allIDs)); err != nil {
|
||||
log.Printf("Delete schedule disable error for %s: %v", id, err)
|
||||
}
|
||||
|
||||
// Now stop containers + remove volumes for all descendants (any depth).
|
||||
// Any concurrent heartbeat / registration / liveness-triggered restart
|
||||
// will see status='removed' and bail out early.
|
||||
//
|
||||
// Combines two concerns:
|
||||
//
|
||||
// 1. Detach cleanup from the request ctx via WithoutCancel + a 30s
|
||||
// timeout, so when the canvas's `api.del` resolves on our 200
|
||||
// (and gin cancels c.Request.Context()), in-flight Docker
|
||||
// stop/remove calls don't get cancelled mid-operation. The
|
||||
// previous shape leaked containers every time the canvas hung
|
||||
// up promptly: Stop returned "context canceled", the container
|
||||
// stayed up, and the next RemoveVolume failed with
|
||||
// "volume in use". 30s is generous for Docker daemon round-
|
||||
// trips (typical: <2s) and bounds a stuck daemon.
|
||||
//
|
||||
// 2. #1843: aggregate Stop() failures into stopErrs so the
|
||||
// post-deletion block surfaces them as 500. On the CP/EC2
|
||||
// backend, Stop() calls control plane's DELETE endpoint to
|
||||
// terminate the EC2; if that errors (transient 5xx, network),
|
||||
// the EC2 stays running with no DB row to track it (the
|
||||
// "orphan EC2 on a 0-customer account" scenario). Loud-fail
|
||||
// instead of silent-leak — clients retry, Stop's instance_id
|
||||
// lookup is idempotent against status='removed'. RemoveVolume
|
||||
// errors stay log-and-continue (local cleanup, not infra-leak).
|
||||
cleanupCtx, cleanupCancel := context.WithTimeout(
|
||||
context.WithoutCancel(ctx), 30*time.Second)
|
||||
defer cleanupCancel()
|
||||
|
||||
var stopErrs []error
|
||||
stopAndRemove := func(wsID string) {
|
||||
// Stop the workload first via the backend dispatcher (CP for
|
||||
// SaaS, Docker for self-hosted). Pre-2026-05-05 this gate was
|
||||
// `if h.provisioner == nil { return }` — early-returning on
|
||||
// every SaaS tenant left the EC2 running with no DB row to
|
||||
// track it (issue #2814; the comment below claimed "loud-fail
|
||||
// instead of silent-leak" but the early-return made it the
|
||||
// silent path on SaaS).
|
||||
//
|
||||
// Check Stop's error before any volume cleanup — the previous
|
||||
// code discarded it and immediately tried RemoveVolume, which
|
||||
// always fails with "volume in use" when Stop didn't actually
|
||||
// kill the container. The orphan sweeper
|
||||
// (registry/orphan_sweeper.go) catches what we skip here on
|
||||
// the next reconcile pass.
|
||||
if err := h.StopWorkspaceAuto(cleanupCtx, wsID); err != nil {
|
||||
log.Printf("Delete %s stop failed: %v — leaving cleanup for orphan sweeper", wsID, err)
|
||||
stopErrs = append(stopErrs, fmt.Errorf("stop %s: %w", wsID, err))
|
||||
return
|
||||
}
|
||||
// Volume cleanup is Docker-only — CP-managed workspaces have
|
||||
// no host-bind volumes to remove. Skip silently when no Docker
|
||||
// provisioner is wired (the SaaS path already terminated the
|
||||
// EC2 above; nothing left to do).
|
||||
if h.provisioner != nil {
|
||||
if err := h.provisioner.RemoveVolume(cleanupCtx, wsID); err != nil {
|
||||
log.Printf("Delete %s volume removal warning: %v", wsID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, descID := range descendantIDs {
|
||||
stopAndRemove(descID)
|
||||
db.ClearWorkspaceKeys(cleanupCtx, descID)
|
||||
// #2269: drop the per-workspace restartState entry so it
|
||||
// doesn't accumulate across the platform's lifetime. The
|
||||
// LoadOrStore that creates the entry (workspace_restart.go)
|
||||
// has no companion remove path; without this Delete, every
|
||||
// short-lived workspace leaks ~16 bytes forever.
|
||||
restartStates.Delete(descID)
|
||||
// Detach broadcaster ctx for the same reason as the cleanup
|
||||
// above — RecordAndBroadcast does an INSERT INTO
|
||||
// structure_events + Redis Publish. If the canvas hangs up,
|
||||
// a request-ctx-bound INSERT can be cancelled mid-write,
|
||||
// leaving other WS clients ignorant of the cascade. The DB
|
||||
// row is already 'removed' so it's recoverable, but the
|
||||
// inconsistency is avoidable.
|
||||
h.broadcaster.RecordAndBroadcast(cleanupCtx, string(events.EventWorkspaceRemoved), descID, map[string]interface{}{})
|
||||
}
|
||||
|
||||
stopAndRemove(id)
|
||||
db.ClearWorkspaceKeys(cleanupCtx, id)
|
||||
restartStates.Delete(id) // #2269: same as descendants above
|
||||
|
||||
h.broadcaster.RecordAndBroadcast(cleanupCtx, string(events.EventWorkspaceRemoved), id, map[string]interface{}{
|
||||
"cascade_deleted": len(descendantIDs),
|
||||
})
|
||||
|
||||
// If any Stop call failed, surface 500 so the client retries. The DB
|
||||
// row is already 'removed' (idempotent), and Stop's instance_id
|
||||
@ -407,104 +543,6 @@ func (h *WorkspaceHandler) Delete(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, gin.H{"status": "removed", "cascade_deleted": len(descendantIDs)})
|
||||
}
|
||||
|
||||
// CascadeDelete performs the cascade-removal sequence used by the HTTP
|
||||
// DELETE handler and by OrgImport's reconcile mode: walk descendants, mark
|
||||
// self+descendants 'removed' first (#73 race guard), stop containers / EC2s,
|
||||
// remove volumes, revoke tokens, disable schedules, broadcast events.
|
||||
//
|
||||
// Idempotent against already-removed rows (the descendant CTE and all UPDATE
|
||||
// guards skip status='removed'). Returns the descendant id list so the HTTP
|
||||
// caller can drive the optional `?purge=true` hard-delete path against the
|
||||
// same set the cascade just touched, plus any per-workspace stop errors so
|
||||
// callers can surface a retryable failure instead of a silent-leak.
|
||||
//
|
||||
// Caller is responsible for the children-confirmation gate (the HTTP handler
|
||||
// returns 409 when children exist + ?confirm=true is missing); this helper
|
||||
// always cascades.
|
||||
func (h *WorkspaceHandler) CascadeDelete(ctx context.Context, id string) ([]string, []error, error) {
|
||||
if err := validateWorkspaceID(id); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
descendantIDs := []string{}
|
||||
descRows, err := db.DB.QueryContext(ctx, `
|
||||
WITH RECURSIVE descendants AS (
|
||||
SELECT id FROM workspaces WHERE parent_id = $1 AND status != 'removed'
|
||||
UNION ALL
|
||||
SELECT w.id FROM workspaces w JOIN descendants d ON w.parent_id = d.id WHERE w.status != 'removed'
|
||||
)
|
||||
SELECT id FROM descendants
|
||||
`, id)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("descendant query: %w", err)
|
||||
}
|
||||
for descRows.Next() {
|
||||
var descID string
|
||||
if descRows.Scan(&descID) == nil {
|
||||
descendantIDs = append(descendantIDs, descID)
|
||||
}
|
||||
}
|
||||
descRows.Close()
|
||||
|
||||
allIDs := append([]string{id}, descendantIDs...)
|
||||
|
||||
if _, err := db.DB.ExecContext(ctx,
|
||||
`UPDATE workspaces SET status = $1, updated_at = now() WHERE id = ANY($2::uuid[])`,
|
||||
models.StatusRemoved, pq.Array(allIDs)); err != nil {
|
||||
log.Printf("CascadeDelete status update for %s: %v", id, err)
|
||||
}
|
||||
if _, err := db.DB.ExecContext(ctx,
|
||||
`DELETE FROM canvas_layouts WHERE workspace_id = ANY($1::uuid[])`,
|
||||
pq.Array(allIDs)); err != nil {
|
||||
log.Printf("CascadeDelete canvas_layouts for %s: %v", id, err)
|
||||
}
|
||||
if _, err := db.DB.ExecContext(ctx,
|
||||
`UPDATE workspace_auth_tokens SET revoked_at = now()
|
||||
WHERE workspace_id = ANY($1::uuid[]) AND revoked_at IS NULL`,
|
||||
pq.Array(allIDs)); err != nil {
|
||||
log.Printf("CascadeDelete token revocation for %s: %v", id, err)
|
||||
}
|
||||
if _, err := db.DB.ExecContext(ctx,
|
||||
`UPDATE workspace_schedules SET enabled = false, updated_at = now()
|
||||
WHERE workspace_id = ANY($1::uuid[]) AND enabled = true`,
|
||||
pq.Array(allIDs)); err != nil {
|
||||
log.Printf("CascadeDelete schedule disable for %s: %v", id, err)
|
||||
}
|
||||
|
||||
cleanupCtx, cleanupCancel := context.WithTimeout(
|
||||
context.WithoutCancel(ctx), 30*time.Second)
|
||||
defer cleanupCancel()
|
||||
|
||||
var stopErrs []error
|
||||
stopAndRemove := func(wsID string) {
|
||||
if err := h.StopWorkspaceAuto(cleanupCtx, wsID); err != nil {
|
||||
log.Printf("CascadeDelete %s stop failed: %v — leaving cleanup for orphan sweeper", wsID, err)
|
||||
stopErrs = append(stopErrs, fmt.Errorf("stop %s: %w", wsID, err))
|
||||
return
|
||||
}
|
||||
if h.provisioner != nil {
|
||||
if err := h.provisioner.RemoveVolume(cleanupCtx, wsID); err != nil {
|
||||
log.Printf("CascadeDelete %s volume removal warning: %v", wsID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, descID := range descendantIDs {
|
||||
stopAndRemove(descID)
|
||||
db.ClearWorkspaceKeys(cleanupCtx, descID)
|
||||
restartStates.Delete(descID)
|
||||
h.broadcaster.RecordAndBroadcast(cleanupCtx, string(events.EventWorkspaceRemoved), descID, map[string]interface{}{})
|
||||
}
|
||||
stopAndRemove(id)
|
||||
db.ClearWorkspaceKeys(cleanupCtx, id)
|
||||
restartStates.Delete(id)
|
||||
h.broadcaster.RecordAndBroadcast(cleanupCtx, string(events.EventWorkspaceRemoved), id, map[string]interface{}{
|
||||
"cascade_deleted": len(descendantIDs),
|
||||
})
|
||||
|
||||
return descendantIDs, stopErrs, nil
|
||||
}
|
||||
|
||||
// validateWorkspaceID returns an error when id is not a valid UUID.
|
||||
// #687: prevents 500s from Postgres when a garbage string (e.g. ../../etc/passwd)
|
||||
// is passed as the :id path parameter.
|
||||
|
||||
@ -173,7 +173,7 @@ func (h *WorkspaceHandler) provisionWorkspaceOpts(workspaceID, templatePath stri
|
||||
log.Printf("Provisioner: failed to cache URL for %s: %v", workspaceID, cacheErr)
|
||||
}
|
||||
// Also cache the Docker-internal URL for workspace-to-workspace discovery.
|
||||
// Containers on molecule-core-net can reach each other by container name.
|
||||
// Containers on molecule-monorepo-net can reach each other by container name.
|
||||
internalURL := provisioner.InternalURL(workspaceID)
|
||||
if cacheErr := db.CacheInternalURL(ctx, workspaceID, internalURL); cacheErr != nil {
|
||||
log.Printf("Provisioner: failed to cache internal URL for %s: %v", workspaceID, cacheErr)
|
||||
@ -715,30 +715,14 @@ func deriveProviderFromModelSlug(model string) string {
|
||||
// payload.Model at boot), this is a no-op — no harm in the switch
|
||||
// being empty for those cases.
|
||||
func applyRuntimeModelEnv(envVars map[string]string, runtime, model string) {
|
||||
// Resolution order (priority high → low):
|
||||
// 1. payload.Model (caller passed the canvas-picked model id verbatim)
|
||||
// 2. envVars["MODEL"] (workspace_secret persisted by /org/import via
|
||||
// the persona env file — MODEL=MiniMax-M2.7-highspeed etc.)
|
||||
// 3. envVars["MODEL_PROVIDER"] (legacy: this secret was historically a
|
||||
// *model id* set by canvas Save+Restart's PUT /model; on the
|
||||
// post-2026-05-08 persona-env convention it's a *provider slug*
|
||||
// (e.g. "minimax") which is NOT a valid model id, so this fallback
|
||||
// only fires when MODEL is absent.)
|
||||
//
|
||||
// Pre-fix bug: this function unconditionally OVERWROTE envVars["MODEL"]
|
||||
// with the MODEL_PROVIDER slug (when payload.Model was empty), wiping
|
||||
// the operator's explicit per-persona MODEL secret on every restart.
|
||||
// Symptom: a workspace whose persona env said
|
||||
// MODEL=MiniMax-M2.7-highspeed booted fine on first /org/import (the
|
||||
// envVars map was populated direct from the env file), then on the
|
||||
// next Restart the workspace_secrets-derived MODEL got clobbered by
|
||||
// MODEL_PROVIDER="minimax" — the literal slug, not a valid model id —
|
||||
// and the workspace template's adapter routed to providers[0]
|
||||
// (anthropic-oauth) and wedged at SDK initialize. Caught 2026-05-08
|
||||
// during Phase 4 verification of template-claude-code PR #9.
|
||||
if model == "" {
|
||||
model = envVars["MODEL"]
|
||||
}
|
||||
// Fall back to the MODEL_PROVIDER workspace secret when the caller
|
||||
// didn't pass one explicitly. This is the path that "Save+Restart"
|
||||
// hits — Restart builds its payload from the workspaces row (no model
|
||||
// column there) so payload.Model is always empty, but the user's
|
||||
// canvas selection was stored as MODEL_PROVIDER via PUT /model and
|
||||
// is already loaded into envVars here. Without this fallback hermes
|
||||
// silently boots with the template default and errors "No LLM
|
||||
// provider configured" even though the user picked a valid model.
|
||||
if model == "" {
|
||||
model = envVars["MODEL_PROVIDER"]
|
||||
}
|
||||
|
||||
@ -724,68 +724,3 @@ func TestApplyRuntimeModelEnv_SetsUniversalMODELForAllRuntimes(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestApplyRuntimeModelEnv_PersonaEnvMODELSecretPreserved locks in the
|
||||
// 2026-05-08 fix that prevents the MODEL_PROVIDER-as-slug fallback from
|
||||
// silently overwriting a per-persona MODEL workspace_secret on restart.
|
||||
//
|
||||
// Pre-fix bug recurrence guard: when the persona env file (loaded into
|
||||
// workspace_secrets at /org/import time) declares both MODEL=<id> and
|
||||
// MODEL_PROVIDER=<slug>, the restart path used to overwrite envVars["MODEL"]
|
||||
// with the MODEL_PROVIDER slug because applyRuntimeModelEnv'\''s
|
||||
// payload.Model fallback consulted MODEL_PROVIDER first. Symptom: dev-tree
|
||||
// workspaces booted fine on first /org/import, then on next restart the
|
||||
// model id became literal "minimax" and the workspace template'\''s adapter
|
||||
// failed to match any registry prefix, fell through to anthropic-oauth,
|
||||
// and wedged at SDK initialize. Caught during Phase 4 verification of
|
||||
// template-claude-code PR #9.
|
||||
func TestApplyRuntimeModelEnv_PersonaEnvMODELSecretPreserved(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
envMODEL string
|
||||
envMP string
|
||||
wantMODEL string
|
||||
}{
|
||||
{
|
||||
name: "MODEL secret wins over MODEL_PROVIDER slug (persona-env shape on restart)",
|
||||
envMODEL: "MiniMax-M2.7-highspeed",
|
||||
envMP: "minimax",
|
||||
wantMODEL: "MiniMax-M2.7-highspeed",
|
||||
},
|
||||
{
|
||||
name: "MODEL secret wins even when same as MODEL_PROVIDER",
|
||||
envMODEL: "opus",
|
||||
envMP: "claude-code",
|
||||
wantMODEL: "opus",
|
||||
},
|
||||
{
|
||||
name: "MODEL absent → fall back to MODEL_PROVIDER (legacy canvas Save+Restart shape)",
|
||||
envMODEL: "",
|
||||
envMP: "MiniMax-M2.7",
|
||||
wantMODEL: "MiniMax-M2.7",
|
||||
},
|
||||
{
|
||||
name: "Both absent → no MODEL set",
|
||||
envMODEL: "",
|
||||
envMP: "",
|
||||
wantMODEL: "",
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
envVars := map[string]string{}
|
||||
if tc.envMODEL != "" {
|
||||
envVars["MODEL"] = tc.envMODEL
|
||||
}
|
||||
if tc.envMP != "" {
|
||||
envVars["MODEL_PROVIDER"] = tc.envMP
|
||||
}
|
||||
// payload.Model is empty (the restart case)
|
||||
applyRuntimeModelEnv(envVars, "claude-code", "")
|
||||
if got := envVars["MODEL"]; got != tc.wantMODEL {
|
||||
t.Errorf("MODEL = %q, want %q (envMODEL=%q envMP=%q)",
|
||||
got, tc.wantMODEL, tc.envMODEL, tc.envMP)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -813,12 +813,6 @@ func TestWorkspaceDelete_DisablesSchedules(t *testing.T) {
|
||||
WithArgs(wsID).
|
||||
WillReturnRows(sqlmock.NewRows([]string{"id", "name"}))
|
||||
|
||||
// CascadeDelete walks descendants unconditionally — 0-children case
|
||||
// returns 0 rows here.
|
||||
mock.ExpectQuery("WITH RECURSIVE descendants").
|
||||
WithArgs(wsID).
|
||||
WillReturnRows(sqlmock.NewRows([]string{"id"}))
|
||||
|
||||
// Mark workspace as removed
|
||||
mock.ExpectExec("UPDATE workspaces SET status =").
|
||||
WillReturnResult(sqlmock.NewResult(0, 1))
|
||||
@ -941,12 +935,6 @@ func TestWorkspaceDelete_ScheduleDisableOnlyTargetsDeletedWorkspace(t *testing.T
|
||||
WithArgs(wsA).
|
||||
WillReturnRows(sqlmock.NewRows([]string{"id", "name"}))
|
||||
|
||||
// CascadeDelete walks descendants unconditionally — 0-children case
|
||||
// returns 0 rows here.
|
||||
mock.ExpectQuery("WITH RECURSIVE descendants").
|
||||
WithArgs(wsA).
|
||||
WillReturnRows(sqlmock.NewRows([]string{"id"}))
|
||||
|
||||
// Mark only workspace A as removed
|
||||
mock.ExpectExec("UPDATE workspaces SET status =").
|
||||
WillReturnResult(sqlmock.NewResult(0, 1))
|
||||
|
||||
@ -67,7 +67,7 @@ var DefaultImage = RuntimeImage(defaultRuntime)
|
||||
|
||||
const (
|
||||
// DefaultNetwork is the Docker network workspaces join.
|
||||
DefaultNetwork = "molecule-core-net"
|
||||
DefaultNetwork = "molecule-monorepo-net"
|
||||
|
||||
// DefaultPort is the port the A2A server listens on inside the container.
|
||||
DefaultPort = "8000"
|
||||
@ -405,7 +405,7 @@ func (p *Provisioner) Start(ctx context.Context, cfg WorkspaceConfig) (string, e
|
||||
// Apply tier-based container configuration
|
||||
ApplyTierConfig(hostCfg, cfg, configMount, name)
|
||||
|
||||
// Network config — join molecule-core-net with container name as alias
|
||||
// Network config — join molecule-monorepo-net with container name as alias
|
||||
networkCfg := &network.NetworkingConfig{
|
||||
EndpointsConfig: map[string]*network.EndpointSettings{
|
||||
DefaultNetwork: {
|
||||
|
||||
@ -1,3 +0,0 @@
|
||||
DROP INDEX IF EXISTS workspace_plugins_tracked_not_none;
|
||||
DROP INDEX IF EXISTS workspace_plugins_workspace_name;
|
||||
DROP TABLE IF EXISTS workspace_plugins;
|
||||
@ -1,39 +0,0 @@
|
||||
-- workspace_plugins: per-workspace record of installed plugins, with the
|
||||
-- tracked-ref needed for the version-subscription model (core#113).
|
||||
--
|
||||
-- Today plugin install state is filesystem-only — `/configs/plugins/<name>/`
|
||||
-- inside the workspace container. There's no DB record of "what's installed
|
||||
-- where, from what source, pinned to what." That's fine until you want
|
||||
-- drift detection (compare upstream tag's resolved SHA vs the installed
|
||||
-- one) and that's the foundation this table provides.
|
||||
--
|
||||
-- This migration is purely additive: existing install paths keep working;
|
||||
-- they'll write to this table on next install. Workspaces with plugins
|
||||
-- already installed before this migration won't have rows until they're
|
||||
-- re-installed (acceptable — the tracking is forward-looking).
|
||||
--
|
||||
-- tracked_ref values:
|
||||
-- 'none' — no auto-update tracking (default)
|
||||
-- 'tag:vX.Y.Z' — track a specific version tag
|
||||
-- 'tag:latest' — track the latest tag (drift on every new tag)
|
||||
-- 'sha:<full>' — pinned to a specific commit SHA (no drift ever)
|
||||
--
|
||||
-- A subsequent migration adds the plugin_update_queue table once drift
|
||||
-- detection lands.
|
||||
|
||||
CREATE TABLE IF NOT EXISTS workspace_plugins (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
workspace_id UUID NOT NULL REFERENCES workspaces(id) ON DELETE CASCADE,
|
||||
plugin_name TEXT NOT NULL,
|
||||
source_raw TEXT NOT NULL,
|
||||
tracked_ref TEXT NOT NULL DEFAULT 'none',
|
||||
installed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS workspace_plugins_workspace_name
|
||||
ON workspace_plugins(workspace_id, plugin_name);
|
||||
|
||||
-- Partial index for the drift detector: only scan rows opted into tracking.
|
||||
CREATE INDEX IF NOT EXISTS workspace_plugins_tracked_not_none
|
||||
ON workspace_plugins(tracked_ref) WHERE tracked_ref != 'none';
|
||||
@ -1,2 +0,0 @@
|
||||
DROP INDEX IF EXISTS workspaces_update_tier_canary;
|
||||
ALTER TABLE workspaces DROP COLUMN IF EXISTS update_tier;
|
||||
@ -1,26 +0,0 @@
|
||||
-- workspaces.update_tier — canary vs production filter for plugin updates
|
||||
-- (core#115). Composes with the version-subscription DB foundation
|
||||
-- (core#113, merged) and the upcoming drift+queue+apply endpoint
|
||||
-- (core#123).
|
||||
--
|
||||
-- Tiers:
|
||||
-- 'production' (default) — fan-out target; only updated AFTER canary soak
|
||||
-- 'canary' — early-adopter target; updates land here first
|
||||
--
|
||||
-- Default 'production' so existing customers (Reno-Stars + any future
|
||||
-- live tenant) are default-safe. Synthetic dogfooding workspaces opt
|
||||
-- INTO 'canary' explicitly.
|
||||
--
|
||||
-- The column is just metadata at this layer; the actual filter logic
|
||||
-- ('apply this update only to canary tier first') lives in the future
|
||||
-- POST /admin/plugin-updates/:id/apply endpoint (core#123).
|
||||
|
||||
ALTER TABLE workspaces
|
||||
ADD COLUMN IF NOT EXISTS update_tier TEXT NOT NULL DEFAULT 'production'
|
||||
CHECK (update_tier IN ('canary', 'production'));
|
||||
|
||||
-- Partial index for the apply endpoint's canary-tier scan: only
|
||||
-- index canary rows since the apply path queries them most often
|
||||
-- and the production set is the much larger default.
|
||||
CREATE INDEX IF NOT EXISTS workspaces_update_tier_canary
|
||||
ON workspaces(update_tier) WHERE update_tier = 'canary';
|
||||
@ -43,29 +43,11 @@ if [ "$(id -u)" = "0" ]; then
|
||||
ln -sfn /root/.claude/sessions /home/agent/.claude/sessions
|
||||
fi
|
||||
|
||||
# --- Per-persona git identity (closes molecule-core#155) ---
|
||||
# Without this, every team commit lands with an empty author and Gitea
|
||||
# attributes the work to the founder PAT instead of the persona that
|
||||
# actually authored it. Same fingerprint that got us suspended on GitHub
|
||||
# 2026-05-06. GITEA_USER is injected by the provisioner from the
|
||||
# workspace_secrets table; bot.moleculesai.app is the agent-only domain
|
||||
# so commits are clearly distinguishable from human authors.
|
||||
if [ -n "${GITEA_USER:-}" ]; then
|
||||
git config --global user.name "${GITEA_USER}"
|
||||
git config --global user.email "${GITEA_USER}@bot.moleculesai.app"
|
||||
fi
|
||||
|
||||
# --- GitHub credential helper setup (issue #547 / #613) ---
|
||||
# Configure git to use the molecule credential helper for github.com.
|
||||
# This runs as root so the global gitconfig is written before we drop
|
||||
# to agent. The helper fetches fresh GitHub App installation tokens
|
||||
# from the platform API, with caching and env-var fallback.
|
||||
#
|
||||
# NOTE: post-suspension (2026-05-06), github.com/Molecule-AI is gone;
|
||||
# the helper's platform endpoint also 500s (internal#187). The helper
|
||||
# block is kept for legacy boxes that still have a working token chain;
|
||||
# post-suspension provisioner injects GITEA_TOKEN directly so this
|
||||
# path's failure is non-fatal. Full removal tracked under #171.
|
||||
if [ -x /app/scripts/molecule-git-token-helper.sh ]; then
|
||||
# Set credential helper for github.com only (not all hosts).
|
||||
# The '!' prefix tells git to run the command as a shell command.
|
||||
@ -73,13 +55,11 @@ if [ "$(id -u)" = "0" ]; then
|
||||
"!/app/scripts/molecule-git-token-helper.sh"
|
||||
# Disable other credential helpers for github.com to avoid conflicts.
|
||||
git config --global "credential.https://github.com.useHttpPath" true
|
||||
fi
|
||||
# Move gitconfig to agent's home so it takes effect after gosu —
|
||||
# done unconditionally so the per-persona identity survives the drop
|
||||
# even when the github.com helper block is skipped.
|
||||
if [ -f /root/.gitconfig ]; then
|
||||
cp /root/.gitconfig /home/agent/.gitconfig
|
||||
chown agent:agent /home/agent/.gitconfig
|
||||
# Move gitconfig to agent's home so it takes effect after gosu.
|
||||
if [ -f /root/.gitconfig ]; then
|
||||
cp /root/.gitconfig /home/agent/.gitconfig
|
||||
chown agent:agent /home/agent/.gitconfig
|
||||
fi
|
||||
fi
|
||||
# Create the token cache directory for the agent user.
|
||||
mkdir -p /home/agent/.molecule-token-cache
|
||||
|
||||
@ -434,7 +434,7 @@ async def main(): # pragma: no cover
|
||||
|
||||
async def _transcript_handler(request):
|
||||
# Require workspace bearer token — the same token issued at registration
|
||||
# and stored in /configs/.auth_token. Any container on molecule-core-net
|
||||
# and stored in /configs/.auth_token. Any container on molecule-monorepo-net
|
||||
# could otherwise read the full session log. Closes #287.
|
||||
#
|
||||
# #328: fail CLOSED when the token file is unavailable. get_token()
|
||||
|
||||
@ -3,7 +3,7 @@ the workspace auth token is not yet on disk.
|
||||
|
||||
Prior behaviour (regressed in #287): `if expected:` skipped the auth
|
||||
check when `get_token()` returned None, so any container on
|
||||
`molecule-core-net` could read the full session log during the
|
||||
`molecule-monorepo-net` could read the full session log during the
|
||||
bootstrap window. The fix lifts the guard into transcript_auth.py for
|
||||
testability.
|
||||
"""
|
||||
|
||||
Loading…
Reference in New Issue
Block a user