Compare commits

..

No commits in common. "main" and "runtime-v0.0.32" have entirely different histories.

272 changed files with 3472 additions and 23118 deletions

View File

@ -1,118 +0,0 @@
#!/usr/bin/env bash
# audit-force-merge — detect a §SOP-6 force-merge after PR close, emit
# `incident.force_merge` to stdout as structured JSON.
#
# Vector's docker_logs source picks up runner stdout; the JSON gets
# shipped to Loki on molecule-canonical-obs, indexable by event_type.
# Query example:
#
#   {host="operator"} |= "event_type" |= "incident.force_merge" | json
#
# A force-merge is detected when a PR closed-with-merged=true had at
# least one of the repo's required-status-check contexts in a state
# other than "success" at the merge commit's SHA. That's exactly what
# the Gitea force_merge:true API call lets through, so it's a faithful
# detector of the override path.
#
# Triggers on `pull_request_target: closed` (loaded from base branch
# per §SOP-6 security model). No-op when merged=false.
#
# Required env (set by the workflow):
#   GITEA_TOKEN, GITEA_HOST, REPO, PR_NUMBER, REQUIRED_CHECKS
#
# REQUIRED_CHECKS is a newline-separated list of status-check context
# names that branch protection requires. Declared in the workflow YAML
# rather than fetched from /branch_protections (which needs admin
# scope — sop-tier-bot has read-only). Trade dynamism for simplicity:
# when the required-check set changes, update both branch protection
# AND this env. Keeping them in sync is less complexity than granting
# the audit bot admin perms on every repo.
set -euo pipefail

: "${GITEA_TOKEN:?required}"
: "${GITEA_HOST:?required}"
: "${REPO:?required}"
: "${PR_NUMBER:?required}"
: "${REQUIRED_CHECKS:?required (newline-separated context names)}"

OWNER="${REPO%%/*}"
NAME="${REPO##*/}"
API="https://${GITEA_HOST}/api/v1"
AUTH="Authorization: token ${GITEA_TOKEN}"

# 1. Fetch the PR. If not merged, no-op.
PR=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/pulls/${PR_NUMBER}")
MERGED=$(echo "$PR" | jq -r '.merged // false')
if [ "$MERGED" != "true" ]; then
  echo "::notice::PR #${PR_NUMBER} closed without merge — no audit emission."
  exit 0
fi

MERGE_SHA=$(echo "$PR" | jq -r '.merge_commit_sha // empty')
MERGED_BY=$(echo "$PR" | jq -r '.merged_by.login // "unknown"')
TITLE=$(echo "$PR" | jq -r '.title // ""')
BASE_BRANCH=$(echo "$PR" | jq -r '.base.ref // "main"')
HEAD_SHA=$(echo "$PR" | jq -r '.head.sha // empty')
if [ -z "$MERGE_SHA" ]; then
  echo "::warning::PR #${PR_NUMBER} merged=true but no merge_commit_sha — cannot evaluate force-merge."
  exit 0
fi
# HEAD_SHA is what the status query below is actually keyed on; an
# empty value would yield a malformed `/commits//status` URL and a
# confusing API error instead of a clear diagnostic. Bail out early,
# symmetric with the MERGE_SHA guard above.
if [ -z "$HEAD_SHA" ]; then
  echo "::warning::PR #${PR_NUMBER} merged=true but no head.sha — cannot evaluate force-merge."
  exit 0
fi

# 2. Required status checks declared in the workflow env.
REQUIRED="$REQUIRED_CHECKS"
if [ -z "${REQUIRED//[[:space:]]/}" ]; then
  echo "::notice::REQUIRED_CHECKS empty — force-merge not applicable."
  exit 0
fi

# 3. Status-check state at the PR HEAD (where checks ran). The merge
#    commit doesn't get its own checks; we evaluate the PR's last
#    commit, which is what branch protection compared against.
STATUS=$(curl -sS -H "$AUTH" \
  "${API}/repos/${OWNER}/${NAME}/commits/${HEAD_SHA}/status")
# Map context name → state. Context names contain spaces and parens
# (e.g. "sop-tier-check / tier-check (pull_request)"), so the subscript
# is quoted. Tab is a safe field separator: Gitea context names are
# single-line and tab-free in practice.
declare -A CHECK_STATE
while IFS=$'\t' read -r ctx state; do
  [ -n "$ctx" ] && CHECK_STATE["$ctx"]="$state"
done < <(echo "$STATUS" | jq -r '.statuses // [] | .[] | "\(.context)\t\(.status)"')

# 4. For each required check, was it green at merge? YAML block scalars
#    (`|`) leave a trailing newline; skip blank/whitespace-only lines.
FAILED_CHECKS=()
while IFS= read -r req; do
  trimmed="${req#"${req%%[![:space:]]*}"}"             # ltrim
  trimmed="${trimmed%"${trimmed##*[![:space:]]}"}"     # rtrim
  [ -z "$trimmed" ] && continue
  # "missing" covers a required context that never reported at all —
  # that is still a force-merge signal (protection couldn't have seen
  # it green).
  state="${CHECK_STATE["$trimmed"]:-missing}"
  if [ "$state" != "success" ]; then
    FAILED_CHECKS+=("${trimmed}=${state}")
  fi
done <<< "$REQUIRED"

if [ "${#FAILED_CHECKS[@]}" -eq 0 ]; then
  echo "::notice::PR #${PR_NUMBER} merged with all required checks green — not a force-merge."
  exit 0
fi

# 5. Emit structured audit event.
NOW=$(date -u +%Y-%m-%dT%H:%M:%SZ)
FAILED_JSON=$(printf '%s\n' "${FAILED_CHECKS[@]}" | jq -R . | jq -s .)
# Print as a single-line JSON so Vector's parse_json transform can pick
# it up cleanly from docker_logs.
jq -nc \
  --arg event_type "incident.force_merge" \
  --arg ts "$NOW" \
  --arg repo "$REPO" \
  --argjson pr "$PR_NUMBER" \
  --arg title "$TITLE" \
  --arg base "$BASE_BRANCH" \
  --arg merged_by "$MERGED_BY" \
  --arg merge_sha "$MERGE_SHA" \
  --argjson failed_checks "$FAILED_JSON" \
  '{event_type: $event_type, ts: $ts, repo: $repo, pr: $pr, title: $title,
    base_branch: $base, merged_by: $merged_by, merge_sha: $merge_sha,
    failed_checks: $failed_checks}'
echo "::warning::FORCE-MERGE detected on PR #${PR_NUMBER} by ${MERGED_BY}: ${#FAILED_CHECKS[@]} required check(s) not green at merge time."

View File

@ -1,149 +0,0 @@
#!/usr/bin/env bash
# sop-tier-check — verify a Gitea PR satisfies the §SOP-6 approval gate.
#
# Reads the PR's tier label, walks approving reviewers, and checks each
# approver's Gitea team membership against the tier's eligible-team set.
# Marks pass only when at least one non-author approver is in an eligible
# team.
#
# Invoked from `.gitea/workflows/sop-tier-check.yml`. The workflow sets
# the env vars below; this script does no IO outside of stdout/stderr +
# the Gitea API.
#
# Required env:
#   GITEA_TOKEN — bot PAT with read:organization,read:user,
#                 read:issue,read:repository scopes
#   GITEA_HOST  — e.g. git.moleculesai.app
#   REPO        — owner/name (from github.repository)
#   PR_NUMBER   — int (from github.event.pull_request.number)
#   PR_AUTHOR   — login (from github.event.pull_request.user.login)
#
# Optional:
#   SOP_DEBUG=1 — print per-API-call diagnostic lines (HTTP codes,
#                 raw response bodies). Default: off.
#
# Stale-status caveat: Gitea Actions does not always re-fire workflows
# on `labeled` / `pull_request_review:submitted` events. If the
# sop-tier-check status is stale (e.g. red after labels/approvals were
# added), push an empty commit to the PR branch to force a synchronize
# event, OR re-request reviews. Tracked: internal#46.
set -euo pipefail

# Print a diagnostic line to stderr, only when SOP_DEBUG=1.
debug() {
  if [ "${SOP_DEBUG:-}" = "1" ]; then
    echo " [debug] $*" >&2
  fi
}

# Validate env
: "${GITEA_TOKEN:?GITEA_TOKEN required}"
: "${GITEA_HOST:?GITEA_HOST required}"
: "${REPO:?REPO required (owner/name)}"
: "${PR_NUMBER:?PR_NUMBER required}"
: "${PR_AUTHOR:?PR_AUTHOR required}"

OWNER="${REPO%%/*}"
NAME="${REPO##*/}"
API="https://${GITEA_HOST}/api/v1"
AUTH="Authorization: token ${GITEA_TOKEN}"

echo "::notice::tier-check start: repo=$OWNER/$NAME pr=$PR_NUMBER author=$PR_AUTHOR"

# Sanity: token resolves to a user
WHOAMI=$(curl -sS -H "$AUTH" "${API}/user" | jq -r '.login // ""')
if [ -z "$WHOAMI" ]; then
  echo "::error::GITEA_TOKEN cannot resolve a user via /api/v1/user — check the token scope and that the secret is wired correctly."
  exit 1
fi
echo "::notice::token resolves to user: $WHOAMI"

# 1. Read tier label
LABELS=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/issues/${PR_NUMBER}/labels" | jq -r '.[].name')
TIER=""
# Iterate line-wise rather than `for L in $LABELS`: the unquoted
# expansion would word-split multi-word label names AND glob-expand a
# label like `*` against the runner's cwd. Non-tier labels are simply
# ignored by the case statement either way.
while IFS= read -r L; do
  case "$L" in
    tier:low|tier:medium|tier:high)
      if [ -n "$TIER" ]; then
        echo "::error::Multiple tier labels: $TIER + $L. Apply exactly one."
        exit 1
      fi
      TIER="$L"
      ;;
  esac
done <<< "$LABELS"
if [ -z "$TIER" ]; then
  echo "::error::PR has no tier:low|tier:medium|tier:high label. Apply one before merge."
  exit 1
fi
debug "tier=$TIER"

# 2. Tier → eligible teams
case "$TIER" in
  tier:low)    ELIGIBLE="engineers managers ceo" ;;
  tier:medium) ELIGIBLE="managers ceo" ;;
  tier:high)   ELIGIBLE="ceo" ;;
esac
debug "eligible_teams=$ELIGIBLE"

# Resolve team-name → team-id once. /orgs/{org}/teams/{slug}/... endpoints
# don't exist on Gitea 1.22; we have to use /teams/{id}.
ORG_TEAMS_FILE=$(mktemp)
trap 'rm -f "$ORG_TEAMS_FILE"' EXIT
HTTP_CODE=$(curl -sS -o "$ORG_TEAMS_FILE" -w '%{http_code}' -H "$AUTH" \
  "${API}/orgs/${OWNER}/teams")
debug "teams-list HTTP=$HTTP_CODE size=$(wc -c <"$ORG_TEAMS_FILE")"
if [ "${SOP_DEBUG:-}" = "1" ]; then
  echo " [debug] teams-list body (first 300 chars):" >&2
  head -c 300 "$ORG_TEAMS_FILE" >&2; echo >&2
fi
if [ "$HTTP_CODE" != "200" ]; then
  echo "::error::GET /orgs/${OWNER}/teams returned HTTP $HTTP_CODE — token likely lacks read:org scope. Add a SOP_TIER_CHECK_TOKEN secret with read:organization scope at the org level."
  exit 1
fi
declare -A TEAM_ID
for T in $ELIGIBLE; do
  ID=$(jq -r --arg t "$T" '.[] | select(.name==$t) | .id' <"$ORG_TEAMS_FILE" | head -1)
  if [ -z "$ID" ] || [ "$ID" = "null" ]; then
    VISIBLE=$(jq -r '.[]?.name? // empty' <"$ORG_TEAMS_FILE" 2>/dev/null | tr '\n' ' ')
    echo "::error::Team \"$T\" not found in org $OWNER. Teams visible: $VISIBLE"
    exit 1
  fi
  TEAM_ID["$T"]="$ID"
  # Separator fix: previously printed `$T$ID` (e.g. "engineers42"),
  # making the diagnostic ambiguous.
  debug "team-id: $T=$ID"
done

# 3. Read approving reviewers
REVIEWS=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/pulls/${PR_NUMBER}/reviews")
APPROVERS=$(echo "$REVIEWS" | jq -r '[.[] | select(.state=="APPROVED") | .user.login] | unique | .[]')
if [ -z "$APPROVERS" ]; then
  echo "::error::No approving reviews. Tier $TIER requires approval from {$ELIGIBLE} (non-author)."
  exit 1
fi
debug "approvers: $(echo "$APPROVERS" | tr '\n' ' ')"

# 4. For each approver: check non-author + team membership (by id).
# Logins are a restricted charset (no whitespace/globs), so the plain
# word-split iteration is safe here.
OK=""
for U in $APPROVERS; do
  if [ "$U" = "$PR_AUTHOR" ]; then
    debug "skip self-review by $U"
    continue
  fi
  for T in $ELIGIBLE; do
    ID="${TEAM_ID["$T"]}"
    # 200/204 both mean "is a member" depending on Gitea version.
    CODE=$(curl -sS -o /dev/null -w '%{http_code}' -H "$AUTH" \
      "${API}/teams/${ID}/members/${U}")
    debug "probe: $U in team $T (id=$ID) → HTTP $CODE"
    if [ "$CODE" = "200" ] || [ "$CODE" = "204" ]; then
      echo "::notice::approver $U is in team $T (eligible for $TIER)"
      OK="yes"
      break
    fi
  done
  [ -n "$OK" ] && break
done
if [ -z "$OK" ]; then
  echo "::error::Tier $TIER requires approval from a non-author member of {$ELIGIBLE}. Got approvers: $APPROVERS — none of them satisfied team membership. Set SOP_DEBUG=1 to see per-probe HTTP codes."
  exit 1
fi
echo "::notice::sop-tier-check passed: $TIER, approver in {$ELIGIBLE}"

View File

@ -1,58 +0,0 @@
# audit-force-merge — emit `incident.force_merge` to runner stdout when
# a PR is merged with required-status-checks not green. Vector picks
# the JSON line off docker_logs and ships to Loki on
# molecule-canonical-obs (per `reference_obs_stack_phase1`); query as:
#
#   {host="operator"} |= "event_type" |= "incident.force_merge" | json
#
# Closes the §SOP-6 audit gap (the doc says force-merges write to
# `structure_events`, but that table lives in the platform DB, not
# Gitea-side; Loki is the practical equivalent for Gitea Actions
# events). When the credential / observability stack converges later,
# this can sync into structure_events from Loki via a backfill job —
# the structured JSON shape is forward-compatible.
#
# Logic in `.gitea/scripts/audit-force-merge.sh` per the same script-
# extract pattern as sop-tier-check.
name: audit-force-merge
# pull_request_target loads from the base branch — same security model
# as sop-tier-check. Without this, an attacker could rewrite the
# workflow on a PR and skip the audit emission for their own
# force-merge. See `.gitea/workflows/sop-tier-check.yml` for the full
# rationale.
on:
  pull_request_target:
    types: [closed]
jobs:
  audit:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
    # Skip when PR is closed without merge — saves a runner.
    if: github.event.pull_request.merged == true
    steps:
      # Checkout pinned to base.sha: never run PR-HEAD code under
      # pull_request_target.
      - name: Check out base branch (for the script)
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          ref: ${{ github.event.pull_request.base.sha }}
      - name: Detect force-merge + emit audit event
        env:
          # Same org-level secret the sop-tier-check workflow uses.
          GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}
          GITEA_HOST: git.moleculesai.app
          REPO: ${{ github.repository }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          # Required-status-check contexts to evaluate at merge time.
          # Newline-separated. Mirror this against branch protection
          # (settings → branches → protected branch → required checks).
          # Declared here rather than fetched from /branch_protections
          # because that endpoint requires admin write — sop-tier-bot is
          # read-only by design (least-privilege).
          REQUIRED_CHECKS: |
            sop-tier-check / tier-check (pull_request)
            Secret scan / Scan diff for credential-shaped strings (pull_request)
        run: bash .gitea/scripts/audit-force-merge.sh

View File

@ -1,191 +0,0 @@
name: Secret scan
# Hard CI gate. Refuses any PR / push whose diff additions contain a
# recognisable credential. Defense-in-depth for the #2090-class incident
# (2026-04-24): GitHub's hosted Copilot Coding Agent leaked a ghs_*
# installation token into tenant-proxy/package.json via `npm init`
# slurping the URL from a token-embedded origin remote. We can't fix
# upstream's clone hygiene, so we gate here.
#
# Same regex set as the runtime's bundled pre-commit hook
# (molecule-ai-workspace-runtime: molecule_runtime/scripts/pre-commit-checks.sh).
# Keep the two sides aligned when adding patterns.
#
# Ported from .github/workflows/secret-scan.yml so the gate actually
# fires on Gitea Actions. Differences from the GitHub version:
#   - drops `merge_group` event (Gitea has no merge queue)
#   - drops `workflow_call` (no cross-repo reusable invocation on Gitea)
#   - SELF path updated to .gitea/workflows/secret-scan.yml
# The job name + step name are identical to the GitHub workflow so the
# status-check context (`Secret scan / Scan diff for credential-shaped
# strings (pull_request)`) matches branch protection on molecule-core/main.
on:
  pull_request:
    types: [opened, synchronize, reopened]
  push:
    branches: [main, staging]
jobs:
  scan:
    name: Scan diff for credential-shaped strings
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          fetch-depth: 2 # need previous commit to diff against on push events
      # For pull_request events the diff base may be many commits behind
      # HEAD and absent from the shallow clone. Fetch it explicitly.
      - name: Fetch PR base SHA (pull_request events only)
        if: github.event_name == 'pull_request'
        run: git fetch --depth=1 origin ${{ github.event.pull_request.base.sha }}
      - name: Refuse if credential-shaped strings appear in diff additions
        env:
          # Plumb event-specific SHAs through env so the script doesn't
          # need conditional `${{ ... }}` interpolation per event type.
          # github.event.before/after only exist on push events;
          # pull_request has pull_request.base.sha / pull_request.head.sha.
          # Unset ones render as empty strings — the case statement below
          # picks the populated pair.
          PR_BASE_SHA: ${{ github.event.pull_request.base.sha }}
          PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
          PUSH_BEFORE: ${{ github.event.before }}
          PUSH_AFTER: ${{ github.event.after }}
        run: |
          # Pattern set covers GitHub family (the actual #2090 vector),
          # Anthropic / OpenAI / Slack / AWS. Anchored on prefixes with low
          # false-positive rates against agent-generated content. Mirror of
          # molecule-ai-workspace-runtime/molecule_runtime/scripts/pre-commit-checks.sh
          # — keep aligned.
          SECRET_PATTERNS=(
            'ghp_[A-Za-z0-9]{36,}'          # GitHub PAT (classic)
            'ghs_[A-Za-z0-9]{36,}'          # GitHub App installation token
            'gho_[A-Za-z0-9]{36,}'          # GitHub OAuth user-to-server
            'ghu_[A-Za-z0-9]{36,}'          # GitHub OAuth user
            'ghr_[A-Za-z0-9]{36,}'          # GitHub OAuth refresh
            'github_pat_[A-Za-z0-9_]{82,}'  # GitHub fine-grained PAT
            'sk-ant-[A-Za-z0-9_-]{40,}'     # Anthropic API key
            'sk-proj-[A-Za-z0-9_-]{40,}'    # OpenAI project key
            'sk-svcacct-[A-Za-z0-9_-]{40,}' # OpenAI service-account key
            'sk-cp-[A-Za-z0-9_-]{60,}'      # MiniMax API key (F1088 vector — caught only after the fact)
            'xox[baprs]-[A-Za-z0-9-]{20,}'  # Slack tokens
            'AKIA[0-9A-Z]{16}'              # AWS access key ID
            'ASIA[0-9A-Z]{16}'              # AWS STS temp access key ID
          )
          # Determine the diff base. Each event type stores its SHAs in
          # a different place — see the env block above.
          case "${{ github.event_name }}" in
            pull_request)
              BASE="$PR_BASE_SHA"
              HEAD="$PR_HEAD_SHA"
              ;;
            *)
              BASE="$PUSH_BEFORE"
              HEAD="$PUSH_AFTER"
              ;;
          esac
          # On push events with shallow clones, BASE may be present in
          # the event payload but absent from the local object DB
          # (fetch-depth=2 doesn't always reach the previous commit
          # across true merges). Try fetching it on demand. If the
          # fetch fails — e.g. the SHA was force-overwritten — we fall
          # through to the empty-BASE branch below, which scans the
          # entire tree as if every file were new. Correct, just slow.
          # (`^0+$` matches the all-zero SHA that push events use for a
          # newly-created branch.)
          if [ -n "$BASE" ] && ! echo "$BASE" | grep -qE '^0+$'; then
            if ! git cat-file -e "$BASE" 2>/dev/null; then
              git fetch --depth=1 origin "$BASE" 2>/dev/null || true
            fi
          fi
          # Files added or modified in this change.
          if [ -z "$BASE" ] || echo "$BASE" | grep -qE '^0+$' || ! git cat-file -e "$BASE" 2>/dev/null; then
            # New branch / no previous SHA / BASE unreachable — check the
            # entire tree as added content. Slower, but correct on first
            # push.
            CHANGED=$(git ls-tree -r --name-only HEAD)
            DIFF_RANGE=""
          else
            CHANGED=$(git diff --name-only --diff-filter=AM "$BASE" "$HEAD")
            DIFF_RANGE="$BASE $HEAD"
          fi
          if [ -z "$CHANGED" ]; then
            echo "No changed files to inspect."
            exit 0
          fi
          # Self-exclude: this workflow file legitimately contains the
          # pattern strings as regex literals. Without an exclude it would
          # block its own merge. Both the .github/ original and this
          # .gitea/ port are excluded so a sync between them stays clean.
          SELF_GITHUB=".github/workflows/secret-scan.yml"
          SELF_GITEA=".gitea/workflows/secret-scan.yml"
          OFFENDING=""
          # `while IFS= read -r` (not `for f in $CHANGED`) so filenames
          # containing whitespace don't word-split silently — a path
          # with a space would otherwise produce two iterations on
          # tokens that aren't real filenames, breaking the
          # self-exclude + diff lookup.
          while IFS= read -r f; do
            [ -z "$f" ] && continue
            [ "$f" = "$SELF_GITHUB" ] && continue
            [ "$f" = "$SELF_GITEA" ] && continue
            if [ -n "$DIFF_RANGE" ]; then
              # `^\+[^+]` keeps added lines, drops the `+++` file header.
              ADDED=$(git diff --no-color --unified=0 "$BASE" "$HEAD" -- "$f" 2>/dev/null | grep -E '^\+[^+]' || true)
            else
              # No diff range (new branch first push) — scan the full file
              # contents as if every line were new.
              ADDED=$(cat "$f" 2>/dev/null || true)
            fi
            [ -z "$ADDED" ] && continue
            for pattern in "${SECRET_PATTERNS[@]}"; do
              if echo "$ADDED" | grep -qE "$pattern"; then
                OFFENDING="${OFFENDING}${f} (matched: ${pattern})\n"
                break
              fi
            done
          done <<< "$CHANGED"
          if [ -n "$OFFENDING" ]; then
            echo "::error::Credential-shaped strings detected in diff additions:"
            # `printf '%b' "$OFFENDING"` interprets backslash escapes
            # (the literal `\n` we appended above becomes a newline)
            # WITHOUT treating OFFENDING as a format string. Plain
            # `printf "$OFFENDING"` is a format-string sink: a filename
            # containing `%` would be interpreted as a conversion
            # specifier, corrupting the error message (or printing
            # `%(missing)` artifacts).
            printf '%b' "$OFFENDING"
            echo ""
            echo "The actual matched values are NOT echoed here, deliberately —"
            echo "round-tripping a leaked credential into CI logs widens the blast"
            echo "radius (logs are searchable + retained)."
            echo ""
            echo "Recovery:"
            echo "  1. Remove the secret from the file. Replace with an env var"
            echo "     reference (e.g. \${{ secrets.GITHUB_TOKEN }} in workflows,"
            echo "     process.env.X in code)."
            echo "  2. If the credential was already pushed (this PR's commit"
            echo "     history reaches a public ref), treat it as compromised —"
            echo "     ROTATE it immediately, do not just remove it. The token"
            echo "     remains valid in git history forever and may be in any"
            echo "     log/cache that consumed this branch."
            echo "  3. Force-push the cleaned commit (or stack a revert) and"
            echo "     re-run CI."
            echo ""
            echo "If the match is a false positive (test fixture, docs example,"
            echo "or this workflow's own regex literals): use a clearly-fake"
            echo "placeholder like ghs_EXAMPLE_DO_NOT_USE that doesn't satisfy"
            echo "the length suffix, OR add the file path to the SELF exclude"
            echo "list in this workflow with a short reason."
            echo ""
            echo "Mirror of the regex set lives in the runtime's bundled"
            echo "pre-commit hook (molecule-ai-workspace-runtime:"
            echo "molecule_runtime/scripts/pre-commit-checks.sh) — keep aligned."
            exit 1
          fi
          echo "✓ No credential-shaped strings in this change."

View File

@ -1,81 +0,0 @@
# sop-tier-check — canonical Gitea Actions workflow for §SOP-6 enforcement.
#
# Logic lives in `.gitea/scripts/sop-tier-check.sh` (extracted 2026-05-09
# from the previous inline-bash version). The script is the single source
# of truth; this workflow file just sets env + invokes it.
#
# Copy BOTH files (`.gitea/workflows/sop-tier-check.yml` +
# `.gitea/scripts/sop-tier-check.sh`) into any repo that wants the
# §SOP-6 PR gate enforced. Pair with branch protection on the protected
# branch:
#   required_status_checks: ["sop-tier-check / tier-check (pull_request)"]
#   required_approving_reviews: 1
#   approving_review_teams: ["ceo", "managers", "engineers"]
#
# Tier → eligible-team mapping (mirror of dev-sop §SOP-6):
#   tier:low    → engineers, managers, ceo
#   tier:medium → managers, ceo
#   tier:high   → ceo
#
# Force-merge: Owners-team override remains available out-of-band via
# the Gitea merge API; force-merge writes `incident.force_merge` to
# `structure_events` per §Persistent structured logging gate (Phase 3).
#
# Set `SOP_DEBUG: '1'` in the env block to enable per-API-call diagnostic
# lines — useful when diagnosing token-scope or team-id-resolution
# issues. Default off.
name: sop-tier-check
# SECURITY: triggers MUST use `pull_request_target`, not `pull_request`.
# `pull_request_target` loads the workflow definition from the BASE
# branch (i.e. `main`), not the PR's HEAD. With `pull_request`, anyone
# with write access to a feature branch could rewrite this file in
# their PR to dump SOP_TIER_CHECK_TOKEN (org-read scope) to logs and
# exfiltrate it. Verified 2026-05-09 against Gitea 1.22.6 —
# `pull_request_target` (added in Gitea 1.21 via go-gitea/gitea#25229)
# is the documented mitigation.
#
# This workflow does NOT call `actions/checkout` of PR HEAD code, so no
# untrusted code is ever executed in the runner — we only HTTP-call the
# Gitea API. If a future change adds a checkout step, it MUST pin to
# `${{ github.event.pull_request.base.sha }}` (NOT `head.sha`) to keep
# the trust boundary.
on:
  pull_request_target:
    types: [opened, edited, synchronize, reopened, labeled, unlabeled]
  pull_request_review:
    types: [submitted, dismissed, edited]
jobs:
  tier-check:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
    steps:
      - name: Check out base branch (for the script)
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          # Pin to base.sha — pull_request_target's protection only
          # works if we never check out PR HEAD. Same SHA the workflow
          # itself was loaded from.
          ref: ${{ github.event.pull_request.base.sha }}
      - name: Verify tier label + reviewer team membership
        env:
          # SOP_TIER_CHECK_TOKEN is the org-level secret for the
          # sop-tier-bot PAT (read:organization,read:user,read:issue,
          # read:repository). Stored at the org level
          # (/api/v1/orgs/molecule-ai/actions/secrets) so per-repo
          # configuration is unnecessary — every repo in the org
          # picks it up automatically.
          # Falls back to GITHUB_TOKEN with a clear error if missing.
          GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}
          GITEA_HOST: git.moleculesai.app
          REPO: ${{ github.repository }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
          # Set to '1' for diagnostic per-API-call output. Off by default
          # so production logs aren't noisy.
          SOP_DEBUG: '0'
        run: bash .gitea/scripts/sop-tier-check.sh

View File

@ -37,7 +37,7 @@ CANONICAL_FILE = Path(".github/workflows/secret-scan.yml")
CONSUMERS: list[tuple[str, str]] = [
(
"molecule-ai-workspace-runtime/molecule_runtime/scripts/pre-commit-checks.sh",
"https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-runtime/raw/branch/main/molecule_runtime/scripts/pre-commit-checks.sh",
"https://raw.githubusercontent.com/Molecule-AI/molecule-ai-workspace-runtime/main/molecule_runtime/scripts/pre-commit-checks.sh",
),
]

View File

@ -0,0 +1,429 @@
name: Auto-promote :latest after main image build
# Retags `ghcr.io/molecule-ai/{platform,platform-tenant}:staging-<sha>`
# → `:latest` after either the image build or E2E completes on a `main`
# push, gated on E2E Staging SaaS not being red for that SHA.
#
# Why two triggers:
#
# `publish-workspace-server-image` and `e2e-staging-saas` are both
# paths-filtered, but with DIFFERENT path sets:
#
# publish-workspace-server-image:
# workspace-server/**, canvas/**, manifest.json
#
# e2e-staging-saas (full lifecycle):
# workspace-server/internal/handlers/{registry,workspace_provision,
# a2a_proxy}.go, workspace-server/internal/middleware/**,
# workspace-server/internal/provisioner/**, tests/e2e/test_staging_full_saas.sh
#
# The E2E set is a strict SUBSET of the publish set. So:
# - canvas/** changes → publish fires, E2E does not
# - workspace-server/cmd/** changes → publish fires, E2E does not
# - workspace-server/internal/sweep/** → publish fires, E2E does not
#
# The previous version triggered ONLY on E2E completion, which meant
# non-E2E-path changes (canvas, cmd, sweep, etc.) rebuilt the image
# but never advanced `:latest`. Result: as of 2026-04-28 this workflow
# had run zero times since merge despite eight main pushes — `:latest`
# was ~7 hours / 9 PRs behind main with no human realising. See
# `molecule-core` Slack discussion 2026-04-28.
#
# Adding `publish-workspace-server-image` as a second trigger closes
# the gap: any image rebuild on main eligibly advances `:latest`.
#
# Why E2E remains a kill-switch (not the trigger):
#
# When E2E DID run for this SHA and ended red, we abort — `:latest`
# stays on the prior known-good digest. When E2E didn't run (paths
# filtered out), we proceed: pre-merge gates already validated this
# SHA on staging via auto-promote-staging requiring CI + E2E Canvas +
# E2E API + CodeQL all green. Image content for non-E2E-paths
# (canvas, cmd, sweep) is exercised by those staging gates.
#
# Why `main` only:
#
# `:latest` is what prod tenants pull. We only want SHAs that have
# reached main (via auto-promote-staging) to advance `:latest`.
# Triggering on staging would let a staging-only revert advance
# `:latest` to a SHA that never reaches main, breaking the "production
# runs what's on main" invariant.
#
# Idempotency:
#
# When a SHA touches paths that match BOTH publish and E2E, both
# workflows fire and complete. Both trigger this workflow on
# completion → two runs race. Both retag `:staging-<sha>` →
# `:latest`. crane tag is idempotent (re-tagging the same digest is a
# no-op), so the second run is harmless. concurrency group serializes
# them anyway.
on:
workflow_run:
workflows:
- 'E2E Staging SaaS (full lifecycle)'
- 'publish-workspace-server-image'
types: [completed]
branches: [main]
workflow_dispatch:
inputs:
sha:
description: 'Short sha to promote (override; defaults to upstream workflow_run head_sha)'
required: false
type: string
permissions:
contents: read
packages: write
concurrency:
# Serialize promotes per-SHA so the publish+E2E both-fired race lands
# cleanly. Different SHAs can promote in parallel.
group: auto-promote-latest-${{ github.event.workflow_run.head_sha || github.event.inputs.sha || github.sha }}
cancel-in-progress: false
env:
IMAGE_NAME: ghcr.io/molecule-ai/platform
TENANT_IMAGE_NAME: ghcr.io/molecule-ai/platform-tenant
jobs:
promote:
# Proceed if upstream succeeded OR manual dispatch. Upstream-failure
# paths are filtered here; the E2E-was-red kill-switch lives in the
# gate-check step below (covers the case where upstream is publish
# success but E2E for the same SHA failed).
if: |
github.event_name == 'workflow_dispatch' ||
(github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'success')
runs-on: ubuntu-latest
steps:
- name: Compute short sha
id: sha
run: |
set -euo pipefail
if [ -n "${{ github.event.inputs.sha }}" ]; then
FULL="${{ github.event.inputs.sha }}"
else
FULL="${{ github.event.workflow_run.head_sha }}"
fi
echo "short=${FULL:0:7}" >> "$GITHUB_OUTPUT"
echo "full=${FULL}" >> "$GITHUB_OUTPUT"
- name: Gate — E2E Staging SaaS state for this SHA
# When upstream IS E2E success, we know it's green (filtered by
# the job-level `if` already). When upstream is publish, look up
# E2E state for the same SHA. Four buckets:
#
# - completed/success: E2E confirmed safe → proceed
# - completed/failure|cancelled|timed_out: E2E found a
# regression → ABORT (exit 1), `:latest` stays put
# - in_progress|queued|requested: E2E is RACING with publish
# for a runtime-touching SHA. publish typically completes
# ~5-10min before E2E (~10-15min). If we promote on the
# publish signal here, a later E2E failure can't roll back
# `:latest` — it'd already be wrongly advanced. So we DEFER:
# skip subsequent steps (proceed=false) and let E2E's own
# completion event re-fire this workflow, which then takes
# the upstream-is-E2E path. exit 0 so the run shows as
# success rather than a noisy fake-failure.
# - none/none: E2E was paths-filtered out for this SHA (the
# change touched canvas/cmd/sweep/etc. — paths covered by
# publish but not by E2E). pre-merge gates on staging
# already validated this SHA → proceed.
#
# Manual dispatch skips this check — operator override.
id: gate
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
REPO: ${{ github.repository }}
SHA: ${{ steps.sha.outputs.full }}
UPSTREAM_NAME: ${{ github.event.workflow_run.name }}
EVENT_NAME: ${{ github.event_name }}
run: |
set -euo pipefail
if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
echo "proceed=true" >> "$GITHUB_OUTPUT"
echo "::notice::Manual dispatch — skipping E2E gate (operator override)"
exit 0
fi
if [ "$UPSTREAM_NAME" = "E2E Staging SaaS (full lifecycle)" ]; then
echo "proceed=true" >> "$GITHUB_OUTPUT"
echo "::notice::Upstream is E2E itself (success per job-level if) — gate trivially satisfied"
exit 0
fi
# Upstream is publish-workspace-server-image. Check E2E state.
# The jq filter must defend against TWO empty cases that gh
# CLI emits indistinguishably:
# 1. gh exits non-zero (network blip, auth issue) → handled
# by the `|| echo "none/none"` fallback below.
# 2. gh exits zero but returns `[]` (no E2E run on this
# main SHA — the common case for canvas-only / cmd-only
# / sweep-only changes whose paths don't trigger E2E).
# Without `(.[0] // {})`, jq sees `null` and emits
# "null/none" — which the case statement below has no
# branch for, so it falls into *) → exit 1.
# Surfaced 2026-04-30 the first time the App-token chain
# (#2389) actually fired auto-promote-on-e2e from a publish
# upstream — every prior run was E2E-upstream which
# short-circuits before this gate.
# Distinguish "gh succeeded but found no run" (none/none — E2E was
# paths-filtered for this SHA, safe to proceed) from "the gh CLI itself
# failed" (error/none — network / auth / rate-limit). Previously both
# collapsed into none/none, so a transient API error silently promoted —
# exactly the silent-bypass class this workflow is moving away from.
# error/none now lands in the *) arm and aborts loudly.
RESULT=$(gh run list \
  --repo "$REPO" \
  --workflow e2e-staging-saas.yml \
  --branch main \
  --commit "$SHA" \
  --limit 1 \
  --json status,conclusion \
  --jq '(.[0] // {}) | "\(.status // "none")/\(.conclusion // "none")"' \
  2>/dev/null || echo "error/none")
echo "E2E Staging SaaS for ${SHA:0:7}: $RESULT"
case "$RESULT" in
  completed/success)
    echo "proceed=true" >> "$GITHUB_OUTPUT"
    echo "::notice::E2E green for this SHA — proceeding with promote"
    ;;
  completed/failure|completed/timed_out)
    echo "proceed=false" >> "$GITHUB_OUTPUT"
    {
      echo "## ❌ Auto-promote aborted — E2E Staging SaaS failed"
      echo
      echo "E2E Staging SaaS for \`${SHA:0:7}\`: \`$RESULT\`"
      echo "\`:latest\` stays on the prior known-good digest."
      echo
      echo "If the failure was a flake, manually dispatch this workflow with the same sha to override."
    } >> "$GITHUB_STEP_SUMMARY"
    exit 1
    ;;
  completed/cancelled)
    # cancelled ≠ failure. Per-SHA concurrency cancels older E2E
    # runs when a newer push lands (memory:
    # feedback_concurrency_group_per_sha) — the newer SHA will
    # have its own E2E + promote chain. Treat the same as
    # in_progress: defer without aborting, let the next E2E run
    # promote when it lands.
    #
    # Caught 2026-05-05 02:03 on sha 31f9a5e — auto-promote
    # blocked the whole chain because this case fell through to
    # exit 1 instead of clean defer.
    echo "proceed=false" >> "$GITHUB_OUTPUT"
    {
      echo "## ⏭ Auto-promote deferred — E2E Staging SaaS was cancelled"
      echo
      echo "E2E Staging SaaS for \`${SHA:0:7}\`: \`$RESULT\`"
      echo "Likely per-SHA concurrency (newer push superseded this E2E run)."
      echo "The newer SHA's E2E will fire its own promote when it lands."
      echo "If you need this specific SHA promoted, manually dispatch."
    } >> "$GITHUB_STEP_SUMMARY"
    ;;
  in_progress/*|queued/*|requested/*|waiting/*|pending/*)
    echo "proceed=false" >> "$GITHUB_OUTPUT"
    {
      echo "## ⏳ Auto-promote deferred — E2E Staging SaaS still running"
      echo
      echo "Publish completed before E2E for \`${SHA:0:7}\` (state: \`$RESULT\`)."
      echo "Skipping retag here — E2E's own completion event will re-fire this workflow."
      echo "If E2E ends green, that run promotes \`:latest\`. If red, it aborts."
    } >> "$GITHUB_STEP_SUMMARY"
    ;;
  none/none)
    echo "proceed=true" >> "$GITHUB_OUTPUT"
    echo "::notice::E2E paths-filtered out for this SHA — pre-merge staging gates carry"
    ;;
  *)
    # Also reached for error/none (gh CLI failure above) — abort, don't
    # silently promote on an API error.
    echo "proceed=false" >> "$GITHUB_OUTPUT"
    {
      echo "## ❓ Auto-promote aborted — unexpected E2E state"
      echo
      echo "E2E Staging SaaS for \`${SHA:0:7}\`: \`$RESULT\` (unhandled)"
      echo "Manual investigation needed; re-dispatch with the same sha once resolved."
    } >> "$GITHUB_STEP_SUMMARY"
    exit 1
    ;;
esac
# Install crane (action pinned by commit SHA) only when the gate passed.
- if: steps.gate.outputs.proceed == 'true'
  uses: imjasonh/setup-crane@6da1ae018866400525525ce74ff892880c099987 # v0.5
- name: GHCR login
  if: steps.gate.outputs.proceed == 'true'
  run: |
    # Token goes in via stdin so it never appears in argv / process list.
    echo "${{ secrets.GITHUB_TOKEN }}" | \
      crane auth login ghcr.io -u "${{ github.actor }}" --password-stdin
- name: Verify :staging-<sha> exists for both images
# Better to fail fast with a clear message than to half-tag
# (platform retagged but platform-tenant missing → tenants pull
# a stale image).
if: steps.gate.outputs.proceed == 'true'
run: |
set -euo pipefail
# Both images must already carry :staging-<sha> before we touch :latest;
# aborting here prevents the half-tagged state described in the step
# comment above.
check_tag() {
  local tag="$1:staging-${{ steps.sha.outputs.short }}"
  if crane manifest "$tag" >/dev/null 2>&1; then
    echo " ok: $tag exists"
  else
    echo "::error::Missing tag: $tag"
    echo "::error::publish-workspace-server-image must complete on this SHA before auto-promote can retag :latest."
    exit 1
  fi
}
check_tag "${IMAGE_NAME}"
check_tag "${TENANT_IMAGE_NAME}"
- name: Ancestry check — refuse to promote :latest backwards
# #2244: workflow_run completions arrive in arbitrary order. If
# SHA-A and SHA-B both reach main within ~10 min and SHA-B's E2E
# completes before SHA-A's, this workflow can fire for SHA-A
# AFTER it already promoted SHA-B → :latest goes backwards. The
# orphan-reconciler "next run corrects it" doesn't apply: there's
# no auto-corrective re-promote, :latest stays wrong until the
# next main push lands.
#
# Detection: read current :latest's `org.opencontainers.image.revision`
# label (set by publish-workspace-server-image.yml at build time)
# and ask the GitHub compare API whether the candidate SHA is
# ahead-of / identical-to / behind / diverged-from current.
# Hard-fail on `behind` and `diverged` per the approved design —
# silent-bypass is the class we're moving away from. Workflow
# goes red, oncall sees it, operator decides how to recover
# (manual dispatch with the right SHA, force-promote, etc.).
#
# Manual dispatch skips this check — operator override semantics
# match the gate-check step above.
#
# Backward-compat: when current :latest carries no revision
# label (legacy image pre-publish-with-label), skip-with-warning.
# All :latest images on main are post-label as of 2026-04-29, so
# this branch will be dead within 90 days; remove then.
if: steps.gate.outputs.proceed == 'true' && github.event_name != 'workflow_dispatch'
id: ancestry
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
REPO: ${{ github.repository }}
TARGET_SHA: ${{ steps.sha.outputs.full }}
run: |
set -euo pipefail
# Read the current :latest config and pull the revision label.
# `crane config` returns the OCI image config blob (not the manifest);
# labels live under `.config.Labels`. `// empty` makes jq return ""
# rather than the literal "null" so the test below works.
# `|| true` applies to the whole pipeline: a pull/auth failure also
# lands in the empty-revision (skip-with-warning) branch below.
CURRENT_REVISION=$(crane config "${IMAGE_NAME}:latest" 2>/dev/null \
  | jq -r '.config.Labels["org.opencontainers.image.revision"] // empty' \
  || true)
if [ -z "$CURRENT_REVISION" ]; then
  # Legacy-image escape hatch (see step comment): no label → cannot
  # compare ancestry → skip with a warning rather than block promotes.
  echo "decision=skip-no-label" >> "$GITHUB_OUTPUT"
  {
    echo "## ⚠ Ancestry check skipped — current :latest has no revision label"
    echo
    echo "Likely a legacy image built before \`org.opencontainers.image.revision\` was set."
    echo "Falling through to retag. After all \`:latest\` images are post-label (TODO 90 days), this branch is dead and should be removed."
  } >> "$GITHUB_STEP_SUMMARY"
  echo "::warning::Current :latest carries no revision label — skipping ancestry check (legacy image)"
  exit 0
fi
# Fast path: already at the target SHA — no API call needed, and the
# retag steps downstream become no-ops.
if [ "$CURRENT_REVISION" = "$TARGET_SHA" ]; then
  echo "decision=identical" >> "$GITHUB_OUTPUT"
  echo "::notice:::latest already at ${TARGET_SHA:0:7} — retag will be a no-op"
  exit 0
fi
# Ask GitHub which side of the merge graph TARGET_SHA sits on
# relative to CURRENT_REVISION. Returns one of: ahead | identical
# | behind | diverged. Network or auth errors collapse to "error"
# via the explicit fallback so the case below always matches.
STATUS=$(gh api \
  "repos/${REPO}/compare/${CURRENT_REVISION}...${TARGET_SHA}" \
  --jq '.status' 2>/dev/null || echo "error")
echo "ancestry compare ${CURRENT_REVISION:0:7} → ${TARGET_SHA:0:7}: $STATUS"
case "$STATUS" in
  ahead)
    echo "decision=ahead" >> "$GITHUB_OUTPUT"
    echo "::notice::Target ${TARGET_SHA:0:7} is ahead of current :latest (${CURRENT_REVISION:0:7}) — proceeding with retag"
    ;;
  identical)
    # Reachable despite the fast path above when the compare API
    # resolves two distinct refs to the same commit.
    echo "decision=identical" >> "$GITHUB_OUTPUT"
    echo "::notice::Target identical to :latest — retag will be a no-op"
    ;;
  behind)
    # Hard fail (workflow goes red): see the step comment — this is the
    # #2244 out-of-order-completion race guard.
    echo "decision=behind" >> "$GITHUB_OUTPUT"
    {
      echo "## ❌ Auto-promote refused — target is BEHIND current :latest"
      echo
      echo "| Field | Value |"
      echo "|---|---|"
      echo "| Target SHA | \`$TARGET_SHA\` |"
      echo "| Current :latest revision | \`$CURRENT_REVISION\` |"
      echo "| GitHub compare status | \`behind\` |"
      echo
      echo "This guard catches the workflow_run-completion-order race (#2244):"
      echo "two rapid main pushes whose E2Es complete out-of-order can otherwise"
      echo "promote \`:latest\` backwards. \`:latest\` stays on \`${CURRENT_REVISION:0:7}\`."
      echo
      echo "**Recovery:** if this is a legitimate revert that should land on \`:latest\`,"
      echo "manually dispatch this workflow with the target sha as input — the manual-dispatch"
      echo "path skips the ancestry check (operator override)."
    } >> "$GITHUB_STEP_SUMMARY"
    exit 1
    ;;
  diverged)
    echo "decision=diverged" >> "$GITHUB_OUTPUT"
    {
      echo "## ❓ Auto-promote refused — history diverged"
      echo
      echo "| Field | Value |"
      echo "|---|---|"
      echo "| Target SHA | \`$TARGET_SHA\` |"
      echo "| Current :latest revision | \`$CURRENT_REVISION\` |"
      echo "| GitHub compare status | \`diverged\` |"
      echo
      echo "Likely cause: force-push rewrote main's history, leaving the previous"
      echo "\`:latest\` revision orphaned. Needs human review before \`:latest\` advances."
    } >> "$GITHUB_STEP_SUMMARY"
    exit 1
    ;;
  error|*)
    # "error" is the explicit API-failure fallback; the bare * keeps any
    # future compare status from silently proceeding.
    echo "decision=error" >> "$GITHUB_OUTPUT"
    {
      echo "## ❌ Auto-promote aborted — ancestry-check API error"
      echo
      echo "\`gh api repos/${REPO}/compare/${CURRENT_REVISION}...${TARGET_SHA}\` returned unexpected status: \`$STATUS\`"
      echo
      echo "Manual dispatch with the target sha bypasses this check."
    } >> "$GITHUB_STEP_SUMMARY"
    exit 1
    ;;
esac
- name: Retag platform :staging-<sha> → :latest
if: steps.gate.outputs.proceed == 'true'
run: |
crane tag "${IMAGE_NAME}:staging-${{ steps.sha.outputs.short }}" latest
- name: Retag tenant :staging-<sha> → :latest
if: steps.gate.outputs.proceed == 'true'
run: |
crane tag "${TENANT_IMAGE_NAME}:staging-${{ steps.sha.outputs.short }}" latest
- name: Summary
if: steps.gate.outputs.proceed == 'true'
run: |
# Human-facing run summary only — the retag steps above are the
# operative part; nothing here affects state.
{
  echo "## :latest promoted to ${{ steps.sha.outputs.short }}"
  echo
  if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
    echo "- Trigger: manual dispatch"
  else
    # workflow_run path: link back to the upstream run that fired us.
    echo "- Upstream: \`${{ github.event.workflow_run.name }}\` ([run](${{ github.event.workflow_run.html_url }}))"
  fi
  echo "- platform:staging-${{ steps.sha.outputs.short }} → :latest"
  echo "- platform-tenant:staging-${{ steps.sha.outputs.short }} → :latest"
  echo
  echo "Tenant fleet auto-pulls within 5 min via IMAGE_AUTO_REFRESH=true."
  echo "Force immediate fanout: dispatch redeploy-tenants-on-main.yml."
} >> "$GITHUB_STEP_SUMMARY"

View File

@ -0,0 +1,434 @@
name: Auto-promote staging → main
# Fires after any of the staging-branch quality gates complete. When ALL
# required gates are green on the same staging SHA, opens (or re-uses)
# a PR `staging → main` and enables auto-merge so the merge queue lands
# it. Closes the gap that historically let features sit on staging for
# weeks waiting for a bulk promotion PR (see molecule-core#1496 for the
# 1172-commit example).
#
# 2026-04-28 rewrite (PR #142): the previous version did a direct
# `git merge --ff-only origin staging && git push origin main`. That
# breaks against main's branch-protection ruleset, which requires
# status checks "set by the expected GitHub apps" — direct pushes
# can't satisfy that condition (only PR merges through the queue can).
# The workflow was failing every tick with:
# remote: error: GH006: Protected branch update failed for refs/heads/main.
# remote: - Required status checks ... were not set by the expected GitHub apps.
# Fix: mirror the PR-based pattern from auto-sync-main-to-staging.yml
# (the reverse-direction sync, fixed in #2234 for the same reason).
# Both directions now use the same merge-queue path that humans use,
# no special-case bypass.
#
# Safety model:
# - Runs ONLY on workflow_run events for the staging branch.
# - Requires EVERY named gate workflow to have the same head_sha and
# all be `conclusion == success`. If any of them is red, skipped,
# cancelled, or pending, we abort (stay on the current main).
# - The PR base=main head=staging path lets GitHub itself enforce
# branch protection. If main has diverged from staging or required
# checks aren't satisfied, the merge queue declines the PR — no
# need for a manual ff-only ancestry check here.
# - Loop safety: the auto-sync-main-to-staging workflow fires when
# main lands the auto-promote PR, but its merge into staging is by
# GITHUB_TOKEN which doesn't trigger downstream workflow_run events
# (GitHub Actions safety). So this workflow doesn't re-fire from
# its own promote landing.
#
# Toggle via repo variable AUTO_PROMOTE_ENABLED (true/unset). When
# unset, the workflow logs what it would have done but doesn't open
# the PR — useful for dry-running the gate logic without surfacing
# a noisy PR while staging CI is still flaky.
#
# **One-time repo setting (load-bearing):** this workflow opens the
# staging→main PR via `gh pr create` using the default GITHUB_TOKEN.
# Since GitHub's 2022 default change, that token cannot create or
# approve PRs unless the repo opts in. The toggle is at:
#
# Settings → Actions → General → Workflow permissions
# → ✅ Allow GitHub Actions to create and approve pull requests
#
# Without it, every workflow_run fails with:
#
# pull request create failed: GraphQL: GitHub Actions is not
# permitted to create or approve pull requests (createPullRequest)
#
# Observed 2026-04-29 01:43 UTC blocking promotion of fcd87b9 (PRs
# #2248 + #2249); manually bridged via PR #2252. Re-check this
# setting if auto-promote starts failing with createPullRequest
# errors after a repo or org admin change.
on:
workflow_run:
workflows:
- CI
- E2E Staging Canvas (Playwright)
- E2E API Smoke Test
- CodeQL
types: [completed]
workflow_dispatch:
inputs:
force:
description: "Force promote even when AUTO_PROMOTE_ENABLED is unset (manual override)"
required: false
default: "false"
permissions:
contents: write
pull-requests: write
# actions: write is needed by the post-merge dispatch tail step
# (#2358 / #2357) — `gh workflow run publish-workspace-server-image.yml`
# POSTs to /actions/workflows/.../dispatches which requires this scope.
# Without it the call 403s and the publish/canary/redeploy chain still
# doesn't run on staging→main promotions, undoing #2358.
actions: write
# Serialize auto-promote runs. Multiple staging gate completions can land
# in quick succession (CI + E2E + CodeQL all finish within seconds of
# each other on a green PR) — without this, two parallel runs both:
# 1. Open / re-use the same promote PR.
# 2. Both call `gh pr merge --auto` (idempotent — fine).
# 3. Both poll for the same mergedAt and both `gh workflow run` publish
# → 2× redundant publish builds racing for the same `:staging-latest`
# retag, and 2× canary-verify chains.
# cancel-in-progress: false because we don't want a brand-new run to kill
# a polling-tail that's about to dispatch — the polling tail's 30 min cap
# is the right backstop, not workflow-level cancel.
concurrency:
group: auto-promote-staging
cancel-in-progress: false
jobs:
check-all-gates-green:
# Only consider staging pushes. PRs into staging don't promote.
if: >
(github.event_name == 'workflow_run' &&
github.event.workflow_run.head_branch == 'staging' &&
github.event.workflow_run.event == 'push')
|| github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
outputs:
all_green: ${{ steps.gates.outputs.all_green }}
head_sha: ${{ steps.gates.outputs.head_sha }}
steps:
# Skip empty-tree promotes (the perpetual auto-promote↔auto-sync cycle
# observed 2026-05-03). Sequence: auto-promote merges via the staging
# merge-queue's MERGE strategy, creating a merge commit on main that
# staging doesn't have. auto-sync then merges main back into staging
# via another merge commit (the queue's MERGE strategy applies on
# the staging side too, even when the workflow's local FF would
# have sufficed). Now staging has a new merge-commit SHA whose
# tree == main's tree — but auto-promote sees "staging ahead of
# main by 1" and opens YET another empty promote PR. Each round
# costs ~30-40 min wallclock, ~2 manual approvals, and burns a
# full CodeQL Go run (~15 min). Without this guard the cycle
# repeats indefinitely.
#
# Long-term fix is to switch the merge_queue ruleset's
# `merge_method` away from MERGE so FF-able PRs land cleanly,
# but that's a broader change affecting every staging PR's
# commit shape. This guard is the one-line surgical fix that
# breaks the cycle without touching merge-queue config.
#
# Fail-open: if `git diff` errors for any reason, fall through
# to the gate check (preserve existing behavior). Only skip
# when the diff is DEFINITIVELY empty.
- name: Checkout for tree-diff check
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
ref: staging
- name: Skip if staging tree == main tree (perpetual-cycle break)
id: tree-diff
env:
HEAD_SHA: ${{ github.event.workflow_run.head_sha || github.sha }}
run: |
set -eu
# Fail-open on fetch error: exit 0 here leaves the `skip` output unset,
# which the next step's `skip != 'true'` condition treats as not-skip —
# gates still run, preserving pre-guard behavior.
git fetch origin main --depth=50 || { echo "::warning::git fetch main failed — proceeding (fail-open)"; exit 0; }
# Compare staging tip's tree against main's tree. `git diff
# --quiet` exits 0 if no differences, 1 if there are.
if git diff --quiet origin/main "$HEAD_SHA" -- 2>/dev/null; then
  {
    echo "## ⏭ Skipped — no code to promote"
    echo
    echo "staging tip (\`${HEAD_SHA:0:8}\`) and \`main\` have identical trees."
    echo "This is the auto-promote↔auto-sync merge-commit cycle: staging has a"
    echo "new SHA (a sync-back merge commit) but the underlying file tree is"
    echo "already on main, so there's no real code to ship."
    echo
    echo "Skipping to avoid opening an empty promote PR. Cycle terminates here."
  } >> "$GITHUB_STEP_SUMMARY"
  echo "::notice::auto-promote: staging tree == main tree — no code to promote, skipping"
  echo "skip=true" >> "$GITHUB_OUTPUT"
else
  # Non-zero from `git diff` covers both real differences AND errors in
  # the comparison itself — either way fall through to the gate check.
  echo "skip=false" >> "$GITHUB_OUTPUT"
fi
- name: Check all required gates on this SHA
if: steps.tree-diff.outputs.skip != 'true'
id: gates
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
HEAD_SHA: ${{ github.event.workflow_run.head_sha || github.sha }}
REPO: ${{ github.repository }}
run: |
set -euo pipefail
# Required gate workflow files. Use file paths (relative to
# .github/workflows/) rather than display names because:
#
# 1. `gh run list --workflow=<name>` is ambiguous when two
#    workflows have the same `name:` — observed 2026-04-28
#    with "CodeQL" matching both `codeql.yml` (explicit) and
#    GitHub's UI-configured Code-quality default setup
#    (internal "codeql"). gh CLI returns "could not resolve
#    to a unique workflow" → empty result → gate evaluated
#    as missing/none → auto-promote dead-locked despite all
#    checks actually passing.
#
# 2. File paths are the unique identifier for workflows;
#    `name:` is just a display string and can collide.
#
# When adding/removing a gate, update this list AND the
# branch-protection required-checks list (which uses check-run
# display names, not workflow names; the two are decoupled and
# should be kept in sync manually).
GATES=(
  "ci.yml"
  "e2e-staging-canvas.yml"
  "e2e-api.yml"
  "codeql.yml"
)
echo "head_sha=${HEAD_SHA}" >> "$GITHUB_OUTPUT"
echo "Checking gates on SHA ${HEAD_SHA}"
ALL_GREEN=true
for gate in "${GATES[@]}"; do
  # Query the most recent run of this workflow on this SHA.
  # event=push to avoid picking up PR runs. branch=staging to
  # guard against someone dispatching the gate on a non-staging
  # branch at the same SHA.
  #
  # `(.[0] // {})` guards the empty-result case — the same gotcha the
  # promote-side gate documents: without it jq pipes `null` through and
  # the label reads "null/none". Normalize to "missing/none", matching
  # the gh-failure fallback below, so logs show one spelling for "no
  # run recorded". The gate decision is unchanged either way: anything
  # other than completed/success flips ALL_GREEN to false.
  RESULT=$(gh run list \
    --repo "$REPO" \
    --workflow "$gate" \
    --branch staging \
    --event push \
    --commit "$HEAD_SHA" \
    --limit 1 \
    --json status,conclusion \
    --jq '(.[0] // {}) | "\(.status // "missing")/\(.conclusion // "none")"' \
    2>/dev/null || echo "missing/none")
  echo " $gate → $RESULT"
  # Only completed/success counts. completed/failure or
  # in_progress/anything or no record at all = abort.
  if [ "$RESULT" != "completed/success" ]; then
    ALL_GREEN=false
  fi
done
echo "all_green=${ALL_GREEN}" >> "$GITHUB_OUTPUT"
if [ "$ALL_GREEN" != "true" ]; then
  echo "::notice::auto-promote: not all gates are green on ${HEAD_SHA} — staying on current main"
fi
promote:
needs: check-all-gates-green
if: needs.check-all-gates-green.outputs.all_green == 'true'
runs-on: ubuntu-latest
steps:
- name: Check rollout gate
env:
AUTO_PROMOTE_ENABLED: ${{ vars.AUTO_PROMOTE_ENABLED }}
FORCE_INPUT: ${{ github.event.inputs.force }}
run: |
set -eu
# Rollout toggle. Repo variable AUTO_PROMOTE_ENABLED=true arms the
# workflow; a manual dispatch with force=true overrides it for one run.
# Until one of those holds, this step dry-runs: it records what would
# have happened and exits 0 so the guarded steps below stay skipped.
# Set the variable in Settings → Secrets and variables → Actions →
# Variables.
armed=false
[ "${AUTO_PROMOTE_ENABLED:-}" = "true" ] && armed=true
[ "${FORCE_INPUT:-false}" = "true" ] && armed=true
if [ "$armed" = "true" ]; then
  exit 0
fi
{
  echo "## ⏸ Auto-promote disabled"
  echo
  echo "Repo variable \`AUTO_PROMOTE_ENABLED\` is not set to \`true\`."
  echo "All gates are green on staging; would have opened a promote PR to \`main\`."
  echo
  echo "To enable: Settings → Secrets and variables → Actions → Variables → \`AUTO_PROMOTE_ENABLED=true\`."
  echo "To test once manually: workflow_dispatch with \`force=true\`."
} >> "$GITHUB_STEP_SUMMARY"
echo "::notice::auto-promote disabled — dry run only"
exit 0
# Mint the App token BEFORE the promote-PR step so the auto-merge
# call can use it. GITHUB_TOKEN-initiated merges suppress the
# downstream `push` event on main, breaking the
# publish-workspace-server-image → canary-verify → redeploy-tenants
# chain (issue #2357). Using the App token here means the
# merge-queue-landed merge IS able to fire the cascade naturally;
# the polling tail below stays as defense-in-depth.
- name: Mint App token for promote-PR + downstream dispatch
if: ${{ vars.AUTO_PROMOTE_ENABLED == 'true' || github.event.inputs.force == 'true' }}
id: app-token
uses: actions/create-github-app-token@1b10c78c7865c340bc4f6099eb2f838309f1e8c3 # v3.1.1
with:
app-id: ${{ secrets.MOLECULE_AI_APP_ID }}
private-key: ${{ secrets.MOLECULE_AI_APP_PRIVATE_KEY }}
- name: Open (or reuse) staging → main promote PR + enable auto-merge
if: ${{ vars.AUTO_PROMOTE_ENABLED == 'true' || github.event.inputs.force == 'true' }}
env:
GH_TOKEN: ${{ steps.app-token.outputs.token }}
REPO: ${{ github.repository }}
TARGET_SHA: ${{ needs.check-all-gates-green.outputs.head_sha }}
run: |
set -euo pipefail
# Look for an existing open promote PR (idempotent on re-run
# of the workflow). The PR's head IS the staging branch — the
# whole point is "advance main to staging's tip", so we don't
# need a per-SHA branch like auto-sync-main-to-staging uses.
PR_NUM=$(gh pr list --repo "$REPO" \
  --base main --head staging --state open \
  --json number --jq '.[0].number // ""')
if [ -z "$PR_NUM" ]; then
  TITLE="staging → main: auto-promote ${TARGET_SHA:0:7}"
  BODY_FILE=$(mktemp)
  # Unquoted heredoc delimiter: ${TARGET_SHA:0:8} expands here; the
  # backslash-escaped backticks survive into the PR body as literal
  # markdown code spans.
  cat > "$BODY_FILE" <<EOFBODY
Automated promotion of \`staging\` (\`${TARGET_SHA:0:8}\`) to \`main\`. All required staging gates green at this SHA: CI, E2E Staging Canvas, E2E API Smoke, CodeQL.
This PR is auto-generated by \`.github/workflows/auto-promote-staging.yml\` whenever every required gate completes green on the same staging SHA. It exists because main's branch protection requires status checks "set by the expected GitHub apps" — direct \`git push\` from a workflow can't satisfy that, only PR merges through the queue can.
Merge queue lands this; no human action needed unless gates fail. Reverse-direction sync (the merge commit on main → staging) is handled by \`auto-sync-main-to-staging.yml\`.
EOFBODY
  PR_URL=$(gh pr create --repo "$REPO" \
    --base main --head staging \
    --title "$TITLE" \
    --body-file "$BODY_FILE")
  # gh pr create prints the PR URL; the trailing number is the PR id.
  PR_NUM=$(echo "$PR_URL" | grep -oE '[0-9]+$' | tail -1)
  rm -f "$BODY_FILE"
  echo "::notice::Opened PR #${PR_NUM}"
else
  echo "::notice::Re-using existing promote PR #${PR_NUM}"
fi
# Enable auto-merge — the merge queue picks it up once
# required gates are green on the merge_group ref.
# Warn-don't-fail: a failure here leaves a perfectly mergeable PR
# for an operator to land by hand.
if ! gh pr merge "$PR_NUM" --repo "$REPO" --auto --merge 2>&1; then
  echo "::warning::Failed to enable auto-merge on PR #${PR_NUM} — operator may need to merge manually."
fi
{
  echo "## ✅ Auto-promote PR opened"
  echo
  echo "- Source: staging at \`${TARGET_SHA:0:8}\`"
  echo "- PR: #${PR_NUM}"
  echo
  echo "Merge queue lands the PR once required gates are green; no human action needed unless gates fail."
} >> "$GITHUB_STEP_SUMMARY"
# Hand the PR number to the next step so we can dispatch the
# tenant-redeploy chain after the merge queue lands the merge.
echo "promote_pr_num=${PR_NUM}" >> "$GITHUB_OUTPUT"
id: promote_pr
# The App token minted above (before the promote-PR step) is
# also used by the polling tail below. Defense-in-depth: with
# the merge-queue-landed merge now using the App token, the
# main-branch push event SHOULD fire the publish/canary/redeploy
# cascade naturally — but if for any reason it doesn't (e.g. an
# unrelated event-suppression edge case), the explicit dispatches
# below still wake the chain.
- name: Wait for promote merge, then dispatch publish + redeploy (#2357)
# Defense-in-depth dispatch. With the auto-merge call above
# now using the App token (this commit), the merge-queue-landed
# merge SHOULD fire publish-workspace-server-image naturally
# via on:push:[main] — App-token-initiated pushes DO trigger
# workflow_run cascades, unlike GITHUB_TOKEN-initiated ones
# (the documented "no recursion" rule —
# https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow).
#
# This explicit dispatch stays as belt-and-suspenders for any
# edge case where the natural cascade misfires. If it never
# observably fires after this token swap (i.e. the publish
# workflow has already started by the time we get here), the
# second dispatch is a harmless no-op (publish-workspace-server-image
# has its own concurrency group that dedupes).
#
# See PR for #2357: pre-fix the merge action was via
# GITHUB_TOKEN, suppressing the cascade and forcing this tail
# to be the SOLE chain trigger. With the auto-merge token swap
# the tail becomes redundant in the happy path; keep until
# we've observed >=10 successful natural cascades, then drop.
if: steps.promote_pr.outputs.promote_pr_num != ''
env:
GH_TOKEN: ${{ steps.app-token.outputs.token }}
REPO: ${{ github.repository }}
PR_NUM: ${{ steps.promote_pr.outputs.promote_pr_num }}
run: |
# Poll for merge — max 30 min (60 × 30s). The merge queue
# typically lands within 5-10 min when gates are green. Break
# early if the PR is closed without merging (operator action,
# gates flipped red post-approval, branch-protection rejection)
# so we don't tie up a runner for the full 30 min on a dead PR.
MERGED=""
STATE=""
for _ in $(seq 1 60); do
  # Tolerate transient poll failures. This step runs under the runner's
  # default `bash -e`, so an unguarded `gh pr view` hiccup (rate limit,
  # 5xx, network blip) would previously abort the entire defense-in-depth
  # tail mid-poll. Treat a failed poll like "not merged yet" and retry on
  # the next tick instead.
  if ! VIEW=$(gh pr view "$PR_NUM" --repo "$REPO" --json mergedAt,state 2>/dev/null); then
    echo "::warning::gh pr view failed (transient?) — retrying in 30s"
    sleep 30
    continue
  fi
  MERGED=$(echo "$VIEW" | jq -r '.mergedAt // ""')
  STATE=$(echo "$VIEW" | jq -r '.state // ""')
  if [ -n "$MERGED" ] && [ "$MERGED" != "null" ]; then
    echo "::notice::Promote PR #${PR_NUM} merged at ${MERGED}"
    break
  fi
  if [ "$STATE" = "CLOSED" ]; then
    echo "::warning::Promote PR #${PR_NUM} was closed without merging — skipping deploy dispatch."
    exit 0
  fi
  sleep 30
done
if [ -z "$MERGED" ] || [ "$MERGED" = "null" ]; then
  echo "::warning::Promote PR #${PR_NUM} didn't merge within 30min — skipping deploy dispatch (manually run \`gh workflow run publish-workspace-server-image.yml --ref main\` once it lands)."
  exit 0
fi
# Dispatch publish on main using the App token. App-initiated
# workflow_dispatch DOES propagate the workflow_run cascade,
# unlike GITHUB_TOKEN-initiated dispatch.
# publish completes → canary-verify chains via workflow_run →
# redeploy-tenants-on-main chains via workflow_run + branches:[main].
if gh workflow run publish-workspace-server-image.yml \
  --repo "$REPO" --ref main 2>&1; then
  echo "::notice::Dispatched publish-workspace-server-image on ref=main as molecule-ai App — canary-verify and redeploy-tenants-on-main will chain via workflow_run."
  {
    echo "## 🚀 Tenant redeploy chain dispatched"
    echo
    echo "- publish-workspace-server-image (workflow_dispatch on \`main\`, actor: \`molecule-ai[bot]\`)"
    echo "- canary-verify will chain on completion"
    echo "- redeploy-tenants-on-main will chain on canary green"
  } >> "$GITHUB_STEP_SUMMARY"
else
  echo "::error::Failed to dispatch publish-workspace-server-image. Run manually: gh workflow run publish-workspace-server-image.yml --ref main"
fi
# ALSO dispatch auto-sync-main-to-staging.yml. Same root cause as
# publish above (issue #2357): the merge-queue-initiated push to
# main is by GITHUB_TOKEN → no `on: push` triggers fire downstream.
# Without this dispatch, every staging→main promote leaves staging
# one merge commit BEHIND main, which silently dead-locks the NEXT
# promote PR as `mergeStateStatus: BEHIND` because main's
# branch-protection has `strict: true`. Verified empirically on
# 2026-05-02 against PR #2442 (Phase 2 promote): only the explicit
# publish-workspace-server-image dispatch fired on the previous
# promote SHA 76c604fb, while auto-sync silently no-op'd, leaving
# staging behind for ~24h until manually bridged.
if gh workflow run auto-sync-main-to-staging.yml \
  --repo "$REPO" --ref main 2>&1; then
  echo "::notice::Dispatched auto-sync-main-to-staging on ref=main as molecule-ai App — staging will absorb the new main merge commit via PR + merge queue."
else
  echo "::error::Failed to dispatch auto-sync-main-to-staging. Run manually: gh workflow run auto-sync-main-to-staging.yml --ref main"
fi

View File

@ -0,0 +1,83 @@
name: auto-promote-stale-alarm
# Hourly cron + on-demand alarm for the silent-block failure mode that
# motivated issue #2975:
# - The auto-promote-staging.yml workflow opened a PR + armed
# auto-merge, but main's branch protection requires a human review
# (reviewDecision=REVIEW_REQUIRED). The PR sat BLOCKED with no
# surface-up-the-stack for 12+ hours, holding 25 commits hostage
# including the Memory v2 redesign and a reno-stars data-loss fix.
#
# This workflow runs `scripts/check-stale-promote-pr.sh` against the
# repo's open auto-promote PRs (base=main head=staging). When a PR has
# been BLOCKED on REVIEW_REQUIRED for >4h, it:
# 1. Emits a workflow-level warning (visible in run summary + the
# Actions UI feed).
# 2. Posts a comment on the PR (idempotent — one alarm per PR).
#
# The detection logic lives in scripts/check-stale-promote-pr.sh so
# it's unit-testable with stubbed `gh` (see test-check-stale-promote-pr.sh).
# This file is the schedule + invocation surface only — SSOT for the
# detector itself.
on:
schedule:
# Hourly. Cheap (one `gh pr list` + jq), and 1h granularity is
# plenty for a 4h staleness threshold — operators see the alarm
# within at most 1h of crossing the threshold.
- cron: "27 * * * *" # at :27 to dodge the cron herd at :00
workflow_dispatch:
inputs:
stale_hours:
description: "Hours after which a BLOCKED+REVIEW_REQUIRED PR is stale (default 4)"
required: false
default: "4"
post_comment:
description: "Post a comment on stale PRs (default true)"
required: false
default: "true"
permissions:
contents: read
pull-requests: write # post comments on stale PRs
# Serialize so the on-demand and scheduled runs don't double-comment
# the same PR. cancel-in-progress=false because the script is idempotent
# (existing comment marker prevents dupes), but a scheduled run firing
# while a manual one runs would just re-list the same PR set.
concurrency:
group: auto-promote-stale-alarm
cancel-in-progress: false
jobs:
scan:
runs-on: ubuntu-latest
steps:
- name: Checkout (need scripts/ only)
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
sparse-checkout: |
scripts/check-stale-promote-pr.sh
sparse-checkout-cone-mode: false
- name: Run stale-PR detector
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_REPOSITORY: ${{ github.repository }}
STALE_HOURS: ${{ inputs.stale_hours || '4' }}
POST_COMMENT: ${{ inputs.post_comment || 'true' }}
run: |
# The script's exit code reflects the count of stale PRs.
# We don't want a stale finding to fail the workflow run —
# the warning + comment are the signal, the green/red is
# noise. So convert any non-zero exit to a workflow notice
# and exit 0.
set +e
bash scripts/check-stale-promote-pr.sh
rc=$?
set -e
if [ "$rc" -ne 0 ]; then
  # NOTE(review): assumes the script only ever exits with a stale-PR
  # count. A crash exit code (e.g. 127 command-not-found) would be
  # reported here as "127 PR(s)" — confirm against the script's
  # contract / its unit tests.
  echo "::notice::Stale PR detector found $rc PR(s) needing attention. See warnings above + comments on the PRs."
fi
# Always succeed — operator-facing surface is the warning,
# not the workflow status.
exit 0

View File

@ -0,0 +1,237 @@
name: Auto-sync main → staging
# Reflects every push to `main` back onto `staging` so the
# staging-as-superset-of-main invariant holds.
#
# Background:
#
# `auto-promote-staging.yml` advances main via `git merge --ff-only`
# + `git push origin main` — that's a clean fast-forward, no merge
# commit. But manual merges of `staging → main` PRs through the
# GitHub UI / API create a merge commit on main that staging
# doesn't have. The next `staging → main` PR then evaluates as
# "BEHIND" because staging is missing that merge commit, requiring
# a manual `gh pr update-branch` round-trip.
#
# This happened twice on 2026-04-28 (PRs #2202, #2205, both manual
# bridges). Each time the bridge needed update-branch + a re-CI
# round before merging. Operationally annoying and avoidable.
#
# Architecture:
#
# This repo's `staging` branch is protected by a `merge_queue`
# ruleset (id 15500102) that blocks ALL direct pushes — no bypass
# even for org admins or the GitHub Actions integration. Direct
# `git push origin staging` returns GH013. So instead of pushing
# directly, this workflow:
#
#   1. Checks if main is already in staging's ancestry → no-op.
#   2. Creates an `auto-sync/main-<sha>` branch from staging.
#   3. Tries `git merge --ff-only origin/main` → if staging hasn't
#      diverged this is a clean ff.
#   4. Otherwise `git merge --no-ff origin/main` to absorb main's
#      tip while keeping staging's history.
#   5. Pushes the auto-sync branch.
#   6. Opens a PR (base=staging, head=auto-sync/main-<sha>) and
#      enables auto-merge so the merge queue lands it.
#
# This mirrors the path human PRs take through staging — same
# rules, same gates, no special-case bypass.
#
# Loop safety:
#
# `GITHUB_TOKEN`-authored merges (including the merge queue's land
# of the auto-sync PR) do NOT trigger downstream workflow runs
# (GitHub Actions safety). So when the auto-sync PR lands on
# staging, `auto-promote-staging.yml` is NOT triggered by that
# push. The next developer push to staging triggers auto-promote
# normally. No loop possible.
#
# Concurrency:
#
# Two pushes to main in quick succession (e.g., manual UI merge
# immediately followed by auto-promote-staging's ff-merge) could
# otherwise open two overlapping auto-sync PRs. The concurrency
# group serializes runs; the second waits for the first to exit.
# (The first run exits after opening + auto-merge-queueing the PR,
# not after the merge actually completes — so multiple PRs can be
# open simultaneously, but the merge queue handles them serially.)
on:
  push:
    branches: [main]
  # workflow_dispatch lets:
  # 1. Operators manually backfill a missed sync (e.g. after a manual
  #    UI merge that the runner missed).
  # 2. auto-promote-staging.yml's polling tail explicitly invoke us
  #    after the promote PR lands. This is load-bearing: when the
  #    merge queue lands a promote-PR merge, the resulting push to
  #    `main` is "by GITHUB_TOKEN", and per GitHub's no-recursion
  #    rule (https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow)
  #    that push event does NOT fire any downstream workflows. The
  #    `on: push` trigger above is silently dead for the very pattern
  #    we exist to handle. Verified empirically 2026-05-02 against
  #    SHA 76c604fb (PR #2437 staging→main): only ONE workflow fired
  #    (publish-workspace-server-image, dispatched explicitly by
  #    auto-promote's polling tail with an App token). Every other
  #    `on: push: branches: [main]` workflow — including this one —
  #    was suppressed. Until the underlying merge call moves to an
  #    App token, an explicit dispatch is the only reliable path.
  workflow_dispatch:
permissions:
  contents: write
  pull-requests: write
concurrency:
  group: auto-sync-main-to-staging
  cancel-in-progress: false
jobs:
  sync-staging:
    # ubuntu-latest matches every other workflow in this repo. The
    # earlier `[self-hosted, macos, arm64]` was a copy-paste artefact
    # from the molecule-controlplane repo (which IS private and uses a
    # Mac runner) — molecule-core has no Mac runner registered, so the
    # job sat unassigned whenever the trigger fired. Verified 2026-05-02:
    # this is the ONLY workflow in molecule-core/.github/workflows/ with
    # a non-ubuntu runs-on.
    runs-on: ubuntu-latest
    steps:
      - name: Checkout staging
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          fetch-depth: 0
          ref: staging
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: Configure git author
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
      # Gate step: sets needs_sync=false (no-op) when origin/main is
      # already an ancestor of staging's HEAD; otherwise emits the
      # short main SHA and the derived auto-sync branch name for the
      # downstream steps to consume via steps.check.outputs.*.
      - name: Check if staging already contains main
        id: check
        run: |
          set -euo pipefail
          git fetch origin main
          if git merge-base --is-ancestor origin/main HEAD; then
            echo "needs_sync=false" >> "$GITHUB_OUTPUT"
            {
              echo "## ✅ No-op"
              echo
              echo "staging already contains \`origin/main\` ($(git rev-parse --short=8 origin/main))."
            } >> "$GITHUB_STEP_SUMMARY"
          else
            echo "needs_sync=true" >> "$GITHUB_OUTPUT"
            MAIN_SHORT=$(git rev-parse --short=8 origin/main)
            echo "main_short=${MAIN_SHORT}" >> "$GITHUB_OUTPUT"
            echo "branch=auto-sync/main-${MAIN_SHORT}" >> "$GITHUB_OUTPUT"
            echo "::notice::staging is missing main's tip (${MAIN_SHORT}) — opening sync PR"
          fi
      # NOTE(review): `git checkout -B` always (re)creates the branch
      # at the CURRENT HEAD (staging tip) — it does not start from any
      # previously-pushed auto-sync branch state. That matches the
      # "force-update from latest staging" half of the in-script
      # comment; the "prefer reusing it" phrasing is aspirational at
      # best — confirm the intended rerun semantics.
      - name: Create auto-sync branch + merge main
        if: steps.check.outputs.needs_sync == 'true'
        id: prep
        run: |
          set -euo pipefail
          BRANCH="${{ steps.check.outputs.branch }}"
          # If a previous auto-sync run already opened a branch for the
          # same main sha, prefer reusing it (idempotent behavior on
          # workflow restart). Force-update from latest staging anyway
          # so it absorbs any staging-side commits that landed since.
          git checkout -B "$BRANCH"
          if git merge --ff-only origin/main; then
            echo "did_ff=true" >> "$GITHUB_OUTPUT"
            echo "::notice::Fast-forwarded ${BRANCH} to origin/main"
          else
            echo "did_ff=false" >> "$GITHUB_OUTPUT"
            if ! git merge --no-ff origin/main -m "chore: sync main → staging (auto)"; then
              # Hygiene: leave the work tree clean before failing.
              git merge --abort || true
              {
                echo "## ❌ Conflict"
                echo
                echo "Auto-merge \`main → staging\` failed with conflicts."
                echo "A human needs to resolve manually."
              } >> "$GITHUB_STEP_SUMMARY"
              exit 1
            fi
          fi
      # NOTE(review): --force-with-lease with no explicit expectation
      # leases against the local remote-tracking ref for this branch.
      # This step only explicitly fetches `main` (see the check step);
      # whether refs/remotes/origin/auto-sync/* exists here depends on
      # actions/checkout's fetch-depth:0 behavior fetching all branch
      # refs — TODO confirm, otherwise a rerun against an existing
      # remote branch may be rejected rather than "reused".
      - name: Push auto-sync branch
        if: steps.check.outputs.needs_sync == 'true'
        run: |
          set -euo pipefail
          # Force-with-lease so a concurrent auto-sync run can't
          # silently clobber an in-flight branch we just updated. If a
          # different writer touched the branch, we abort and the next
          # run picks up the latest state.
          git push --force-with-lease origin "${{ steps.check.outputs.branch }}"
      # NOTE(review): PR_NUM is scraped from `gh pr create`'s stdout
      # URL with `grep -oE '[0-9]+$'`; under `set -o pipefail` a
      # non-matching grep fails the whole step. Depends on gh keeping
      # the bare PR URL as its stdout — verify against the gh version
      # installed on the runner image.
      - name: Open auto-sync PR + enable auto-merge
        if: steps.check.outputs.needs_sync == 'true'
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          BRANCH: ${{ steps.check.outputs.branch }}
          MAIN_SHORT: ${{ steps.check.outputs.main_short }}
          DID_FF: ${{ steps.prep.outputs.did_ff }}
        run: |
          set -euo pipefail
          # Find existing PR for this branch (idempotent on workflow
          # restart) before creating a new one.
          PR_NUM=$(gh pr list --head "$BRANCH" --base staging --state open --json number --jq '.[0].number // ""')
          if [ -z "$PR_NUM" ]; then
            # Body lives in a temp file to keep the multi-line content
            # out of the YAML block scalar (un-indented newlines inside
            # an inline shell string break YAML parsing).
            BODY_FILE=$(mktemp)
            if [ "$DID_FF" = "true" ]; then
              TITLE="chore: sync main → staging (auto, ff to ${MAIN_SHORT})"
              cat > "$BODY_FILE" <<EOFBODY
          Automated fast-forward of \`staging\` to \`origin/main\` (\`${MAIN_SHORT}\`). Staging has no in-flight commits that diverge from main. Merge queue lands this; no human action needed.
          This PR is auto-generated by \`.github/workflows/auto-sync-main-to-staging.yml\` on every push to \`main\`. It exists because this repo's \`staging\` branch has a \`merge_queue\` ruleset that blocks direct pushes — even from the GitHub Actions integration.
          EOFBODY
            else
              TITLE="chore: sync main → staging (auto, merge ${MAIN_SHORT})"
              cat > "$BODY_FILE" <<EOFBODY
          Automated merge of \`origin/main\` (\`${MAIN_SHORT}\`) into \`staging\`. Staging has commits main doesn't, so this is a non-ff merge that absorbs main's tip. Merge queue lands this.
          This PR is auto-generated by \`.github/workflows/auto-sync-main-to-staging.yml\` on every push to \`main\`.
          EOFBODY
            fi
            # gh pr create prints the URL on stdout; extract the PR number.
            PR_URL=$(gh pr create \
              --base staging \
              --head "$BRANCH" \
              --title "$TITLE" \
              --body-file "$BODY_FILE")
            PR_NUM=$(echo "$PR_URL" | grep -oE '[0-9]+$' | tail -1)
            rm -f "$BODY_FILE"
            echo "::notice::Opened PR #${PR_NUM}"
          else
            echo "::notice::Re-using existing PR #${PR_NUM} for ${BRANCH}"
          fi
          # Enable auto-merge — the merge queue picks it up once
          # required gates are green. Use --merge for merge commits
          # (matches the rest of this repo's PR convention).
          if ! gh pr merge "$PR_NUM" --auto --merge 2>&1; then
            echo "::warning::Failed to enable auto-merge on PR #${PR_NUM} — operator may need to merge manually."
          fi
          {
            echo "## ✅ Auto-sync PR opened"
            echo
            echo "- Branch: \`$BRANCH\`"
            echo "- PR: #$PR_NUM"
            echo "- Strategy: $([ "$DID_FF" = "true" ] && echo "ff" || echo "merge commit")"
            echo
            echo "Merge queue lands the PR once required gates are green; no human action needed unless gates fail."
          } >> "$GITHUB_STEP_SUMMARY"

View File

@ -57,42 +57,17 @@ jobs:
id: bump
if: steps.skip.outputs.skip != 'true'
env:
# Gitea-shape token (act_runner forwards GITHUB_TOKEN as a
# short-lived per-run secret with read access to this repo).
# We hit `/api/v1/repos/.../pulls?state=closed` directly
# because `gh pr list` calls Gitea's GraphQL endpoint, which
# returns HTTP 405 (issue #75 / post-#66 sweep).
GITEA_TOKEN: ${{ github.token }}
REPO: ${{ github.repository }}
GITEA_API_URL: ${{ github.server_url }}/api/v1
PUSH_SHA: ${{ github.sha }}
GH_TOKEN: ${{ github.token }}
run: |
# Find the merged PR whose merge_commit_sha matches this push.
# Gitea's `/repos/{owner}/{repo}/pulls?state=closed` returns
# PRs sorted newest-first; we paginate up to 50 and jq-filter
# on `merge_commit_sha == PUSH_SHA`. Bounded — auto-tag fires
# per push to main, so the matching PR is always among the
# most recent closures. 50 is comfortably more than the
# ~10-20 staging→main promotes that close in any reasonable
# window.
set -euo pipefail
PRS_JSON=$(curl --fail-with-body -sS \
-H "Authorization: token ${GITEA_TOKEN}" \
-H "Accept: application/json" \
"${GITEA_API_URL}/repos/${REPO}/pulls?state=closed&sort=newest&limit=50" \
2>/dev/null || echo "[]")
PR=$(printf '%s' "$PRS_JSON" \
| jq -c --arg sha "$PUSH_SHA" \
'[.[] | select(.merged_at != null and .merge_commit_sha == $sha)] | .[0] // empty')
# The merged PR for this push commit. `gh pr list --search` finds
# closed PRs whose merge commit matches; we take the first.
PR=$(gh pr list --state merged --search "${{ github.sha }}" --json number,labels --jq '.[0]' 2>/dev/null || echo "")
if [ -z "$PR" ] || [ "$PR" = "null" ]; then
echo "No merged PR found for ${PUSH_SHA} — defaulting to patch bump."
echo "No merged PR found for ${{ github.sha }} — defaulting to patch bump."
echo "kind=patch" >> "$GITHUB_OUTPUT"
exit 0
fi
# Gitea returns labels under `.labels[].name`, same shape as
# GitHub's REST. The previous `gh pr list --json number,labels`
# output was identical; jq filter unchanged.
LABELS=$(printf '%s' "$PR" | jq -r '.labels[]?.name // empty')
LABELS=$(echo "$PR" | jq -r '.labels[].name')
if echo "$LABELS" | grep -qx 'release:major'; then
echo "kind=major" >> "$GITHUB_OUTPUT"
elif echo "$LABELS" | grep -qx 'release:minor'; then

View File

@ -1,7 +1,7 @@
name: Block internal-flavored paths
# Hard CI gate. Internal content (positioning, competitive briefs, sales
# playbooks, PMM/press drip, draft campaigns) lives in molecule-ai/internal —
# playbooks, PMM/press drip, draft campaigns) lives in Molecule-AI/internal —
# this public monorepo must never re-acquire those paths. CEO directive
# 2026-04-23 after a fleet-wide audit found 79 internal files leaked here.
#
@ -135,7 +135,7 @@ jobs:
echo "::error::Forbidden internal-flavored paths detected:"
printf "$OFFENDING"
echo ""
echo "These paths belong in molecule-ai/internal, not this public repo."
echo "These paths belong in Molecule-AI/internal, not this public repo."
echo "See docs/internal-content-policy.md for canonical locations."
echo ""
echo "If your file is genuinely public-facing (e.g. a blog post"

View File

@ -19,7 +19,6 @@ on:
branches: [staging, main]
paths:
- 'tools/branch-protection/**'
- '.github/workflows/**'
- '.github/workflows/branch-protection-drift.yml'
permissions:
@ -80,32 +79,3 @@ jobs:
# Repo-admin scope, needed for /branches/:b/protection.
GH_TOKEN: ${{ secrets.GH_TOKEN_FOR_ADMIN_API }}
run: bash tools/branch-protection/drift_check.sh
# Self-test the parity script before running it on the real
# workflows — pins the script's classification logic against
# synthetic safe/unsafe/missing/unsafe-mix/matrix fixtures so a
# regression in the script can't false-pass on the production
# workflow audit. Cheap (~0.5s); always runs.
- name: Self-test check-name parity script
run: bash tools/branch-protection/test_check_name_parity.sh
# Check-name parity gate (#144 / saved memory
# feedback_branch_protection_check_name_parity).
#
# drift_check.sh asserts the live branch protection matches what
# apply.sh would set; check_name_parity.sh closes the orthogonal
# gap: it asserts every required check name in apply.sh maps to a
# workflow job whose "always emits this status" shape is intact.
#
# The two checks fail in different scenarios:
#
# - drift_check fails → live state was rewritten out-of-band
# (UI click, manual PATCH).
# - check_name_parity fails → an apply.sh required name has no
# emitter, OR the emitting workflow has a top-level paths:
# filter without per-step if-gates (the silent-block shape).
#
# Cheap (~1s); runs without the admin token because it only reads
# apply.sh + .github/workflows/ from the checkout.
- name: Run check-name parity gate
run: bash tools/branch-protection/check_name_parity.sh

View File

@ -20,19 +20,6 @@ on:
# a few minutes under load — that's fine for a canary.
- cron: '*/30 * * * *'
workflow_dispatch:
inputs:
keep_on_failure:
description: >-
Skip teardown when the canary fails (debugging only). The
tenant org + EC2 + CF tunnel + DNS stay alive so an operator
can SSM into the workspace EC2 and capture docker logs of the
failing claude-code container. REMEMBER to manually delete
via DELETE /cp/admin/tenants/<slug> when done so the org
doesn't accumulate cost. Only honored on workflow_dispatch;
cron runs always tear down (we don't want unattended cron
to leak resources).
type: boolean
default: false
# Serialise with the full-SaaS workflow so they don't contend for the
# same org-create quota on staging. Different group key from
@ -93,14 +80,6 @@ jobs:
# is "Token Plan only" but cheap-per-token and fast.
E2E_MODEL_SLUG: MiniMax-M2.7-highspeed
E2E_RUN_ID: "canary-${{ github.run_id }}"
# Debug-only: when an operator dispatches with keep_on_failure=true,
# the canary script's E2E_KEEP_ORG=1 path skips teardown so the
# tenant org + EC2 stay alive for SSM-based log capture. Cron runs
# never set this (the input only exists on workflow_dispatch) so
# unattended cron always tears down. See molecule-core#129
# failure mode #1 — capturing the actual exception requires
# docker logs from the live container.
E2E_KEEP_ORG: ${{ github.event.inputs.keep_on_failure == 'true' && '1' || '0' }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
@ -158,28 +137,27 @@ jobs:
id: canary
run: bash tests/e2e/test_staging_full_saas.sh
# Alerting: open a sticky issue on the FIRST failure; comment on
# subsequent failures; auto-close on next green. Comment-on-existing
# de-duplicates so a single open issue accumulates the streak —
# ops sees one issue with N comments rather than N issues.
# Alerting: open an issue only after THREE consecutive failures so
# transient flakes (Cloudflare DNS hiccup, AWS API blip) don't spam
# the issue list. If an issue is already open, we still comment on
# every failure so ops sees the streak. Auto-close on next green.
#
# Why no consecutive-failures threshold (e.g., wait 3 runs before
# filing): the prior threshold check used
# `github.rest.actions.listWorkflowRuns()` which Gitea 1.22.6 does
# not expose (returns 404). On Gitea Actions the threshold call
# ALWAYS failed, breaking the entire alerting step and going days
# silent on real regressions (38h+ chronic red on 2026-05-07/08
# before this fix; tracked in molecule-core#129). Filing on first
# failure is also better UX — we want to know about the first red,
# not wait 90 min for it to "count." Real flakes get one issue +
# a quick close-on-green; persistent reds accumulate comments.
# Threshold rationale: canary fires every 30 min, so 3 failures =
# ~90 min of consecutive red — well past any single-run flake but
# still tight enough that a real outage gets surfaced before the
# next deploy window.
- name: Open issue on failure
if: failure()
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
env:
# Inject the workflow path explicitly — context.workflow is
# the *name*, not the file path the actions API needs.
WORKFLOW_PATH: '.github/workflows/canary-staging.yml'
CONSECUTIVE_THRESHOLD: '3'
with:
script: |
const title = '🔴 Canary failing: staging SaaS smoke';
const runURL = `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
const runURL = `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
// Find an existing open canary issue (stable title match).
// If one exists, this isn't a "first failure" — comment and exit.
@ -199,12 +177,32 @@ jobs:
return;
}
// No open issue yet — file one on this first failure. The
// comment-on-existing branch above means subsequent failures
// accumulate as comments on this same issue, so we don't
// spam new issues per run.
// No open issue yet — check the last N-1 runs' conclusions.
// We open the issue only if the last (THRESHOLD-1) runs ALSO
// failed (so this is the 3rd consecutive red).
const threshold = parseInt(process.env.CONSECUTIVE_THRESHOLD, 10);
const { data: runs } = await github.rest.actions.listWorkflowRuns({
owner: context.repo.owner, repo: context.repo.repo,
workflow_id: process.env.WORKFLOW_PATH,
status: 'completed',
per_page: threshold,
// Skip the current in-progress run; it isn't 'completed' yet.
});
// listWorkflowRuns returns recent first. We need (threshold-1)
// prior failures (current run is the threshold-th).
const priorFailures = (runs.workflow_runs || [])
.slice(0, threshold - 1)
.filter(r => r.id !== context.runId)
.filter(r => r.conclusion === 'failure')
.length;
if (priorFailures < threshold - 1) {
core.info(`Below threshold: ${priorFailures + 1}/${threshold} consecutive failures — not filing yet`);
return;
}
const body =
`Canary run failed at ${new Date().toISOString()}.\n\n` +
`Canary run failed at ${new Date().toISOString()}, ` +
`${threshold} consecutive runs red.\n\n` +
`Run: ${runURL}\n\n` +
`This issue auto-closes on the next green canary run. ` +
`Consecutive failures add a comment here rather than a new issue.`;
@ -213,7 +211,7 @@ jobs:
title, body,
labels: ['canary-staging', 'bug'],
});
core.info('Opened canary failure issue (first red)');
core.info(`Opened canary failure issue (${threshold} consecutive reds)`);
- name: Auto-close canary issue on success
if: success()

View File

@ -108,7 +108,7 @@ jobs:
echo
echo "One or more canary secrets are unset (\`CANARY_TENANT_URLS\`, \`CANARY_ADMIN_TOKENS\`, \`CANARY_CP_SHARED_SECRET\`)."
echo "Phase 2 canary fleet has not been stood up yet —"
echo "see [canary-tenants.md](https://git.moleculesai.app/molecule-ai/molecule-controlplane/blob/main/docs/canary-tenants.md)."
echo "see [canary-tenants.md](https://github.com/Molecule-AI/molecule-controlplane/blob/main/docs/canary-tenants.md)."
echo
echo "**Skipped — promote-to-latest will NOT auto-fire.** Dispatch \`promote-latest.yml\` manually when ready."
} >> "$GITHUB_STEP_SUMMARY"

View File

@ -87,7 +87,7 @@ jobs:
run: go mod download
- if: needs.changes.outputs.platform == 'true'
run: go build ./cmd/server
# CLI (molecli) moved to standalone repo: github.com/molecule-ai/molecule-cli
# CLI (molecli) moved to standalone repo: github.com/Molecule-AI/molecule-cli
- if: needs.changes.outputs.platform == 'true'
run: go vet ./... || true
- if: needs.changes.outputs.platform == 'true'
@ -165,7 +165,7 @@ jobs:
# Strip the package-import prefix so we can match .coverage-allowlist.txt
# entries written as paths relative to workspace-server/.
# Handle both module paths: platform/workspace-server/... and platform/...
rel=$(echo "$file" | sed 's|^github.com/molecule-ai/molecule-monorepo/platform/workspace-server/||; s|^github.com/molecule-ai/molecule-monorepo/platform/||')
rel=$(echo "$file" | sed 's|^github.com/Molecule-AI/molecule-monorepo/platform/workspace-server/||; s|^github.com/Molecule-AI/molecule-monorepo/platform/||')
if echo "$ALLOWLIST" | grep -qxF "$rel"; then
echo "::warning file=workspace-server/$rel::Critical file at ${pct}% coverage (allowlisted, #1823) — fix before expiry."
@ -235,13 +235,7 @@ jobs:
run: npx vitest run --coverage
- name: Upload coverage summary as artifact
if: needs.changes.outputs.canvas == 'true' && always()
# Pinned to v3 for Gitea act_runner v0.6 compatibility — v4+ uses
# the GHES 3.10+ artifact protocol that Gitea 1.22.x does NOT
# implement, surfacing as `GHESNotSupportedError: @actions/artifact
# v2.0.0+, upload-artifact@v4+ and download-artifact@v4+ are not
# currently supported on GHES`. Drop this pin when Gitea ships
# the v4 protocol (tracked: post-Gitea-1.23 followup).
uses: actions/upload-artifact@c6a366c94c3e0affe28c06c8df20a878f24da3cf # v3.2.2
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: canvas-coverage-${{ github.run_id }}
path: canvas/coverage/
@ -249,8 +243,8 @@ jobs:
if-no-files-found: warn
# MCP Server + SDK removed from CI — now in standalone repos:
# - github.com/molecule-ai/molecule-mcp-server (npm CI)
# - github.com/molecule-ai/molecule-sdk-python (PyPI CI)
# - github.com/Molecule-AI/molecule-mcp-server (npm CI)
# - github.com/Molecule-AI/molecule-sdk-python (PyPI CI)
# e2e-api job moved to .github/workflows/e2e-api.yml (issue #458).
# It now has workflow-level concurrency (cancel-in-progress: false) so
@ -440,5 +434,5 @@ jobs:
fi
# SDK + plugin validation moved to standalone repo:
# github.com/molecule-ai/molecule-sdk-python
# github.com/Molecule-AI/molecule-sdk-python

View File

@ -1,92 +1,36 @@
name: CodeQL
# Stub workflow — CodeQL Action is structurally incompatible with Gitea
# Actions (post-2026-05-06 SCM migration off GitHub).
# Controls CodeQL scan triggers for this repo.
#
# Why this is a stub, not a real CodeQL run:
# GitHub's "Code quality" default setup (the UI-configured one) is
# hardcoded to only scan the default branch — on this repo that's
# `staging`, so PRs promoting staging→main would otherwise never be
# scanned. This workflow fills that gap by explicitly scanning both
# branches on push and PR.
#
# 1. github/codeql-action/init@v4 hits api.github.com endpoints
# (CodeQL CLI bundle download + query-pack registry + telemetry)
# that Gitea 1.22.x does NOT proxy. The act_runner has
# GITHUB_SERVER_URL=https://git.moleculesai.app correctly set
# (per saved memory feedback_act_runner_github_server_url and
# /config.yaml on the operator host), but the Gitea API surface
# simply does not implement the codeql-action bundle endpoints.
# Observed in run 1d/3101 (2026-05-07): "::error::404 page not
# found" inside the Initialize CodeQL step, before any analysis.
#
# 2. PR #35 attempted to mark `continue-on-error: true` at the JOB
# level (correct YAML structure). Gitea 1.22.6 does NOT propagate
# job-level continue-on-error to the commit-status API — every
# matrix leg still posts `failure` to the status surface, which
# keeps OVERALL=failure on every push to main + staging and
# blocks visual auto-promote signals (#156).
#
# 3. Hongming policy decision (2026-05-07, task #156): CodeQL is
# ADVISORY, not blocking, on Gitea Actions. We do not block PR
# merge or staging→main promotion on CodeQL findings until we
# have a Gitea-compatible static-analysis pipeline.
#
# What this stub preserves:
#
# - Workflow name `CodeQL` (referenced by auto-promote-staging.yml
# line 67 as a workflow_run gate — must stay stable).
# - Job name template `Analyze (${{ matrix.language }})` and the
# 3-leg matrix (go, javascript-typescript, python). Branch
# protection / required-check parity (#144) keys on these
# exact context names.
# - merge_group + push + pull_request + schedule triggers, so the
# merge-queue check name still resolves (per saved memory
# feedback_branch_protection_check_name_parity).
#
# Re-enabling real analysis (future work):
#
# - Option A: self-hosted Semgrep / OpenGrep via a custom action
# that doesn't hit api.github.com. Tracked behind #156 follow-up.
# - Option B: Sonatype Nexus IQ or similar, called from a step
# that uses the Gitea-issued token only.
# - Option C: re-host this workflow on a small GitHub mirror used
# ONLY for SAST (push-mirrored from Gitea). Acceptable trade-off
# if/when payment is restored on a non-suspended GitHub org —
# but per saved memory feedback_no_single_source_of_truth, we
# should design for multi-vendor backup, not GitHub-only SAST.
#
# Until one of those lands, this stub keeps commit-status green so
# the auto-promote chain isn't permanently red on a tool we cannot
# actually run.
#
# Security policy: ADVISORY. We accept the residual risk of un-scanned
# pushes during this window. Compensating controls in place:
# - secret-scan.yml runs on every push (active, blocks on hits)
# - block-internal-paths.yml blocks forbidden file paths
# - lint-curl-status-capture.yml catches one specific class of bug
# - branch-protection-drift.yml + the merge_group required-checks
# parity keep the gate surface stable
# These are not equivalent to CodeQL coverage. Status of the
# replacement plan is tracked in #156.
# Runs on ubuntu-latest (GHA-hosted — public repo, free). GHAS is NOT
# enabled on this repo, so results are not uploaded to the Security
# tab — the scan fails the PR check on findings, and the SARIF is
# kept as a workflow artifact for triage.
on:
push:
branches: [main, staging]
pull_request:
branches: [main, staging]
# Required so the matrix legs emit a real result on the queued
# commit instead of a false-green when merge queue is enabled.
# Per saved memory feedback_branch_protection_check_name_parity:
# path-filtered / matrix workflows MUST emit the protected name
# via a job that always runs.
# GitHub merge queue fires `merge_group` for the queue's pre-merge CI run.
# Required so CodeQL Analyze checks get a real result on the queued
# commit instead of a false-green. Event only fires once merge queue is
# enabled on the target branch — safe to add unconditionally.
merge_group:
types: [checks_requested]
schedule:
# Weekly heartbeat. Cheap on a stub (the no-op job is ~5s) but
# keeps the workflow visible in Gitea's Actions UI so the next
# operator notices it's a stub instead of a missing surface.
# Weekly run picks up findings in code that hasn't been touched.
- cron: '30 1 * * 0'
# Workflow-level concurrency: only one stub run per branch/PR at a
# time. cancel-in-progress: false because a quick follow-up push
# shouldn't kill an in-flight run — even though the stub is fast,
# the contract should match a real CodeQL run for when we re-enable.
# Workflow-level concurrency: only one CodeQL run per branch/PR at a time.
# `cancel-in-progress: false` queues new runs so a quick follow-up push
# doesn't nuke a 45-min analysis mid-flight.
concurrency:
group: codeql-${{ github.ref }}
cancel-in-progress: false
@ -94,17 +38,13 @@ concurrency:
permissions:
actions: read
contents: read
# No security-events: write — we don't call the upload API anyway,
# GHAS isn't on Gitea.
# No security-events: write — we don't call the upload API.
jobs:
analyze:
# Job NAME shape is load-bearing — auto-promote-staging.yml +
# branch protection both key on `Analyze (${{ matrix.language }})`.
# Do NOT rename without coordinating both surfaces.
name: Analyze (${{ matrix.language }})
runs-on: ubuntu-latest
timeout-minutes: 5
timeout-minutes: 45
strategy:
fail-fast: false
@ -112,25 +52,77 @@ jobs:
language: [go, javascript-typescript, python]
steps:
# Single-step stub: log the policy decision + emit success.
# Exit 0 explicitly so the commit-status API records `success`
# for each of the three matrix legs.
- name: CodeQL stub (advisory, non-blocking on Gitea)
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Checkout sibling plugin repo
# Same reasoning as publish-workspace-server-image.yml — the Go
# module's replace directive needs the plugin source so
# CodeQL's "go build" phase can resolve.
if: matrix.language == 'go'
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
repository: Molecule-AI/molecule-ai-plugin-github-app-auth
path: molecule-ai-plugin-github-app-auth
token: ${{ secrets.PLUGIN_REPO_PAT || secrets.GITHUB_TOKEN }}
# jq is pre-installed on ubuntu-latest — no setup step needed.
- name: Initialize CodeQL
uses: github/codeql-action/init@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2
with:
languages: ${{ matrix.language }}
# security-extended widens past the default to include the
# full security-query set for a public SaaS surface.
queries: security-extended
- name: Autobuild
uses: github/codeql-action/autobuild@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2
- name: Perform CodeQL Analysis
id: analyze
uses: github/codeql-action/analyze@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2
with:
category: "/language:${{ matrix.language }}"
# upload: never — GHAS isn't enabled on this repo, so the
# upload API 403s. Write SARIF locally instead.
upload: never
output: sarif-results/${{ matrix.language }}
- name: Parse SARIF + fail on findings
# The analyze step writes <database>.sarif into the output
# directory — database name is the short CodeQL lang id, not
# the matrix value (e.g. "javascript-typescript" →
# javascript.sarif), so glob rather than hardcode.
# Filter to error/warning severity: security-extended emits
# "note" rows for informational findings we don't want to fail
# the build over.
shell: bash
run: |
set -euo pipefail
cat <<EOF
CodeQL is currently ADVISORY on Gitea Actions (post-2026-05-06).
Language matrix leg: ${{ matrix.language }}
Reason: github/codeql-action/init@v4 calls api.github.com
bundle endpoints that Gitea 1.22.x does not implement.
Observed: "::error::404 page not found" in the Init
CodeQL step on every prior run.
Policy: per Hongming decision 2026-05-07 (#156), CodeQL is
non-blocking until a Gitea-compatible SAST pipeline
lands. See workflow file header for replacement
options + compensating controls.
Status: emitting success so auto-promote isn't permanently
red on a tool we cannot actually run today.
EOF
echo "::notice::CodeQL ${{ matrix.language }} — advisory stub, success."
dir="sarif-results/${{ matrix.language }}"
sarif=$(ls "$dir"/*.sarif 2>/dev/null | head -1 || true)
if [ -z "$sarif" ] || [ ! -f "$sarif" ]; then
echo "::error::No SARIF file found under $dir"
ls -la "$dir" 2>/dev/null || true
exit 1
fi
echo "Parsing $sarif"
count=$(jq '[.runs[].results[] | select(.level == "error" or .level == "warning")] | length' "$sarif")
echo "CodeQL findings (error+warning) for ${{ matrix.language }}: $count"
if [ "$count" -gt 0 ]; then
echo "::error::CodeQL found $count issues. Details below; full SARIF in the artifact."
jq -r '.runs[].results[] | select(.level == "error" or .level == "warning") | " - [\(.level)] \(.ruleId // "?"): \(.message.text // "(no message)") @ \(.locations[0].physicalLocation.artifactLocation.uri // "?"):\(.locations[0].physicalLocation.region.startLine // "?")"' "$sarif"
exit 1
fi
- name: Upload SARIF artifact
# Keep SARIF around on success + failure so triagers can diff.
# 14-day retention — longer than default 3, short enough not
# to bloat quota.
if: always()
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
with:
name: codeql-sarif-${{ matrix.language }}
path: sarif-results/${{ matrix.language }}/
retention-days: 14

View File

@ -12,59 +12,6 @@ name: E2E API Smoke Test
# spending CI cycles. See the in-job comment on the `e2e-api` job for
# why this is one job (not two-jobs-sharing-name) and the 2026-04-29
# PR #2264 incident that drove the consolidation.
#
# Parallel-safety (Class B Hongming-owned CICD red sweep, 2026-05-08)
# -------------------------------------------------------------------
# Same substrate hazard as PR #98 (handlers-postgres-integration). Our
# Gitea act_runner runs with `container.network: host` (operator host
# `/opt/molecule/runners/config.yaml`), which means:
#
# * Two concurrent runs both try to bind their `-p 15432:5432` /
# `-p 16379:6379` host ports — the second postgres/redis FATALs
# with `Address in use` and `docker run` returns exit 125 with
# `Conflict. The container name "/molecule-ci-postgres" is already
# in use by container ...`. Verified in run a7/2727 on 2026-05-07.
# * The fixed container names `molecule-ci-postgres` / `-redis` (the
# pre-fix shape) collide on name AS WELL AS port. The cleanup-with-
# `docker rm -f` at the start of the second job KILLS the first
# job's still-running postgres/redis.
#
# Fix shape (mirrors PR #98's bridge-net pattern, adapted because
# platform-server is a Go binary on the host, not a containerised
# step):
#
# 1. Unique container names per run:
# pg-e2e-api-${RUN_ID}-${RUN_ATTEMPT}
# redis-e2e-api-${RUN_ID}-${RUN_ATTEMPT}
# `${RUN_ID}-${RUN_ATTEMPT}` is unique even across reruns of the
# same run_id.
# 2. Ephemeral host port per run (`-p 0:5432`), then read the actual
# bound port via `docker port` and export DATABASE_URL/REDIS_URL
# pointing at it. No fixed host-port → no port collision.
# 3. `127.0.0.1` (NOT `localhost`) in URLs — IPv6 first-resolve was
# the original flake fixed in #92 and the script's still IPv6-
# enabled.
# 4. `if: always()` cleanup so containers don't leak when test steps
# fail.
#
# Issue #94 items #2 + #3 (also fixed here):
# * Pre-pull `alpine:latest` so the platform-server's provisioner
# (`internal/handlers/container_files.go`) can stand up its
# ephemeral token-write helper without a daemon.io round-trip.
# * Create `molecule-core-net` bridge network if missing so the
# provisioner's container.HostConfig {NetworkMode: ...} attach
# succeeds.
# Item #1 (timeouts) — evidence on recent runs (77/3191, ae/4270, 0e/
# 2318) shows Postgres ready in 3s, Redis in 1s, Platform in 1s when
# they DO come up. Timeouts are not the bottleneck; not bumped.
#
# Item explicitly NOT fixed here: failing test `Status back online`
# fails because the platform's langgraph workspace template image
# (ghcr.io/molecule-ai/workspace-template-langgraph:latest) returns
# 403 Forbidden post-2026-05-06 GitHub org suspension. That is a
# template-registry resolution issue (ADR-002 / local-build mode) and
# belongs in a separate change that touches workspace-server, not
# this workflow file.
on:
push:
@ -131,14 +78,11 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 15
env:
# Unique per-run container names so concurrent runs on the host-
# network act_runner don't collide on name OR port.
# `${RUN_ID}-${RUN_ATTEMPT}` stays unique across reruns of the
# same run_id. PORT is set later (after docker port lookup) since
# we let Docker assign an ephemeral host port.
PG_CONTAINER: pg-e2e-api-${{ github.run_id }}-${{ github.run_attempt }}
REDIS_CONTAINER: redis-e2e-api-${{ github.run_id }}-${{ github.run_attempt }}
DATABASE_URL: postgres://dev:dev@localhost:15432/molecule?sslmode=disable
REDIS_URL: redis://localhost:16379
PORT: "8080"
PG_CONTAINER: molecule-ci-postgres
REDIS_CONTAINER: molecule-ci-redis
steps:
- name: No-op pass (paths filter excluded this commit)
if: needs.detect-changes.outputs.api != 'true'
@ -153,53 +97,11 @@ jobs:
go-version: 'stable'
cache: true
cache-dependency-path: workspace-server/go.sum
- name: Pre-pull alpine + ensure provisioner network (Issue #94 items #2 + #3)
if: needs.detect-changes.outputs.api == 'true'
run: |
# Provisioner uses alpine:latest for ephemeral token-write
# containers (workspace-server/internal/handlers/container_files.go).
# Pre-pull so the first provision in test_api.sh doesn't race
# the daemon's pull cache. Idempotent — `docker pull` is a no-op
# when the image is already present.
docker pull alpine:latest >/dev/null
# Provisioner attaches workspace containers to
# molecule-core-net (workspace-server/internal/provisioner/
# provisioner.go::DefaultNetwork). The bridge already exists on
# the operator host's docker daemon — `network create` is
# idempotent via `|| true`.
docker network create molecule-core-net >/dev/null 2>&1 || true
echo "alpine:latest pre-pulled; molecule-core-net ensured."
- name: Start Postgres (docker)
if: needs.detect-changes.outputs.api == 'true'
run: |
# Defensive cleanup — only matches THIS run's container name,
# so it cannot kill a sibling run's postgres. (Pre-fix the
# name was static and this rm hit other runs' containers.)
docker rm -f "$PG_CONTAINER" 2>/dev/null || true
# `-p 0:5432` requests an ephemeral host port; we read it back
# below and export DATABASE_URL.
docker run -d --name "$PG_CONTAINER" \
-e POSTGRES_USER=dev -e POSTGRES_PASSWORD=dev -e POSTGRES_DB=molecule \
-p 0:5432 postgres:16 >/dev/null
# Resolve the host-side port assignment. `docker port` prints
# `0.0.0.0:NNNN` (and on host-net runners may also print an
# IPv6 line — take the first IPv4 line).
PG_PORT=$(docker port "$PG_CONTAINER" 5432/tcp | awk -F: '/^0\.0\.0\.0:/ {print $2; exit}')
if [ -z "$PG_PORT" ]; then
# Fallback: any first line. Some Docker versions print only
# one line.
PG_PORT=$(docker port "$PG_CONTAINER" 5432/tcp | head -1 | awk -F: '{print $NF}')
fi
if [ -z "$PG_PORT" ]; then
echo "::error::Could not resolve host port for $PG_CONTAINER"
docker port "$PG_CONTAINER" 5432/tcp || true
docker logs "$PG_CONTAINER" || true
exit 1
fi
# 127.0.0.1 (NOT localhost) — IPv6 first-resolve flake (#92).
echo "PG_PORT=${PG_PORT}" >> "$GITHUB_ENV"
echo "DATABASE_URL=postgres://dev:dev@127.0.0.1:${PG_PORT}/molecule?sslmode=disable" >> "$GITHUB_ENV"
echo "Postgres host port: ${PG_PORT}"
docker run -d --name "$PG_CONTAINER" -e POSTGRES_USER=dev -e POSTGRES_PASSWORD=dev -e POSTGRES_DB=molecule -p 15432:5432 postgres:16
for i in $(seq 1 30); do
if docker exec "$PG_CONTAINER" pg_isready -U dev >/dev/null 2>&1; then
echo "Postgres ready after ${i}s"
@ -214,20 +116,7 @@ jobs:
if: needs.detect-changes.outputs.api == 'true'
run: |
docker rm -f "$REDIS_CONTAINER" 2>/dev/null || true
docker run -d --name "$REDIS_CONTAINER" -p 0:6379 redis:7 >/dev/null
REDIS_PORT=$(docker port "$REDIS_CONTAINER" 6379/tcp | awk -F: '/^0\.0\.0\.0:/ {print $2; exit}')
if [ -z "$REDIS_PORT" ]; then
REDIS_PORT=$(docker port "$REDIS_CONTAINER" 6379/tcp | head -1 | awk -F: '{print $NF}')
fi
if [ -z "$REDIS_PORT" ]; then
echo "::error::Could not resolve host port for $REDIS_CONTAINER"
docker port "$REDIS_CONTAINER" 6379/tcp || true
docker logs "$REDIS_CONTAINER" || true
exit 1
fi
echo "REDIS_PORT=${REDIS_PORT}" >> "$GITHUB_ENV"
echo "REDIS_URL=redis://127.0.0.1:${REDIS_PORT}" >> "$GITHUB_ENV"
echo "Redis host port: ${REDIS_PORT}"
docker run -d --name "$REDIS_CONTAINER" -p 16379:6379 redis:7
for i in $(seq 1 15); do
if docker exec "$REDIS_CONTAINER" redis-cli ping 2>/dev/null | grep -q PONG; then
echo "Redis ready after ${i}s"
@ -246,15 +135,13 @@ jobs:
if: needs.detect-changes.outputs.api == 'true'
working-directory: workspace-server
run: |
# DATABASE_URL + REDIS_URL exported by the start-postgres /
# start-redis steps point at this run's per-run host ports.
./platform-server > platform.log 2>&1 &
echo $! > platform.pid
- name: Wait for /health
if: needs.detect-changes.outputs.api == 'true'
run: |
for i in $(seq 1 30); do
if curl -sf http://127.0.0.1:8080/health > /dev/null; then
if curl -sf http://localhost:8080/health > /dev/null; then
echo "Platform up after ${i}s"
exit 0
fi
@ -298,9 +185,6 @@ jobs:
kill "$(cat workspace-server/platform.pid)" 2>/dev/null || true
fi
- name: Stop service containers
# always() so containers don't leak when test steps fail. The
# cleanup is best-effort: if the container is already gone
# (e.g. concurrent rerun race), don't fail the job.
if: always() && needs.detect-changes.outputs.api == 'true'
run: |
docker rm -f "$PG_CONTAINER" 2>/dev/null || true

View File

@ -22,9 +22,9 @@ on:
# spending CI cycles. See e2e-api.yml for the rationale on why this
# is a single job rather than two-jobs-sharing-name.
push:
branches: [main]
branches: [main, staging]
pull_request:
branches: [main]
branches: [main, staging]
workflow_dispatch:
schedule:
# Weekly on Sunday 08:00 UTC — catches Chrome / Playwright / Next.js
@ -139,11 +139,7 @@ jobs:
- name: Upload Playwright report on failure
if: failure() && needs.detect-changes.outputs.canvas == 'true'
# Pinned to v3 for Gitea act_runner v0.6 compatibility — v4+ uses
# the GHES 3.10+ artifact protocol that Gitea 1.22.x does NOT
# implement (see ci.yml upload step for the canonical error
# cite). Drop this pin when Gitea ships the v4 protocol.
uses: actions/upload-artifact@c6a366c94c3e0affe28c06c8df20a878f24da3cf # v3.2.2
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
with:
name: playwright-report-staging
path: canvas/playwright-report-staging/
@ -151,8 +147,7 @@ jobs:
- name: Upload screenshots on failure
if: failure() && needs.detect-changes.outputs.canvas == 'true'
# Pinned to v3 for Gitea act_runner v0.6 compatibility (see above).
uses: actions/upload-artifact@c6a366c94c3e0affe28c06c8df20a878f24da3cf # v3.2.2
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
with:
name: playwright-screenshots
path: canvas/test-results/

View File

@ -32,7 +32,7 @@ name: E2E Staging External Runtime
on:
push:
branches: [main]
branches: [staging, main]
paths:
- 'workspace-server/internal/handlers/workspace.go'
- 'workspace-server/internal/handlers/registry.go'
@ -44,7 +44,7 @@ on:
- 'tests/e2e/test_staging_external_runtime.sh'
- '.github/workflows/e2e-staging-external.yml'
pull_request:
branches: [main]
branches: [staging, main]
paths:
- 'workspace-server/internal/handlers/workspace.go'
- 'workspace-server/internal/handlers/registry.go'

View File

@ -20,12 +20,13 @@ name: E2E Staging SaaS (full lifecycle)
# via the same paths watcher that e2e-api.yml uses)
on:
# Trunk-based (Phase 3 of internal#81): main is the only branch.
# Previously this fired on staging push too because staging was a
# superset of main and ran the gate ahead of auto-promote; with no
# staging branch, main is where E2E gates the deploy.
# Fire on staging push too — previously this only ran on main, which
# meant the most thorough end-to-end test caught regressions AFTER
# they shipped to staging (and then to the auto-promote PR). Running
# on staging push catches them BEFORE the staging→main promotion
# opens, so a green canary into auto-promote is more meaningful.
push:
branches: [main]
branches: [staging, main]
paths:
- 'workspace-server/internal/handlers/registry.go'
- 'workspace-server/internal/handlers/workspace_provision.go'
@ -35,7 +36,7 @@ on:
- 'tests/e2e/test_staging_full_saas.sh'
- '.github/workflows/e2e-staging-saas.yml'
pull_request:
branches: [main]
branches: [staging, main]
paths:
- 'workspace-server/internal/handlers/registry.go'
- 'workspace-server/internal/handlers/workspace_provision.go'

View File

@ -14,42 +14,12 @@ name: Handlers Postgres Integration
# self-review caught it took 2 minutes to set up and would have caught
# the bug at PR-time.
#
# Why this workflow does NOT use `services: postgres:` (Class B fix)
# ------------------------------------------------------------------
# Our act_runner config has `container.network: host` (operator host
# /opt/molecule/runners/config.yaml), which act_runner applies to BOTH
# the job container AND every service container. With host-net, two
# concurrent runs of this workflow both try to bind 0.0.0.0:5432 — the
# second postgres FATALs with `could not create any TCP/IP sockets:
# Address in use`, and Docker auto-removes it (act_runner sets
# AutoRemove:true on service containers). By the time the migrations
# step runs `psql`, the postgres container is gone, hence
# `Connection refused` then `failed to remove container: No such
# container` at cleanup time.
# This job spins a Postgres service container, applies the migration,
# and runs `go test -tags=integration` against a live DB. Required
# check on staging branch protection — backend handler PRs cannot
# merge without a real-DB regression gate.
#
# Per-job `container.network` override is silently ignored by
# act_runner — `--network and --net in the options will be ignored.`
# appears in the runner log. Documented constraint.
#
# So we sidestep `services:` entirely. The job container still uses
# host-net (inherited from runner config; required for cache server
# discovery on the bridge IP 172.18.0.17:42631). We launch a sibling
# postgres on the existing `molecule-core-net` bridge with a
# UNIQUE name per run — `pg-handlers-${RUN_ID}-${RUN_ATTEMPT}` — and
# read its bridge IP via `docker inspect`. A host-net job container
# can reach a bridge-net container directly via the bridge IP (verified
# manually on operator host 2026-05-08).
#
# Trade-offs vs. the original `services:` shape:
# + No host-port collision; N parallel runs share the bridge cleanly
# + `if: always()` cleanup runs even on test-step failure
# - One more step in the workflow (+~3 lines)
# - Requires `molecule-core-net` to exist on the operator host
# (it does; declared in docker-compose.yml + docker-compose.infra.yml)
#
# Class B Hongming-owned CICD red sweep, 2026-05-08.
#
# Cost: ~30s job (postgres pull from cache + go build + 4 tests).
# Cost: ~30s job (postgres pull from GH cache + go build + 4 tests).
on:
push:
@ -89,14 +59,20 @@ jobs:
name: Handlers Postgres Integration
needs: detect-changes
runs-on: ubuntu-latest
env:
# Unique name per run so concurrent jobs don't collide on the
# bridge network. ${RUN_ID}-${RUN_ATTEMPT} is unique even across
# workflow_dispatch reruns of the same run_id.
PG_NAME: pg-handlers-${{ github.run_id }}-${{ github.run_attempt }}
# Bridge network already exists on the operator host (declared
# in docker-compose.yml + docker-compose.infra.yml).
PG_NETWORK: molecule-core-net
services:
postgres:
image: postgres:15-alpine
env:
POSTGRES_PASSWORD: test
POSTGRES_DB: molecule
ports:
- 5432:5432
# GHA spins this with --health-cmd built in for postgres images.
options: >-
--health-cmd pg_isready
--health-interval 5s
--health-timeout 5s
--health-retries 10
defaults:
run:
working-directory: workspace-server
@ -113,57 +89,16 @@ jobs:
with:
go-version: 'stable'
- if: needs.detect-changes.outputs.handlers == 'true'
name: Start sibling Postgres on bridge network
working-directory: .
run: |
# Sanity: the bridge network must exist on the operator host.
# Hard-fail loud if it doesn't — easier to spot than a silent
# auto-create that diverges from the rest of the stack.
if ! docker network inspect "${PG_NETWORK}" >/dev/null 2>&1; then
echo "::error::Bridge network '${PG_NETWORK}' missing on operator host. Re-run docker-compose.infra.yml or check ops handbook."
exit 1
fi
# If a stale container with the same name exists (rerun on
# the same run_id), wipe it first.
docker rm -f "${PG_NAME}" >/dev/null 2>&1 || true
docker run -d \
--name "${PG_NAME}" \
--network "${PG_NETWORK}" \
--health-cmd "pg_isready -U postgres" \
--health-interval 5s \
--health-timeout 5s \
--health-retries 10 \
-e POSTGRES_PASSWORD=test \
-e POSTGRES_DB=molecule \
postgres:15-alpine >/dev/null
# Read back the bridge IP. Always present immediately after
# `docker run -d` for bridge networks.
PG_HOST=$(docker inspect "${PG_NAME}" \
--format "{{(index .NetworkSettings.Networks \"${PG_NETWORK}\").IPAddress}}")
if [ -z "${PG_HOST}" ]; then
echo "::error::Could not resolve PG_HOST for ${PG_NAME} on ${PG_NETWORK}"
docker logs "${PG_NAME}" || true
exit 1
fi
echo "PG_HOST=${PG_HOST}" >> "$GITHUB_ENV"
echo "INTEGRATION_DB_URL=postgres://postgres:test@${PG_HOST}:5432/molecule?sslmode=disable" >> "$GITHUB_ENV"
echo "Started ${PG_NAME} at ${PG_HOST}:5432"
- if: needs.detect-changes.outputs.handlers == 'true'
name: Apply migrations to Postgres service
env:
PGPASSWORD: test
run: |
# Wait for postgres to actually accept connections. Docker's
# health-cmd handles container-side readiness, but the wire
# to the bridge IP is best-tested with pg_isready directly.
# Wait for postgres to actually accept connections (the
# GHA --health-cmd is best-effort but psql can still race).
for i in {1..15}; do
if pg_isready -h "${PG_HOST}" -p 5432 -U postgres -q; then break; fi
echo "waiting for postgres at ${PG_HOST}:5432..."; sleep 2
if pg_isready -h localhost -p 5432 -U postgres -q; then break; fi
echo "waiting for postgres..."; sleep 2
done
# Apply every .up.sql in lexicographic order with
@ -186,17 +121,9 @@ jobs:
# Per-migration result is logged so a failed migration that
# SHOULD have been replayable surfaces in the CI log instead
# of silently failing.
# Apply both *.sql (legacy, lives next to its module) and
# *.up.sql (newer up/down convention) in a single
# lexicographically-sorted pass. Excluding *.down.sql so the
# newest-naming-convention pairs don't undo themselves mid-run.
# Pre-#149-followup this loop only globbed *.up.sql, which
# silently skipped 001_workspaces.sql + 009_activity_logs.sql
# — fine while no integration test depended on those tables,
# not fine once a cross-table atomicity test came in.
set +e
for migration in $(ls migrations/*.sql 2>/dev/null | grep -v '\.down\.sql$' | sort); do
if psql -h "${PG_HOST}" -U postgres -d molecule -v ON_ERROR_STOP=1 \
for migration in migrations/*.up.sql; do
if psql -h localhost -U postgres -d molecule -v ON_ERROR_STOP=1 \
-f "$migration" >/dev/null 2>&1; then
echo "✓ $(basename "$migration")"
else
@ -205,48 +132,29 @@ jobs:
done
set -e
# Sanity: the delegations + workspaces + activity_logs tables
# MUST exist for the integration tests to be meaningful. Hard-
# fail if any didn't land — that would be a real regression we
# want loud.
for tbl in delegations workspaces activity_logs pending_uploads; do
if ! psql -h "${PG_HOST}" -U postgres -d molecule -tA \
-c "SELECT 1 FROM information_schema.tables WHERE table_name = '$tbl'" \
| grep -q 1; then
echo "::error::$tbl table missing after migration replay — handler integration tests would be meaningless"
exit 1
fi
echo "✓ $tbl table present"
done
# Sanity: the delegations table MUST exist for the integration
# tests to be meaningful. Hard-fail if 049 didn't land — that
# would be a real regression we want loud.
if ! psql -h localhost -U postgres -d molecule -tA \
-c "SELECT 1 FROM information_schema.tables WHERE table_name = 'delegations'" \
| grep -q 1; then
echo "::error::delegations table missing after migration replay — handler integration tests would be meaningless"
exit 1
fi
echo "✓ delegations table present"
- if: needs.detect-changes.outputs.handlers == 'true'
name: Run integration tests
env:
INTEGRATION_DB_URL: postgres://postgres:test@localhost:5432/molecule?sslmode=disable
run: |
# INTEGRATION_DB_URL is exported by the start-postgres step;
# points at the per-run bridge IP, not 127.0.0.1, so concurrent
# workflow runs don't fight over a host-net 5432 port.
go test -tags=integration -timeout 5m -v ./internal/handlers/ -run "^TestIntegration_"
- if: failure() && needs.detect-changes.outputs.handlers == 'true'
- if: needs.detect-changes.outputs.handlers == 'true' && failure()
name: Diagnostic dump on failure
env:
PGPASSWORD: test
run: |
echo "::group::postgres container status"
docker ps -a --filter "name=${PG_NAME}" --format '{{.Status}} {{.Names}}' || true
docker logs "${PG_NAME}" 2>&1 | tail -50 || true
echo "::endgroup::"
echo "::group::delegations table state"
psql -h "${PG_HOST}" -U postgres -d molecule -c "SELECT * FROM delegations LIMIT 50;" || true
psql -h localhost -U postgres -d molecule -c "SELECT * FROM delegations LIMIT 50;" || true
echo "::endgroup::"
- if: always() && needs.detect-changes.outputs.handlers == 'true'
name: Stop sibling Postgres
working-directory: .
run: |
# always() so containers don't leak when migrations or tests
# fail. The cleanup is best-effort: if the container is
# already gone (e.g. concurrent rerun race), don't fail the job.
docker rm -f "${PG_NAME}" >/dev/null 2>&1 || true
echo "Cleaned up ${PG_NAME}"

View File

@ -95,68 +95,16 @@ jobs:
- if: needs.detect-changes.outputs.run == 'true'
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
# github-app-auth sibling-checkout removed 2026-05-07 (#157):
# the plugin was dropped + Dockerfile.tenant no longer COPYs it.
# Pre-clone manifest deps before docker compose builds the tenant
# image (Task #173 followup — same pattern as
# publish-workspace-server-image.yml's "Pre-clone manifest deps"
# step).
#
# Why pre-clone here too: tests/harness/compose.yml builds tenant-alpha
# and tenant-beta from workspace-server/Dockerfile.tenant with
# context=../.. (repo root). That Dockerfile expects
# .tenant-bundle-deps/{workspace-configs-templates,org-templates,plugins}
# to be present at build context root (post-#173 it COPYs from there
# instead of running an in-image clone — the in-image clone failed
# with "could not read Username for https://git.moleculesai.app"
# because there's no auth path inside the build sandbox).
#
# Without this step harness-replays fails before any replay runs,
# with `failed to calculate checksum of ref ...
# "/.tenant-bundle-deps/plugins": not found`. Caught by run #892
# (main, 2026-05-07T20:28:53Z) and run #964 (staging — same
# symptom, different root cause: staging still has the in-image
# clone path, hits the auth error directly).
#
# 2026-05-08 sub-finding (#192): the clone step ALSO fails when
# any referenced workspace-template repo is private and the
# AUTO_SYNC_TOKEN bearer (devops-engineer persona) lacks read
# access. Root cause: 5 of 9 workspace-template repos
# (openclaw, codex, crewai, deepagents, gemini-cli) had been
# marked private with no team grant. Resolution: flipped them
# to public per `feedback_oss_first_repo_visibility_default`
# (the OSS surface should be public). Layer-3 (customer-private +
# marketplace third-party repos) tracked separately in
# internal#102.
#
# Token shape matches publish-workspace-server-image.yml: AUTO_SYNC_TOKEN
# is the devops-engineer persona PAT, NOT the founder PAT (per
# `feedback_per_agent_gitea_identity_default`). clone-manifest.sh
# embeds it as basic-auth for the duration of the clones and strips
# .git directories — the token never enters the resulting image.
- name: Pre-clone manifest deps
- name: Checkout sibling plugin repo
# Dockerfile.tenant copies molecule-ai-plugin-github-app-auth/
# at the build-context root (see workspace-server/Dockerfile.tenant
# line 19). PLUGIN_REPO_PAT pattern matches publish-workspace-server-image.yml.
if: needs.detect-changes.outputs.run == 'true'
env:
MOLECULE_GITEA_TOKEN: ${{ secrets.AUTO_SYNC_TOKEN }}
run: |
set -euo pipefail
if [ -z "${MOLECULE_GITEA_TOKEN}" ]; then
echo "::error::AUTO_SYNC_TOKEN secret is empty — register the devops-engineer persona PAT in repo Actions secrets"
exit 1
fi
mkdir -p .tenant-bundle-deps
bash scripts/clone-manifest.sh \
manifest.json \
.tenant-bundle-deps/workspace-configs-templates \
.tenant-bundle-deps/org-templates \
.tenant-bundle-deps/plugins
# Sanity-check counts so a silent partial clone fails fast
# instead of producing a half-empty image.
ws_count=$(find .tenant-bundle-deps/workspace-configs-templates -mindepth 1 -maxdepth 1 -type d | wc -l)
org_count=$(find .tenant-bundle-deps/org-templates -mindepth 1 -maxdepth 1 -type d | wc -l)
plugins_count=$(find .tenant-bundle-deps/plugins -mindepth 1 -maxdepth 1 -type d | wc -l)
echo "Cloned: ws=$ws_count org=$org_count plugins=$plugins_count"
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
repository: Molecule-AI/molecule-ai-plugin-github-app-auth
path: molecule-ai-plugin-github-app-auth
token: ${{ secrets.PLUGIN_REPO_PAT || secrets.GITHUB_TOKEN }}
- name: Install Python deps for replays
# peer-discovery-404 (and future replays) eval Python against the

View File

@ -1,25 +1,14 @@
name: pr-guards
# PR-time guards. Today the only guard is "disable auto-merge when a
# new commit is pushed after auto-merge was enabled" — added 2026-04-27
# after PR #2174 auto-merged with only its first commit because the
# second commit was pushed after the merge queue had locked the PR's
# SHA.
# Thin caller that delegates to the molecule-ci reusable guard. Today
# the guard is just "disable auto-merge when a new commit is pushed
# after auto-merge was enabled" — added 2026-04-27 after PR #2174
# auto-merged with only its first commit because the second commit
# was pushed after the merge queue had locked the PR's SHA.
#
# Why this is inlined (not delegated to molecule-ci's reusable
# workflow): the reusable workflow uses `gh pr merge --disable-auto`,
# which calls GitHub's GraphQL API. Gitea has no GraphQL endpoint and
# returns HTTP 405 on /api/graphql, so the job failed on every Gitea
# PR push since the 2026-05-06 migration. Gitea also has no `--auto`
# merge primitive that this job could be acting on, so the right
# behaviour on Gitea is "no-op + green status" — not a 405.
#
# Inlining (vs. an `if:` on the `uses:` line) keeps the job ALWAYS
# running, which matters for branch protection: required-check names
# need a job that emits SUCCESS terminal state, not SKIPPED. See
# `feedback_branch_protection_check_name_parity` and `feedback_pr_merge_safety_guards`.
#
# Issue #88 item 1.
# When more PR-time guards land in molecule-ci, add them here as
# additional jobs that share the same pull_request:synchronize
# trigger.
on:
pull_request:
@ -30,34 +19,4 @@ permissions:
jobs:
disable-auto-merge-on-push:
runs-on: ubuntu-latest
steps:
# Detect Gitea Actions. act_runner sets GITEA_ACTIONS=true in the
# step env on every job. Belt-and-suspenders: also check the repo
# url's host, which is independent of any runner-side env config
# (covers a future Gitea host where the env var is forgotten).
- name: Detect runner host
id: host
run: |
if [[ "${GITEA_ACTIONS:-}" == "true" ]] || [[ "${{ github.server_url }}" == *moleculesai.app* ]] || [[ "${{ github.event.repository.html_url }}" == *moleculesai.app* ]]; then
echo "is_gitea=true" >> "$GITHUB_OUTPUT"
echo "::notice::Gitea Actions detected — auto-merge gating is not applicable here (Gitea has no --auto merge primitive). Job will no-op."
else
echo "is_gitea=false" >> "$GITHUB_OUTPUT"
fi
- name: Disable auto-merge (GitHub only)
if: steps.host.outputs.is_gitea != 'true'
env:
GH_TOKEN: ${{ github.token }}
PR: ${{ github.event.pull_request.number }}
REPO: ${{ github.repository }}
NEW_SHA: ${{ github.sha }}
run: |
set -eu
gh pr merge "$PR" --disable-auto -R "$REPO" || true
gh pr comment "$PR" -R "$REPO" --body "🔒 Auto-merge disabled — new commit (\`${NEW_SHA:0:7}\`) pushed after auto-merge was enabled. The merge queue locks SHAs at entry, so subsequent pushes can race. Verify the new commit and re-enable with \`gh pr merge --auto\`."
- name: Gitea no-op
if: steps.host.outputs.is_gitea == 'true'
run: echo "Gitea Actions — auto-merge gating not applicable; no-op (job intentionally green so branch protection's required-check name lands SUCCESS)."
uses: Molecule-AI/molecule-ci/.github/workflows/disable-auto-merge-on-push.yml@main

View File

@ -25,7 +25,7 @@ name: publish-runtime
# 3. Publishes to PyPI via the PyPA Trusted Publisher action (OIDC).
# No static API token is stored — PyPI verifies the workflow's
# OIDC claim against the trusted-publisher config registered for
# molecule-ai-workspace-runtime (molecule-ai/molecule-core,
# molecule-ai-workspace-runtime (Molecule-AI/molecule-core,
# publish-runtime.yml, environment pypi-publish).
#
# After publish: the 8 template repos pick up the new version on their
@ -166,7 +166,7 @@ jobs:
- name: Publish to PyPI (Trusted Publisher / OIDC)
# PyPI side is configured: project molecule-ai-workspace-runtime →
# publisher molecule-ai/molecule-core, workflow publish-runtime.yml,
# publisher Molecule-AI/molecule-core, workflow publish-runtime.yml,
# environment pypi-publish. The action mints a short-lived OIDC
# token and exchanges it for a PyPI upload credential — no static
# API token in this repo's secrets.
@ -282,33 +282,42 @@ jobs:
echo "::error::Refusing to fan out cascade against stale or corrupt PyPI surfaces."
exit 1
- name: Fan out via push to .runtime-version
- name: Fan out repository_dispatch
env:
# Gitea PAT with write:repository scope on the 8 cascade-active
# template repos. Used here for `git push` (NOT for an API
# dispatch — Gitea 1.22.6 has no repository_dispatch endpoint;
# empirically verified across 6 candidate paths in molecule-
# core#20 issuecomment-913). The push trips each template's
# existing `on: push: branches: [main]` trigger on
# publish-image.yml, which then reads the updated
# .runtime-version via its resolve-version job.
DISPATCH_TOKEN: ${{ secrets.DISPATCH_TOKEN }}
# Fine-grained PAT with `actions:write` on the 8 template repos.
# GITHUB_TOKEN can't fire dispatches across repos — needs an explicit
# token. Stored as a repo secret; rotate per the standard schedule.
DISPATCH_TOKEN: ${{ secrets.TEMPLATE_DISPATCH_TOKEN }}
# Single source of truth: the publish job's output, which handles
# tag/manual-input/auto-bump uniformly. The previous fallback
# (`steps.version.outputs.version` from inside the cascade job)
# was a dead reference — different job, no shared step scope.
RUNTIME_VERSION: ${{ needs.publish.outputs.version }}
run: |
set +e # don't abort on a single repo failure — collect them all
# Soft-skip on workflow_dispatch when the token is missing
# (operator ad-hoc test); hard-fail on push so unattended
# publishes can't silently skip the cascade. Same shape as
# the original v1, intentional split per the schedule-vs-
# dispatch hardening 2026-04-28.
# Schedule-vs-dispatch behaviour split (hardened 2026-04-28
# after the sweep-cf-orphans soft-skip incident — same class
# of bug):
#
# The earlier "skipping cascade. templates will pick up the
# new version on their own next rebuild" message was wrong —
# templates only build on this dispatch trigger; without it
# they stay pinned to whatever runtime version they last saw.
# A silent skip here means "PyPI is current, templates are
# not" and the gap is invisible until someone notices a
# template still on the old version weeks later.
#
# - push → exit 1 (red CI surfaces the gap)
# - workflow_dispatch → exit 0 with a warning (operator
# ran this ad-hoc; let them rerun
# after fixing the secret)
if [ -z "$DISPATCH_TOKEN" ]; then
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
echo "::warning::DISPATCH_TOKEN secret not set — skipping cascade."
echo "::warning::TEMPLATE_DISPATCH_TOKEN secret not set — skipping cascade."
echo "::warning::set it at Settings → Secrets and Variables → Actions, then rerun. Templates will stay on the prior runtime version until either this token is set or each template is rebuilt manually."
exit 0
fi
echo "::error::DISPATCH_TOKEN secret missing — cascade cannot fan out."
echo "::error::TEMPLATE_DISPATCH_TOKEN secret missing — cascade cannot fan out."
echo "::error::PyPI was published, but the 8 template repos will NOT pick up the new version until this token is restored and a republish dispatches the cascade."
echo "::error::set it at Settings → Secrets and Variables → Actions; then re-trigger publish-runtime via workflow_dispatch."
exit 1
@ -318,119 +327,37 @@ jobs:
echo "::error::publish job did not expose a version output — cascade cannot fan out"
exit 1
fi
# All 9 workspace templates declared in manifest.json. The list
# MUST stay aligned with manifest.json's workspace_templates —
# cascade-list-drift-gate.yml enforces this in CI per the
# codex-stuck-on-stale-runtime invariant from PR #2556.
# Long-term goal: derive this list from manifest.json so it
# can't drift even on a manifest edit (RFC #388 Phase-1).
#
# Per-template publish-image.yml presence is checked at
# cascade-time below: codex doesn't ship one today, so the
# cascade soft-skips it with an informational message rather
# than dropping it from this list (which would re-introduce
# the drift the gate exists to catch).
GITEA_URL="${GITEA_URL:-https://git.moleculesai.app}"
# All 9 active workspace template repos. The PR #2536 pruning
# ("deprecated, no shipping images") was empirically wrong:
# continuous-synth-e2e.yml defaults to langgraph as its primary
# canary (line 44), and every excluded template had successful
# publish-image runs as of 2026-05-03 — none were dormant.
# Symptom of the prune: today's a2a-sdk strict-mode fix
# (#2566 / commit e1628c4) cascaded to 4 templates but never
# reached langgraph, so the synth-E2E correctly canary'd a fix
# that had landed but not deployed. Re-added the 5 templates.
# Long-term: derive this list from manifest.json so cascade
# scope can't drift from E2E scope — tracked in RFC #388 as a
# Phase-1 invariant.
TEMPLATES="claude-code hermes openclaw codex langgraph crewai autogen deepagents gemini-cli"
FAILED=""
SKIPPED=""
# Configure git identity once. The persona owning DISPATCH_TOKEN
# is the same identity that authored this commit on each
# template; using a generic "publish-runtime cascade" co-author
# trailer in the message keeps the audit trail honest about the
# workflow-driven origin.
git config --global user.name "publish-runtime cascade"
git config --global user.email "publish-runtime@moleculesai.app"
WORKDIR="$(mktemp -d)"
for tpl in $TEMPLATES; do
REPO="molecule-ai/molecule-ai-workspace-template-$tpl"
CLONE="$WORKDIR/$tpl"
# Pre-check: skip templates without a publish-image.yml.
# The cascade's job is to trip the template's on-push
# rebuild — if there's no rebuild workflow, pushing a
# .runtime-version commit is just noise on the target
# repo. Use the Gitea contents API (no clone required for
# the probe). 200 = present; 404 = absent.
HTTP=$(curl -sS -o /dev/null -w "%{http_code}" \
-H "Authorization: token $DISPATCH_TOKEN" \
"$GITEA_URL/api/v1/repos/$REPO/contents/.github/workflows/publish-image.yml")
if [ "$HTTP" = "404" ]; then
echo "↷ $tpl has no publish-image.yml — soft-skip (informational; manifest still tracks it)"
SKIPPED="$SKIPPED $tpl"
continue
fi
if [ "$HTTP" != "200" ]; then
echo "::warning::$tpl publish-image.yml probe returned HTTP $HTTP — proceeding anyway, push will surface the real failure if any"
fi
# Use a per-template attempt loop so a transient race (e.g.
# human pushing to the same template at the same instant)
# doesn't lose the cascade. Bounded retries (3) — beyond
# that we surface the failure and let the operator retry.
attempt=0
success=false
while [ $attempt -lt 3 ]; do
attempt=$((attempt + 1))
rm -rf "$CLONE"
if ! git clone --depth=1 \
"https://x-access-token:${DISPATCH_TOKEN}@${GITEA_URL#https://}/$REPO.git" \
"$CLONE" >/tmp/clone.log 2>&1; then
echo "::warning::clone $tpl attempt $attempt failed: $(tail -n3 /tmp/clone.log)"
sleep 2
continue
fi
cd "$CLONE"
echo "$VERSION" > .runtime-version
# Idempotency guard: if the file already matches, this
# publish is a re-run for a version already cascaded.
# Don't push a no-op commit (would spuriously re-trip the
# template's on-push and rebuild for nothing).
if git diff --quiet -- .runtime-version; then
echo "✓ $tpl already at $VERSION — no commit needed (idempotent)"
success=true
cd - >/dev/null
break
fi
git add .runtime-version
git commit -m "chore: pin runtime to $VERSION (publish-runtime cascade)" \
-m "Co-Authored-By: publish-runtime cascade <publish-runtime@moleculesai.app>" \
>/dev/null
if git push origin HEAD:main >/tmp/push.log 2>&1; then
echo "✓ $tpl pushed $VERSION on attempt $attempt"
success=true
cd - >/dev/null
break
fi
# Likely a non-fast-forward — pull-rebase and retry.
# Don't force-push: that would silently overwrite a racing
# human/cascade commit.
echo "::warning::push $tpl attempt $attempt failed, pull-rebasing: $(tail -n3 /tmp/push.log)"
git pull --rebase origin main >/tmp/rebase.log 2>&1 || true
cd - >/dev/null
done
if [ "$success" != "true" ]; then
REPO="Molecule-AI/molecule-ai-workspace-template-$tpl"
STATUS=$(curl -sS -o /tmp/dispatch.out -w "%{http_code}" \
-X POST "https://api.github.com/repos/$REPO/dispatches" \
-H "Authorization: Bearer $DISPATCH_TOKEN" \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
-d "{\"event_type\":\"runtime-published\",\"client_payload\":{\"runtime_version\":\"$VERSION\"}}")
if [ "$STATUS" = "204" ]; then
echo "✓ dispatched $tpl ($VERSION)"
else
echo "::warning::✗ failed to dispatch $tpl: HTTP $STATUS — $(cat /tmp/dispatch.out)"
FAILED="$FAILED $tpl"
fi
done
rm -rf "$WORKDIR"
if [ -n "$FAILED" ]; then
echo "::error::Cascade incomplete after 3 retries each. Failed templates:$FAILED"
echo "::error::PyPI publish succeeded; failed templates lag the new version. Re-run this workflow_dispatch with the same version to retry only the laggers (idempotent — already-cascaded templates skip)."
exit 1
fi
if [ -n "$SKIPPED" ]; then
echo "Cascade complete: pinned $VERSION on cascade-active templates. Soft-skipped (no publish-image.yml):$SKIPPED"
else
echo "Cascade complete: $VERSION pinned across all manifest workspace_templates."
echo "::warning::Cascade incomplete. Failed templates:$FAILED"
# Don't fail the whole job — PyPI publish already succeeded;
# operators can retry the failed templates manually.
fi

View File

@ -37,7 +37,6 @@ on:
- 'workspace-server/**'
- 'canvas/**'
- 'manifest.json'
- 'scripts/**'
- '.github/workflows/publish-workspace-server-image.yml'
workflow_dispatch:
@ -61,8 +60,8 @@ permissions:
packages: write
env:
IMAGE_NAME: 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/platform
TENANT_IMAGE_NAME: 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/platform-tenant
IMAGE_NAME: ghcr.io/molecule-ai/platform
TENANT_IMAGE_NAME: ghcr.io/molecule-ai/platform-tenant
jobs:
build-and-push:
@ -71,91 +70,40 @@ jobs:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
# github-app-auth sibling-checkout removed 2026-05-07 (#157):
# plugin was dropped + workspace-server/Dockerfile no longer
# COPYs it.
- name: Checkout sibling plugin repo
# workspace-server/Dockerfile expects
# ./molecule-ai-plugin-github-app-auth at build-context root because
# the Go module has a `replace` directive pointing at /plugin inside
# the image. Pre-repo-split the plugin lived in the monorepo; the
# 2026-04-18 restructure moved it out but didn't add this clone step
# — which is why publish was failing after that restructure.
#
# Uses a fine-grained PAT (PLUGIN_REPO_PAT) because the plugin repo
# is private and the default GITHUB_TOKEN is scoped to THIS repo.
# The PAT needs Contents:Read on Molecule-AI/molecule-ai-plugin-
# github-app-auth. Falls back to the default token for the (rare)
# case where an operator made the plugin repo public.
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
repository: Molecule-AI/molecule-ai-plugin-github-app-auth
path: molecule-ai-plugin-github-app-auth
token: ${{ secrets.PLUGIN_REPO_PAT || secrets.GITHUB_TOKEN }}
# ECR auth + buildx setup are now inline in each build step
# below (Task #173, 2026-05-07).
#
# Why moved inline: aws-actions/configure-aws-credentials@v4 +
# aws-actions/amazon-ecr-login@v2 + docker/setup-buildx-action
# all left auth state in places that the actual `docker push`
# couldn't see on Gitea Actions:
# - The actions wrote to a step-scoped DOCKER_CONFIG path
# that didn't survive into subsequent shell steps.
# - Buildx couldn't bridge the runner container ↔
# operator-host docker daemon auth gap (401 on the
# docker-container driver, "no basic auth credentials"
# with the action-driven login).
#
# Doing AWS+ECR auth inline (`aws ecr get-login-password |
# docker login`) in the same shell step as `docker build` +
# `docker push` is the operator-host manual approach, mapped
# 1:1 into CI. Auth state is guaranteed to live in the env that
# `docker push` actually runs from.
#
# Post-suspension target is the operator's ECR org
# (153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/*),
# which already hosts platform-tenant + workspace-template-* +
# runner-base images. AWS creds come from the
# AWS_ACCESS_KEY_ID/SECRET secrets bound to the molecule-cp
# IAM user. Closes #161.
- name: Log in to GHCR
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
- name: Compute tags
id: tags
run: |
echo "sha=${GITHUB_SHA::7}" >> "$GITHUB_OUTPUT"
# Pre-clone manifest deps before docker build (Task #173 fix).
#
# Why pre-clone: post-2026-05-06, every workspace-template-* repo on
# Gitea (codex, crewai, deepagents, gemini-cli, langgraph) plus all
# 7 org-template-* repos are private. The pre-fix Dockerfile.tenant
# ran `git clone` inside an in-image stage, which had no auth path
# — every CI build failed with "fatal: could not read Username for
# https://git.moleculesai.app". For weeks, every workspace-server
# rebuild required a manual operator-host push. Now we clone in the
# trusted CI context (where AUTO_SYNC_TOKEN is naturally available)
# and Dockerfile.tenant just COPYs from .tenant-bundle-deps/.
#
# Token shape: AUTO_SYNC_TOKEN is the devops-engineer persona PAT
# (see /etc/molecule-bootstrap/agent-secrets.env). Per saved memory
# `feedback_per_agent_gitea_identity_default`, every CI surface uses
# a per-persona token, never the founder PAT. clone-manifest.sh
# embeds it as basic-auth (oauth2:<token>) for the duration of the
# clones, then strips .git directories — the token never enters
# the resulting image.
#
# Idempotent: if a re-run finds populated dirs, clone-manifest.sh
# skips them; safe to retrigger via path-filter or workflow_dispatch.
- name: Pre-clone manifest deps
env:
MOLECULE_GITEA_TOKEN: ${{ secrets.AUTO_SYNC_TOKEN }}
run: |
set -euo pipefail
# Fail fast if the persona PAT is absent — an empty token would make every
# private-repo clone below fail with an unhelpful git auth prompt instead.
if [ -z "${MOLECULE_GITEA_TOKEN}" ]; then
echo "::error::AUTO_SYNC_TOKEN secret is empty — register the devops-engineer persona PAT in repo Actions secrets"
exit 1
fi
# Parent dir for all cloned deps; clone-manifest.sh populates the three
# subtrees below from manifest.json (idempotent — pre-populated dirs are
# skipped on re-runs, per the step comment above).
mkdir -p .tenant-bundle-deps
bash scripts/clone-manifest.sh \
manifest.json \
.tenant-bundle-deps/workspace-configs-templates \
.tenant-bundle-deps/org-templates \
.tenant-bundle-deps/plugins
# Sanity-check counts so a silent partial clone fails fast
# instead of producing a half-empty image.
# -mindepth/-maxdepth 1 counts only the immediate child dirs (one per
# cloned repo), not nested content.
ws_count=$(find .tenant-bundle-deps/workspace-configs-templates -mindepth 1 -maxdepth 1 -type d | wc -l)
org_count=$(find .tenant-bundle-deps/org-templates -mindepth 1 -maxdepth 1 -type d | wc -l)
plugins_count=$(find .tenant-bundle-deps/plugins -mindepth 1 -maxdepth 1 -type d | wc -l)
echo "Cloned: ws=$ws_count org=$org_count plugins=$plugins_count"
# Counts are derived from manifest.json (9 ws / 7 org / 21
# plugins as of 2026-05-07). If manifest.json grows but the
# clone step regresses silently, the find above caps at the
# actual disk state — but clone-manifest.sh's own EXPECTED vs
# CLONED check (line ~95) is the authoritative fail-fast.
# Canary-gated release flow:
# - This step always publishes :staging-<sha> + :staging-latest.
# - On staging push, staging-CP picks up :staging-latest immediately
@ -181,82 +129,58 @@ jobs:
# were running pre-RFC code. Adding the staging trigger above closes
# that gap. Earlier 2026-04-24 incident: a static :staging-<sha> pin
# drifted 10 days behind staging — same class of bug, different
# mechanism. ECR repo molecule-ai/platform created 2026-05-07.
# Build + push platform image with plain `docker` (no buildx).
# GIT_SHA bakes into the Go binary via -ldflags so /buildinfo
# returns it at runtime — see Dockerfile + buildinfo/buildinfo.go.
# The OCI revision label below carries the same value for registry
# tooling; the duplication is intentional.
- name: Build & push platform image to ECR (staging-<sha> + staging-latest)
env:
IMAGE_NAME: ${{ env.IMAGE_NAME }}
TAG_SHA: staging-${{ steps.tags.outputs.sha }}
TAG_LATEST: staging-latest
GIT_SHA: ${{ github.sha }}
REPO: ${{ github.repository }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: us-east-2
run: |
set -euo pipefail
# ECR auth in-step so config.json is populated in the same
# shell env that runs `docker push`. ECR get-login-password
# tokens last 12h, plenty for a single-step build+push.
# `%%/*` strips everything after the first `/`, leaving just the registry
# host (153263036946.dkr.ecr.us-east-2.amazonaws.com) for `docker login`.
ECR_REGISTRY="${IMAGE_NAME%%/*}"
aws ecr get-login-password --region us-east-2 | \
docker login --username AWS --password-stdin "${ECR_REGISTRY}"
# Single build tagged twice (immutable <sha> tag + moving staging-latest);
# GIT_SHA is both an -ldflags build-arg (see step comment above) and the
# OCI revision label — the duplication is intentional.
docker build \
--file ./workspace-server/Dockerfile \
--build-arg GIT_SHA="${GIT_SHA}" \
--label "org.opencontainers.image.source=https://github.com/${REPO}" \
--label "org.opencontainers.image.revision=${GIT_SHA}" \
--label "org.opencontainers.image.description=Molecule AI platform (Go API server) — pending canary verify" \
--tag "${IMAGE_NAME}:${TAG_SHA}" \
--tag "${IMAGE_NAME}:${TAG_LATEST}" \
.
docker push "${IMAGE_NAME}:${TAG_SHA}"
docker push "${IMAGE_NAME}:${TAG_LATEST}"
# Canvas uses same-origin fetches. The tenant Go platform
# reverse-proxies /cp/* to the SaaS CP via its CP_UPSTREAM_URL
# env; the tenant's /canvas/viewport, /approvals/pending,
# /org/templates etc. live on the tenant platform itself.
# Both legs share one origin (the tenant subdomain) so
# PLATFORM_URL="" forces canvas to fetch paths as relative,
# which land same-origin.
#
# Self-hosted / private-label deployments override this at
# build time with a specific backend (e.g. local dev:
# NEXT_PUBLIC_PLATFORM_URL=http://localhost:8080).
- name: Build & push tenant image to ECR (staging-<sha> + staging-latest)
env:
TENANT_IMAGE_NAME: ${{ env.TENANT_IMAGE_NAME }}
TAG_SHA: staging-${{ steps.tags.outputs.sha }}
TAG_LATEST: staging-latest
GIT_SHA: ${{ github.sha }}
REPO: ${{ github.repository }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: us-east-2
run: |
set -euo pipefail
# Re-login: the platform-image step's docker login wrote to
# the same config.json, so this is technically redundant — but
# making each push step self-contained keeps the workflow
# robust to step reordering / future extraction.
ECR_REGISTRY="${TENANT_IMAGE_NAME%%/*}"
aws ecr get-login-password --region us-east-2 | \
docker login --username AWS --password-stdin "${ECR_REGISTRY}"
# NEXT_PUBLIC_PLATFORM_URL is deliberately set EMPTY: canvas then issues
# relative fetches, which land same-origin on the tenant subdomain (see
# the step comment above). Self-hosted builds override it.
docker build \
--file ./workspace-server/Dockerfile.tenant \
--build-arg NEXT_PUBLIC_PLATFORM_URL= \
--build-arg GIT_SHA="${GIT_SHA}" \
--label "org.opencontainers.image.source=https://github.com/${REPO}" \
--label "org.opencontainers.image.revision=${GIT_SHA}" \
--label "org.opencontainers.image.description=Molecule AI tenant platform + canvas — pending canary verify" \
--tag "${TENANT_IMAGE_NAME}:${TAG_SHA}" \
--tag "${TENANT_IMAGE_NAME}:${TAG_LATEST}" \
.
docker push "${TENANT_IMAGE_NAME}:${TAG_SHA}"
docker push "${TENANT_IMAGE_NAME}:${TAG_LATEST}"
# mechanism.
- name: Build & push platform image to GHCR (staging-<sha> + staging-latest)
uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0
with:
context: .
file: ./workspace-server/Dockerfile
platforms: linux/amd64
push: true
tags: |
${{ env.IMAGE_NAME }}:staging-${{ steps.tags.outputs.sha }}
${{ env.IMAGE_NAME }}:staging-latest
cache-from: type=gha
cache-to: type=gha,mode=max
# GIT_SHA bakes into the Go binary via -ldflags so /buildinfo
# returns it at runtime — see Dockerfile + buildinfo/buildinfo.go.
# This is the same value as the OCI revision label below; passing
# it twice is intentional, the OCI label is for registry tooling
# while /buildinfo is for the redeploy verification step.
build-args: |
GIT_SHA=${{ github.sha }}
labels: |
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.description=Molecule AI platform (Go API server) — pending canary verify
- name: Build & push tenant image to GHCR (staging-<sha> + staging-latest)
uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0
with:
context: .
file: ./workspace-server/Dockerfile.tenant
platforms: linux/amd64
push: true
tags: |
${{ env.TENANT_IMAGE_NAME }}:staging-${{ steps.tags.outputs.sha }}
${{ env.TENANT_IMAGE_NAME }}:staging-latest
cache-from: type=gha
cache-to: type=gha,mode=max
# Canvas uses same-origin fetches. The tenant Go platform
# reverse-proxies /cp/* to the SaaS CP via its CP_UPSTREAM_URL
# env; the tenant's /canvas/viewport, /approvals/pending,
# /org/templates etc. live on the tenant platform itself.
# Both legs share one origin (the tenant subdomain) so
# PLATFORM_URL="" forces canvas to fetch paths as relative,
# which land same-origin.
#
# Self-hosted / private-label deployments override this at
# build time with a specific backend (e.g. local dev:
# NEXT_PUBLIC_PLATFORM_URL=http://localhost:8080).
build-args: |
NEXT_PUBLIC_PLATFORM_URL=
GIT_SHA=${{ github.sha }}
labels: |
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.description=Molecule AI tenant platform + canvas — pending canary verify

View File

@ -9,7 +9,7 @@ name: redeploy-tenants-on-main
#
# This workflow closes the gap by calling the control-plane admin
# endpoint that performs a canary-first, batched, health-gated rolling
# redeploy across every live tenant. Implemented in molecule-ai/
# redeploy across every live tenant. Implemented in Molecule-AI/
# molecule-controlplane as POST /cp/admin/tenants/redeploy-fleet
# (feat/tenant-auto-redeploy, landing alongside this workflow).
#
@ -146,7 +146,7 @@ jobs:
- name: Call CP redeploy-fleet
# CP_ADMIN_API_TOKEN must be set as a repo/org secret on
# molecule-ai/molecule-core, matching the staging/prod CP's
# Molecule-AI/molecule-core, matching the staging/prod CP's
# CP_ADMIN_API_TOKEN env. Stored in Railway, mirrored to this
# repo's secrets for CI.
env:

View File

@ -36,7 +36,7 @@ on:
workflow_run:
workflows: ['publish-workspace-server-image']
types: [completed]
branches: [main]
branches: [staging]
workflow_dispatch:
inputs:
target_tag:
@ -97,7 +97,7 @@ jobs:
- name: Call staging-CP redeploy-fleet
# CP_STAGING_ADMIN_API_TOKEN must be set as a repo/org secret
# on molecule-ai/molecule-core, matching staging-CP's
# on Molecule-AI/molecule-core, matching staging-CP's
# CP_ADMIN_API_TOKEN env var (visible in Railway controlplane
# / staging environment). Stored separately from the prod
# CP_ADMIN_API_TOKEN so a leak of one doesn't auth the other.

View File

@ -0,0 +1,105 @@
name: Retarget main PRs to staging
# Mechanical enforcement of SHARED_RULES rule 8 ("Staging-first workflow, no
# exceptions"). When a bot opens a PR against main, retarget it to staging
# automatically and leave an explanatory comment. Human CEO-authored PRs (the
# staging→main promotion PR, etc.) are left alone — they're the authorised
# exception to the rule.
#
# Why an Action instead of only a prompt rule: prompt rules depend on every
# role's system-prompt.md staying in sync. Today 5 of 8 engineer roles
# (core-be, core-fe, app-fe, app-qa, devops-engineer) don't have the
# staging-first section — the bot keeps opening PRs to main. An Action
# enforces the invariant regardless of prompt drift.
on:
pull_request_target:
types: [opened, reopened]
branches: [main]
permissions:
pull-requests: write
jobs:
retarget:
name: Retarget to staging
runs-on: ubuntu-latest
# Only fire for bot-authored PRs. Human CEO PRs (staging→main promotion)
# are intentional and pass through.
#
# Head-ref guard: never retarget a PR whose head IS `staging` — those
# are the auto-promote staging→main PRs (opened by molecule-ai[bot]
# since #2586 switched to an App token, which now passes the bot
# filter below). Retargeting head=staging onto base=staging fails
# with HTTP 422 "no new commits between base 'staging' and head
# 'staging'", which used to surface as a noisy red workflow run on
# every auto-promote (caught 2026-05-03 on PR #2588).
if: >-
github.event.pull_request.head.ref != 'staging'
&& (
github.event.pull_request.user.type == 'Bot'
|| endsWith(github.event.pull_request.user.login, '[bot]')
|| github.event.pull_request.user.login == 'app/molecule-ai'
|| github.event.pull_request.user.login == 'molecule-ai[bot]'
)
steps:
- name: Retarget PR base to staging
id: retarget
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
# Issue #1884: when the bot opens a PR against main and there's
# already another PR on the same head branch targeting staging,
# GitHub's PATCH /pulls returns 422 with
# "A pull request already exists for base branch 'staging' …".
# The retarget can't proceed — but the right response is to
# close the now-redundant main-PR, not to fail the workflow
# noisily. Detect that specific 422 and close instead.
run: |
# Disable -e around the PATCH so we can inspect its exit code and stderr
# ourselves (the 422 duplicate-base case must be handled, not fatal).
set +e
echo "Retargeting PR #${PR_NUMBER} (author: ${PR_AUTHOR}) from main → staging"
# 2>&1 folds gh's error body into PATCH_OUTPUT so the grep below can
# match the API's 422 message text.
PATCH_OUTPUT=$(gh api -X PATCH \
"repos/${{ github.repository }}/pulls/${PR_NUMBER}" \
-f base=staging \
--jq '.base.ref' 2>&1)
PATCH_EXIT=$?
set -e
if [ "$PATCH_EXIT" -eq 0 ]; then
echo "::notice::Retargeted PR #${PR_NUMBER} → staging"
# Signal the follow-up explainer-comment step via step output.
echo "outcome=retargeted" >> "$GITHUB_OUTPUT"
exit 0
fi
# Specifically match the 422 duplicate-base/head error so
# any OTHER PATCH failure (auth, deleted PR, etc.) still
# surfaces as a real workflow failure.
if echo "$PATCH_OUTPUT" | grep -q "pull request already exists for base branch 'staging'"; then
echo "::notice::PR #${PR_NUMBER}: duplicate target-staging PR exists on same head — closing this main-PR as redundant."
gh pr close "$PR_NUMBER" \
--repo "${{ github.repository }}" \
--comment "[retarget-bot] Closing — another PR on the same head branch already targets \`staging\`. This PR is redundant. See issue #1884 for the rationale."
echo "outcome=closed-as-duplicate" >> "$GITHUB_OUTPUT"
exit 0
fi
echo "::error::Retarget PATCH failed and was NOT a duplicate-base error:"
echo "$PATCH_OUTPUT" >&2
exit 1
- name: Post explainer comment
if: steps.retarget.outputs.outcome == 'retargeted'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
run: |
# Post the explanation on the retargeted PR. The body is a quoted heredoc
# ('BODY' — no expansion) wrapped in $(cat …) so the multi-line markdown
# passes to --body as a single argument; the text itself is user-facing
# output and must stay verbatim.
gh pr comment "$PR_NUMBER" \
--repo "${{ github.repository }}" \
--body "$(cat <<'BODY'
[retarget-bot] This PR was opened against `main` and has been retargeted to `staging` automatically.
**Why:** per [SHARED_RULES rule 8](https://github.com/Molecule-AI/molecule-ai-org-template-molecule-dev/blob/main/SHARED_RULES.md), all feature work targets `staging` first; the CEO promotes `staging → main` separately.
**What changed:** just the base branch — no code change. CI will re-run against `staging`. If you get merge conflicts, rebase on `staging`.
**If this PR is the CEO's staging→main promotion:** the Action skipped you (only bot-authored PRs are retargeted). If you see this comment on your CEO PR, that's a bug — please tag @HongmingWang-Rabbit.
BODY
)"

View File

@ -12,7 +12,7 @@ name: Secret scan
#
# jobs:
# secret-scan:
# uses: molecule-ai/molecule-core/.github/workflows/secret-scan.yml@staging
# uses: Molecule-AI/molecule-core/.github/workflows/secret-scan.yml@staging
#
# Pin to @staging not @main — staging is the active default branch,
# main lags via the staging-promotion workflow. Updates ride along

View File

@ -108,14 +108,6 @@ jobs:
python3 > stale_slugs.txt <<'PY'
import json, os
from datetime import datetime, timezone, timedelta
# SSOT for this list lives in the controlplane Go code:
# molecule-controlplane/internal/slugs/ephemeral.go
# (var EphemeralPrefixes). The redeploy-fleet auto-rollout
# also reads from there to SKIP these slugs — without that
# filter, fleet redeploy SSM-failed in-flight E2E tenants
# whose containers were still booting, breaking the test
# that just spun them up (molecule-controlplane#493).
# Update both files together.
EPHEMERAL_PREFIXES = ("e2e-", "rt-e2e-")
with open("orgs.json") as f:
data = json.load(f)
@ -193,47 +185,7 @@ jobs:
# sweeper is best-effort. Next hourly tick re-attempts. We
# only fail loud at the safety-cap gate above.
- name: Sweep orphan tunnels
# Stale-org cleanup deletes the org (which cascades to tunnel
# delete inside the CP). But when that cascade fails partway —
# CP transient 5xx after the org row is deleted but before the
# CF tunnel delete completes — the tunnel persists with no
# matching org row. The reconciler in internal/sweep flags this
# as `cf_tunnel kind=orphan`, but nothing automatically reaps it.
#
# `/cp/admin/orphan-tunnels/cleanup` is the operator-triggered
# reaper. Calling it here at the end of every sweep tick
# converges the staging CF account to clean even when CP
# cascades half-fail.
#
# PR #492 made the underlying DeleteTunnel actually check
# status — pre-fix it silent-succeeded on CF code 1022
# ("active connections"), so this step would have been a no-op
# against stuck connectors. Post-fix the cleanup invokes
# CleanupTunnelConnections + retry, which actually clears the
# 1022 case. (#2987)
#
# Best-effort. Failure here doesn't fail the workflow — next
# tick re-attempts. Errors flow to step output for ops review.
if: env.DRY_RUN != 'true'
run: |
# Best-effort call (see step comment above): suspend -e so a curl failure
# doesn't abort the workflow — the HTTP code is inspected manually below.
set +e
# -o captures the response body; -w writes just the status code to stdout,
# which is redirected into its own file so the two can be read separately.
curl -sS -o /tmp/cleanup_resp -w "%{http_code}" \
--max-time 60 \
-X POST "$MOLECULE_CP_URL/cp/admin/orphan-tunnels/cleanup" \
-H "Authorization: Bearer $ADMIN_TOKEN" >/tmp/cleanup_code
set -e
# "000" sentinel when curl never produced a code file (connect failure).
http_code=$(cat /tmp/cleanup_code 2>/dev/null || echo "000")
# Cap the logged body at 500 bytes so a large response can't flood the log.
body=$(cat /tmp/cleanup_resp 2>/dev/null | head -c 500)
if [ "$http_code" = "200" ]; then
# Tolerant JSON extraction: any parse error falls back to "0" so a
# malformed body still yields a log line instead of a step failure.
count=$(echo "$body" | python3 -c "import sys,json; d=json.loads(sys.stdin.read() or '{}'); print(d.get('deleted_count', 0))" 2>/dev/null || echo "0")
failed_n=$(echo "$body" | python3 -c "import sys,json; d=json.loads(sys.stdin.read() or '{}'); print(len(d.get('failed') or {}))" 2>/dev/null || echo "0")
echo "Orphan-tunnel sweep: deleted=$count failed=$failed_n"
else
echo "::warning::orphan-tunnels cleanup returned HTTP $http_code — body: $body"
fi
- name: Dry-run summary
if: env.DRY_RUN == 'true'
run: |
echo "DRY RUN — would have deleted ${{ steps.identify.outputs.count }} org(s) AND triggered orphan-tunnels cleanup. Re-run with dry_run=false to actually delete."
echo "DRY RUN — would have deleted ${{ steps.identify.outputs.count }} org(s). Re-run with dry_run=false to actually delete."

7
.gitignore vendored
View File

@ -131,13 +131,6 @@ backups/
# Cloned by publish-workspace-server-image.yml so the Dockerfile's
# replace-directive path resolves. Lives in its own repo.
/molecule-ai-plugin-github-app-auth/
# Tenant-image build context — populated by the workflow's
# "Pre-clone manifest deps" step. Mirrors the public manifest, holds the
# same content as the three /<>/ dirs above but namespaced under one
# parent so the Docker build context is a single COPY-friendly tree.
# Each entry is a transient working-dir, never source-of-truth, never
# committed.
/.tenant-bundle-deps/
# Internal-flavored content lives in Molecule-AI/internal — NEVER in this
# public monorepo. Migrated 2026-04-23 (CEO directive). The CI workflow

View File

@ -22,7 +22,7 @@ development workflow, conventions, and how to get your changes merged.
```bash
# Clone the repo
git clone https://git.moleculesai.app/molecule-ai/molecule-core.git
git clone https://github.com/Molecule-AI/molecule-core.git
cd molecule-core
# Install git hooks
@ -57,7 +57,7 @@ See `CLAUDE.md` for a full list of environment variables and their purposes.
This repo is scoped to **code** (canvas, workspace, workspace-server, related
infra). Public content (blog posts, marketing copy, OG images, SEO briefs,
DevRel demos) lives in [`Molecule-AI/docs`](https://git.moleculesai.app/molecule-ai/docs).
DevRel demos) lives in [`Molecule-AI/docs`](https://github.com/Molecule-AI/docs).
The `Block forbidden paths` CI gate fails any PR that writes to `marketing/`
or other removed paths — open against `Molecule-AI/docs` instead.
@ -110,7 +110,7 @@ causing a render loop when any node position changed.
1. **Repo-wide:** "Automatically delete head branches" is on. Once a PR merges, the branch is deleted server-side. Any subsequent `git push` to that branch fails with `remote rejected — no such branch`.
2. **CI:** the `pr-guards` workflow (calling [molecule-ci `disable-auto-merge-on-push`](https://git.moleculesai.app/molecule-ai/molecule-ci/src/branch/main/.github/workflows/disable-auto-merge-on-push.yml)) fires on every push to an open PR. If auto-merge was already enabled, it's disabled and a comment is posted. You must explicitly re-enable after verifying the new commit.
2. **CI:** the `pr-guards` workflow (calling [molecule-ci `disable-auto-merge-on-push`](https://github.com/Molecule-AI/molecule-ci/blob/main/.github/workflows/disable-auto-merge-on-push.yml)) fires on every push to an open PR. If auto-merge was already enabled, it's disabled and a comment is posted. You must explicitly re-enable after verifying the new commit.
**Workflow rules that follow from the guards:**
- Push **all** commits before running `gh pr merge --auto`.
@ -180,9 +180,9 @@ and run CI manually.
Code in this repo lands in molecule-core. Some related runtime artifacts
live in their own repos:
- [`Molecule-AI/molecule-ai-workspace-runtime`](https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-runtime) — Python adapter SDK (`molecule_runtime`) that runs inside containerized Molecule workspaces. Bridges Claude Code SDK / hermes / langgraph / etc. → A2A queue.
- [`Molecule-AI/molecule-sdk-python`](https://git.moleculesai.app/molecule-ai/molecule-sdk-python) — `A2AServer` + `RemoteAgentClient` for external agents that register over the public `/registry/register` flow.
- [`Molecule-AI/molecule-mcp-claude-channel`](https://git.moleculesai.app/molecule-ai/molecule-mcp-claude-channel) — Claude Code channel plugin. Bridges A2A traffic into a running Claude Code session via MCP `notifications/claude/channel`. Polling-based (no tunnel required); install with `claude --channels plugin:molecule@Molecule-AI/molecule-mcp-claude-channel`.
- [`Molecule-AI/molecule-ai-workspace-runtime`](https://github.com/Molecule-AI/molecule-ai-workspace-runtime) — Python adapter SDK (`molecule_runtime`) that runs inside containerized Molecule workspaces. Bridges Claude Code SDK / hermes / langgraph / etc. → A2A queue.
- [`Molecule-AI/molecule-sdk-python`](https://github.com/Molecule-AI/molecule-sdk-python) — `A2AServer` + `RemoteAgentClient` for external agents that register over the public `/registry/register` flow.
- [`Molecule-AI/molecule-mcp-claude-channel`](https://github.com/Molecule-AI/molecule-mcp-claude-channel) — Claude Code channel plugin. Bridges A2A traffic into a running Claude Code session via MCP `notifications/claude/channel`. Polling-based (no tunnel required); install with `claude --channels plugin:molecule@Molecule-AI/molecule-mcp-claude-channel`.
When extending the **A2A surface** in molecule-core (`workspace-server/internal/handlers/a2a_proxy.go` etc.), consider whether the change has a downstream impact on the runtime SDK or the channel plugin — they're versioned independently but share the wire shape.

View File

@ -1,28 +0,0 @@
# Top-level Makefile — convenience wrappers around docker compose.
#
# Most molecule-core dev work happens via these shortcuts. CI doesn't
# use this Makefile; CI calls docker compose / go test directly so the
# Makefile can evolve without breaking the build.
#
# NOTE(review): recipe lines below appear without a leading hard tab in
# this view — make requires each recipe command to start with a tab.
# Confirm the tabs survive in the actual file (the diff rendering may
# have stripped them).
.PHONY: help dev up down logs build test
# Self-documenting help: scans this Makefile for `target: ## text`
# annotations and prints an aligned, colorized listing.
help: ## Show this help.
@grep -E '^[a-zA-Z_-]+:.*?## ' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-12s\033[0m %s\n", $$1, $$2}'
dev: ## Start the full stack with air hot-reload for the platform service.
docker compose -f docker-compose.yml -f docker-compose.dev.yml up
up: ## Start the full stack in production-shape mode (no air, normal Dockerfile).
docker compose up
down: ## Stop the stack and remove containers (volumes preserved).
docker compose down
logs: ## Tail logs from all services (Ctrl-C to detach).
docker compose logs -f
build: ## Force a fresh build of the platform image (no cache).
docker compose build --no-cache platform
test: ## Run Go unit tests in workspace-server/.
cd workspace-server && go test -race ./...

View File

@ -1,7 +1,7 @@
<div align="center">
<p>
<img src="./docs/assets/branding/molecule-icon.svg" alt="Molecule AI" width="160" />
<img src="./docs/assets/branding/molecule-icon.png" alt="Molecule AI Icon Logo" width="160" />
</p>
<p>
@ -39,8 +39,8 @@
<a href="./docs/agent-runtime/workspace-runtime.md"><strong>Workspace Runtime</strong></a>
</p>
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/new/template?template=https://git.moleculesai.app/molecule-ai/molecule-core)
[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://git.moleculesai.app/molecule-ai/molecule-core)
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/new/template?template=https://github.com/Molecule-AI/molecule-monorepo)
[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/Molecule-AI/molecule-monorepo)
</div>
@ -53,8 +53,8 @@ Molecule AI is the most powerful way to govern an AI agent organization in produ
It combines the parts that are usually scattered across demos, internal glue code, and framework-specific tooling into one product:
- one org-native control plane for teams, roles, hierarchy, and lifecycle
- one runtime layer that lets **eight** agent runtimes — LangGraph, DeepAgents, Claude Code, CrewAI, AutoGen, **Hermes**, **Gemini CLI**, and OpenClaw — run side by side behind one workspace contract
- one memory model that keeps recall, sharing, and skill evolution aligned with organizational boundaries (Memory v2 backed by pgvector for semantic recall)
- one runtime layer that lets LangGraph, DeepAgents, Claude Code, CrewAI, AutoGen, and OpenClaw run side by side
- one memory model that keeps recall, sharing, and skill evolution aligned with organizational boundaries
- one operational surface for observing, pausing, restarting, inspecting, and improving live workspaces
Most teams can build a workflow, a strong single agent, a coding agent, or a custom multi-agent graph.
@ -75,7 +75,7 @@ You do not wire collaboration paths by hand. Hierarchy defines the default commu
### 3. Runtime choice stops being a dead-end decision
LangGraph, DeepAgents, Claude Code, CrewAI, AutoGen, Hermes, Gemini CLI, and OpenClaw can all plug into the same workspace abstraction. Teams can standardize governance without forcing every group onto one runtime.
LangGraph, DeepAgents, Claude Code, CrewAI, AutoGen, and OpenClaw can all plug into the same workspace abstraction. Teams can standardize governance without forcing every group onto one runtime.
### 4. Memory is treated like infrastructure
@ -117,8 +117,6 @@ Molecule AI is not trying to replace the frameworks below. It is the system that
| **Claude Code** | Shipping on `main` | Real coding workflows, CLI-native continuity | Secure workspace abstraction, A2A delegation, org boundaries, shared control plane |
| **CrewAI** | Shipping on `main` | Role-based crews | Persistent workspace identity, policy consistency, shared canvas and registry |
| **AutoGen** | Shipping on `main` | Assistant/tool orchestration | Standardized deployment, hierarchy-aware collaboration, shared ops plane |
| **Hermes 4** | Shipping on `main` | Hybrid reasoning, native tools, json_schema (NousResearch/hermes-agent) | Option B upstream hook, A2A bridge to OpenAI-compat API, multi-provider provider derivation |
| **Gemini CLI** | Shipping on `main` | Google Gemini CLI continuity | Workspace lifecycle, A2A, hierarchy-aware collaboration, shared ops plane |
| **OpenClaw** | Shipping on `main` | CLI-native runtime with its own session model | Workspace lifecycle, templates, activity logs, topology-aware collaboration |
| **NemoClaw** | WIP on `feat/nemoclaw-t4-docker` | NVIDIA-oriented runtime path | Planned to join the same abstraction once merged; not yet part of `main` |
@ -184,10 +182,9 @@ The result is not just “an agent that learns.” It is **an organization that
## What Ships In `main`
### Canvas (v4)
### Canvas
- Next.js 15 + React Flow + Zustand
- **warm-paper theme system** — light / dark / follow-system, SSR cookie + nonce'd boot script + ThemeProvider; terminal + code surfaces stay dark unconditionally
- drag-to-nest team building
- empty-state deployment + onboarding wizard
- template palette
@ -196,9 +193,8 @@ The result is not just “an agent that learns.” It is **an organization that
### Platform
- Go 1.25 / Gin control plane (80+ HTTP endpoints + Gorilla WebSocket fanout)
- workspace CRUD and provisioning (pluggable Provisioner — Docker locally, EC2 + SSM in production)
- **A2A response path is a typed discriminated union (RFC #2967)** — frozen dataclasses + total parser; 100% unit + adversarial fuzz coverage
- Go/Gin control plane
- workspace CRUD and provisioning
- registry and heartbeats
- browser-safe A2A proxy
- team expansion/collapse
@ -208,10 +204,10 @@ The result is not just “an agent that learns.” It is **an organization that
### Runtime
- unified `workspace/` image; thin AMI in production (us-east-2)
- adapter-driven execution across **8 runtimes** (Claude Code, Hermes, Gemini CLI, LangGraph, DeepAgents, CrewAI, AutoGen, OpenClaw)
- unified `workspace/` image
- adapter-driven execution
- Agent Card registration
- awareness-backed memory integration; **Memory v2 backed by pgvector** for semantic recall
- awareness-backed memory integration
- plugin-mounted shared rules/skills
- hot-reloadable local skills
- coordinator-only delegation path
@ -225,21 +221,6 @@ The result is not just “an agent that learns.” It is **an organization that
- runtime tiers
- direct workspace inspection through terminal and files
### SaaS (via [`molecule-controlplane`](https://git.moleculesai.app/molecule-ai/molecule-controlplane))
- multi-tenant on AWS EC2 + Neon (per-tenant Postgres branch) + Cloudflare Tunnels (per-tenant, no public ports)
- WorkOS AuthKit + Stripe Checkout + Customer Portal
- AWS KMS envelope encryption (DB / Redis connection strings); AWS Secrets Manager for tenant bootstrap
- `tenant_resources` audit table + 30-min boot-event-aware reconciler — every CF / AWS lifecycle event recorded, claim vs live state diffed
### Bring your own Claude Code session (via [`molecule-mcp-claude-channel`](https://git.moleculesai.app/molecule-ai/molecule-mcp-claude-channel))
- Claude Code plugin that bridges Molecule A2A traffic into a local Claude Code session via MCP
- subscribe to one or more workspaces; peer messages surface as conversation turns; replies route back through Molecule's A2A
- no tunnel, no public endpoint — the plugin self-registers each watched workspace as `delivery_mode=poll` and long-polls `/activity?since_id=…`
- multi-tenant friendly: one plugin install can watch workspaces across multiple Molecule tenants (`MOLECULE_PLATFORM_URLS` per-workspace)
- install via the standard marketplace flow: `/plugin marketplace add Molecule-AI/molecule-mcp-claude-channel` → `/plugin install molecule-channel@molecule-mcp-claude-channel`
## Built For Teams That Need More Than A Demo
Molecule AI is especially strong when you need to run:
@ -252,30 +233,24 @@ Molecule AI is especially strong when you need to run:
## Architecture
```text
Canvas (Next.js 15, warm-paper :3000) <--HTTP / WS--> Platform (Go 1.25 :8080) <---> Postgres + Redis
| |
| +--> Provisioner: Docker (local) / EC2 + SSM (prod)
| +--> bundles · templates · secrets · KMS
Canvas (Next.js :3000) <--HTTP / WS--> Platform (Go :8080) <---> Postgres + Redis
| |
| +--> Docker provisioner / bundles / templates / secrets
|
+------------------------- shows ------------------------> workspaces, teams, tasks, traces, events
+-------------------- shows --------------------> workspaces, teams, tasks, traces, events
Workspace Runtime (Python ≥3.11, image with adapters)
- 8 adapters: LangGraph / DeepAgents / Claude Code / CrewAI / AutoGen / Hermes / Gemini CLI / OpenClaw
- Agent Card + A2A server (typed-SSOT response path, RFC #2967)
- heartbeat + activity + awareness-backed memory (Memory v2 — pgvector semantic recall)
Workspace Runtime (Python image with adapters)
- LangGraph / DeepAgents / Claude Code / CrewAI / AutoGen / OpenClaw
- Agent Card + A2A server
- heartbeat + activity + awareness-backed memory
- skills + plugins + hot reload
SaaS Control Plane (molecule-controlplane, private)
- per-tenant EC2 + Neon (Postgres branch) + Cloudflare Tunnel
- WorkOS · Stripe · KMS · AWS Secrets Manager
- tenant_resources audit + 30-min reconciler
```
## Quick Start
```bash
git clone https://git.moleculesai.app/molecule-ai/molecule-core.git
cd molecule-core
git clone https://github.com/Molecule-AI/molecule-monorepo.git
cd molecule-monorepo
cp .env.example .env
# Defaults boot the stack locally out of the box. See .env.example for
@ -284,7 +259,7 @@ cp .env.example .env
./infra/scripts/setup.sh
# Boots Postgres (:5432), Redis (:6379), Langfuse (:3001),
# and Temporal (:7233 gRPC, :8233 UI) on the shared
# `molecule-core-net` Docker network. Temporal runs with
# `molecule-monorepo-net` Docker network. Temporal runs with
# no auth on localhost — dev-only; production must gate it.
#
# Also populates the template/plugin registry by cloning every repo
@ -328,11 +303,7 @@ Then open `http://localhost:3000`:
## Current Scope
The current `main` branch ships the core platform, Canvas v4 (warm-paper themed), Memory v2 (pgvector semantic recall), the typed-SSOT A2A response path (RFC #2967), **eight production adapters** (Claude Code, Hermes, Gemini CLI, LangGraph, DeepAgents, CrewAI, AutoGen, OpenClaw), skill lifecycle, and operational surfaces.
The companion private repo [`molecule-controlplane`](https://git.moleculesai.app/molecule-ai/molecule-controlplane) provides the SaaS surface — multi-tenant orchestration on EC2 + Neon + Cloudflare Tunnels, KMS envelope encryption, WorkOS auth, Stripe billing, and a `tenant_resources` audit table with a 30-min reconciler.
Adjacent runtime work such as **NemoClaw** remains branch-level until merged, and this README keeps that distinction explicit on purpose.
The current `main` branch already includes the core platform, canvas, memory model, six production adapters, skill lifecycle, and operational surfaces. Adjacent runtime work such as **NemoClaw** remains branch-level until merged, and this README keeps that distinction explicit on purpose.
## License

View File

@ -1,7 +1,7 @@
<div align="center">
<p>
<img src="./docs/assets/branding/molecule-icon.svg" alt="Molecule AI" width="160" />
<img src="./docs/assets/branding/molecule-icon.png" alt="Molecule AI 图案 Logo" width="160" />
</p>
<p>
@ -38,8 +38,8 @@
<a href="./docs/agent-runtime/workspace-runtime.md"><strong>Workspace Runtime</strong></a>
</p>
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/new/template?template=https://git.moleculesai.app/molecule-ai/molecule-core)
[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://git.moleculesai.app/molecule-ai/molecule-core)
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/new/template?template=https://github.com/Molecule-AI/molecule-core)
[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/Molecule-AI/molecule-core)
</div>
@ -52,8 +52,8 @@ Molecule AI 是目前最强的 AI Agent 组织治理方案之一,用来把 age
它把过去分散在 demo、内部胶水代码和各类 framework 私有工具里的关键能力,收敛成一个产品:
- 一套组织原生 control plane管理团队、角色、层级和生命周期
- 一套 runtime abstraction**8 个** agent runtime —— LangGraph、DeepAgents、Claude Code、CrewAI、AutoGen、**Hermes**、**Gemini CLI**、OpenClaw —— 共用一套 workspace 契约
- 一套与组织边界对齐的 memory 模型,把 recall、sharing 和 skill evolution 放进同一体系Memory v2 由 pgvector 支撑语义召回)
- 一套 runtime abstraction让 LangGraph、DeepAgents、Claude Code、CrewAI、AutoGen、OpenClaw 并存运行
- 一套与组织边界对齐的 memory 模型,把 recall、sharing 和 skill evolution 放进同一体系
- 一套面向线上 workspace 的运维面,统一完成观测、暂停、重启、检查和持续改进
今天很多团队能做好 workflow、单 agent、coding agent或者自定义 multi-agent graph 中的一种。
@ -74,7 +74,7 @@ Molecule AI 填的就是这个空白。
### 3. Runtime 选择不再是死路
LangGraph、DeepAgents、Claude Code、CrewAI、AutoGen、Hermes、Gemini CLI、OpenClaw 都可以挂到同一个 workspace abstraction 下。团队可以统一治理方式,而不必统一到底层 runtime。
LangGraph、DeepAgents、Claude Code、CrewAI、AutoGen、OpenClaw 都可以挂到同一个 workspace abstraction 下。团队可以统一治理方式,而不必统一到底层 runtime。
### 4. Memory 被当成基础设施来做
@ -116,8 +116,6 @@ Molecule AI 并不是要替代下面这些 framework而是把它们纳入更
| **Claude Code** | `main` 已支持 | 真实编码工作流、CLI-native continuity | 安全 workspace 抽象、A2A delegation、组织边界、共享 control plane |
| **CrewAI** | `main` 已支持 | 角色型 crew 模式清晰 | 持久 workspace 身份、统一策略、共享 Canvas 和 registry |
| **AutoGen** | `main` 已支持 | assistant/tool orchestration | 统一部署、层级协作、共享运维平面 |
| **Hermes 4** | `main` 已支持 | 混合推理、原生工具调用、json_schema 输出NousResearch/hermes-agent | Option B 上游 hook、A2A 桥接 OpenAI 兼容 API、多 provider 自动派生 |
| **Gemini CLI** | `main` 已支持 | Google Gemini CLI 持续会话 | workspace 生命周期、A2A、层级感知协作、共享运维平面 |
| **OpenClaw** | `main` 已支持 | CLI-native runtime自有 session 模型 | workspace 生命周期、templates、activity logs、拓扑感知协作 |
| **NemoClaw** | `feat/nemoclaw-t4-docker` 分支 WIP | NVIDIA 方向 runtime 路线 | 计划并入同一抽象层,但当前还不是 `main` 已合并能力 |
@ -183,10 +181,9 @@ Molecule AI 并不是要替代下面这些 framework而是把它们纳入更
## `main` 分支已经具备什么
### Canvasv4
### Canvas
- Next.js 15 + React Flow + Zustand
- **warm-paper 主题系统** —— light / dark / 跟随系统SSR cookie + nonce'd boot 脚本 + ThemeProvider终端与代码面板始终保持深色
- drag-to-nest 团队构建
- empty state + onboarding wizard
- template palette
@ -195,9 +192,8 @@ Molecule AI 并不是要替代下面这些 framework而是把它们纳入更
### Platform
- Go 1.25 / Gin control plane80+ HTTP 端点 + Gorilla WebSocket fanout
- workspace CRUD 和 provisioning可插拔 Provisioner —— 本地 Docker、生产 EC2 + SSM
- **A2A 响应路径已收敛为类型化的判别联合RFC #2967** —— 冻结 dataclass + 全量 parser100% 单元测试 + 对抗性 fuzz 覆盖
- Go/Gin control plane
- workspace CRUD 和 provisioning
- registry 与 heartbeat
- 浏览器安全的 A2A proxy
- team expansion/collapse
@ -207,10 +203,10 @@ Molecule AI 并不是要替代下面这些 framework而是把它们纳入更
### Runtime
- 统一 `workspace/` 镜像;生产环境采用 thin AMIus-east-2
- adapter 驱动执行,覆盖 **8 个 runtime**Claude Code、Hermes、Gemini CLI、LangGraph、DeepAgents、CrewAI、AutoGen、OpenClaw
- 统一 `workspace/` 镜像
- adapter 驱动执行
- Agent Card 注册
- awareness-backed memory**Memory v2 由 pgvector 支撑**语义召回
- awareness-backed memory
- plugin 挂载共享 rules/skills
- 本地 skills 热加载
- coordinator-only delegation 路径
@ -224,21 +220,6 @@ Molecule AI 并不是要替代下面这些 framework而是把它们纳入更
- runtime tiers
- 终端与文件层面的 workspace 直接排障
### SaaS由 [`molecule-controlplane`](https://git.moleculesai.app/molecule-ai/molecule-controlplane) 提供)
- 多租户运行在 AWS EC2 + Neon每租户一个 Postgres branch+ Cloudflare Tunnels每租户一条隧道对外不开任何端口
- WorkOS AuthKit + Stripe Checkout + Customer Portal
- AWS KMS 信封加密DB / Redis 连接串AWS Secrets Manager 负责租户 bootstrap
- `tenant_resources` 审计表 + 30 分钟 boot-event-aware reconciler —— 每个 CF / AWS lifecycle 事件都有记录,每 30 分钟比对 claim 与实际状态
### 在 Claude Code 里直接接入(由 [`molecule-mcp-claude-channel`](https://git.moleculesai.app/molecule-ai/molecule-mcp-claude-channel) 提供)
- 把 Molecule A2A 流量桥接到本地 Claude Code 会话的 MCP 插件
- 订阅一个或多个 workspacepeer 的消息会以 user-turn 出现,回复会经 Molecule A2A 路由出去
- 无需公网隧道、无需公开端点 —— 插件启动时自动把每个 watched workspace 注册成 `delivery_mode=poll`,长轮询 `/activity?since_id=…`
- 多租户友好:单次安装即可同时 watch 跨多个 Molecule 租户的 workspace`MOLECULE_PLATFORM_URLS` 按 workspace 配置)
- 通过标准 marketplace 流程安装:`/plugin marketplace add Molecule-AI/molecule-mcp-claude-channel` → `/plugin install molecule-channel@molecule-mcp-claude-channel`
## 适合什么团队
Molecule AI 特别适合下面这些场景:
@ -251,29 +232,23 @@ Molecule AI 特别适合下面这些场景:
## 架构总览
```text
Canvas (Next.js 15, warm-paper :3000) <--HTTP / WS--> Platform (Go 1.25 :8080) <---> Postgres + Redis
| |
| +--> Provisioner: Docker (本地) / EC2 + SSM (生产)
| +--> bundles · templates · secrets · KMS
Canvas (Next.js :3000) <--HTTP / WS--> Platform (Go :8080) <---> Postgres + Redis
| |
| +--> Docker provisioner / bundles / templates / secrets
|
+------------------------- 展示 ------------------------> workspaces, teams, tasks, traces, events
+-------------------- 展示 --------------------> workspaces, teams, tasks, traces, events
Workspace Runtime (Python ≥3.11,含 adapter 集合的镜像)
- 8 个 adapter: LangGraph / DeepAgents / Claude Code / CrewAI / AutoGen / Hermes / Gemini CLI / OpenClaw
- Agent Card + A2A servertyped-SSOT 响应路径RFC #2967
- heartbeat + activity + awareness-backed memoryMemory v2 —— pgvector 语义召回)
Workspace Runtime (Python image with adapters)
- LangGraph / DeepAgents / Claude Code / CrewAI / AutoGen / OpenClaw
- Agent Card + A2A server
- heartbeat + activity + awareness-backed memory
- skills + plugins + hot reload
SaaS Control Plane (molecule-controlplane私有)
- 每租户 EC2 + Neon (Postgres branch) + Cloudflare Tunnel
- WorkOS · Stripe · KMS · AWS Secrets Manager
- tenant_resources 审计 + 30 分钟 reconciler
```
## 快速开始
```bash
git clone https://git.moleculesai.app/molecule-ai/molecule-core.git
git clone https://github.com/Molecule-AI/molecule-core.git
cd molecule-core
cp .env.example .env
@ -283,7 +258,7 @@ cp .env.example .env
./infra/scripts/setup.sh
# 启动 Postgres (:5432)、Redis (:6379)、Langfuse (:3001)
# 以及 Temporal (:7233 gRPC, :8233 UI),全部挂在共享的
# `molecule-core-net` Docker 网络上。Temporal 默认无鉴权,
# `molecule-monorepo-net` Docker 网络上。Temporal 默认无鉴权,
# 仅用于本地开发;生产环境必须加 mTLS / API Key。
#
# 同时会根据 manifest.json 拉取所有模板/插件仓库到
@ -321,11 +296,7 @@ npm run dev
## 当前范围说明
当前 `main` 已经包含核心平台、Canvas v4warm-paper 主题、Memory v2pgvector 语义召回、typed-SSOT A2A 响应路径RFC #2967)、**8 个正式 adapter**Claude Code、Hermes、Gemini CLI、LangGraph、DeepAgents、CrewAI、AutoGen、OpenClaw、skill lifecycle以及主要运维面。
配套的私有仓库 [`molecule-controlplane`](https://git.moleculesai.app/molecule-ai/molecule-controlplane) 提供 SaaS 层 —— 多租户编排EC2 + Neon + Cloudflare Tunnels、KMS 信封加密、WorkOS 鉴权、Stripe 计费,以及 `tenant_resources` 审计表加 30 分钟 reconciler。
**NemoClaw** 这样的相邻 runtime 路线仍然属于分支级工作,只有合并后才会进入正式支持列表,这里会明确区分。
当前 `main` 已经包含核心平台、Canvas、memory model、6 个正式 adapter、skill lifecycle 和主要运维面。像 **NemoClaw** 这样的相邻 runtime 路线仍然属于分支级工作,只有合并后才会进入正式支持列表,这里会明确区分。
## License

View File

@ -1,10 +0,0 @@
# Excluded from `docker build` context. Without this, the COPY . . step in
# canvas/Dockerfile clobbers the freshly-installed node_modules with the
# host's (potentially broken / wrong-arch) copy — the @tailwindcss/oxide
# native binary disagreed and broke `next build`.
node_modules
.next
.git
*.log
.env*
!.env.example

View File

@ -1,11 +1,7 @@
FROM node:22-alpine AS builder
WORKDIR /app
COPY package.json package-lock.json* ./
# `npm ci` (not `install`) for lockfile-exact reproducibility.
# `--include=optional` ensures the platform-specific @tailwindcss/oxide
# native binary lands — without it, postcss fails with "Cannot read
# properties of undefined (reading 'All')" at build time.
RUN npm ci --include=optional
RUN npm install
COPY . .
ARG NEXT_PUBLIC_PLATFORM_URL=http://localhost:8080
ARG NEXT_PUBLIC_WS_URL=ws://localhost:8080/ws

View File

@ -17,24 +17,6 @@ import { dirname, join } from "node:path";
// update one heuristic. Production is unaffected: `output: "standalone"`
// bakes resolved env into the build, and the marker file isn't shipped.
loadMonorepoEnv();
// Boot-time matched-pair guard for ADMIN_TOKEN / NEXT_PUBLIC_ADMIN_TOKEN.
// When ADMIN_TOKEN is set on the workspace-server (server-side bearer
// gate, wsauth_middleware.go ~L245), the canvas MUST send the matching
// NEXT_PUBLIC_ADMIN_TOKEN as `Authorization: Bearer ...` on every API
// call. If only one is set, every workspace API call 401s silently —
// the canvas hydrates with empty data and the user sees a broken page
// with no console hint about the auth-config mismatch.
//
// Pre-fix the matched-pair contract was descriptive only (a comment in
// .env): future devs/agents could re-misconfigure with one of the two
// unset and silently 401. Closes the post-PR-#174 self-review gap.
//
// Warn-only (not exit) — production canvas Docker images bake these
// vars into the build at image-build time, and a missed pair there
// would still emit the warning at runtime via the standalone server's
// startup. Killing the process on misconfiguration would turn a
// recoverable auth issue into a hard crashloop.
checkAdminTokenPair();
const nextConfig: NextConfig = {
output: "standalone",
@ -75,43 +57,6 @@ function loadMonorepoEnv() {
);
}
// Boot-time matched-pair guard for ADMIN_TOKEN / NEXT_PUBLIC_ADMIN_TOKEN.
// The two env vars must be set together or unset together; having only
// one of them configured leads to silent 401s between the canvas and
// the workspace-server. Runs after the .env file has been loaded so the
// check observes the post-load state.
//
// The empty string counts as unset: `KEY=` in .env becomes "" in
// `process.env`, and an empty bearer token is equivalent to no token
// for auth purposes, so "" and undefined are the same relative to the
// matched-pair invariant.
//
// Side effect only (returns void): emits a console.error warning naming
// the missing half. Exported so a future test can manipulate the env,
// call this, and assert on captured stderr.
export function checkAdminTokenPair(): void {
  const hasServerToken = !!process.env.ADMIN_TOKEN;
  const hasClientToken = !!process.env.NEXT_PUBLIC_ADMIN_TOKEN;

  // Matched pair — both set or both unset — nothing to report.
  if (hasServerToken === hasClientToken) {
    return;
  }

  // Mismatched pair: the diagnostic names the side that IS configured
  // so the operator doesn't have to grep. The remedy is symmetric
  // either way: set both to the same value, or unset both.
  const warning = hasServerToken
    ? "[next.config] ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not — " +
      "canvas will 401 against workspace-server because the bearer header " +
      "is never attached. Set both to the same value, or unset both."
    : "[next.config] NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not — " +
      "workspace-server will reject the bearer because no AdminAuth gate " +
      "is configured. Set both to the same value, or unset both.";

  // eslint-disable-next-line no-console
  console.error(warning);
}
function findMonorepoRoot(start: string): string | null {
let dir = start;
for (let i = 0; i < 6; i++) {

View File

@ -3,7 +3,6 @@ import { cookies, headers } from "next/headers";
import "./globals.css";
import { AuthGate } from "@/components/AuthGate";
import { CookieConsent } from "@/components/CookieConsent";
import { PurchaseSuccessModal } from "@/components/PurchaseSuccessModal";
import { ThemeProvider } from "@/lib/theme-provider";
import {
THEME_COOKIE,
@ -87,12 +86,6 @@ export default async function RootLayout({
vercel preview URL, apex) pass through unchanged. */}
<AuthGate>{children}</AuthGate>
<CookieConsent />
{/* Demo Mock #1: post-purchase success toast. Mounted at the
layout level so it persists across page state transitions
(loading → hydrated → error) without being unmounted and
losing its open-state. Reads ?purchase_success=1 from the
URL on first paint, then strips the param. */}
<PurchaseSuccessModal />
</ThemeProvider>
</body>
</html>

View File

@ -41,7 +41,7 @@ export default function PricingPage() {
<p className="mt-2 text-ink-mid">
We publish the{" "}
<a
href="https://git.moleculesai.app/molecule-ai/molecule-monorepo"
href="https://github.com/Molecule-AI/molecule-monorepo"
className="text-accent underline hover:text-accent"
>
full source on GitHub

View File

@ -1,10 +1,9 @@
'use client';
import { useEffect, useMemo, useCallback, useRef } from "react";
import { useEffect, useMemo, useCallback } from "react";
import { type Edge, MarkerType } from "@xyflow/react";
import { api } from "@/lib/api";
import { useCanvasStore } from "@/store/canvas";
import { useSocketEvent } from "@/hooks/useSocketEvent";
import type { ActivityEntry } from "@/types/activity";
// ── Constants ─────────────────────────────────────────────────────────────────
@ -12,6 +11,9 @@ import type { ActivityEntry } from "@/types/activity";
/** 60-minute look-back window for delegation activity */
export const A2A_WINDOW_MS = 60 * 60 * 1000;
/** Polling interval — refresh edges every 60 seconds */
export const A2A_POLL_MS = 60 * 1_000;
/** Threshold for "hot" edges: < 5 minutes → animated + violet stroke */
export const A2A_HOT_MS = 5 * 60 * 1_000;
@ -129,20 +131,6 @@ export function buildA2AEdges(
* `a2aEdges`. Canvas.tsx merges these with topology edges and passes the
* combined list to ReactFlow.
*
* Update shape (issue #61 Stage 2, replaces the 60s polling loop):
* - On mount (when showA2AEdges): one HTTP fan-out per visible workspace
* (delegation rows, 60-min window). Bootstraps the local row buffer.
* - Steady state: subscribes to ACTIVITY_LOGGED via useSocketEvent.
* Each delegation event from a visible workspace is appended to the
* buffer; edges are re-derived via the existing buildA2AEdges helper.
* - showA2AEdges toggle off: clears edges + buffer.
* - Visible-ID-set change: re-bootstraps so a freshly-shown workspace
* backfills its 60-min history (existing visibleIdsKey selector
* behaviour preserved — that's the 2026-05-04 render-loop fix).
*
* No interval poll. The singleton ReconnectingSocket already owns
* reconnect / backoff / health-check; useSocketEvent inherits those.
*
* Mount this inside CanvasInner (no ReactFlow hook dependency).
*/
export function A2ATopologyOverlay() {
@ -169,9 +157,7 @@ export function A2ATopologyOverlay() {
// the symptom of this re-render storm.
//
// The fix is purely the dependency-stability change here; the fetch
// logic is unchanged. Post-#61 the polling-driven fetch is gone, but
// the visibleIdsKey gate is still required so a peer-discovery write
// doesn't trigger a wasteful re-bootstrap.
// logic is unchanged.
const visibleIdsKey = useCanvasStore((s) =>
s.nodes
.filter((n) => !n.hidden)
@ -185,42 +171,16 @@ export function A2ATopologyOverlay() {
[visibleIdsKey]
);
// Local rolling buffer of delegation rows. Pruned by A2A_WINDOW_MS on
// each rebuild so a long-lived session doesn't accumulate unbounded
// history. The buffer's high-water mark is approximately:
// visibleIds.length × bootstrap-fetch-limit (500) + WS arrivals
// Real-world ceiling: ~3000 entries at the 60-min boundary, all of
// which buildA2AEdges aggregates into at most N² edges.
const bufferRef = useRef<ActivityEntry[]>([]);
// visibleIdsRef gives the WS handler the latest visible-ID set without
// re-subscribing on every render. The bus listener is registered
// exactly once per mount; subscriber-side filtering reads from this ref.
const visibleIdsRef = useRef(visibleIds);
visibleIdsRef.current = visibleIds;
// Re-derive overlay edges from the current buffer + push to store.
// Prunes by A2A_WINDOW_MS first so memory stays bounded across long
// sessions and the aggregation cost stays O(window-size).
const recomputeAndPush = useCallback(() => {
const cutoff = Date.now() - A2A_WINDOW_MS;
bufferRef.current = bufferRef.current.filter(
(r) => new Date(r.created_at).getTime() > cutoff
);
setA2AEdges(buildA2AEdges(bufferRef.current));
}, [setA2AEdges]);
// Bootstrap fan-out — one HTTP per visible workspace. Replaces the
// 60s polling loop entirely. Race-aware: any WS arrivals that landed
// in the buffer DURING the fetch (between the await and resume) are
// preserved by id-dedup-with-fetched-first ordering.
const bootstrap = useCallback(async () => {
// Fetch delegation activity for all visible workspaces and rebuild overlay edges.
const fetchAndUpdate = useCallback(async () => {
if (visibleIds.length === 0) {
bufferRef.current = [];
setA2AEdges([]);
return;
}
try {
const fetchedRows = (
// Fan-out — one request per visible workspace.
// Per-request failures are swallowed so one broken workspace doesn't blank the overlay.
const allRows = (
await Promise.all(
visibleIds.map((id) =>
api
@ -232,76 +192,24 @@ export function A2ATopologyOverlay() {
)
).flat();
// Merge: fetched rows first, then any in-flight WS arrivals that
// accumulated during the await. Dedup by id so rows that appear
// in both paths are not double-counted in the aggregation.
const merged = [...fetchedRows, ...bufferRef.current];
const seen = new Set<string>();
bufferRef.current = merged.filter((r) => {
if (seen.has(r.id)) return false;
seen.add(r.id);
return true;
});
recomputeAndPush();
setA2AEdges(buildA2AEdges(allRows));
} catch {
// Overlay failure is non-critical — canvas remains functional
}
}, [visibleIds, setA2AEdges, recomputeAndPush]);
}, [visibleIds, setA2AEdges]);
useEffect(() => {
if (!showA2AEdges) {
// Clear edges + buffer immediately when toggled off
bufferRef.current = [];
// Clear edges immediately when toggled off
setA2AEdges([]);
return;
}
void bootstrap();
}, [showA2AEdges, bootstrap, setA2AEdges]);
// Live-update path. Filters server-side ACTIVITY_LOGGED events down
// to delegation initiations from visible workspaces and appends each
// into the rolling buffer, re-deriving edges via buildA2AEdges.
//
// Only `method === "delegate"` rows count — the same filter
// buildA2AEdges applies — so delegate_result rows arriving over the
// wire don't double-count.
useSocketEvent((msg) => {
if (!showA2AEdges) return;
if (msg.event !== "ACTIVITY_LOGGED") return;
const p = (msg.payload || {}) as Record<string, unknown>;
if (p.activity_type !== "delegation") return;
if (p.method !== "delegate") return;
const wsId = msg.workspace_id;
if (!visibleIdsRef.current.includes(wsId)) return;
// Synthesise an ActivityEntry from the WS payload so buildA2AEdges
// (which the bootstrap path also feeds) handles it identically.
const entry: ActivityEntry = {
id:
(p.id as string) ||
`ws-push-${msg.timestamp || Date.now()}-${wsId}`,
workspace_id: wsId,
activity_type: "delegation",
source_id: (p.source_id as string | null) ?? null,
target_id: (p.target_id as string | null) ?? null,
method: "delegate",
summary: (p.summary as string | null) ?? null,
request_body: null,
response_body: null,
duration_ms: (p.duration_ms as number | null) ?? null,
status: (p.status as string) || "ok",
error_detail: null,
created_at:
(p.created_at as string) ||
msg.timestamp ||
new Date().toISOString(),
};
bufferRef.current = [...bufferRef.current, entry];
recomputeAndPush();
});
// Initial fetch, then poll every 60 s
void fetchAndUpdate();
const timer = setInterval(() => void fetchAndUpdate(), A2A_POLL_MS);
return () => clearInterval(timer);
}, [showA2AEdges, fetchAndUpdate, setA2AEdges]);
// Pure side-effect — renders nothing
return null;

View File

@ -3,7 +3,6 @@
import { useState, useEffect, useCallback, useRef } from "react";
import { useCanvasStore } from "@/store/canvas";
import { api } from "@/lib/api";
import { useSocketEvent } from "@/hooks/useSocketEvent";
import { COMM_TYPE_LABELS } from "@/lib/design-tokens";
interface Communication {
@ -19,71 +18,32 @@ interface Communication {
durationMs: number | null;
}
/** Workspace-server `ACTIVITY_LOGGED` payload shape. Pulled out so the
 * WS handler below has a typed view of the same fields the HTTP
 * bootstrap consumes — drift between the two paths is a class of bug
 * AgentCommsPanel hit historically.
 *
 * Every field is optional: the payload arrives over the wire and is
 * not guaranteed to carry any particular key, so consumers must handle
 * missing values. */
interface ActivityLoggedPayload {
id?: string;
activity_type?: string;
source_id?: string | null;
target_id?: string | null;
workspace_id?: string;
summary?: string | null;
status?: string;
duration_ms?: number | null;
created_at?: string;
}
/** Fan-out cap for the bootstrap HTTP fetch on mount / on visibility
* re-open. Kept at 3 (carried over from the 2026-05-04 fix) so a
* freshly-mounted overlay on a 15-workspace tenant only spends 3
* round-trips bootstrapping. Live updates after that arrive via the
* WS subscription below no polling, no fan-out to maintain. */
const BOOTSTRAP_FAN_OUT_CAP = 3;
/** Cap on the rendered list. Bootstrap + every WS push prepends, the
* list is sliced to this size after each update. Mirrors the prior
* polling-loop behaviour. */
const COMMS_RENDER_CAP = 20;
/**
* Overlay showing recent A2A communications between workspaces.
*
* Update shape (issue #61 Stage 1, replaces the 30s polling loop):
* - On mount (when visible): one HTTP bootstrap per online workspace,
* capped at BOOTSTRAP_FAN_OUT_CAP. Yields the initial recent-comms
* window without waiting for live events.
* - Steady state: subscribes to ACTIVITY_LOGGED via useSocketEvent.
* Each event with a matching activity_type from a visible online
* workspace gets synthesised into a Communication and prepended.
* - Visibility re-open: re-bootstraps so the user sees the freshest
* window even if WS was idle while collapsed.
*
* No interval poll. The singleton ReconnectingSocket in `store/socket.ts`
* already owns reconnect/backoff/health-check, and `useSocketEvent`
* inherits those guarantees. If WS is genuinely unhealthy, the overlay
* shows the bootstrap snapshot until the next visibility re-open or
* the next WS reconnect (which fires its own rehydrate burst).
* Renders as a floating log panel that auto-updates.
*/
export function CommunicationOverlay() {
const [comms, setComms] = useState<Communication[]>([]);
const [visible, setVisible] = useState(true);
const selectedNodeId = useCanvasStore((s) => s.selectedNodeId);
const nodes = useCanvasStore((s) => s.nodes);
// nodesRef gives the WS handler current node-name resolution without
// re-subscribing on every node-list change. The bus listener is
// registered exactly once per mount; subscriber-side filtering reads
// the latest value via this ref.
const nodesRef = useRef(nodes);
nodesRef.current = nodes;
const bootstrapComms = useCallback(async () => {
const fetchComms = useCallback(async () => {
try {
// Fan-out cap: each polled workspace = 1 round-trip. The platform
// rate limits at 600 req/min/IP; combined with heartbeats + other
// canvas polling, every workspace polled here costs ~6 req/min
// (1 every 30s × 1 per workspace). Capping at 3 keeps this
// overlay's footprint at 18 req/min worst case — well under
// budget even with 8+ workspaces visible. Caught 2026-05-04 when
// a user with 8+ workspaces (Design Director + 6 sub-agents +
// 3 standalones) saw sustained 429s in canvas console.
const onlineNodes = nodesRef.current.filter((n) => n.data.status === "online");
const allComms: Communication[] = [];
for (const node of onlineNodes.slice(0, BOOTSTRAP_FAN_OUT_CAP)) {
for (const node of onlineNodes.slice(0, 3)) {
try {
const activities = await api.get<Array<{
id: string;
@ -99,8 +59,8 @@ export function CommunicationOverlay() {
for (const a of activities) {
if (a.activity_type === "a2a_send" || a.activity_type === "a2a_receive") {
const sourceNode = nodesRef.current.find((n) => n.id === (a.source_id || a.workspace_id));
const targetNode = nodesRef.current.find((n) => n.id === (a.target_id || ""));
const sourceNode = nodes.find((n) => n.id === (a.source_id || a.workspace_id));
const targetNode = nodes.find((n) => n.id === (a.target_id || ""));
allComms.push({
id: a.id,
sourceId: a.source_id || a.workspace_id,
@ -116,12 +76,11 @@ export function CommunicationOverlay() {
}
}
} catch {
// Per-workspace failures must not blank the panel — the same
// robustness the polling version had.
// Skip workspaces that fail
}
}
// Newest-first with id-dedup, capped at COMMS_RENDER_CAP.
// Sort by timestamp, newest first, dedupe
const seen = new Set<string>();
const sorted = allComms
.sort((a, b) => b.timestamp.localeCompare(a.timestamp))
@ -130,78 +89,29 @@ export function CommunicationOverlay() {
seen.add(c.id);
return true;
})
.slice(0, COMMS_RENDER_CAP);
.slice(0, 20);
setComms(sorted);
} catch {
// Bootstrap failure is non-blocking — the WS subscription below
// will populate the panel as live events arrive.
// Silently handle API errors
}
}, []);
// Bootstrap once on mount + every time the user re-opens after a
// collapse. Closed-panel state intentionally drops live updates so
// the panel doesn't churn invisible state — the next open reloads.
useEffect(() => {
// Gate polling on visibility — when the user collapses the overlay
// the data isn't being read, so the per-workspace fan-out becomes
// pure rate-limit overhead. Pre-fix this overlay polled regardless
// of whether the panel was shown, costing ~36 req/min from a
// hidden surface.
if (!visible) return;
bootstrapComms();
}, [bootstrapComms, visible]);
// Live-update path. Filters server-side ACTIVITY_LOGGED events down
// to the comm-overlay-relevant subset and prepends each into the
// rendered list with the same dedup the bootstrap path uses.
//
// Scope guard: ignore events for workspaces not in the visible online
// set, so a user collapsing one workspace doesn't see its comms
// continue to scroll in. Same shape the bootstrap path applies.
useSocketEvent((msg) => {
if (!visible) return;
if (msg.event !== "ACTIVITY_LOGGED") return;
const p = (msg.payload || {}) as ActivityLoggedPayload;
const type = p.activity_type;
if (type !== "a2a_send" && type !== "a2a_receive" && type !== "task_update") return;
const wsId = msg.workspace_id;
const onlineSet = new Set(
nodesRef.current.filter((n) => n.data.status === "online").map((n) => n.id),
);
if (!onlineSet.has(wsId)) return;
const sourceId = p.source_id || wsId;
const targetId = p.target_id || "";
const sourceNode = nodesRef.current.find((n) => n.id === sourceId);
const targetNode = nodesRef.current.find((n) => n.id === targetId);
const incoming: Communication = {
id: p.id || `${msg.timestamp || Date.now()}:${sourceId}:${targetId}`,
sourceId,
targetId,
sourceName: sourceNode?.data.name || "Unknown",
targetName: targetNode?.data.name || "Unknown",
type: type as Communication["type"],
summary: p.summary || "",
status: p.status || "ok",
timestamp: p.created_at || msg.timestamp || new Date().toISOString(),
durationMs: p.duration_ms ?? null,
};
setComms((prev) => {
// Prepend, dedup by id, re-cap. Functional setState is necessary
// because two ACTIVITY_LOGGED events arriving in the same React
// batch would otherwise read a stale `comms` from the closure.
const seen = new Set<string>();
const merged = [incoming, ...prev]
.sort((a, b) => b.timestamp.localeCompare(a.timestamp))
.filter((c) => {
if (seen.has(c.id)) return false;
seen.add(c.id);
return true;
})
.slice(0, COMMS_RENDER_CAP);
return merged;
});
});
fetchComms();
// 30s cadence (was 10s). At 3-workspace fan-out that's 6 req/min
// worst case from this overlay. Combined with heartbeats (~30/min)
// and other canvas polling, leaves ample headroom under the 600/
// min/IP server-side rate limit even at 8+ workspace tenants.
const interval = setInterval(fetchComms, 30000);
return () => clearInterval(interval);
}, [fetchComms, visible]);
if (!visible || comms.length === 0) {
return (

View File

@ -325,6 +325,7 @@ export function MemoryInspectorPanel({ workspaceId }: Props) {
{dropdownOptions.map((opt) => (
<option key={opt.value} value={opt.value}>
{opt.label}
{opt.kind ? ` (${opt.kind})` : ''}
</option>
))}
</select>

View File

@ -1,175 +0,0 @@
"use client";
/**
* PurchaseSuccessModal demo-only post-purchase confirmation.
*
* Mounted on the canvas root (`app/page.tsx`). On first paint it inspects
* `?purchase_success=1[&item=<name>]` on the current URL. If present, it
* renders a centred modal styled after `ConfirmDialog`, schedules a 5s
* auto-dismiss, and rewrites the URL via `history.replaceState` to drop
* the params so a refresh after dismiss does NOT re-show the modal.
*
* Mock for the funding demo there is no real billing surface behind
* this. The marketplace "Purchase" button on the landing page redirects
* here with the params; this modal is the only thing the user sees of
* the "transaction".
*
* Styling matches the warm-paper @theme tokens (surface-sunken / line /
* ink / good) so it tracks light + dark without per-mode overrides.
*/
import { useEffect, useRef, useState } from "react";
import { createPortal } from "react-dom";
const AUTO_DISMISS_MS = 5000;
function readPurchaseParams(): { open: boolean; item: string | null } {
if (typeof window === "undefined") return { open: false, item: null };
const sp = new URLSearchParams(window.location.search);
const flag = sp.get("purchase_success");
if (flag !== "1" && flag !== "true") return { open: false, item: null };
return { open: true, item: sp.get("item") };
}
function stripPurchaseParams() {
if (typeof window === "undefined") return;
const url = new URL(window.location.href);
url.searchParams.delete("purchase_success");
url.searchParams.delete("item");
// replaceState (not pushState) so back-button doesn't return to the
// pre-strip URL and re-trigger the modal.
window.history.replaceState({}, "", url.toString());
}
export function PurchaseSuccessModal() {
const [open, setOpen] = useState(false);
const [item, setItem] = useState<string | null>(null);
const [mounted, setMounted] = useState(false);
const dialogRef = useRef<HTMLDivElement>(null);
// Read the URL params once on mount. We don't subscribe to navigation —
// this modal is a one-shot for the demo redirect, not a persistent
// listener.
useEffect(() => {
setMounted(true);
const { open: shouldOpen, item: itemName } = readPurchaseParams();
if (shouldOpen) {
setOpen(true);
setItem(itemName);
// Clean the URL immediately so a refresh after the modal is closed
// (or even while it's still open) does NOT re-trigger it.
stripPurchaseParams();
}
}, []);
// Auto-dismiss timer + Escape handler.
useEffect(() => {
if (!open) return;
const t = window.setTimeout(() => setOpen(false), AUTO_DISMISS_MS);
const onKey = (e: KeyboardEvent) => {
if (e.key === "Escape") setOpen(false);
};
window.addEventListener("keydown", onKey);
// Focus the close button so keyboard users land on it after redirect.
const raf = requestAnimationFrame(() => {
dialogRef.current?.querySelector<HTMLButtonElement>("button")?.focus();
});
return () => {
window.clearTimeout(t);
window.removeEventListener("keydown", onKey);
cancelAnimationFrame(raf);
};
}, [open]);
if (!open || !mounted) return null;
const itemLabel = item ? decodeURIComponent(item) : "Your new agent";
return createPortal(
<div
className="fixed inset-0 z-[9999] flex items-center justify-center"
data-testid="purchase-success-modal"
>
{/* Backdrop — click closes, matches ConfirmDialog backdrop. */}
<div
className="absolute inset-0 bg-black/60 backdrop-blur-sm"
onClick={() => setOpen(false)}
aria-hidden="true"
/>
<div
ref={dialogRef}
role="dialog"
aria-modal="true"
aria-labelledby="purchase-success-title"
className="relative bg-surface-sunken border border-line rounded-xl shadow-2xl shadow-black/50 max-w-[420px] w-full mx-4 overflow-hidden"
>
<div className="px-6 pt-6 pb-4">
<div className="flex items-start gap-4">
{/* Success glyph uses --color-good so it tracks the theme.
Inline SVG over an emoji so it stays readable + on-brand
in both light and dark. */}
<div
className="flex h-10 w-10 flex-shrink-0 items-center justify-center rounded-full"
style={{
background:
"color-mix(in srgb, var(--color-good) 15%, transparent)",
color: "var(--color-good)",
}}
>
<svg
width="22"
height="22"
viewBox="0 0 24 24"
fill="none"
aria-hidden="true"
>
<circle
cx="12"
cy="12"
r="10"
stroke="currentColor"
strokeWidth="1.5"
/>
<path
d="M7.5 12.5L10.5 15.5L16.5 9.5"
stroke="currentColor"
strokeWidth="1.8"
strokeLinecap="round"
strokeLinejoin="round"
/>
</svg>
</div>
<div className="flex-1">
<h3
id="purchase-success-title"
className="text-base font-semibold text-ink"
>
Purchase successful
</h3>
<p className="mt-1.5 text-[13px] leading-relaxed text-ink-mid">
<span className="font-medium text-ink">{itemLabel}</span> has
been added to your workspace. Provisioning starts in the
background you can keep working while it spins up.
</p>
</div>
</div>
</div>
<div className="flex items-center justify-between gap-3 px-6 py-3 border-t border-line bg-surface/50">
<span className="font-mono text-[10.5px] uppercase tracking-[0.12em] text-ink-soft">
auto-dismiss · {AUTO_DISMISS_MS / 1000}s
</span>
<button
type="button"
onClick={() => setOpen(false)}
className="px-3.5 py-1.5 text-[13px] rounded-lg bg-accent hover:bg-accent-strong text-white transition-colors focus:outline-none focus-visible:ring-2 focus-visible:ring-offset-2 focus-visible:ring-offset-surface-sunken focus-visible:ring-accent/60"
>
Close
</button>
</div>
</div>
</div>,
document.body,
);
}

View File

@ -287,7 +287,7 @@ export function SidePanel() {
{panelTab === "config" && <ConfigTab key={selectedNodeId} workspaceId={selectedNodeId} />}
{panelTab === "schedule" && <ScheduleTab key={selectedNodeId} workspaceId={selectedNodeId} />}
{panelTab === "channels" && <ChannelsTab key={selectedNodeId} workspaceId={selectedNodeId} />}
{panelTab === "files" && <FilesTab key={selectedNodeId} workspaceId={selectedNodeId} data={node.data} />}
{panelTab === "files" && <FilesTab key={selectedNodeId} workspaceId={selectedNodeId} />}
{panelTab === "memory" && <MemoryInspectorPanel key={selectedNodeId} workspaceId={selectedNodeId} />}
{panelTab === "traces" && <TracesTab key={selectedNodeId} workspaceId={selectedNodeId} />}
{panelTab === "events" && <EventsTab key={selectedNodeId} workspaceId={selectedNodeId} />}

View File

@ -41,10 +41,6 @@ vi.mock("@/store/canvas", () => ({
// ── Imports (after mocks) ─────────────────────────────────────────────────────
import { api } from "@/lib/api";
import {
emitSocketEvent,
_resetSocketEventListenersForTests,
} from "@/store/socket-events";
import {
buildA2AEdges,
formatA2ARelativeTime,
@ -346,151 +342,6 @@ describe("A2ATopologyOverlay component", () => {
expect(mockGet.mock.calls.length).toBe(callsAfterMount);
});
// ── #61 Stage 2: ACTIVITY_LOGGED subscription tests ────────────────────────
//
// Pin the post-#61 behaviour: WS push for delegation contributes to
// the overlay's edge buffer with NO additional HTTP fetch. Same shape
// as Stage 1 (CommunicationOverlay).
describe("#61 stage 2 — ACTIVITY_LOGGED subscription", () => {
beforeEach(() => {
_resetSocketEventListenersForTests();
});
afterEach(() => {
_resetSocketEventListenersForTests();
});
function emitDelegation(overrides: {
workspaceId?: string;
sourceId?: string;
targetId?: string;
method?: string;
activityType?: string;
} = {}) {
// Use Date.now() (real time, fake-timer-frozen) rather than the
// hardcoded NOW constant — buildA2AEdges prunes by Date.now() -
// A2A_WINDOW_MS, so a row dated against the wrong epoch silently
// falls outside the window and the test fails for a confusing
// reason ("edges array empty" vs "filter dropped my row").
const realNow = Date.now();
emitSocketEvent({
event: "ACTIVITY_LOGGED",
workspace_id: overrides.workspaceId ?? "ws-a",
timestamp: new Date(realNow).toISOString(),
payload: {
id: `act-${Math.random().toString(36).slice(2)}`,
activity_type: overrides.activityType ?? "delegation",
method: overrides.method ?? "delegate",
source_id: overrides.sourceId ?? "ws-a",
target_id: overrides.targetId ?? "ws-b",
status: "ok",
created_at: new Date(realNow - 30_000).toISOString(),
},
});
}
it("does NOT poll on a 60s interval after bootstrap (post-#61)", async () => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
mockGet.mockResolvedValue([] as any);
render(<A2ATopologyOverlay />);
await act(async () => { await Promise.resolve(); });
const callsAfterBootstrap = mockGet.mock.calls.length;
expect(callsAfterBootstrap).toBe(2); // ws-a + ws-b
// Pre-#61: a 60s clock tick would fire a fresh fan-out (2 more
// calls). Post-#61: no interval, no extra calls.
await act(async () => {
vi.advanceTimersByTime(120_000);
});
expect(mockGet.mock.calls.length).toBe(callsAfterBootstrap);
});
it("WS push for a delegation event from a visible workspace updates edges with NO HTTP call", async () => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
mockGet.mockResolvedValue([] as any);
render(<A2ATopologyOverlay />);
await act(async () => { await Promise.resolve(); await Promise.resolve(); });
mockGet.mockClear();
mockStoreState.setA2AEdges.mockClear();
await act(async () => {
emitDelegation({ sourceId: "ws-a", targetId: "ws-b" });
});
// Edges-set called with at least one a2a edge for the new push.
const calls = mockStoreState.setA2AEdges.mock.calls;
expect(calls.length).toBeGreaterThanOrEqual(1);
const lastCall = calls[calls.length - 1][0] as Array<{ id: string }>;
expect(lastCall.some((e) => e.id === "a2a-ws-a-ws-b")).toBe(true);
// Critical: no HTTP fetch fired during the WS path.
expect(mockGet).not.toHaveBeenCalled();
});
it("WS push for a non-delegation activity_type is ignored", async () => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
mockGet.mockResolvedValue([] as any);
render(<A2ATopologyOverlay />);
await act(async () => { await Promise.resolve(); });
mockStoreState.setA2AEdges.mockClear();
await act(async () => {
emitDelegation({ activityType: "a2a_send" });
});
// setA2AEdges must not be called by the WS handler — the only
// setA2AEdges calls in this test came from the initial bootstrap.
expect(mockStoreState.setA2AEdges).not.toHaveBeenCalled();
});
it("WS push for a delegate_result row is ignored (mirrors buildA2AEdges filter)", async () => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
mockGet.mockResolvedValue([] as any);
render(<A2ATopologyOverlay />);
await act(async () => { await Promise.resolve(); });
mockStoreState.setA2AEdges.mockClear();
await act(async () => {
emitDelegation({ method: "delegate_result" });
});
// delegate_result rows do not contribute to the edge count — they
// are completion signals, not initiations.
expect(mockStoreState.setA2AEdges).not.toHaveBeenCalled();
});
it("WS push from a hidden workspace is ignored", async () => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
mockGet.mockResolvedValue([] as any);
render(<A2ATopologyOverlay />);
await act(async () => { await Promise.resolve(); });
mockStoreState.setA2AEdges.mockClear();
await act(async () => {
emitDelegation({ workspaceId: "ws-hidden" });
});
expect(mockStoreState.setA2AEdges).not.toHaveBeenCalled();
});
it("WS push while showA2AEdges is false is ignored", async () => {
mockStoreState.showA2AEdges = false;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
mockGet.mockResolvedValue([] as any);
render(<A2ATopologyOverlay />);
// The mount path with showA2AEdges=false calls setA2AEdges([])
// once — clear that to isolate the WS path.
mockStoreState.setA2AEdges.mockClear();
await act(async () => {
emitDelegation();
});
expect(mockStoreState.setA2AEdges).not.toHaveBeenCalled();
expect(mockGet).not.toHaveBeenCalled();
});
});
it("re-fetches when the visible ID set actually changes", async () => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
mockGet.mockResolvedValue([] as any);

View File

@ -36,10 +36,6 @@ vi.mock("@/hooks/useWorkspaceName", () => ({
useWorkspaceName: () => () => "Test WS",
}));
import {
emitSocketEvent,
_resetSocketEventListenersForTests,
} from "@/store/socket-events";
import { ActivityTab } from "../tabs/ActivityTab";
// ── Fixtures ──────────────────────────────────────────────────────────────────
@ -362,191 +358,6 @@ describe("ActivityTab — refresh button", () => {
});
});
// ── Suite 6.5: ACTIVITY_LOGGED subscription (#61 stage 3) ─────────────────────
//
// Pin the post-#61 behaviour: WS push extends the rendered list with NO
// additional HTTP fetch. The 5s polling loop is gone; live updates
// arrive over the WebSocket bus.
describe("ActivityTab — #61 stage 3: ACTIVITY_LOGGED subscription", () => {
beforeEach(() => {
vi.clearAllMocks();
mockGet.mockResolvedValue([]);
_resetSocketEventListenersForTests();
});
afterEach(() => {
cleanup();
_resetSocketEventListenersForTests();
});
function emitActivity(overrides: {
workspaceId?: string;
activityType?: string;
summary?: string;
id?: string;
} = {}) {
const realNow = Date.now();
emitSocketEvent({
event: "ACTIVITY_LOGGED",
workspace_id: overrides.workspaceId ?? "ws-1",
timestamp: new Date(realNow).toISOString(),
payload: {
id: overrides.id ?? `act-${Math.random().toString(36).slice(2)}`,
activity_type: overrides.activityType ?? "agent_log",
source_id: null,
target_id: null,
method: null,
summary: overrides.summary ?? "live-pushed",
status: "ok",
created_at: new Date(realNow - 5_000).toISOString(),
},
});
}
it("WS push for matching workspace prepends to the list with NO HTTP call", async () => {
render(<ActivityTab workspaceId="ws-1" />);
await waitFor(() => {
expect(screen.getByText(/0 activities|no activity/i)).toBeTruthy();
});
mockGet.mockClear();
await act(async () => {
emitActivity({ summary: "live-row-from-bus" });
});
await waitFor(() => {
expect(screen.getByText(/live-row-from-bus/)).toBeTruthy();
});
expect(mockGet).not.toHaveBeenCalled();
});
it("WS push for a different workspace is ignored", async () => {
render(<ActivityTab workspaceId="ws-1" />);
await waitFor(() => screen.getByText(/no activity/i));
await act(async () => {
emitActivity({
workspaceId: "ws-other",
summary: "should-not-render-other-ws",
});
});
expect(screen.queryByText(/should-not-render-other-ws/)).toBeNull();
});
it("WS push respects the active filter — non-matching activity_type is ignored", async () => {
render(<ActivityTab workspaceId="ws-1" />);
await waitFor(() => screen.getByText(/no activity/i));
// Apply "Tasks" filter.
clickButton(/tasks/i);
await waitFor(() => {
expect(
screen.getByRole("button", { name: /tasks/i }).getAttribute("aria-pressed"),
).toBe("true");
});
// Push an a2a_send (does NOT match task_update filter).
await act(async () => {
emitActivity({
activityType: "a2a_send",
summary: "should-not-render-filter-mismatch",
});
});
expect(
screen.queryByText(/should-not-render-filter-mismatch/),
).toBeNull();
});
it("WS push respects the active filter — matching activity_type is rendered", async () => {
render(<ActivityTab workspaceId="ws-1" />);
await waitFor(() => screen.getByText(/no activity/i));
clickButton(/tasks/i);
await waitFor(() => {
expect(
screen.getByRole("button", { name: /tasks/i }).getAttribute("aria-pressed"),
).toBe("true");
});
await act(async () => {
emitActivity({
activityType: "task_update",
summary: "task-filter-match",
});
});
await waitFor(() => {
expect(screen.getByText(/task-filter-match/)).toBeTruthy();
});
});
it("WS push while autoRefresh is paused is ignored", async () => {
render(<ActivityTab workspaceId="ws-1" />);
await waitFor(() => screen.getByText(/no activity/i));
// Toggle Live → Paused.
clickButton(/live/i);
await waitFor(() => {
expect(screen.getByText(/Paused/)).toBeTruthy();
});
await act(async () => {
emitActivity({ summary: "should-not-render-paused" });
});
expect(screen.queryByText(/should-not-render-paused/)).toBeNull();
});
it("WS push for a row already in the list is deduped (no double-render)", async () => {
// Bootstrap with one row — same id as the WS push to trigger dedup.
mockGet.mockResolvedValueOnce([
makeEntry({ id: "shared-id", summary: "bootstrap-summary" }),
]);
render(<ActivityTab workspaceId="ws-1" />);
await waitFor(() => {
expect(screen.getByText(/bootstrap-summary/)).toBeTruthy();
});
mockGet.mockClear();
// Push a row with the SAME id but a different summary — must not
// render the new summary; original row stays.
await act(async () => {
emitActivity({
id: "shared-id",
summary: "should-not-replace-existing",
});
});
expect(screen.queryByText(/should-not-replace-existing/)).toBeNull();
// Also verify count didn't grow.
expect(screen.getByText(/1 activities/)).toBeTruthy();
});
it("does NOT poll on a 5s interval after mount (post-#61)", async () => {
vi.useFakeTimers();
try {
render(<ActivityTab workspaceId="ws-1" />);
// Drain the mount-time bootstrap promise.
await act(async () => {
await Promise.resolve();
await Promise.resolve();
});
const callsAfterBootstrap = mockGet.mock.calls.length;
expect(callsAfterBootstrap).toBeGreaterThanOrEqual(1);
// Pre-#61: a 30s clock advance fires 6 more polls. Post-#61: 0.
await act(async () => {
vi.advanceTimersByTime(30_000);
});
expect(mockGet.mock.calls.length).toBe(callsAfterBootstrap);
} finally {
vi.useRealTimers();
}
});
});
// ── Suite 7: Activity count ───────────────────────────────────────────────────
describe("ActivityTab — activity count", () => {

View File

@ -1,28 +1,18 @@
// @vitest-environment jsdom
/**
* CommunicationOverlay tests pin both the 2026-05-04 fan-out cap fix
* AND the 2026-05-07 polling ACTIVITY_LOGGED-subscriber refactor
* (issue #61 stage 1).
* CommunicationOverlay tests pin the rate-limit fix shipped 2026-05-04.
*
* The overlay used to poll /workspaces/:id/activity?limit=5 on a 30s
* interval per online workspace (capped at 3). Post-#61: it bootstraps
* once on mount via the same HTTP path (cap of 3 retained), then
* subscribes to ACTIVITY_LOGGED via the global socket bus for live
* updates. No interval poll.
* The overlay polls /workspaces/:id/activity?limit=5 for each online
* workspace. Pre-fix it (a) polled regardless of visibility and (b)
* fanned out to 6 workspaces every 10s. With 8+ workspaces a user
* triggered sustained 429s (server-side rate limit is 600 req/min/IP).
*
* These tests pin:
* 1. Bootstrap fan-out cap of 3 even with 6 online nodes, only 3
* HTTP fetches on mount.
* 2. Visibility gate when collapsed, no HTTP fetches; re-open
* re-bootstraps.
* 3. NO interval polling advancing the clock past 30s does not fire
* additional HTTP calls.
* 4. WS push extends the rendered list without firing any HTTP call.
* 5. WS push for an offline workspace is ignored.
* 6. WS push for a non-comm activity_type is ignored.
* 1. Fan-out cap of 3 even with 6 online nodes, only 3 fetches
* 2. Visibility gate when collapsed, no polling
*
* If a future refactor regresses any of these, CI fails before the
* regression hits a paying tenant.
* If a future refactor pushes either dial back up, CI fails before
* the regression hits a paying tenant.
*/
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { render, cleanup, act, fireEvent } from "@testing-library/react";
@ -33,7 +23,7 @@ vi.mock("@/lib/api", () => ({
api: { get: vi.fn() },
}));
// Six online nodes — enough to verify the bootstrap cap of 3.
// Six online nodes — enough to verify the cap of 3.
const mockStoreState = {
selectedNodeId: null as string | null,
nodes: [
@ -66,10 +56,6 @@ vi.mock("@/lib/design-tokens", () => ({
// ── Imports (after mocks) ─────────────────────────────────────────────────────
import { api } from "@/lib/api";
import {
emitSocketEvent,
_resetSocketEventListenersForTests,
} from "@/store/socket-events";
import { CommunicationOverlay } from "../CommunicationOverlay";
const mockGet = vi.mocked(api.get);
@ -80,34 +66,30 @@ beforeEach(() => {
vi.useFakeTimers();
mockGet.mockReset();
mockGet.mockResolvedValue([]);
// Drop any subscribers the previous test left on the singleton bus —
// each render adds one via useSocketEvent.
_resetSocketEventListenersForTests();
});
afterEach(() => {
cleanup();
vi.useRealTimers();
_resetSocketEventListenersForTests();
});
// ── Tests ─────────────────────────────────────────────────────────────────────
describe("CommunicationOverlay — bootstrap fan-out cap", () => {
it("bootstraps at most 3 of 6 online workspaces (rate-limit floor preserved post-#61)", async () => {
describe("CommunicationOverlay — fan-out cap", () => {
it("polls at most 3 of 6 online workspaces (rate-limit floor)", async () => {
await act(async () => {
render(<CommunicationOverlay />);
});
// Mount fires the bootstrap synchronously — pre-#61 this was the
// first poll cycle; post-#61 it's the only HTTP fetch (live updates
// arrive via WS push). 6 nodes → 3 fetches.
// Mount fires the first poll synchronously (no interval tick yet).
// Pre-fix: 6 calls. Post-fix: 3.
expect(mockGet).toHaveBeenCalledTimes(3);
// Verify the calls are for the FIRST 3 online nodes (slice order).
expect(mockGet).toHaveBeenCalledWith("/workspaces/ws-1/activity?limit=5");
expect(mockGet).toHaveBeenCalledWith("/workspaces/ws-2/activity?limit=5");
expect(mockGet).toHaveBeenCalledWith("/workspaces/ws-3/activity?limit=5");
});
it("never bootstraps offline workspaces", async () => {
it("never polls offline workspaces", async () => {
await act(async () => {
render(<CommunicationOverlay />);
});
@ -117,39 +99,40 @@ describe("CommunicationOverlay — bootstrap fan-out cap", () => {
});
});
describe("CommunicationOverlay — no interval polling (post-#61)", () => {
// The pre-#61 implementation re-fetched every 30s per workspace.
// Post-#61 the only HTTP path is the bootstrap on mount + on
// visibility-toggle. This test pins the absence of any interval
// poll: a 60s clock advance must not produce a second round of
// fetches.
it("does NOT poll on a 30s interval after bootstrap", async () => {
describe("CommunicationOverlay — cadence", () => {
it("uses 30s interval cadence (was 10s pre-fix)", async () => {
await act(async () => {
render(<CommunicationOverlay />);
});
expect(mockGet).toHaveBeenCalledTimes(3); // initial bootstrap
mockGet.mockClear();
expect(mockGet).toHaveBeenCalledTimes(3); // initial mount poll
// Advance 60s — well past any plausible cadence the prior version
// could have used.
// Advance 10s — pre-fix this would fire another poll. Post-fix: silent.
await act(async () => {
vi.advanceTimersByTime(60_000);
vi.advanceTimersByTime(10_000);
});
expect(mockGet).not.toHaveBeenCalled();
expect(mockGet).toHaveBeenCalledTimes(3);
// Advance to 30s — interval fires.
await act(async () => {
vi.advanceTimersByTime(20_000);
});
expect(mockGet).toHaveBeenCalledTimes(6); // +3 from second tick
});
});
describe("CommunicationOverlay — visibility gate", () => {
// The visibility gate now does two things post-#61:
// - while closed, the WS handler short-circuits (no setComms churn)
// - re-opening triggers a fresh bootstrap so the list reflects
// anything that happened while the panel was collapsed
// The visibility gate is the dial that drops collapsed-panel polling
// to ZERO. The cadence test above can't catch its removal — if a
// refactor dropped `if (!visible) return`, the cadence test would
// still pass because the effect would still fire every 30s.
//
// Direct probe: render with comms-returning mock so the panel
// actually renders (close button only exists in the expanded panel,
// not the collapsed button-state). Click close, advance the clock,
// assert no further fetches.
it("stops fetching while collapsed and re-bootstraps on re-open", async () => {
it("stops polling after the user collapses the panel", async () => {
// Mock returns one a2a_send so comms.length > 0 → panel renders →
// close button accessible.
mockGet.mockResolvedValue([
{
id: "act-1",
@ -167,202 +150,29 @@ describe("CommunicationOverlay — visibility gate", () => {
const { getByLabelText } = await act(async () => {
return render(<CommunicationOverlay />);
});
// Drain pending microtasks (resolves the await in bootstrap) so
// setComms lands and the panel renders. Don't advance time — it's
// not load-bearing for the gate test, but matches the pattern used
// pre-#61 for stability.
// Drain pending microtasks (resolves the await in fetchComms) so
// setComms lands and the panel renders. Don't advance time — that
// would fire the next interval tick and pollute the assertion.
await act(async () => {
await Promise.resolve();
await Promise.resolve();
await Promise.resolve();
});
expect(mockGet).toHaveBeenCalledTimes(3); // initial bootstrap
// Initial mount polled 3 workspaces.
expect(mockGet).toHaveBeenCalledTimes(3);
mockGet.mockClear();
// Click close. While closed, no fetches and no WS-driven updates.
// Click the close button. Synchronous getByLabelText avoids
// findBy's internal setTimeout (deadlocks under useFakeTimers).
const closeBtn = getByLabelText("Close communications panel");
await act(async () => {
fireEvent.click(closeBtn);
});
// Advance well past the 30s cadence — gate should suppress the tick.
await act(async () => {
vi.advanceTimersByTime(60_000);
});
expect(mockGet).not.toHaveBeenCalled();
// Re-open via the collapsed button. Must trigger a fresh bootstrap.
const openBtn = getByLabelText("Show communications panel");
await act(async () => {
fireEvent.click(openBtn);
});
await act(async () => {
await Promise.resolve();
await Promise.resolve();
});
expect(mockGet).toHaveBeenCalledTimes(3); // re-bootstrap on re-open
});
});
describe("CommunicationOverlay — WS subscription (#61 stage 1 core)", () => {
// The load-bearing post-#61 behaviour. Every test in this block must
// verify (a) the WS push DID update the rendered comms list, and
// (b) NO additional HTTP call was fired — the whole point of the
// refactor is to remove the polling-driven HTTP traffic.
//
// Test helper: pushes a synthetic ACTIVITY_LOGGED frame through the
// mocked socket bus. Defaults model an a2a_send comm originating from
// ws-1; `overrides.workspaceId` retargets the frame and
// `overrides.payload` shallow-merges over the default payload fields.
function emitActivityLogged(overrides: Partial<{
workspaceId: string;
payload: Record<string, unknown>;
}> = {}) {
emitSocketEvent({
event: "ACTIVITY_LOGGED",
workspace_id: overrides.workspaceId ?? "ws-1",
timestamp: new Date().toISOString(),
payload: {
// Random id so repeated emissions don't collide with the
// component's dedup-by-id logic unless a test overrides `id`.
id: `act-${Math.random().toString(36).slice(2)}`,
activity_type: "a2a_send",
source_id: "ws-1",
target_id: "ws-2",
summary: "live push",
status: "ok",
duration_ms: 42,
created_at: new Date().toISOString(),
...overrides.payload,
},
});
}
it("WS push for a comm activity_type extends the rendered list with NO additional HTTP call", async () => {
const { container } = await act(async () => {
return render(<CommunicationOverlay />);
});
expect(mockGet).toHaveBeenCalledTimes(3); // bootstrap
mockGet.mockClear();
await act(async () => {
emitActivityLogged({ payload: { summary: "hello" } });
});
// Drain the microtask queue so the state update from the WS handler
// flushes before asserting.
await act(async () => {
await Promise.resolve();
});
// Two pins:
// 1. comms list reflects the live push (look for the summary text)
// 2. zero HTTP fetches fired during the WS path
expect(container.textContent).toContain("hello");
expect(mockGet).not.toHaveBeenCalled();
});
it("WS push for an offline workspace is ignored", async () => {
const { container } = await act(async () => {
return render(<CommunicationOverlay />);
});
mockGet.mockClear();
// NOTE(review): assumes the suite-level mock setup (outside this
// chunk) does NOT list "ws-offline" as an online workspace — confirm.
await act(async () => {
emitActivityLogged({
workspaceId: "ws-offline",
payload: { source_id: "ws-offline", summary: "should-not-render" },
});
});
await act(async () => {
await Promise.resolve();
});
expect(container.textContent).not.toContain("should-not-render");
expect(mockGet).not.toHaveBeenCalled();
});
it("WS push for a non-comm activity_type is ignored (e.g. delegation)", async () => {
const { container } = await act(async () => {
return render(<CommunicationOverlay />);
});
mockGet.mockClear();
await act(async () => {
emitActivityLogged({
payload: {
activity_type: "delegation",
summary: "should-not-render-delegation",
},
});
});
await act(async () => {
await Promise.resolve();
});
expect(container.textContent).not.toContain("should-not-render-delegation");
expect(mockGet).not.toHaveBeenCalled();
});
it("WS push while the panel is collapsed is ignored (no churn on hidden state)", async () => {
// Bootstrap with one comm so the panel renders → close button
// accessible. Then collapse, emit a WS push, re-open: the rendered
// list must come from the re-bootstrap, NOT from the WS-push that
// arrived during the closed state. Also: nothing visible while
// closed (the collapsed button shows only the count, not summaries).
mockGet.mockResolvedValue([
{
id: "act-bootstrap",
workspace_id: "ws-1",
activity_type: "a2a_send",
source_id: "ws-1",
target_id: "ws-2",
summary: "bootstrap-summary",
status: "ok",
duration_ms: 1,
created_at: new Date().toISOString(),
},
]);
const { getByLabelText, container } = await act(async () => {
return render(<CommunicationOverlay />);
});
// Flush bootstrap promises so the expanded panel (and its close
// button) are in the DOM.
await act(async () => {
await Promise.resolve();
await Promise.resolve();
});
// Collapse.
const closeBtn = getByLabelText("Close communications panel");
await act(async () => {
fireEvent.click(closeBtn);
});
// Bootstrap mock returns nothing on the re-open path so we can
// distinguish "WS push leaked through the gate" from "re-bootstrap
// refilled the list."
mockGet.mockReset();
mockGet.mockResolvedValue([]);
await act(async () => {
emitActivityLogged({
payload: { summary: "leaked-while-closed" },
});
});
await act(async () => {
await Promise.resolve();
});
// Closed state: rendered DOM must not show any push-derived text.
expect(container.textContent).not.toContain("leaked-while-closed");
});
it("non-ACTIVITY_LOGGED events are ignored (e.g. WORKSPACE_OFFLINE)", async () => {
const { container } = await act(async () => {
return render(<CommunicationOverlay />);
});
mockGet.mockClear();
// Raw emitSocketEvent (not the helper) so the event name itself is
// the thing under test.
await act(async () => {
emitSocketEvent({
event: "WORKSPACE_OFFLINE",
workspace_id: "ws-1",
timestamp: new Date().toISOString(),
payload: { summary: "should-not-render-event" },
});
});
await act(async () => {
await Promise.resolve();
});
expect(container.textContent).not.toContain("should-not-render-event");
expect(mockGet).not.toHaveBeenCalled();
});
});

View File

@ -1,9 +1,8 @@
"use client";
import { useState, useEffect, useCallback, useRef } from "react";
import { useState, useEffect, useCallback } from "react";
import { api } from "@/lib/api";
import { ConversationTraceModal } from "@/components/ConversationTraceModal";
import { useSocketEvent } from "@/hooks/useSocketEvent";
import { type ActivityEntry } from "@/types/activity";
import { useWorkspaceName } from "@/hooks/useWorkspaceName";
import { inferA2AErrorHint } from "./chat/a2aErrorHint";
@ -49,15 +48,6 @@ export function ActivityTab({ workspaceId }: Props) {
const [traceOpen, setTraceOpen] = useState(false);
const resolveName = useWorkspaceName();
// Refs let the WS handler read the latest filter / autoRefresh
// selection without re-subscribing on every state change. The bus
// listener is registered exactly once per mount via useSocketEvent's
// ref-internal pattern; subscriber-side filtering reads from these.
const filterRef = useRef(filter);
filterRef.current = filter;
const autoRefreshRef = useRef(autoRefresh);
autoRefreshRef.current = autoRefresh;
const loadActivities = useCallback(async () => {
try {
const typeParam = filter !== "all" ? `?type=${filter}` : "";
@ -76,58 +66,11 @@ export function ActivityTab({ workspaceId }: Props) {
loadActivities();
}, [loadActivities]);
// Live-update path (issue #61 stage 3, replaces the 5s setInterval).
// ACTIVITY_LOGGED events from this workspace prepend to the rendered
// list — dedup by id so a server-side update + a poll reply don't
// double-render the same row.
//
// Honours the user's autoRefresh toggle: when paused, live updates
// are dropped until the user re-enables Live (or hits Refresh, which
// re-bootstraps via loadActivities).
//
// Filter awareness: matches the server-side `?type=<filter>`
// semantics so the panel doesn't show rows the user excluded.
useSocketEvent((msg) => {
// Guard order matters: cheapest/most-selective checks first, all
// reading refs (not state) so this handler never needs to
// re-subscribe.
if (!autoRefreshRef.current) return;
if (msg.event !== "ACTIVITY_LOGGED") return;
if (msg.workspace_id !== workspaceId) return;
const p = (msg.payload || {}) as Record<string, unknown>;
const activityType = (p.activity_type as string) || "";
const f = filterRef.current;
// Mirror the server-side `?type=<filter>` semantics client-side so a
// push can't show a row the active filter excludes.
if (f !== "all" && activityType !== f) return;
// Map the loosely-typed WS payload into an ActivityEntry. Every
// field falls back to null/"ok"/now so a sparse payload still
// renders; the synthetic id fallback keeps dedup stable when the
// server omits `id`.
const entry: ActivityEntry = {
id:
(p.id as string) ||
`ws-push-${msg.timestamp || Date.now()}-${msg.workspace_id}`,
workspace_id: msg.workspace_id,
activity_type: activityType,
source_id: (p.source_id as string | null) ?? null,
target_id: (p.target_id as string | null) ?? null,
method: (p.method as string | null) ?? null,
summary: (p.summary as string | null) ?? null,
request_body: (p.request_body as Record<string, unknown> | null) ?? null,
response_body:
(p.response_body as Record<string, unknown> | null) ?? null,
duration_ms: (p.duration_ms as number | null) ?? null,
status: (p.status as string) || "ok",
error_detail: (p.error_detail as string | null) ?? null,
created_at:
(p.created_at as string) ||
msg.timestamp ||
new Date().toISOString(),
};
// Prepend (newest-first list) via the functional form so concurrent
// pushes never clobber each other.
setActivities((prev) => {
// Dedup by id — a row that arrived via the bootstrap fetch and
// also fires ACTIVITY_LOGGED from a delayed server-side hook
// must render exactly once.
if (prev.some((e) => e.id === entry.id)) return prev;
return [entry, ...prev];
});
});
useEffect(() => {
  // Poll only while the user's Live toggle is on; returning without a
  // cleanup is fine because no timer was started.
  if (!autoRefresh) return;
  const timerId = setInterval(loadActivities, 5000);
  // Cleanup clears the timer on unmount or when either dependency
  // changes (a new loadActivities identity restarts the cycle).
  return () => clearInterval(timerId);
}, [loadActivities, autoRefresh]);
return (
<div className="flex flex-col h-full">

View File

@ -8,11 +8,11 @@ import { useCanvasStore, type WorkspaceNodeData } from "@/store/canvas";
import { useSocketEvent } from "@/hooks/useSocketEvent";
import { type ChatMessage, type ChatAttachment, createMessage, appendMessageDeduped } from "./chat/types";
import { uploadChatFiles, downloadChatFile, isPlatformAttachment } from "./chat/uploads";
import { PendingAttachmentPill } from "./chat/AttachmentViews";
import { AttachmentPreview } from "./chat/AttachmentPreview";
import { AttachmentChip, PendingAttachmentPill } from "./chat/AttachmentViews";
import { extractFilesFromTask } from "./chat/message-parser";
import { AgentCommsPanel } from "./chat/AgentCommsPanel";
import { appendActivityLine } from "./chat/activityLog";
import { activityRowToMessages, type ActivityRowForHydration } from "./chat/historyHydration";
import { runtimeDisplayName } from "@/lib/runtime-names";
import { ConfirmDialog } from "@/components/ConfirmDialog";
@ -49,12 +49,38 @@ interface A2AResponse {
};
}
// Internal-self-message filtering moved server-side in RFC #2945
// PR-C/D — the platform's /chat-history endpoint applies the
// IsInternalSelfMessage predicate before returning rows, so the
// client no longer needs the local backstop on the history path.
// The proper fix is still X-Workspace-ID header (source_id=workspace_id);
// the platform-side prefix filter handles the residual cases.
/** Detect activity-log rows that the workspace's own runtime fired
* against itself but were misclassified as canvas-source. The proper
* fix is the X-Workspace-ID header from `self_source_headers()` in
* workspace/platform_auth.py, which makes the platform record
* source_id = workspace_id. But three failure modes still leak a
* self-message into "My Chat":
*
* 1. Historical rows already in the DB with source_id=NULL.
* 2. Workspace containers running pre-fix heartbeat.py / main.py
* (the fix only takes effect after an image rebuild + redeploy).
* 3. Future internal triggers added without the helper.
*
 * This client-side filter recognises the heartbeat trigger by its
 * exact prefix, which the heartbeat assembles as
*
* "Delegation results are ready. Review them and take appropriate
* action:\n" + summary_lines + report_instruction
*
* in workspace/heartbeat.py. The prefix is template-fixed so a
* string match is reliable. If the heartbeat copy ever changes,
* update this constant in the same commit.
*
 * This is a backstop, not the primary defence — the X-Workspace-ID
* header is. Filtering content is fragile to copy edits, so keep
* the list narrow. */
/** Exact message prefixes that the workspace's own runtime sends to
 * itself (currently only the heartbeat's delegation-results trigger).
 * Kept deliberately narrow: prefix matching on copy is fragile, so a
 * new entry should only be added together with the template that
 * produces it. */
const INTERNAL_SELF_MESSAGE_PREFIXES = [
  "Delegation results are ready. Review them and take appropriate action",
];

/** Returns true when `text` begins with any known internal
 * self-message prefix (see INTERNAL_SELF_MESSAGE_PREFIXES). */
function isInternalSelfMessage(text: string): boolean {
  for (const prefix of INTERNAL_SELF_MESSAGE_PREFIXES) {
    if (text.startsWith(prefix)) {
      return true;
    }
  }
  return false;
}
// extractReplyText pulls the agent's text reply out of an A2A response.
// Concatenates ALL text parts (joined with "\n") rather than returning
@ -107,19 +133,8 @@ const INITIAL_HISTORY_LIMIT = 10;
const OLDER_HISTORY_BATCH = 20;
/**
* Load chat history from the platform's typed /chat-history endpoint.
*
* Server-side rendering of activity_logs rows into ChatMessage shape
* lives in workspace-server/internal/messagestore/postgres_store.go
* (RFC #2945 PR-C/D). The server already applies the canvas-source
* filter, the internal-self-message predicate, the role decision
* (status=error vs agent-error prefix system), and the v0/v1
* file-shape extraction. Canvas just renders what it receives.
*
* Wire shape (mirrors ChatMessage exactly, no per-row mapping needed):
*
* GET /workspaces/:id/chat-history?limit=N&before_ts=T
* 200 {"messages": ChatMessage[], "reached_end": boolean}
* Load chat history from the activity_logs database via the platform API.
* Uses source=canvas to only get user-initiated messages (not agent-to-agent).
*
* Pagination:
* - Pass `limit` to bound the page size (newest-first from server).
@ -127,10 +142,10 @@ const OLDER_HISTORY_BATCH = 20;
* timestamp. Combined with limit, this yields the next-older page
* when scrolling backward through history.
*
* `reachedEnd` is propagated from the server. The server computes it
* by comparing rowCount vs limit so a partial last page is correctly
* detected even when the rowbubble fan-out is non-1:1 (each row
* produces 1-2 bubbles).
* `reachedEnd` is true when the server returned fewer rows than asked
 * for — the caller uses this to disable further older-batch fetches.
* (Counts row-level returns, not chat-bubble count: each row may
* produce 1-2 bubbles.)
*/
async function loadMessagesFromDB(
workspaceId: string,
@ -138,23 +153,25 @@ async function loadMessagesFromDB(
beforeTs?: string,
): Promise<{ messages: ChatMessage[]; error: string | null; reachedEnd: boolean }> {
try {
const params = new URLSearchParams({ limit: String(limit) });
const params = new URLSearchParams({
type: "a2a_receive",
source: "canvas",
limit: String(limit),
});
if (beforeTs) params.set("before_ts", beforeTs);
const resp = await api.get<{ messages: ChatMessage[]; reached_end: boolean }>(
`/workspaces/${workspaceId}/chat-history?${params.toString()}`,
const activities = await api.get<ActivityRowForHydration[]>(
`/workspaces/${workspaceId}/activity?${params.toString()}`,
);
// Server emits oldest-first within the page (RFC #2945 PR-C-2
// post-fix: server reverses row-aware before returning so the
// wire is display-ready). Canvas appends/prepends without
// reordering — this avoids the pair-flip bug a naive flat
// reverse causes when each row produces a (user, agent) pair
// with the same timestamp.
return {
messages: resp.messages ?? [],
error: null,
reachedEnd: resp.reached_end,
};
const messages: ChatMessage[] = [];
// Activities are newest-first, reverse for chronological order.
// Per-row mapping lives in chat/historyHydration.ts so it can be
// unit-tested without spinning up the full ChatTab component
// (regression cover for the timestamp-collapse bug).
for (const a of [...activities].reverse()) {
messages.push(...activityRowToMessages(a, isInternalSelfMessage));
}
return { messages, error: null, reachedEnd: activities.length < limit };
} catch (err) {
return {
messages: [],
@ -1120,9 +1137,8 @@ function MyChatPanel({ workspaceId, data }: Props) {
{msg.attachments && msg.attachments.length > 0 && (
<div className={`flex flex-wrap gap-1 ${msg.content ? "mt-1.5" : ""}`}>
{msg.attachments.map((att, i) => (
<AttachmentPreview
<AttachmentChip
key={`${msg.id}-${i}`}
workspaceId={workspaceId}
attachment={att}
onDownload={downloadAttachment}
tone={msg.role === "user" ? "user" : "agent"}

View File

@ -21,39 +21,20 @@ interface Props {
// --- Agent Card Section ---
function AgentCardSection({ workspaceId }: { workspaceId: string }) {
// Initial card value comes from the canvas store — node.data.agentCard
// is hydrated by the platform stream when the workspace appears in the
// graph, so reading it here avoids a duplicate `GET /workspaces/${id}`
// (the parent ConfigTab.loadConfig already fetches workspace metadata,
// and refetching here adds a serialised RTT to the panel-open path —
// contributed to the ~20s detail-panel load reported in core#11).
// Local state still tracks the edited/saved value so the editor flow
// is unchanged.
const storeCard = useCanvasStore((s) => {
// Defensive against test mocks that omit `nodes` (some test files
// stub the store with a minimal shape). In production `nodes` is
// always an array — empty or not — so the optional chaining only
// matters for the test path.
const node = s.nodes?.find?.((n) => n.id === workspaceId);
return (node?.data.agentCard as
| Record<string, unknown>
| null
| undefined) ?? null;
});
const [card, setCard] = useState<Record<string, unknown> | null>(storeCard);
const [card, setCard] = useState<Record<string, unknown> | null>(null);
const [loading, setLoading] = useState(true);
const [editing, setEditing] = useState(false);
const [draft, setDraft] = useState("");
const [saving, setSaving] = useState(false);
const [error, setError] = useState<string | null>(null);
const [success, setSuccess] = useState(false);
// If the store updates while this section is mounted (another tab
// pushed an update via the platform event stream), reflect that —
// unless the user is mid-edit, in which case we don't clobber their
// unsaved draft.
useEffect(() => {
if (!editing) setCard(storeCard);
}, [storeCard, editing]);
api.get<Record<string, unknown>>(`/workspaces/${workspaceId}`)
.then((ws) => setCard((ws.agent_card as Record<string, unknown>) || null))
.catch(() => {})
.finally(() => setLoading(false));
}, [workspaceId]);
const handleSave = async () => {
setError(null);
@ -72,7 +53,9 @@ function AgentCardSection({ workspaceId }: { workspaceId: string }) {
return (
<Section title="Agent Card" defaultOpen={false}>
{editing ? (
{loading ? (
<div className="text-[10px] text-ink-soft">Loading...</div>
) : editing ? (
<div className="space-y-2">
<textarea
aria-label="Agent card JSON editor"
@ -238,72 +221,47 @@ export function ConfigTab({ workspaceId }: Props) {
setLoading(true);
setError(null);
// Load workspace metadata (runtime + model + provider) in parallel.
// These are independent GETs against three workspace-server endpoints
// and used to be awaited serially — for SaaS workspaces each call
// round-trips through an EIC SSH tunnel, so the previous serial
// pattern stacked 3-5s of tunnel-setup latency per call (core#11).
// Promise.all overlaps them; the per-call cost stays the same but
// wall time drops to max() instead of sum().
//
// Each leg has its own .catch handler that yields a sentinel value,
// matching the previous semantics:
// - /workspaces/${id}: required source-of-truth for runtime+tier;
// fall back to YAML if the GET fails (rare, network-class only).
// - /workspaces/${id}/model: non-fatal; empty model lets the form
// fall through to YAML runtime_config.model.
// - /workspaces/${id}/provider: non-fatal; old workspace-servers
// return 404, in which case provider="" and Save skips the PUT.
//
// See GH #1894 for the workspace-row-as-source-of-truth rationale
// that motivated splitting from a single config.yaml read.
const [wsRes, modelRes, providerRes] = await Promise.all([
api.get<{ runtime?: string; tier?: number }>(`/workspaces/${workspaceId}`)
.catch(() => ({} as { runtime?: string; tier?: number })),
api.get<{ model?: string }>(`/workspaces/${workspaceId}/model`)
.catch(() => ({} as { model?: string })),
api.get<{ provider?: string }>(`/workspaces/${workspaceId}/provider`)
.catch(() => null),
]);
const wsMetadataRuntime = (wsRes.runtime || "").trim();
const wsMetadataModel = (modelRes.model || "").trim();
const wsMetadataTier: number | null =
typeof wsRes.tier === "number" ? wsRes.tier : null;
if (providerRes !== null) {
const loadedProvider = (providerRes.provider || "").trim();
setProvider(loadedProvider);
setOriginalProvider(loadedProvider);
} else {
setProvider("");
setOriginalProvider("");
}
// ALWAYS load workspace metadata first (runtime + model). These are the
// source of truth regardless of whether the runtime uses our config.yaml
// template. Without this the form falls back to empty/default values on
// a hermes workspace (which doesn't use our template), creating the
// appearance that the saved runtime is unset — and worse, clicking Save
// would silently flip `runtime` from `hermes` back to the dropdown
// default `LangGraph`. See GH #1894.
let wsMetadataRuntime = "";
let wsMetadataModel = "";
let wsMetadataTier: number | null = null;
try {
const ws = await api.get<{ runtime?: string; tier?: number }>(`/workspaces/${workspaceId}`);
wsMetadataRuntime = (ws.runtime || "").trim();
if (typeof ws.tier === "number") wsMetadataTier = ws.tier;
} catch { /* fall back to config.yaml */ }
try {
const m = await api.get<{ model?: string }>(`/workspaces/${workspaceId}/model`);
wsMetadataModel = (m.model || "").trim();
} catch { /* non-fatal */ }
// originalModel is set further down once the YAML has been parsed —
// we want it to reflect what the form ACTUALLY rendered, which may
// be the YAML's runtime_config.model fallback when MODEL_PROVIDER
// is empty. Setting it here from wsMetadataModel alone would be
// wrong for hermes/pre-#240 workspaces.
// Skip the config.yaml fetch entirely for runtimes that manage
// their own config (external, hermes, etc.) — they don't have a
// platform-side template, so the GET would 404. The catch block
// below handles 404 gracefully, but issuing the request adds
// browser-console noise + a wasted RTT on every open of the
// Config tab for the affected workspaces. Reported on
// production reno-stars 2026-05-05 (workspace runtime=external,
// 404 on /files/config.yaml visible in the console even though
// the form rendered correctly).
if (RUNTIMES_WITH_OWN_CONFIG.has(wsMetadataRuntime)) {
setConfig({
...DEFAULT_CONFIG,
runtime: wsMetadataRuntime,
model: wsMetadataModel,
...(wsMetadataModel ? { runtime_config: { model: wsMetadataModel } } : {}),
...(wsMetadataTier !== null ? { tier: wsMetadataTier } : {}),
} as ConfigData);
setOriginalModel(wsMetadataModel);
setLoading(false);
return;
// Load explicit provider override (Option B PR-5). Endpoint returns
// {provider: "", source: "default"} when no override is set, so the
// empty string is the legitimate "auto-derive" signal — don't treat
// it as a load error. Non-fatal: an older workspace-server that
// predates PR-2 returns 404 here; the form falls back to "" and
// Save just won't PUT the provider field.
try {
const p = await api.get<{ provider?: string }>(`/workspaces/${workspaceId}/provider`);
const loadedProvider = (p.provider || "").trim();
setProvider(loadedProvider);
setOriginalProvider(loadedProvider);
} catch {
setProvider("");
setOriginalProvider("");
}
try {
const res = await api.get<{ content: string }>(`/workspaces/${workspaceId}/files/config.yaml`);
const parsed = parseYaml(res.content);

View File

@ -2,11 +2,9 @@
import { useState, useEffect, useRef, useMemo } from "react";
import { showToast } from "../Toaster";
import type { WorkspaceNodeData } from "@/store/canvas";
import { FilesToolbar } from "./FilesTab/FilesToolbar";
import { FileTree } from "./FilesTab/FileTree";
import { FileEditor } from "./FilesTab/FileEditor";
import { NotAvailablePanel } from "./FilesTab/NotAvailablePanel";
import { useFilesApi } from "./FilesTab/useFilesApi";
import { buildTree } from "./FilesTab/tree";
@ -16,40 +14,9 @@ export type { TreeNode } from "./FilesTab/tree";
interface Props {
// Target workspace id — threaded into every file API call below.
workspaceId: string;
/** Workspace metadata from the canvas store. Optional for back-compat
* with any caller that still mounts <FilesTab workspaceId=.../> without
* threading data through (legacy tests). When present, runtime gates
* the early-return below. Mirrors TerminalTab's prop shape (#2830). */
data?: WorkspaceNodeData;
}
/** Runtimes whose filesystem the platform doesn't own. The canvas can't
 * list/read/write files on these — the agent runs on the user's own
* hardware (mac laptop, mac mini, hermes-on-home-server) and reaches
* the platform via the heartbeat-based polling Phase 30 layer.
*
 * Keep narrow — only add a runtime here when its provisioner genuinely
* has no platform-owned filesystem. Otherwise the user loses access to
* a real surface (e.g. claude-code SaaS workspaces have files served
* by ListFiles via EIC; they belong on the rendering path, not here). */
const RUNTIMES_WITHOUT_FILES = new Set(["external"]);

/** Files tab entry point. Workspaces whose runtime keeps its
 * filesystem off-platform get an explanatory placeholder instead of a
 * misleading empty tree ("0 files" reads as a bug); everything else
 * falls through to the full platform-owned file browser. The gate
 * runs before any data-fetching hook is mounted, so no wasted GETs
 * are issued for the placeholder path. */
export function FilesTab({ workspaceId, data }: Props) {
  const runtime = data?.runtime;
  if (runtime !== undefined && RUNTIMES_WITHOUT_FILES.has(runtime)) {
    // Off-platform filesystem: point the user at the right surface.
    return <NotAvailablePanel runtime={runtime} />;
  }
  return <PlatformOwnedFilesTab workspaceId={workspaceId} />;
}
function PlatformOwnedFilesTab({ workspaceId }: { workspaceId: string }) {
export function FilesTab({ workspaceId }: Props) {
const [root, setRoot] = useState("/configs");
const [selectedFile, setSelectedFile] = useState<string | null>(null);
const [fileContent, setFileContent] = useState("");
@ -78,36 +45,11 @@ function PlatformOwnedFilesTab({ workspaceId }: { workspaceId: string }) {
readFile,
writeFile,
deleteFile,
downloadFileByPath,
downloadAllFiles,
uploadFiles,
uploadDataTransferItems,
deleteAllFiles,
} = useFilesApi(workspaceId, root);
// PR-D: track whether the user is currently dragging files OVER
// the root area (not over a specific subdir row). Used to show
// the "Drop to upload to root" highlight on the tree column.
const [rootDragHover, setRootDragHover] = useState(false);
const handleDropToTarget = (
  targetDir: string,
  items: DataTransferItemList,
) => {
  // Same write gate as the toolbar Upload button: only /configs is
  // writable from the canvas surface. Failing fast here saves an HTTP
  // round-trip the backend would reject anyway.
  const uploadsAllowed = root === "/configs";
  if (!uploadsAllowed) {
    setError(
      `Upload only allowed in /configs (current root: ${root}). Switch root or use Upload button.`,
    );
    return;
  }
  // Fire-and-forget: upload errors surface through the hook's own
  // error channel, not this handler's return value.
  void uploadDataTransferItems(items, targetDir);
};
const tree = useMemo(() => buildTree(files), [files]);
const openFile = async (path: string) => {
@ -248,46 +190,8 @@ function PlatformOwnedFilesTab({ workspaceId }: { workspaceId: string }) {
)}
<div className="flex flex-1 min-h-0">
{/* File tree column. PR-D: outer div is the drop zone for
"drop on root" when the user drags into the column area
(not over a specific subdir row), the drop targets the
current root directory. Subdirectory rows in <FileTree>
stop propagation on their own drop event so a drop on
/configs/skills doesn't ALSO fire root-area drop. */}
<div
className={`w-[180px] border-r border-line/40 overflow-y-auto shrink-0 transition-colors ${
rootDragHover ? "bg-accent/10 outline outline-1 outline-accent/40 -outline-offset-2" : ""
}`}
onDragOver={(e) => {
// Only highlight + accept the drop when uploads are
// actually allowed for the current root. Without this
// check the user gets a misleading drag affordance,
// drops, then sees the toolbar's "switch root" toast —
// bad UX.
if (root !== "/configs") return;
e.preventDefault();
e.dataTransfer.dropEffect = "copy";
}}
onDragEnter={(e) => {
if (root !== "/configs") return;
e.preventDefault();
setRootDragHover(true);
}}
onDragLeave={(e) => {
const next = e.relatedTarget as Node | null;
if (!next || !(e.currentTarget as HTMLElement).contains(next)) {
setRootDragHover(false);
}
}}
onDrop={(e) => {
if (root !== "/configs") return;
e.preventDefault();
setRootDragHover(false);
if (e.dataTransfer.items?.length) {
handleDropToTarget("", e.dataTransfer.items);
}
}}
>
{/* File tree */}
<div className="w-[180px] border-r border-line/40 overflow-y-auto shrink-0">
{/* New file input */}
{showNewFile && (
<div className="px-2 py-1 border-b border-line/40">
@ -305,27 +209,14 @@ function PlatformOwnedFilesTab({ workspaceId }: { workspaceId: string }) {
{files.length === 0 ? (
<div className="px-3 py-4 text-[10px] text-ink-soft text-center">
{rootDragHover
? "Drop to upload to root"
: root === "/configs"
? "No config files yet — drag files here to upload"
: "No config files yet"}
No config files yet
</div>
) : (
<FileTree
nodes={tree}
selectedPath={selectedFile}
onSelect={openFile}
// Delete is currently gated to /configs to match the
// toolbar's New / Upload / Clear affordances. Context
// menu and inline ✕ both honour the gate. PR-A made the
// backend EIC delete work on all roots — keeping the
// canvas gate conservative until we want to expose
// /home /workspace deletion intentionally.
onDelete={root === "/configs" ? setConfirmDelete : () => {}}
onDownload={downloadFileByPath}
canDelete={root === "/configs"}
onDropToTarget={handleDropToTarget}
expandedDirs={expandedDirs}
onToggleDir={toggleDir}
loadingDir={loadingDir}

View File

@ -1,129 +1,41 @@
"use client";
import { useState } from "react";
import { type TreeNode, getIcon } from "./tree";
import { FileTreeContextMenu, type MenuItem } from "./FileTreeContextMenu";
interface TreeCallbacks {
// Path of the currently-open file, or null when nothing is selected.
selectedPath: string | null;
// Open the file at `path` in the editor pane.
onSelect: (path: string) => void;
// Request deletion of the node at `path` (caller owns confirmation).
onDelete: (path: string) => void;
/** PR-C: right-click → Download. Files only — directories ignore. */
onDownload: (path: string) => void;
/** Whether the active root permits delete. Wire into the Delete
* context-menu item's `disabled` flag so the user gets the same
* affordance as the toolbar (which gates Clear/New on /configs). */
canDelete: boolean;
/** PR-D: drop files/folders from the OS onto this row. targetDir
* is the directory path (relative to the active root) under which
* the dropped contents should land; "" means root. */
onDropToTarget?: (targetDir: string, items: DataTransferItemList) => void;
// Set of directory paths currently expanded in the tree.
expandedDirs: Set<string>;
// Toggle the expanded/collapsed state of the directory at `path`.
onToggleDir: (path: string) => void;
// Directory currently loading its children, or null (drives spinner).
loadingDir: string | null;
}
/**
* FileTree renders the workspace tree + owns the right-click context
* menu (PR-C) and the drop-target hover state (PR-D). Lifting the
* menu state here (vs each row) means only one menu open at a time
* opening a new row's menu auto-closes the prior one. Same UX as
* VSCode / Theia.
*/
export function FileTree({
nodes,
selectedPath,
onSelect,
onDelete,
onDownload,
canDelete,
onDropToTarget,
expandedDirs,
onToggleDir,
loadingDir,
depth = 0,
}: TreeCallbacks & { nodes: TreeNode[]; depth?: number }) {
// Context-menu state: non-null while a menu is open, carrying its
// screen position and composed item list.
const [menu, setMenu] = useState<{
x: number;
y: number;
items: MenuItem[];
} | null>(null);
// PR-D: hover-target highlight state for drag-drop. Lifted next to
// the menu state so both shared-across-rows interactions live in
// one place.
const [hoverDir, setHoverDir] = useState<string | null>(null);
// Build and open the right-click menu for `node` at the cursor
// position. preventDefault suppresses the browser's native menu.
const openContextMenu = (e: React.MouseEvent, node: TreeNode) => {
e.preventDefault();
// Items composed per-row so the available actions reflect the
// node type (files get Open + Download; directories get Delete
// only since "open a directory in the editor" doesn't apply
// and "Export folder" is the toolbar's job).
const items: MenuItem[] = [];
if (!node.isDir) {
items.push({
id: "open",
label: "Open",
icon: "⤴",
onClick: () => onSelect(node.path),
});
items.push({
id: "download",
label: "Download",
icon: "↓",
onClick: () => onDownload(node.path),
});
}
items.push({
id: "delete",
label: "Delete",
icon: "✕",
destructive: true,
disabled: !canDelete,
onClick: () => onDelete(node.path),
});
setMenu({ x: e.clientX, y: e.clientY, items });
};
// Single state lifted to the top-level tree; nested <FileTree>s
// (rendered for expanded directories below) do NOT instantiate
// their own menus or drop-targets — they call back via prop
// drilling. This keeps "only one menu open" + "only one drop
// target highlighted" as structural invariants rather than
// render-order coincidences.
const childCallbacks: TreeCallbacks = {
selectedPath,
onSelect,
onDelete,
onDownload,
canDelete,
onDropToTarget,
expandedDirs,
onToggleDir,
loadingDir,
};
// NOTE(review): several props below (selectedPath..loadingDir) are
// also supplied by the {...childCallbacks} spread with identical
// values — the explicit copies look like leftover duplication from a
// refactor; confirm and keep only one side.
return (
<div>
{nodes.map((node) => (
<TreeItem
key={`${node.path}:${node.isDir ? "dir" : "file"}`}
node={node}
openContextMenu={openContextMenu}
hoverDir={hoverDir}
setHoverDir={setHoverDir}
selectedPath={selectedPath}
onSelect={onSelect}
onDelete={onDelete}
expandedDirs={expandedDirs}
onToggleDir={onToggleDir}
loadingDir={loadingDir}
depth={depth}
{...childCallbacks}
/>
))}
{menu && (
<FileTreeContextMenu
x={menu.x}
y={menu.y}
items={menu.items}
onClose={() => setMenu(null)}
/>
)}
</div>
);
}
@ -133,81 +45,22 @@ function TreeItem({
selectedPath,
onSelect,
onDelete,
onDownload,
canDelete,
onDropToTarget,
expandedDirs,
onToggleDir,
loadingDir,
depth,
openContextMenu,
hoverDir,
setHoverDir,
}: TreeCallbacks & {
node: TreeNode;
depth: number;
openContextMenu: (e: React.MouseEvent, node: TreeNode) => void;
hoverDir: string | null;
setHoverDir: (p: string | null) => void;
}) {
}: TreeCallbacks & { node: TreeNode; depth: number }) {
const isSelected = selectedPath === node.path;
const expanded = expandedDirs.has(node.path);
const isLoading = loadingDir === node.path;
const isDropTarget = node.isDir && hoverDir === node.path;
// PR-D drag handlers — only directory rows are valid drop targets
// (dropping a file ON another file is ambiguous; treat it as
// dropping in the parent dir, which the root area handles). When a
// drag enters a directory row, mark it the hover target. When the
// cursor leaves to a non-child element, clear it. drop fires the
// upload callback with the row's path.
const dragProps = node.isDir && onDropToTarget
? {
onDragOver: (e: React.DragEvent) => {
// preventDefault is REQUIRED to opt this element into the
// drop target list — without it, browsers refuse to fire
// the drop event regardless of the drop handler.
e.preventDefault();
e.dataTransfer.dropEffect = "copy";
},
onDragEnter: (e: React.DragEvent) => {
e.preventDefault();
setHoverDir(node.path);
},
onDragLeave: (e: React.DragEvent) => {
// Only clear hover when leaving to an element OUTSIDE this
// row — bare leave-events fire for every child crossed
// (the icon, the label, the ✕ button). Without the
// contains() check the highlight flickers.
const next = e.relatedTarget as Node | null;
if (!next || !(e.currentTarget as HTMLElement).contains(next)) {
setHoverDir(null);
}
},
onDrop: (e: React.DragEvent) => {
e.preventDefault();
e.stopPropagation();
setHoverDir(null);
if (e.dataTransfer.items?.length) {
onDropToTarget(node.path, e.dataTransfer.items);
}
},
}
: {};
if (node.isDir) {
return (
<div>
<div
className={`group w-full flex items-center gap-1 px-2 py-0.5 text-left transition-colors cursor-pointer ${
isDropTarget
? "bg-accent/20 outline outline-1 outline-accent/60"
: "hover:bg-surface-card/40"
}`}
className="group w-full flex items-center gap-1 px-2 py-0.5 text-left hover:bg-surface-card/40 transition-colors cursor-pointer"
style={{ paddingLeft: `${depth * 12 + 8}px` }}
onClick={() => onToggleDir(node.path)}
onContextMenu={(e) => openContextMenu(e, node)}
{...dragProps}
>
<span className="text-[9px] text-ink-soft w-3">{isLoading ? "…" : expanded ? "▼" : "▶"}</span>
<span className="text-[10px]">📁</span>
@ -229,9 +82,6 @@ function TreeItem({
selectedPath={selectedPath}
onSelect={onSelect}
onDelete={onDelete}
onDownload={onDownload}
canDelete={canDelete}
onDropToTarget={onDropToTarget}
expandedDirs={expandedDirs}
onToggleDir={onToggleDir}
loadingDir={loadingDir}
@ -249,7 +99,6 @@ function TreeItem({
}`}
style={{ paddingLeft: `${depth * 12 + 20}px` }}
onClick={() => onSelect(node.path)}
onContextMenu={(e) => openContextMenu(e, node)}
>
<span className="text-[9px]">{getIcon(node.name, false)}</span>
<span className="text-[10px] flex-1 truncate font-mono">{node.name}</span>

View File

@ -1,141 +0,0 @@
"use client";
import { useEffect, useRef } from "react";
/**
* FileTreeContextMenu VSCode-style right-click menu for a single
* file-tree row. Pops at the cursor's viewport coords; dismisses on
* outside-click, Esc, blur, or scroll.
*
* Why a custom component (no library): the menu is one of several
* "small popovers" in canvas; pulling in a dnd / popover lib for one
* surface adds 10x the bytes of this implementation. The patterns
* (outside-click + Esc + portal-free fixed position) match the
* ContextMenu used in canvas/Toolbar so the keyboard-nav muscle
* memory is uniform.
*
* Items are rendered from a `MenuItem[]` so callers can add/remove
* actions without touching this component (e.g. PR-D will add an
* "Upload to this folder" item for directory rows).
*
* Accessibility:
* - role="menu" + role="menuitem" so screen readers announce the
* surface as a menu, not a generic div.
* - First item gets autofocus so keyboard users can //Enter without
* reaching for the mouse.
* - Esc + outside-click + Tab dismisses; behaves like every other
* menu the user has touched on the canvas.
*/
export interface MenuItem {
  /** Stable identifier for testing + analytics. */
  id: string;
  /** Visible row text. */
  label: string;
  /** Optional left icon glyph; not load-bearing. */
  icon?: string;
  /** Destructive (rendered in red) — for Delete-class actions. */
  destructive?: boolean;
  /** Item-specific click handler. The menu auto-closes after onClick
   * fires so handlers don't have to call onClose themselves. */
  onClick: () => void;
  /** Disabled items render but don't fire onClick. In use today for
   * the Delete gate: FileTree sets `disabled: !canDelete` so the user
   * can see the action exists while it's gated (e.g. outside the
   * /configs root). */
  disabled?: boolean;
}
interface Props {
  /** Viewport-coordinate position of the cursor that opened the menu. */
  x: number;
  y: number;
  /** Rows to render, top to bottom; see MenuItem for per-row options. */
  items: MenuItem[];
  /** Fired on Esc / outside-click / scroll / item click — the owner
   * clears its menu state in response. */
  onClose: () => void;
}
/**
 * FileTreeContextMenu — VSCode-style right-click menu for a single
 * file-tree row. Pops at the cursor's viewport coords; dismisses on
 * outside-click, Esc, or scroll. ArrowDown/ArrowUp rove focus across
 * enabled items (wrapping at both ends); Tab leaves the menu.
 */
export function FileTreeContextMenu({ x, y, items, onClose }: Props) {
  const ref = useRef<HTMLDivElement>(null);
  // First item gets initial focus for keyboard ↓/↑/Enter nav.
  const firstItemRef = useRef<HTMLButtonElement>(null);
  useEffect(() => {
    firstItemRef.current?.focus();
  }, []);
  // Outside-click + Esc dismiss. Per memory
  // (feedback_abort_controller_for_rerendered_listeners), use an
  // AbortController so re-mounts (caller toggles the menu) don't leak
  // listeners.
  useEffect(() => {
    const ctrl = new AbortController();
    const onPointerDown = (e: MouseEvent) => {
      if (ref.current && !ref.current.contains(e.target as Node)) onClose();
    };
    const onKeyDown = (e: KeyboardEvent) => {
      if (e.key === "Escape") {
        e.preventDefault();
        onClose();
      } else if (e.key === "ArrowDown" || e.key === "ArrowUp") {
        // Roving focus across .menuitem buttons. Doing this with
        // tabindex management because Tab / Shift+Tab leave the menu
        // (which is the right thing — the user is escaping the menu).
        e.preventDefault();
        const buttons = ref.current?.querySelectorAll<HTMLButtonElement>(
          "[role='menuitem']:not([disabled])",
        );
        if (!buttons || buttons.length === 0) return;
        const arr = Array.from(buttons);
        const cur = arr.indexOf(document.activeElement as HTMLButtonElement);
        // When focus is NOT inside the menu, indexOf yields -1:
        // ArrowDown must land on the FIRST item and ArrowUp on the
        // LAST (ARIA menu pattern). Plain modular arithmetic on -1
        // sent ArrowUp to the second-to-last item — handle the -1
        // case explicitly.
        const next =
          e.key === "ArrowDown"
            ? (cur + 1) % arr.length
            : cur <= 0
              ? arr.length - 1
              : cur - 1;
        arr[next].focus();
      }
    };
    // `mousedown` (not `click`) so the menu dismisses BEFORE the
    // tree-row's click handler would fire — otherwise clicking
    // outside also selects a different row, which is not what the
    // user expected when "outside-click closes the menu".
    document.addEventListener("mousedown", onPointerDown, { signal: ctrl.signal });
    document.addEventListener("keydown", onKeyDown, { signal: ctrl.signal });
    // Scroll inside any ancestor also dismisses — the fixed-position
    // menu would otherwise stay anchored to viewport coords while the
    // row it points at scrolled away. Use capture so we catch scroll
    // on inner panels (FileTree's overflow-y-auto wrapper).
    document.addEventListener("scroll", onClose, { signal: ctrl.signal, capture: true });
    return () => ctrl.abort();
  }, [onClose]);
  return (
    <div
      ref={ref}
      role="menu"
      aria-label="File actions"
      className="fixed z-[1000] min-w-[140px] py-1 bg-surface-elevated border border-line/60 rounded-md shadow-xl shadow-black/30 text-[11px]"
      style={{ left: x, top: y }}
    >
      {items.map((item, i) => (
        <button
          key={item.id}
          ref={i === 0 ? firstItemRef : undefined}
          type="button"
          role="menuitem"
          disabled={item.disabled}
          onClick={() => {
            if (item.disabled) return;
            item.onClick();
            onClose();
          }}
          className={
            item.destructive
              ? "w-full text-left px-3 py-1 text-bad hover:bg-red-900/30 focus:bg-red-900/30 focus:outline-none disabled:opacity-40 disabled:pointer-events-none transition-colors"
              : "w-full text-left px-3 py-1 text-ink-mid hover:bg-surface-card hover:text-ink focus:bg-surface-card focus:text-ink focus:outline-none disabled:opacity-40 disabled:pointer-events-none transition-colors"
          }
        >
          {item.icon && <span className="inline-block w-4 mr-1.5 text-ink-soft">{item.icon}</span>}
          {item.label}
        </button>
      ))}
    </div>
  );
}

View File

@ -1,58 +0,0 @@
"use client";
/**
 * NotAvailablePanel — full-tab placeholder for runtimes whose
 * filesystem the platform doesn't own (today: runtime === "external").
 *
 * Before this existed, FilesTab issued GET /workspaces/<id>/files for
 * these workspaces, got back [] (external workspaces have no rows in
 * workspace_files by definition), and rendered "0 files / No config
 * files yet" — indistinguishable from the SaaS empty-listing bug fixed
 * in PR-A. An explicit placeholder makes the absence intentional and
 * points the user at the supported surface (Chat).
 *
 * Mirrors the affordance TerminalTab adopted for runtimes without a
 * TTY in PR #2830 — uniform "feature-not-applicable" UX across tabs.
 */
export function NotAvailablePanel({ runtime }: { runtime: string }) {
  // Folder-with-slash icon, hand-rolled inline so the canvas bundle
  // doesn't depend on an icon set at build time (same pattern as
  // TerminalTab's NotAvailablePanel).
  const icon = (
    <svg
      width="72"
      height="72"
      viewBox="0 0 72 72"
      fill="none"
      aria-hidden="true"
      className="text-ink-soft mb-4"
    >
      {/* Folder body */}
      <path
        d="M10 22 L10 56 a4 4 0 0 0 4 4 L58 60 a4 4 0 0 0 4 -4 L62 26 a4 4 0 0 0 -4 -4 L34 22 L28 16 L14 16 a4 4 0 0 0 -4 4 Z"
        stroke="currentColor"
        strokeWidth="2.5"
        strokeLinejoin="round"
        fill="none"
        opacity="0.6"
      />
      {/* Diagonal cancel slash */}
      <path
        d="M14 14 L58 58"
        stroke="currentColor"
        strokeWidth="3"
        strokeLinecap="round"
      />
    </svg>
  );
  return (
    <div className="flex flex-col items-center justify-center h-full p-8 text-center bg-surface-sunken/30">
      {icon}
      <h3 className="text-sm font-medium text-ink mb-1.5">Files not available</h3>
      <p className="text-[11px] text-ink-soft max-w-xs leading-relaxed">
        This workspace runs the{" "}
        <span className="font-mono text-ink-mid">{runtime}</span> runtime,
        whose filesystem isn't owned by the platform. Use the Chat tab to
        interact with the agent directly.
      </p>
    </div>
  );
}

View File

@ -1,136 +0,0 @@
// @vitest-environment jsdom
//
// Pins the right-click context menu added in PR-C of issue #2999.
// VSCode-style affordance: Open / Download / Delete on file rows,
// Delete on directory rows. Delete is gated by `canDelete` (parent
// only enables on /configs root, matching the toolbar's gate).
//
// Pinned branches:
// 1. Right-click on a file row opens the menu at the click coords
// with Open + Download + Delete items.
// 2. Right-click on a directory row opens the menu with Delete
// only (no Open/Download — directories don't have one-click
// semantics in this surface).
// 3. Clicking Download fires the onDownload callback with the
// row's path.
// 4. Clicking Delete fires onDelete with the row's path (when
// canDelete=true).
// 5. Delete is disabled in the rendered menu when canDelete=false
// and clicking it does NOT fire onDelete (gate is real).
// 6. Esc dismisses the menu.
// 7. Click outside the menu dismisses it.
import { describe, it, expect, vi, afterEach } from "vitest";
import { render, screen, cleanup, fireEvent, act } from "@testing-library/react";
import React from "react";
import { FileTree } from "../FileTree";
import type { TreeNode } from "../tree";
// Unmount between tests so a stale menu from one case can't satisfy
// (or break) the next case's queries.
afterEach(cleanup);
// Fixtures: one file row and one directory row.
const file: TreeNode = { name: "config.yaml", path: "config.yaml", isDir: false, children: [], size: 0 };
const dir: TreeNode = { name: "skills", path: "skills", isDir: true, children: [], size: 0 };
/**
 * Mount a FileTree with spy callbacks; `overrides` merge over the
 * defaults. Returns the testing-library render result plus the final
 * props so tests can assert on the spies.
 */
function renderTree(overrides: Partial<React.ComponentProps<typeof FileTree>> = {}) {
  const props = {
    nodes: [file, dir],
    selectedPath: null,
    onSelect: vi.fn(),
    onDelete: vi.fn(),
    onDownload: vi.fn(),
    canDelete: true,
    expandedDirs: new Set<string>(),
    onToggleDir: vi.fn(),
    loadingDir: null,
    ...overrides,
  };
  return { ...render(<FileTree {...props} />), props };
}
describe("FileTree right-click context menu", () => {
  // Pinned branch 1: file rows get the full action set.
  it("right-click on a file row opens menu with Open/Download/Delete", () => {
    renderTree();
    fireEvent.contextMenu(screen.getByText("config.yaml"), {
      clientX: 50,
      clientY: 100,
    });
    expect(screen.getByRole("menu")).not.toBeNull();
    expect(screen.getByRole("menuitem", { name: /Open/i })).not.toBeNull();
    expect(screen.getByRole("menuitem", { name: /Download/i })).not.toBeNull();
    expect(screen.getByRole("menuitem", { name: /Delete/i })).not.toBeNull();
  });
  // Pinned branch 2: directory rows only get Delete.
  it("right-click on a directory row opens menu with Delete only (no Open/Download)", () => {
    renderTree();
    fireEvent.contextMenu(screen.getByText("skills"), { clientX: 60, clientY: 120 });
    expect(screen.getByRole("menu")).not.toBeNull();
    expect(screen.queryByRole("menuitem", { name: /Open/i })).toBeNull();
    expect(screen.queryByRole("menuitem", { name: /Download/i })).toBeNull();
    expect(screen.getByRole("menuitem", { name: /Delete/i })).not.toBeNull();
  });
  it("clicking Download fires onDownload with the row's path", () => {
    const { props } = renderTree();
    fireEvent.contextMenu(screen.getByText("config.yaml"), { clientX: 0, clientY: 0 });
    fireEvent.click(screen.getByRole("menuitem", { name: /Download/i }));
    expect(props.onDownload).toHaveBeenCalledWith("config.yaml");
    // Menu auto-closes after click.
    expect(screen.queryByRole("menu")).toBeNull();
  });
  it("clicking Delete fires onDelete with the row's path when canDelete=true", () => {
    const { props } = renderTree({ canDelete: true });
    fireEvent.contextMenu(screen.getByText("config.yaml"), { clientX: 0, clientY: 0 });
    fireEvent.click(screen.getByRole("menuitem", { name: /Delete/i }));
    expect(props.onDelete).toHaveBeenCalledWith("config.yaml");
  });
  // The canDelete gate must be real — disabled means no callback.
  it("Delete is disabled when canDelete=false; clicking does not fire onDelete", () => {
    const { props } = renderTree({ canDelete: false });
    fireEvent.contextMenu(screen.getByText("config.yaml"), { clientX: 0, clientY: 0 });
    const del = screen.getByRole("menuitem", { name: /Delete/i }) as HTMLButtonElement;
    expect(del.disabled).toBe(true);
    fireEvent.click(del);
    expect(props.onDelete).not.toHaveBeenCalled();
    // Menu stays open on disabled click — same as VSCode (the user
    // can read the disabled-state hint without losing the menu).
    expect(screen.getByRole("menu")).not.toBeNull();
  });
  // Dismissal paths (document-level keydown/mousedown listeners).
  it("Esc dismisses the menu", () => {
    renderTree();
    fireEvent.contextMenu(screen.getByText("config.yaml"), { clientX: 0, clientY: 0 });
    expect(screen.getByRole("menu")).not.toBeNull();
    act(() => {
      fireEvent.keyDown(document, { key: "Escape" });
    });
    expect(screen.queryByRole("menu")).toBeNull();
  });
  it("click outside the menu dismisses it", () => {
    renderTree();
    fireEvent.contextMenu(screen.getByText("config.yaml"), { clientX: 0, clientY: 0 });
    expect(screen.getByRole("menu")).not.toBeNull();
    // mousedown on document.body — outside the menu.
    act(() => {
      fireEvent.mouseDown(document.body);
    });
    expect(screen.queryByRole("menu")).toBeNull();
  });
  it("opening a second context menu replaces the first (only one open at a time)", () => {
    renderTree();
    fireEvent.contextMenu(screen.getByText("config.yaml"), { clientX: 10, clientY: 10 });
    fireEvent.contextMenu(screen.getByText("skills"), { clientX: 20, clientY: 20 });
    // Only one menu in the DOM. The second open replaced the first
    // because the menu state is lifted to the FileTree, not per-row.
    const menus = screen.getAllByRole("menu");
    expect(menus.length).toBe(1);
  });
});

View File

@ -1,212 +0,0 @@
// @vitest-environment jsdom
//
// Pins the drag-drop upload added in PR-D of issue #2999.
// Two layers of coverage:
//
// 1. The pure walker (collectFileEntries / walkEntry) — pins the
// recursion shape against silent folder truncation. Browsers
// return up to ~100 entries per readEntries() call; if the loop
// stops early, large folder uploads silently drop files. We
// simulate a multi-batch reader to discriminate.
//
// 2. FileTree directory-row drop handlers — pins that dragover/drop
// events fire onDropToTarget with the directory's path + the
// drop's DataTransferItemList.
import { describe, it, expect, vi, afterEach } from "vitest";
import { render, screen, cleanup, fireEvent } from "@testing-library/react";
import React from "react";
import { FileTree } from "../FileTree";
import type { TreeNode } from "../tree";
import { __testables } from "../useFilesApi";
// Unmount everything a test rendered before the next one runs so a
// stale tree can't satisfy (or break) later queries.
afterEach(cleanup);
// ---- Walker tests ----
/**
 * Build a fake FileSystemEntry for a single file, shaped like what
 * webkitGetAsEntry returns from a real OS drag: file entries expose a
 * callback-style file() that yields the File object.
 */
function fakeFileEntry(name: string, content = "x"): {
  isFile: true;
  isDirectory: false;
  name: string;
  fullPath: string;
  file: (cb: (f: File) => void) => void;
} {
  return {
    isFile: true,
    isDirectory: false,
    name,
    fullPath: `/${name}`,
    // Manufacture the File lazily, on the callback — like the real
    // entry API, nothing is materialised until file() is invoked.
    file: (cb) => cb(new File([content], name, { type: "text/plain" })),
  };
}
/**
 * Build a fake directory entry. `childBatches` scripts successive
 * readEntries() calls: one batch per call, then an empty array as the
 * end-of-stream signal — mirroring the browser's batched streaming so
 * walker tests can prove the loop runs until empty. A walker that
 * calls readEntries only once sees just batch 0.
 */
function fakeDirEntry(
  name: string,
  childBatches: ReturnType<typeof fakeFileEntry>[][],
): {
  isFile: false;
  isDirectory: true;
  name: string;
  fullPath: string;
  createReader: () => { readEntries: (cb: (entries: unknown[]) => void) => void };
} {
  // Cursor shared across createReader() calls, like the original.
  let nextBatch = 0;
  return {
    isFile: false,
    isDirectory: true,
    name,
    fullPath: `/${name}`,
    createReader: () => ({
      readEntries: (cb) => {
        // Emit scripted batches in order; once exhausted, answer [].
        cb(nextBatch < childBatches.length ? childBatches[nextBatch++] : []);
      },
    }),
  };
}
describe("walkEntry — folder-recursion drop walker", () => {
  // Base case: a loose file passes through with its name as relPath.
  it("collects a single dropped file", async () => {
    const out: { file: File; relativePath: string }[] = [];
    await __testables.walkEntry(fakeFileEntry("README.md") as never, "", out);
    expect(out.length).toBe(1);
    expect(out[0].relativePath).toBe("README.md");
    expect(out[0].file.name).toBe("README.md");
  });
  it("walks a folder and preserves the relative path under the folder name", async () => {
    const out: { file: File; relativePath: string }[] = [];
    const folder = fakeDirEntry("skills", [
      [fakeFileEntry("a.md"), fakeFileEntry("b.md")],
    ]);
    await __testables.walkEntry(folder as never, "", out);
    expect(out.map((e) => e.relativePath).sort()).toEqual([
      "skills/a.md",
      "skills/b.md",
    ]);
  });
  it("loops readEntries until empty so a multi-batch folder isn't truncated", async () => {
    // Browsers limit each readEntries() call to ~100 entries. Our
    // walker MUST call it again until an empty batch is returned.
    // Fake reader emits two batches of 2 + an implicit empty → 4
    // total. A buggy walker that only takes the first batch would
    // see only 2.
    const out: { file: File; relativePath: string }[] = [];
    const folder = fakeDirEntry("big", [
      [fakeFileEntry("1.txt"), fakeFileEntry("2.txt")],
      [fakeFileEntry("3.txt"), fakeFileEntry("4.txt")],
    ]);
    await __testables.walkEntry(folder as never, "", out);
    expect(out.length).toBe(4);
  });
  // Recursion: dir-in-dir accumulates "outer/inner/file" paths.
  it("walks nested directories and accumulates the full path", async () => {
    const out: { file: File; relativePath: string }[] = [];
    const inner = fakeDirEntry("web-search", [[fakeFileEntry("SKILL.md")]]);
    // Outer dir whose first batch contains a sub-dir entry.
    const outer = {
      isFile: false,
      isDirectory: true,
      name: "skills",
      fullPath: "/skills",
      createReader: () => {
        let i = 0;
        return {
          readEntries: (cb: (entries: unknown[]) => void) => {
            if (i++ === 0) cb([inner]);
            else cb([]);
          },
        };
      },
    };
    await __testables.walkEntry(outer as never, "", out);
    expect(out.length).toBe(1);
    expect(out[0].relativePath).toBe("skills/web-search/SKILL.md");
  });
});
// ---- FileTree drag-drop wiring ----
// Fixtures for the drag-drop wiring tests: one file row, one dir row.
const file: TreeNode = { name: "config.yaml", path: "config.yaml", isDir: false, children: [], size: 0 };
const skillsDir: TreeNode = { name: "skills", path: "skills", isDir: true, children: [], size: 0 };
/**
 * Mount a FileTree with spy callbacks. The defaults must include
 * PR-C's onDownload + canDelete now that they're required on the
 * TreeCallbacks shape (the rebase surfaced this — the merged tree
 * depends on both feature sets) in addition to PR-D's onDropToTarget.
 */
function renderTree(overrides: Partial<React.ComponentProps<typeof FileTree>> = {}) {
  const props: React.ComponentProps<typeof FileTree> = {
    nodes: [file, skillsDir],
    selectedPath: null,
    onSelect: vi.fn(),
    onDelete: vi.fn(),
    onDownload: vi.fn(),
    canDelete: true,
    onDropToTarget: vi.fn(),
    expandedDirs: new Set<string>(),
    onToggleDir: vi.fn(),
    loadingDir: null,
    ...overrides,
  };
  return { ...render(<FileTree {...props} />), props };
}
describe("FileTree directory-row drag-drop", () => {
  it("dragover on a directory row preventDefault's so the drop will fire", () => {
    renderTree();
    const row = screen.getByText("skills");
    // Hand-built Event because fireEvent.dragOver doesn't expose
    // defaultPrevented for inspection the way a raw dispatch does.
    const dragOver = new Event("dragover", { bubbles: true, cancelable: true });
    Object.defineProperty(dragOver, "dataTransfer", {
      value: { dropEffect: "" },
    });
    row.parentElement!.dispatchEvent(dragOver);
    // preventDefault registers via the React handler — without it
    // the drop event would never fire, so this assertion is the
    // load-bearing one.
    expect(dragOver.defaultPrevented).toBe(true);
  });
  it("drop on a directory row fires onDropToTarget with that path + the items list", () => {
    const { props } = renderTree();
    const row = screen.getByText("skills").parentElement!;
    const fakeItems = { length: 1, 0: { kind: "file" } } as unknown as DataTransferItemList;
    fireEvent.drop(row, { dataTransfer: { items: fakeItems } });
    expect(props.onDropToTarget).toHaveBeenCalledWith("skills", fakeItems);
  });
  it("drop on a FILE row does NOT fire onDropToTarget (only directories are valid targets)", () => {
    const { props } = renderTree();
    const fileRow = screen.getByText("config.yaml").parentElement!;
    const fakeItems = { length: 1, 0: { kind: "file" } } as unknown as DataTransferItemList;
    fireEvent.drop(fileRow, { dataTransfer: { items: fakeItems } });
    expect(props.onDropToTarget).not.toHaveBeenCalled();
  });
  // Empty payloads (e.g. dragged text) must not trigger an upload.
  it("drop with no DataTransferItems does NOT fire onDropToTarget", () => {
    const { props } = renderTree();
    const row = screen.getByText("skills").parentElement!;
    fireEvent.drop(row, { dataTransfer: { items: { length: 0 } } });
    expect(props.onDropToTarget).not.toHaveBeenCalled();
  });
  it("dragenter sets the drop-target highlight on the directory row", () => {
    renderTree();
    const row = screen.getByText("skills").parentElement!;
    fireEvent.dragEnter(row, { dataTransfer: {} });
    // Highlight class is the discriminator — without dragenter
    // wiring the row stays in its hover-only style.
    expect(row.className).toMatch(/bg-accent|outline-accent/);
  });
});

View File

@ -90,43 +90,6 @@ export function useFilesApi(workspaceId: string, root: string) {
[workspaceId]
);
/**
* Fetch a file's content from the server and trigger a browser
* download. Used by the right-click "Download" context-menu item
* (PR-C of issue #2999) distinct from `handleDownloadFile` in
* FilesTab which downloads the CURRENTLY-OPEN-IN-EDITOR file from
* the in-memory `editContent` buffer (so unsaved edits round-trip
* to disk). This helper downloads the on-server content, suitable
* for arbitrary tree rows the user hasn't opened.
*/
const downloadFileByPath = useCallback(
async (path: string) => {
try {
const res = await api.get<{ content: string }>(
`/workspaces/${workspaceId}/files/${path}?root=${encodeURIComponent(root)}`,
);
// text/plain is correct for the canvas's text-only file
// surface (config.yaml, prompts, skill markdown). Binary
// files would need an Accept-arraybuffer path; the API
// returns string today so this matches the wire shape.
const blob = new Blob([res.content], { type: "text/plain" });
const url = URL.createObjectURL(blob);
const a = document.createElement("a");
a.href = url;
a.download = path.split("/").pop() || "file";
a.click();
URL.revokeObjectURL(url);
showToast(`Downloaded ${a.download}`, "success");
} catch (e) {
showToast(
`Download failed: ${e instanceof Error ? e.message : "unknown error"}`,
"error",
);
}
},
[workspaceId, root],
);
const downloadAllFiles = useCallback(async () => {
const fileEntries = files.filter((f) => !f.dir);
const results = await Promise.allSettled(
@ -151,20 +114,16 @@ export function useFilesApi(workspaceId: string, root: string) {
}, [files, workspaceId]);
const uploadFiles = useCallback(
async (fileList: FileList, targetDir = "") => {
async (fileList: FileList) => {
let uploaded = 0;
for (const file of Array.from(fileList)) {
const path = file.webkitRelativePath || file.name;
const parts = path.split("/");
// For folder picker: webkitRelativePath is "<picked-folder>/a/b.txt"
// — strip the picked-folder prefix so files land flat under the
// workspace's target dir, not under a redundant outer folder.
const relPath = parts.length > 1 ? parts.slice(1).join("/") : parts[0];
const finalPath = targetDir ? `${targetDir}/${relPath}` : relPath;
if (file.size > 1_000_000) continue;
try {
const content = await file.text();
await api.put(`/workspaces/${workspaceId}/files/${finalPath}`, { content });
await api.put(`/workspaces/${workspaceId}/files/${relPath}`, { content });
uploaded++;
} catch {
/* skip binary */
@ -172,7 +131,7 @@ export function useFilesApi(workspaceId: string, root: string) {
}
if (uploaded > 0) {
useCanvasStore.getState().updateNodeData(workspaceId, { needsRestart: true });
showToast(`Uploaded ${uploaded} files${targetDir ? ` to ${targetDir}` : ""}`, "success");
showToast(`Uploaded ${uploaded} files`, "success");
loadFiles();
}
return uploaded;
@ -180,58 +139,6 @@ export function useFilesApi(workspaceId: string, root: string) {
[workspaceId, loadFiles]
);
/**
* Upload files dragged from the OS via the HTML5 DataTransferItemList
* API. Unlike the folder-picker path (uploadFiles), this preserves
* the dropped folder structure under `targetDir` drag a "skills/"
* folder onto the /configs/skills row and you get
* /configs/skills/skills/* (the OUTER folder name is preserved
* because the user explicitly chose to drop a NAMED folder, unlike
* the folder-picker which always wraps the picked dir).
*
* Walks FileSystemDirectoryEntry recursively via webkitGetAsEntry.
* VSCode/JupyterLab use the same primitive there's no other
* portable browser API for "drag a folder from OS". `webkit*`
* naming is a Chromium relic; Firefox + Safari implement the same
* surface.
*
* Returns the number of files uploaded so the caller can show a
* tally / fail toast.
*/
const uploadDataTransferItems = useCallback(
async (items: DataTransferItemList, targetDir = "") => {
const fileEntries = collectFileEntries(items);
let uploaded = 0;
for (const { file, relativePath } of await fileEntries) {
if (file.size > 1_000_000) continue;
const finalPath = targetDir
? `${targetDir}/${relativePath}`
: relativePath;
try {
const content = await file.text();
await api.put(`/workspaces/${workspaceId}/files/${finalPath}`, {
content,
});
uploaded++;
} catch {
/* skip binary */
}
}
if (uploaded > 0) {
useCanvasStore
.getState()
.updateNodeData(workspaceId, { needsRestart: true });
showToast(
`Uploaded ${uploaded} file${uploaded === 1 ? "" : "s"}${targetDir ? ` to ${targetDir}` : ""}`,
"success",
);
loadFiles();
}
return uploaded;
},
[workspaceId, loadFiles],
);
const deleteAllFiles = useCallback(async () => {
let deleted = 0;
for (const f of files) {
@ -258,98 +165,8 @@ export function useFilesApi(workspaceId: string, root: string) {
readFile,
writeFile,
deleteFile,
downloadFileByPath,
downloadAllFiles,
uploadFiles,
uploadDataTransferItems,
deleteAllFiles,
};
}
// ----- DataTransfer entry walker (PR-D) ---------------------------------
/**
 * Minimal subset of the FileSystem Entry API surface we use. The DOM
 * lib types this as FileSystemEntry / FileSystemFileEntry /
 * FileSystemDirectoryEntry but the relevant methods are callback-
 * based. Keep the shape narrow + explicit so the recursion below
 * type-checks without pulling in the full DOM lib types.
 */
interface FSEntry {
  // Discriminators — the walker branches on these two flags.
  isFile: boolean;
  isDirectory: boolean;
  name: string;
  fullPath: string;
  // Present on file entries: delivers the File via success callback.
  file?(success: (f: File) => void, fail?: (e: unknown) => void): void;
  // Present on directory entries: readEntries streams children in
  // batches; an empty batch signals end-of-stream.
  createReader?(): { readEntries(success: (entries: FSEntry[]) => void): void };
}
/** One file discovered by the drop walker, ready for upload. */
interface CollectedEntry {
  file: File;
  /** Path relative to the dropped root (e.g. "skills/web-search/SKILL.md"
   * for a dropped "skills/" folder containing web-search/SKILL.md). */
  relativePath: string;
}
/**
 * Walk a DataTransferItemList, returning every file entry as a flat
 * array keyed by the path relative to the originally-dropped item.
 * Folders dropped from the OS expand recursively; loose files pass
 * through with their name as the relative path.
 *
 * Items where webkitGetAsEntry() returns null are skipped — that's
 * how the browser signals a non-file payload (e.g. a dragged URL or
 * text snippet).
 */
async function collectFileEntries(
  items: DataTransferItemList,
): Promise<CollectedEntry[]> {
  const collected: CollectedEntry[] = [];
  for (const item of Array.from(items)) {
    if (item.kind !== "file") continue;
    // webkitGetAsEntry is the standardised name; older Firefox used
    // getAsEntry. Chromium + Firefox + Safari all ship the webkit-
    // prefixed variant today — there's no unprefixed alternative.
    const entry = (item as DataTransferItem & {
      webkitGetAsEntry?: () => FSEntry | null;
    }).webkitGetAsEntry?.();
    if (entry) {
      await walkEntry(entry, "", collected);
    }
  }
  return collected;
}
/**
 * Recursively flatten one dropped entry into `out`.
 *
 * File entries resolve their File via the callback-style file() API
 * and are pushed with `prefix/name` as the relative path. Directory
 * entries stream children through createReader(); readEntries yields
 * up to ~100 entries per call on Chromium, so we keep calling until
 * an empty batch signals end-of-stream — stopping after one call
 * would silently truncate large folders.
 */
async function walkEntry(
  entry: FSEntry,
  prefix: string,
  out: CollectedEntry[],
): Promise<void> {
  const relPath = prefix ? `${prefix}/${entry.name}` : entry.name;
  if (entry.isFile && entry.file) {
    // Promisify the callback API; a fail callback rejects and
    // propagates to the caller.
    const resolved = await new Promise<File>((resolve, reject) => {
      entry.file!(resolve, reject);
    });
    out.push({ file: resolved, relativePath: relPath });
  } else if (entry.isDirectory && entry.createReader) {
    const reader = entry.createReader();
    for (;;) {
      const batch = await new Promise<FSEntry[]>((resolve) =>
        reader.readEntries(resolve),
      );
      if (batch.length === 0) break;
      for (const child of batch) {
        await walkEntry(child, relPath, out);
      }
    }
  }
}
// Exported for direct testing — the recursion + readEntries batching
// is the part most likely to silently truncate a real folder upload.
// (Consumed by the drag-drop test suite via `__testables.walkEntry`.)
export const __testables = { collectFileEntries, walkEntry };

View File

@ -297,49 +297,10 @@ export function SkillsTab({ workspaceId, data }: Props) {
}
};
// Compact-empty pattern: when the workspace has zero plugins
// installed AND the registry isn't open, collapse the whole
// "Plugins" section into a single inline pill rather than rendering
// the full panel chrome. Reported on production 2026-05-05 (#2971):
// the empty state's panel-with-zero-list-rows layout gives the user
// a lot of vertical real estate for content that's just "0
// installed + Install button". The compact form keeps that
// affordance without the chrome.
//
// Expanded/full layout still fires when installed.length > 0 OR
// when the user opens the registry (clicked "+ Install Plugin").
// Once a plugin is installed the section auto-expands to surface
// the list.
const compactEmpty = installed.length === 0 && !showRegistry && installedLoaded;
if (compactEmpty) {
return (
<div className="p-4 space-y-4">
<div
className="flex items-center justify-between gap-2 rounded-full border border-line/60 bg-surface-sunken/70 px-3 py-1.5"
aria-label="Plugins (none installed)"
>
<div className="flex items-center gap-2">
<span className="text-[10px] uppercase tracking-[0.2em] text-ink-soft">Plugins</span>
<span className="text-[11px] text-ink-mid">0 installed</span>
</div>
<button
onClick={() => setShowRegistry(true)}
className="rounded-full border border-violet-700/50 bg-violet-950/30 px-3 py-0.5 text-[10px] text-violet-200 hover:bg-violet-900/40 transition-colors"
aria-expanded="false"
aria-controls="plugins-section"
>
+ Install Plugin
</button>
</div>
</div>
);
}
return (
<div className="p-4 space-y-4">
{/* Plugins section */}
<div id="plugins-section" className="rounded-xl border border-line bg-surface-sunken/70 p-3">
<div className="rounded-xl border border-line bg-surface-sunken/70 p-3">
<div className="flex items-center justify-between gap-3">
<div>
<div className="text-[10px] uppercase tracking-[0.22em] text-ink-soft">Plugins</div>
@ -350,8 +311,6 @@ export function SkillsTab({ workspaceId, data }: Props) {
<button
onClick={() => setShowRegistry(!showRegistry)}
className="rounded-full border border-violet-700/50 bg-violet-950/30 px-3 py-1 text-[10px] text-violet-200 hover:bg-violet-900/40 transition-colors"
aria-expanded={showRegistry}
aria-controls="plugins-registry"
>
{showRegistry ? "Hide Registry" : "+ Install Plugin"}
</button>

View File

@ -1,11 +1,13 @@
// @vitest-environment jsdom
//
// Pins the lazy-loading chat-history pagination.
// Pins the lazy-loading chat-history pagination added 2026-05-05.
//
// PR-C-2 (RFC #2945): canvas was migrated from /activity?type=a2a_receive
// to /chat-history. Server now returns typed ChatMessage[] in
// display-ready oldest-first order. These tests guard the canvas-side
// pagination invariants against the new endpoint surface.
// Pre-fix: ChatTab fetched the newest 50 messages on every mount and
// scrolled to bottom, paying full DOM cost up-front even when the user
// only wanted to read the last few bubbles. Post-fix: initial load is
// bounded to 10 newest, and an IntersectionObserver on a top sentinel
// triggers loadOlder() (batch of 20 with `before_ts` cursor) when the
// user scrolls up.
//
// Pinned branches:
// 1. Initial fetch carries `limit=10` and NO before_ts (newest-first
@ -18,10 +20,11 @@
// asserting the rendered bubble count matches the full page).
// 4. The retry button after a failed initial load uses the same
// INITIAL_HISTORY_LIMIT (10), not the legacy 50.
// 5. before_ts cursor is the OLDEST timestamp from the current page,
// passed verbatim to walk backward.
// 6. Inflight guard rejects duplicate IO triggers while a loadOlder
// fetch is in flight.
//
// IntersectionObserver / scroll-anchor restoration is exercised by the
// E2E synth-canary suite — pinning it in jsdom would require mocking
// the observer and faking layout, which is brittler than trusting a
// live-DOM canary against the staging tenant.
import { describe, it, expect, vi, afterEach, beforeEach } from "vitest";
import { render, screen, cleanup, waitFor, fireEvent } from "@testing-library/react";
@ -30,31 +33,24 @@ import React from "react";
afterEach(cleanup);
// Both ChatTab sub-panels (MyChat + AgentComms) mount simultaneously so
// keyboard tab order and aria-controls land on a real DOM. MyChat's
// loadMessagesFromDB hits /chat-history; AgentComms's polling hits a
// different URL. Route the mock by URL so each gets a sensible default
// and only MyChat's calls land in the assertion array.
const myChatHistoryCalls: string[] = [];
let myChatNextResponse:
| { ok: true; messages: unknown[]; reachedEnd?: boolean }
| { ok: false; err: Error } = { ok: true, messages: [] };
// keyboard tab order and aria-controls land on a real DOM. Both fire
// /activity GETs on mount: MyChat's hits `type=a2a_receive&source=canvas`,
// AgentComms's hits a different filter. Route the mock by URL so each
// gets a sensible default and only MyChat's call is what the assertions
// scrutinise.
const myChatActivityCalls: string[] = [];
let myChatNextResponse: { ok: true; rows: unknown[] } | { ok: false; err: Error } = {
ok: true,
rows: [],
};
const apiGet = vi.fn((path: string): Promise<unknown> => {
if (path.includes("/chat-history")) {
myChatHistoryCalls.push(path);
if (myChatNextResponse.ok) {
const reached_end =
myChatNextResponse.reachedEnd !== undefined
? myChatNextResponse.reachedEnd
: myChatNextResponse.messages.length < 10;
return Promise.resolve({
messages: myChatNextResponse.messages,
reached_end,
});
}
if (path.includes("type=a2a_receive") && path.includes("source=canvas")) {
myChatActivityCalls.push(path);
if (myChatNextResponse.ok) return Promise.resolve(myChatNextResponse.rows);
return Promise.reject(myChatNextResponse.err);
}
// AgentComms / heartbeat / anything else — empty array safe default.
// AgentComms / heartbeat / anything else — empty array is a safe
// default that won't blow up the corresponding component's .then().
return Promise.resolve([]);
});
const apiPost = vi.fn();
@ -88,8 +84,8 @@ const ioInstances: IOInstance[] = [];
beforeEach(() => {
apiGet.mockClear();
apiPost.mockReset();
myChatHistoryCalls.length = 0;
myChatNextResponse = { ok: true, messages: [] };
myChatActivityCalls.length = 0;
myChatNextResponse = { ok: true, rows: [] };
ioInstances.length = 0;
class FakeIO {
private inst: IOInstance;
@ -105,12 +101,20 @@ beforeEach(() => {
this.inst.disconnected = true;
}
}
// Install on every reachable global — different bundlers / module
// graphs can resolve `IntersectionObserver` via `window`, `globalThis`,
// or the bare global. Without all three, jsdom's own (pre-existing)
// stub silently wins and ioInstances stays empty.
(window as unknown as { IntersectionObserver: unknown }).IntersectionObserver = FakeIO;
(globalThis as unknown as { IntersectionObserver: unknown }).IntersectionObserver = FakeIO;
// jsdom doesn't implement scrollIntoView; ChatTab calls it after every
// messages update.
Element.prototype.scrollIntoView = vi.fn();
});
function triggerIntersection(instanceIdx = -1) {
// -1 → the latest observer (the live one). Tests targeting an old
// (disconnected) instance pass a positive index.
const inst = ioInstances.at(instanceIdx);
if (!inst) throw new Error(`no IO instance at ${instanceIdx}`);
inst.callback(
@ -121,30 +125,25 @@ function triggerIntersection(instanceIdx = -1) {
import { ChatTab } from "../ChatTab";
// makeMessagePair returns a (user, agent) pair sharing a timestamp,
// matching the wire shape /chat-history emits per activity_logs row.
// Server-side reverseRowChunks ensures the wire is oldest-first across
// rows but [user, agent] within each row.
function makeMessagePair(seq: number): unknown[] {
// Zero-pad seq into the minute slot so seq=10 produces a valid
// timestamp (00:10:00Z, not 00:010:00Z).
function makeActivityRow(seq: number): Record<string, unknown> {
// Zero-pad seq into the minute slot so "seq=10" doesn't produce
// the invalid timestamp "00:010:00Z" (caught by the loadOlder URL
// assertion below — first version of the helper used `0${seq}` and
// the test failed on `before_ts` having an extra digit).
const mm = String(seq).padStart(2, "0");
const ts = `2026-05-05T00:${mm}:00Z`;
return [
{ id: `u-${seq}`, role: "user", content: `user msg ${seq}`, timestamp: ts },
{ id: `a-${seq}`, role: "agent", content: `agent reply ${seq}`, timestamp: ts },
];
return {
activity_type: "a2a_receive",
status: "ok",
created_at: `2026-05-05T00:${mm}:00Z`,
request_body: { params: { message: { parts: [{ kind: "text", text: `user msg ${seq}` }] } } },
response_body: { result: `agent reply ${seq}` },
};
}
// pageOldestFirst builds a wire-shape page (oldest-first within page)
// of `count` row-pairs starting at seq=`start`. Mirrors the server's
// post-reverseRowChunks emission order.
function pageOldestFirst(start: number, count: number): unknown[] {
const out: unknown[] = [];
for (let i = 0; i < count; i++) {
out.push(...makeMessagePair(start + i));
}
return out;
// Server returns newest-first; the helper builds a server-shape page
// so the order in the rendered messages array matches production.
function newestFirstPage(start: number, count: number): unknown[] {
return Array.from({ length: count }, (_, i) => makeActivityRow(start + count - 1 - i));
}
const minimalData = {
@ -154,30 +153,28 @@ const minimalData = {
} as unknown as Parameters<typeof ChatTab>[0]["data"];
describe("ChatTab lazy history pagination", () => {
it("initial fetch carries limit=10 (not the legacy 50) and hits /chat-history", async () => {
myChatNextResponse = { ok: true, messages: makeMessagePair(1) };
it("initial fetch carries limit=10 (not the legacy 50)", async () => {
myChatNextResponse = { ok: true, rows: [makeActivityRow(1)] };
render(<ChatTab workspaceId="ws-1" data={minimalData} />);
await waitFor(() => expect(myChatHistoryCalls.length).toBe(1));
const url = myChatHistoryCalls[0];
expect(url).toContain("/chat-history");
await waitFor(() => expect(myChatActivityCalls.length).toBe(1));
const url = myChatActivityCalls[0];
expect(url).toContain("limit=10");
expect(url).not.toContain("limit=50");
// before_ts should NOT be set on the initial fetch — that's the
// newest-first slice the user lands on.
expect(url).not.toContain("before_ts");
// /chat-history filters source-canvas server-side; client should
// NOT pass type/source params (they belonged to /activity).
expect(url).not.toContain("type=a2a_receive");
expect(url).not.toContain("source=canvas");
});
it("hides the top sentinel when initial fetch returns fewer than the limit", async () => {
// 3 < 10 → server says "no more older history exists"; sentinel
// should NOT mount and the "Loading older messages…" line should
// never appear.
myChatNextResponse = { ok: true, messages: pageOldestFirst(1, 3) };
// never appear (it can't, since the sentinel is what triggers it).
myChatNextResponse = {
ok: true,
rows: [makeActivityRow(1), makeActivityRow(2), makeActivityRow(3)],
};
render(<ChatTab workspaceId="ws-2" data={minimalData} />);
await waitFor(() => expect(myChatHistoryCalls.length).toBe(1));
await waitFor(() => expect(myChatActivityCalls.length).toBe(1));
await waitFor(() => {
expect(screen.queryByText(/Loading chat history/i)).toBeNull();
});
@ -185,15 +182,15 @@ describe("ChatTab lazy history pagination", () => {
});
it("renders all messages when initial fetch returns exactly the limit", async () => {
// limit=10 row-pairs → 20 ChatMessages. reachedEnd should be FALSE
// so the sentinel mounts. Verified by bubble counts.
myChatNextResponse = {
ok: true,
messages: pageOldestFirst(1, 10),
reachedEnd: false,
};
// 10 == limit → server might have more older rows; sentinel SHOULD
// mount so the IO observer can fire loadOlder() on scroll-up. We
// verify by checking the rendered bubble count — if hasMore stayed
// true the sentinel render path doesn't crash and all 10 rows
// produced their pair of bubbles.
const fullPage = Array.from({ length: 10 }, (_, i) => makeActivityRow(i + 1));
myChatNextResponse = { ok: true, rows: fullPage };
render(<ChatTab workspaceId="ws-3" data={minimalData} />);
await waitFor(() => expect(myChatHistoryCalls.length).toBe(1));
await waitFor(() => expect(myChatActivityCalls.length).toBe(1));
await waitFor(() => {
expect(screen.queryByText(/Loading chat history/i)).toBeNull();
});
@ -205,67 +202,54 @@ describe("ChatTab lazy history pagination", () => {
myChatNextResponse = { ok: false, err: new Error("network down") };
render(<ChatTab workspaceId="ws-4" data={minimalData} />);
const retry = await screen.findByText(/Retry/);
myChatNextResponse = { ok: true, messages: makeMessagePair(1) };
myChatNextResponse = { ok: true, rows: [makeActivityRow(1)] };
fireEvent.click(retry);
await waitFor(() => expect(myChatHistoryCalls.length).toBe(2));
const retryUrl = myChatHistoryCalls[1];
expect(retryUrl).toContain("/chat-history");
await waitFor(() => expect(myChatActivityCalls.length).toBe(2));
const retryUrl = myChatActivityCalls[1];
expect(retryUrl).toContain("limit=10");
expect(retryUrl).not.toContain("limit=50");
});
it("loadOlder fetches limit=20 with before_ts=oldest.timestamp", async () => {
// Initial page = 10 row-pairs in oldest-first order (seq 1..10).
// The oldest (and so the cursor for loadOlder) is seq=1's
// timestamp 2026-05-05T00:01:00Z.
myChatNextResponse = {
ok: true,
messages: pageOldestFirst(1, 10),
reachedEnd: false,
};
// Initial page = 10 rows in newest-first order (seq 10..1). After
// the component reverses to oldest-first for display, messages[0]
// is built from seq=1 — the oldest — and its timestamp is what
// before_ts should carry.
myChatNextResponse = { ok: true, rows: newestFirstPage(1, 10) };
render(<ChatTab workspaceId="ws-load-older" data={minimalData} />);
await waitFor(() => expect(myChatHistoryCalls.length).toBe(1));
await waitFor(() => expect(myChatActivityCalls.length).toBe(1));
await waitFor(() => expect(ioInstances.length).toBeGreaterThan(0));
// Stage older-batch response, then fire IO callback.
myChatNextResponse = {
ok: true,
messages: pageOldestFirst(0, 1),
reachedEnd: true,
};
// Stage the older-batch response, then fire the IO callback.
myChatNextResponse = { ok: true, rows: newestFirstPage(0, 1) };
triggerIntersection();
await waitFor(() => expect(myChatHistoryCalls.length).toBe(2));
const olderUrl = myChatHistoryCalls[1];
expect(olderUrl).toContain("/chat-history");
await waitFor(() => expect(myChatActivityCalls.length).toBe(2));
const olderUrl = myChatActivityCalls[1];
expect(olderUrl).toContain("limit=20");
expect(olderUrl).toContain("before_ts=");
expect(decodeURIComponent(olderUrl)).toContain("before_ts=2026-05-05T00:01:00Z");
});
it("inflight guard rejects a second IO trigger while first loadOlder is in flight", async () => {
myChatNextResponse = {
ok: true,
messages: pageOldestFirst(1, 10),
reachedEnd: false,
};
myChatNextResponse = { ok: true, rows: newestFirstPage(1, 10) };
render(<ChatTab workspaceId="ws-inflight" data={minimalData} />);
await waitFor(() => expect(myChatHistoryCalls.length).toBe(1));
await waitFor(() => expect(myChatActivityCalls.length).toBe(1));
await waitFor(() => expect(ioInstances.length).toBeGreaterThan(0));
// Hold the next loadOlder fetch open with a manual deferred so we
// can fire the second trigger while the first is in-flight.
let release!: (resp: unknown) => void;
const deferred = new Promise<unknown>((res) => {
let release!: (rows: unknown[]) => void;
const deferred = new Promise<unknown[]>((res) => {
release = res;
});
apiGet.mockImplementationOnce((path: string): Promise<unknown> => {
myChatHistoryCalls.push(path);
myChatActivityCalls.push(path);
return deferred;
});
triggerIntersection(); // start loadOlder #1
await waitFor(() => expect(myChatHistoryCalls.length).toBe(2));
await waitFor(() => expect(myChatActivityCalls.length).toBe(2));
// Second IO trigger lands while #1 is still pending.
triggerIntersection();
@ -274,62 +258,79 @@ describe("ChatTab lazy history pagination", () => {
// Without the inflight guard, each of these would have started a
// new fetch. With the guard, none of them do — call count stays 2.
await new Promise((r) => setTimeout(r, 10));
expect(myChatHistoryCalls.length).toBe(2);
expect(myChatActivityCalls.length).toBe(2);
// Release the first fetch with a valid wire response shape.
release({ messages: [], reached_end: true });
await waitFor(() => expect(myChatHistoryCalls.length).toBe(2));
// Release the first fetch. Inflight clears in the finally block;
// a subsequent IO trigger is permitted again (verified by checking
// we can fire a follow-up after release without hanging the test).
release([]);
await waitFor(() => expect(myChatActivityCalls.length).toBe(2));
});
it("empty older response clears the scroll anchor and unmounts the sentinel", async () => {
myChatNextResponse = {
ok: true,
messages: pageOldestFirst(1, 10),
reachedEnd: false,
};
// The bug we're pinning: if loadOlder returns 0 rows, the
// scrollAnchorRef must be cleared so the next paint doesn't try to
// restore against a no-op prepend (which would fight the natural
// bottom-pin for any subsequent live message). hasMore flipping to
// false is the same flag-flip path; sentinel disappearing is the
// observable proxy.
myChatNextResponse = { ok: true, rows: newestFirstPage(1, 10) };
render(<ChatTab workspaceId="ws-anchor" data={minimalData} />);
await waitFor(() => expect(myChatHistoryCalls.length).toBe(1));
await waitFor(() => expect(myChatActivityCalls.length).toBe(1));
await waitFor(() => expect(ioInstances.length).toBeGreaterThan(0));
myChatNextResponse = {
ok: true,
messages: [],
reachedEnd: true,
};
myChatNextResponse = { ok: true, rows: [] }; // empty → reachedEnd
triggerIntersection();
await waitFor(() => expect(myChatHistoryCalls.length).toBe(2));
await waitFor(() => expect(myChatActivityCalls.length).toBe(2));
// After reachedEnd the sentinel unmounts (hasMore=false). We can't
// peek scrollAnchorRef directly, but we can assert the consequence:
// scrollIntoView (the bottom-pin for live appends) is not blocked
// by a stale anchor. Trigger a re-render via an unrelated state
// change… in practice the safest assertion here is that the
// sentinel disappeared (proving the empty response propagated to
// hasMore correctly, which is the same flag-flip path as anchor
// clearing).
await waitFor(() => {
expect(screen.queryByText(/Loading older messages/i)).toBeNull();
});
});
it("IntersectionObserver does not churn when older messages prepend", async () => {
myChatNextResponse = {
ok: true,
messages: pageOldestFirst(1, 10),
reachedEnd: false,
};
// Whole-PR perf invariant: prepending older history (the load-bearing
// user gesture) must NOT tear down + re-arm the IO observer.
// Triggering loadOlder is the cleanest way to drive a messages
// mutation from inside the test, since live agent push goes through
// a Zustand store that's harder to drive reliably from jsdom.
//
// Pre-fix, loadOlder depended on `messages`, so every prepend
// recreated loadOlder → re-ran the IO effect → new observer. Each
// call to triggerIntersection() produced a fresh disconnected
// observer + a new live one. Post-fix, the observer survives.
myChatNextResponse = { ok: true, rows: newestFirstPage(1, 10) };
render(<ChatTab workspaceId="ws-stable-io" data={minimalData} />);
await waitFor(() => expect(myChatHistoryCalls.length).toBe(1));
await waitFor(() => expect(myChatActivityCalls.length).toBe(1));
await waitFor(() => expect(ioInstances.length).toBeGreaterThan(0));
// Snapshot the observer instance after first paint stabilises.
const observerBefore = ioInstances.at(-1);
expect(observerBefore).toBeDefined();
expect(observerBefore!.disconnected).toBe(false);
// Trigger three older-batch prepends. Each batch returns the full
// OLDER_HISTORY_BATCH (20 row-pairs = 40 messages) so reachedEnd
// stays false and the sentinel keeps mounting.
// OLDER_HISTORY_BATCH (20 rows) so reachedEnd stays false and the
// sentinel keeps mounting. Pre-fix, each prepend mutated `messages`
// → recreated loadOlder → re-ran the IO effect → new observer.
for (let batch = 0; batch < 3; batch++) {
myChatNextResponse = {
ok: true,
messages: pageOldestFirst(-(batch + 1) * 20, 20),
reachedEnd: false,
rows: newestFirstPage(-(batch + 1) * 20, 20),
};
const callsBefore = myChatHistoryCalls.length;
const callsBefore = myChatActivityCalls.length;
triggerIntersection();
await waitFor(() => expect(myChatHistoryCalls.length).toBe(callsBefore + 1));
await waitFor(() =>
expect(myChatActivityCalls.length).toBe(callsBefore + 1),
);
}
// The original observer is still the live one — no churn.

View File

@ -1,119 +0,0 @@
// @vitest-environment jsdom
//
// Pins the "Files not available" early-return for runtimes whose
// filesystem the platform doesn't own (today: runtime === "external").
//
// Pre-fix: FilesTab issued a GET /workspaces/<id>/files for every
// workspace. The platform's response for an external workspace is
// always [] (no rows in workspace_files), but the canvas rendered
// "0 files / No config files yet" — visually identical to the SaaS
// empty-listing bug fixed in PR-A. The placeholder makes the absence
// intentional.
//
// Pinned branches:
// 1. external runtime → "Files not available" banner renders,
// runtime name surfaces in the body so user knows WHY.
// 2. external runtime → useFilesApi is NOT invoked. Verified by
// asserting the mocked api.get was never called.
// 3. claude-code (or any other runtime) → no banner, normal mount
// proceeds (`/configs` toolbar visible). Pre-fix regression cover.
// 4. data prop omitted (legacy callers) → no early-return, falls
// through to normal mount.
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { render, screen, cleanup, waitFor } from "@testing-library/react";
import React from "react";
afterEach(cleanup);
// Mock the api module so the normal-mount branches don't try to
// fetch against a real backend — and so we can assert the
// external-runtime branch never fires a request.
// NOTE: vitest hoists vi.mock() factories above the imports; the
// factory may reference `apiCalls` only because the reference is
// resolved lazily at call time, not at mock-registration time.
const apiCalls: string[] = [];
vi.mock("@/lib/api", () => ({
  api: {
    get: vi.fn((path: string) => {
      apiCalls.push(path);
      return Promise.resolve([]);
    }),
    put: vi.fn(() => Promise.resolve()),
    del: vi.fn(() => Promise.resolve()),
  },
}));
// useCanvasStore is referenced by useFilesApi for the needsRestart
// flag. The Toaster import inside FilesTab also pulls the store
// indirectly. Stub minimally to satisfy the import chain while
// keeping every other export of the real module intact.
vi.mock("@/store/canvas", async () => {
  const actual = await vi.importActual<typeof import("@/store/canvas")>(
    "@/store/canvas",
  );
  return {
    ...actual,
    useCanvasStore: {
      getState: () => ({
        updateNodeData: vi.fn(),
      }),
    },
  };
});
vi.mock("../Toaster", () => ({
  showToast: vi.fn(),
}));
// Reset the recorded call log between tests; the mock itself is
// module-scoped and survives across tests.
beforeEach(() => {
  apiCalls.length = 0;
});
import { FilesTab } from "../FilesTab";
// Minimal data props for the two runtime branches under test. The
// double cast (`as unknown as …`) sidesteps the full workspace-data
// shape — only `runtime` (and, presumably, `status`) are read by the
// branch being pinned; confirm against FilesTab if the prop grows.
const externalData = { runtime: "external", status: "online" } as unknown as Parameters<
  typeof FilesTab
>[0]["data"];
const claudeData = { runtime: "claude-code", status: "online" } as unknown as Parameters<
  typeof FilesTab
>[0]["data"];
describe("FilesTab not-available early-return for runtimes without platform-owned filesystem", () => {
  it("external runtime renders the not-available banner with runtime name", () => {
    render(<FilesTab workspaceId="ws-ext" data={externalData} />);
    // Banner presence is the headline assertion for the early-return.
    expect(screen.getByText(/Files not available/i)).not.toBeNull();
    // Both the runtime name and the suggested alternative must appear
    // in the copy — without them the placeholder reads as a generic
    // error and the user has no idea why or where to go next.
    expect(screen.getByText(/external/)).not.toBeNull();
    expect(screen.getByText(/Chat tab/i)).not.toBeNull();
  });
  it("external runtime does NOT issue any /files API call", async () => {
    render(<FilesTab workspaceId="ws-ext" data={externalData} />);
    // Yield one macrotask in case an effect schedules the fetch late.
    await new Promise((r) => setTimeout(r, 0));
    expect(apiCalls.filter((p) => p.includes("/files"))).toEqual([]);
  });
  it("claude-code runtime does NOT render the banner (normal mount)", async () => {
    render(<FilesTab workspaceId="ws-claude" data={claudeData} />);
    // Normal mount settles asynchronously (effect → load → loading
    // flag clears); wait for the banner query to be conclusively null.
    await waitFor(() => {
      expect(screen.queryByText(/Files not available/i)).toBeNull();
    });
    // The toolbar's root selector proves the platform-owned rendering
    // path was taken rather than the placeholder.
    expect(screen.getByLabelText(/File root directory/i)).not.toBeNull();
  });
  it("data prop omitted falls through to normal mount (back-compat)", async () => {
    render(<FilesTab workspaceId="ws-no-data" />);
    await waitFor(() => {
      expect(screen.queryByText(/Files not available/i)).toBeNull();
    });
    // No data → runtime is unknowable → the component must mount the
    // normal way for legacy callers.
    expect(screen.getByLabelText(/File root directory/i)).not.toBeNull();
  });
});

View File

@ -1,141 +0,0 @@
// @vitest-environment jsdom
//
// Pins the compact-when-empty layout for the SkillsTab Plugins section
// (issue #2971, reported on production 2026-05-05).
//
// Three states matter for layout:
// 1. installed.length === 0 + registry closed + load completed → COMPACT pill
// 2. installed.length > 0 → FULL panel + installed list
// 3. registry open (showRegistry=true) → FULL panel + registry browser
//
// The compact-empty path is the new behavior; the other two were
// pre-existing. This test pins all three so a future refactor that
// over-collapses (showing compact when plugins are installed) or
// over-expands (showing full panel on empty load) fails loudly.
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { render, screen, cleanup, fireEvent, waitFor } from "@testing-library/react";
import React from "react";
afterEach(cleanup);
// Single spy behind the mocked api.get so each test can swap the
// implementation per scenario; the writing verbs are inert stubs.
const apiGet = vi.fn();
vi.mock("@/lib/api", () => ({
  api: {
    get: (path: string, opts?: unknown) => apiGet(path, opts),
    post: vi.fn(() => Promise.resolve({})),
    del: vi.fn(),
    patch: vi.fn(),
    put: vi.fn(),
  },
}));
beforeEach(() => {
  apiGet.mockReset();
  // jsdom doesn't implement scrollIntoView; stub it so component code
  // that calls it doesn't throw during these layout tests.
  Element.prototype.scrollIntoView = vi.fn();
});
import { SkillsTab } from "../SkillsTab";
// Smallest data prop that satisfies SkillsTab's runtime gate; the
// double cast bypasses the full workspace-data shape on purpose.
const minimalData = {
  status: "online" as const,
  runtime: "claude-code",
  currentTask: "",
  agentCard: undefined,
} as unknown as Parameters<typeof SkillsTab>[0]["data"];
describe("SkillsTab Plugins compact-empty layout", () => {
it("renders compact pill when installed.length === 0 and registry closed", async () => {
// Both fetches return empty arrays — workspace is fresh, no plugins.
apiGet.mockImplementation((path: string) => {
if (path.endsWith("/plugins") || path === "/plugins" || path === "/plugins/sources") {
return Promise.resolve([]);
}
return Promise.resolve([]);
});
render(<SkillsTab workspaceId="ws-fresh" data={minimalData} />);
// Wait for the installedLoaded gate to flip — without that the
// component renders a "loading" state, not the compact pill.
await waitFor(() => {
expect(screen.getByLabelText(/Plugins \(none installed\)/i)).toBeTruthy();
});
// Compact assertions: the rounded-xl panel chrome MUST NOT be in
// the DOM (we'd see two "Plugins" labels — one in the header,
// one in the pill — if the layout regressed to "always full
// panel"). The compact form has exactly one "Plugins" label.
const labels = screen.getAllByText("Plugins");
expect(labels).toHaveLength(1);
// The full-panel chrome's id="plugins-section" should NOT be
// rendered when we're in compact mode.
expect(document.getElementById("plugins-section")).toBeNull();
});
it("renders full panel when installed.length > 0", async () => {
apiGet.mockImplementation((path: string) => {
if (path.endsWith("/plugins")) {
return Promise.resolve([
{ name: "memory-postgres", version: "1.0.0", description: "memory backend", supported_on_runtime: true },
]);
}
return Promise.resolve([]);
});
render(<SkillsTab workspaceId="ws-installed" data={minimalData} />);
await waitFor(() => {
expect(screen.getByText(/1 installed/i)).toBeTruthy();
});
// Full-panel chrome MUST be present — id pin.
expect(document.getElementById("plugins-section")).not.toBeNull();
// Compact pill ariaLabel MUST NOT be present.
expect(screen.queryByLabelText(/Plugins \(none installed\)/i)).toBeNull();
});
it("expands to full panel when user clicks + Install Plugin from compact pill", async () => {
apiGet.mockImplementation(() => Promise.resolve([]));
render(<SkillsTab workspaceId="ws-expand" data={minimalData} />);
// Start compact — wait for the compact pill to settle so we click
// the right button (initial render before installedLoaded flips
// doesn't have either layout, and the post-load compact pill is
// what we want to interact with).
await waitFor(() => {
expect(screen.getByLabelText(/Plugins \(none installed\)/i)).toBeTruthy();
});
const installBtn = screen.getByRole("button", { name: /\+ Install Plugin/i });
expect(installBtn.getAttribute("aria-expanded")).toBe("false");
fireEvent.click(installBtn);
// After click, registry opens → full panel renders. The compact
// pill's aria-label should be gone; the full-panel id should
// appear. Generous waitFor — a registry fetch may also fire in
// the React effect chain, and we want to assert the compact →
// full transition without racing it.
await waitFor(
() => {
expect(document.getElementById("plugins-section")).not.toBeNull();
},
{ timeout: 3000 },
);
expect(screen.queryByLabelText(/Plugins \(none installed\)/i)).toBeNull();
});
it("does NOT collapse to compact while initial load is pending (avoid flash)", () => {
// Returning a never-resolving promise means installedLoaded stays
// false. The compact pill MUST NOT render in this state — that
// would flash compact → full as the load completes, which looks
// janky. The component shows a loading shell instead (the
// existing pre-fix behavior).
apiGet.mockImplementation(() => new Promise(() => {}));
render(<SkillsTab workspaceId="ws-loading" data={minimalData} />);
// Synchronous assertion — no waitFor — since we want to confirm
// the compact pill is NOT rendered before any network round-trip
// finishes.
expect(screen.queryByLabelText(/Plugins \(none installed\)/i)).toBeNull();
});
});

View File

@ -1,116 +0,0 @@
"use client";
// AttachmentAudio — inline native HTML5 <audio controls> player for
// chat attachments (RFC #2991, PR-2).
//
// Same auth + Blob-URL pattern as AttachmentImage / AttachmentVideo.
// Native audio control bar handles play/pause/scrub/volume/download,
// and there's no fullscreen UI to worry about (audio doesn't need
// AttachmentLightbox).
import { useState, useEffect, useRef } from "react";
import { platformAuthHeaders } from "@/lib/api";
import type { ChatAttachment } from "./types";
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
import { AttachmentChip } from "./AttachmentViews";
interface Props {
  // Workspace scoping for resolveAttachmentHref / auth.
  workspaceId: string;
  // The attachment to render; `uri` drives the fetch, `name` the label.
  attachment: ChatAttachment;
  // Forwarded to the AttachmentChip fallback's download action.
  onDownload: (a: ChatAttachment) => void;
  // Bubble side — selects user vs agent border/background styling.
  tone: "user" | "agent";
}
// Fetch lifecycle for the audio bytes. idle/loading render the pulse
// placeholder, ready carries the playable src (blob or direct href),
// error falls back to the download chip.
type FetchState =
  | { kind: "idle" }
  | { kind: "loading" }
  | { kind: "ready"; src: string }
  | { kind: "error" };
export function AttachmentAudio({ workspaceId, attachment, onDownload, tone }: Props) {
  // Inline HTML5 audio player for a chat attachment. Platform-hosted
  // bytes are fetched with JS-injected auth headers and served to the
  // <audio> element via a Blob object URL; external URIs are handed
  // straight to the element.
  const [state, setState] = useState<FetchState>({ kind: "idle" });
  // Holds the live object URL so the effect cleanup can revoke it.
  const blobUrlRef = useRef<string | null>(null);
  useEffect(() => {
    let cancelled = false;
    setState({ kind: "loading" });
    if (!isPlatformAttachment(attachment.uri)) {
      // Non-platform URI: no auth fetch needed, no blob to manage.
      const href = resolveAttachmentHref(workspaceId, attachment.uri);
      if (!cancelled) setState({ kind: "ready", src: href });
      return;
    }
    void (async () => {
      try {
        const href = resolveAttachmentHref(workspaceId, attachment.uri);
        // Auth (cookie + token headers) is JS-injected, so the bytes
        // must be fetched manually rather than via a bare <audio src>.
        // The timeout signal bounds a stalled download at 60s.
        const res = await fetch(href, {
          headers: platformAuthHeaders(),
          credentials: "include",
          signal: AbortSignal.timeout(60_000),
        });
        if (!res.ok) {
          if (!cancelled) setState({ kind: "error" });
          return;
        }
        const blob = await res.blob();
        const url = URL.createObjectURL(blob);
        blobUrlRef.current = url;
        if (cancelled) {
          // Cleanup already ran: revoke immediately AND clear the ref.
          // (Previously the ref kept pointing at the revoked URL,
          // leaving a stale handle behind; nulling it keeps the ref's
          // invariant — "non-null means live object URL" — intact.)
          URL.revokeObjectURL(url);
          blobUrlRef.current = null;
          return;
        }
        setState({ kind: "ready", src: url });
      } catch {
        // Network failure or timeout abort → chip fallback.
        if (!cancelled) setState({ kind: "error" });
      }
    })();
    return () => {
      cancelled = true;
      if (blobUrlRef.current) {
        URL.revokeObjectURL(blobUrlRef.current);
        blobUrlRef.current = null;
      }
    };
  }, [workspaceId, attachment.uri]);
  if (state.kind === "error") {
    // Any failure degrades to the existing file-pill download flow —
    // the user still gets the bytes, just no inline player.
    return <AttachmentChip attachment={attachment} onDownload={onDownload} tone={tone} />;
  }
  if (state.kind === "idle" || state.kind === "loading") {
    // Pulse placeholder sized to match the eventual player, so the
    // bubble doesn't reflow when the audio arrives.
    return (
      <div
        className="rounded-md border border-line/50 bg-surface-card/40 animate-pulse"
        style={{ width: 280, height: 40 }}
        aria-label={`Loading ${attachment.name}`}
      />
    );
  }
  return (
    <div
      className={`inline-flex flex-col gap-1 rounded-md border px-2 py-1 ${
        tone === "user" ? "border-blue-400/30 bg-accent-strong/10" : "border-line/50 bg-surface-card/40"
      }`}
    >
      {/* Filename label so the user knows what they're hearing
          before pressing play. Short, single-line, truncated. */}
      <span className="text-[10px] text-ink-mid truncate max-w-[280px]" title={attachment.name}>
        {attachment.name}
      </span>
      <audio
        controls
        preload="metadata"
        src={state.src}
        style={{ width: 280, height: 32 }}
        onError={() => setState({ kind: "error" })}
      >
        {attachment.name}
      </audio>
    </div>
  );
}
// Local getTenantSlug() removed — auth-header construction now goes
// through platformAuthHeaders() from @/lib/api (#178).

View File

@ -1,183 +0,0 @@
"use client";
// AttachmentImage — inline image thumbnail + click-to-fullscreen.
// First "specialized renderer" landing under RFC #2991 PR-1.
//
// Auth model
// ----------
//
// The Critical UX/Security trade-off (per RFC's hostile-self-review
// item #2): the bytes live behind workspace auth. A bare
// <img src="https://reno-stars.../chat/download?path=…"> WILL NOT
// include our cookie + Origin headers when the browser loads it —
// even for same-origin canvas-server, the auth chain (cookie + token
// + X-Molecule-Org-Slug header) is JS-injected, not browser-default.
//
// Solution: same auth path the chip download uses. Fetch the bytes
// with the JS auth headers, wrap in a Blob, hand the browser an
// ObjectURL. The image renders from local memory; no second request,
// no auth leakage, no CORS pain.
//
// That same blob URL is what the lightbox shows on click — single
// fetch, cached for the lifetime of the message bubble.
//
// Failure modes
// -------------
//
// - Fetch fails (404, 403, network) → fall back to AttachmentChip
// (the existing file-pill download flow). The user still gets a
// working download; we just lose the inline preview.
// - Decoded as non-image (server returned wrong Content-Type, or
// bytes are corrupt) → onError handler swaps to AttachmentChip.
// - Bytes too large — no enforcement here; the server caps at 25MB
// per file (chat_files.go), which is too big for a thumbnail but
// acceptable for a chat-attached image. If we hit pain we can
// downscale via canvas, but defer that to v2.
import { useState, useEffect, useRef } from "react";
import { platformAuthHeaders } from "@/lib/api";
import type { ChatAttachment } from "./types";
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
import { AttachmentLightbox } from "./AttachmentLightbox";
import { AttachmentChip } from "./AttachmentViews";
// Props contract shared by every specialized attachment renderer
// (image / video / audio / pdf / text): the workspace that scopes the
// download URL, the attachment itself, the caller's download handler
// (used by the chip fallback), and the bubble tone for visual variant.
interface Props {
  workspaceId: string;
  attachment: ChatAttachment;
  onDownload: (a: ChatAttachment) => void;
  tone: "user" | "agent";
}
// Fetch lifecycle for the image bytes. `ready.blobUrl` is either a
// minted ObjectURL (platform-auth URI) or the raw external href.
type FetchState =
  | { kind: "idle" }
  | { kind: "loading" }
  | { kind: "ready"; blobUrl: string }
  | { kind: "error" };
// Inline image thumbnail with click-to-fullscreen. Platform URIs are
// fetched with JS-injected auth headers and shown via an ObjectURL;
// external URLs are handed straight to the browser. Any failure falls
// back to AttachmentChip so the download affordance never disappears.
export function AttachmentImage({ workspaceId, attachment, onDownload, tone }: Props) {
  const [fetchState, setFetchState] = useState<FetchState>({ kind: "idle" });
  const [open, setOpen] = useState(false);
  // Holds the exact ObjectURL we minted so cleanup revokes that value,
  // not whatever the state happens to contain when a new fetch fires.
  const mintedUrlRef = useRef<string | null>(null);

  useEffect(() => {
    let cancelled = false;
    setFetchState({ kind: "loading" });

    // External (http/https) image hosts need no auth fetch — the
    // browser loads them directly, so short-circuit with the raw href.
    if (!isPlatformAttachment(attachment.uri)) {
      const directHref = resolveAttachmentHref(workspaceId, attachment.uri);
      if (!cancelled) setFetchState({ kind: "ready", blobUrl: directHref });
      return;
    }

    // Platform-auth path: same auth chain as the chip download, but we
    // keep the blob and hand the browser an ObjectURL. Auth headers come
    // from the shared platformAuthHeaders() helper (#178).
    const run = async () => {
      try {
        const authedHref = resolveAttachmentHref(workspaceId, attachment.uri);
        const res = await fetch(authedHref, {
          headers: platformAuthHeaders(),
          credentials: "include",
          signal: AbortSignal.timeout(30_000),
        });
        if (!res.ok) {
          if (!cancelled) setFetchState({ kind: "error" });
          return;
        }
        const objectUrl = URL.createObjectURL(await res.blob());
        mintedUrlRef.current = objectUrl;
        if (cancelled) {
          // Effect already cleaned up while we were fetching — release
          // the URL immediately instead of publishing stale state.
          URL.revokeObjectURL(objectUrl);
          return;
        }
        setFetchState({ kind: "ready", blobUrl: objectUrl });
      } catch {
        if (!cancelled) setFetchState({ kind: "error" });
      }
    };
    void run();

    return () => {
      cancelled = true;
      // Free the ObjectURL when the bubble unmounts — keeps memory
      // bounded across long chat histories.
      const minted = mintedUrlRef.current;
      if (minted) {
        URL.revokeObjectURL(minted);
        mintedUrlRef.current = null;
      }
    };
  }, [workspaceId, attachment.uri]);

  // Failure → existing file chip; the user can still download.
  if (fetchState.kind === "error") {
    return <AttachmentChip attachment={attachment} onDownload={onDownload} tone={tone} />;
  }

  // Idle/loading → fixed-size placeholder pill so the bubble doesn't
  // reflow when the image lands (roughly the thumbnail's aspect guess).
  if (fetchState.kind !== "ready") {
    return (
      <div
        className="rounded-md border border-line/50 bg-surface-card/40 animate-pulse"
        style={{ width: 240, height: 180 }}
        aria-label={`Loading ${attachment.name}`}
      />
    );
  }

  // Ready → inline thumbnail; clicking opens the shared lightbox. The
  // <img> carries its own onError so a corrupt blob (right size, bad
  // bytes) also falls through to the chip.
  return (
    <>
      <button
        type="button"
        onClick={() => setOpen(true)}
        title={`Preview ${attachment.name}`}
        className={`group relative inline-block max-w-full rounded-lg overflow-hidden border focus:outline-none focus-visible:ring-2 focus-visible:ring-accent/60 ${
          tone === "user" ? "border-blue-400/30" : "border-line/50"
        }`}
        aria-label={`Open ${attachment.name} preview`}
      >
        <img
          src={fetchState.blobUrl}
          alt={attachment.name}
          // Cap the thumbnail so tall portrait images don't blow up the
          // bubble; the lightbox shows the full size.
          style={{ maxWidth: 240, maxHeight: 180, display: "block" }}
          onError={() => setFetchState({ kind: "error" })}
        />
        {/* Hover filename label — same affordance as Slack/Discord;
            helps when several images land in one bubble. */}
        <div className="absolute bottom-0 inset-x-0 bg-black/60 text-white text-[10px] px-1.5 py-0.5 truncate opacity-0 group-hover:opacity-100 transition-opacity">
          {attachment.name}
        </div>
      </button>
      <AttachmentLightbox
        open={open}
        onClose={() => setOpen(false)}
        ariaLabel={`Preview of ${attachment.name}`}
      >
        <img
          src={fetchState.blobUrl}
          alt={attachment.name}
          className="max-w-[95vw] max-h-[90vh] object-contain"
        />
      </AttachmentLightbox>
    </>
  );
}
// Local getTenantSlug() removed — auth-header construction now goes
// through platformAuthHeaders() from @/lib/api which uses the canonical
// getTenantSlug() from @/lib/tenant. This eliminates the duplicate
// hostname-regex + the duplicate bearer-token-attach pattern (#178).

View File

@ -1,122 +0,0 @@
"use client";
// AttachmentLightbox — shared fullscreen modal for image / PDF /
// (future) any-fullscreen-renderable kind. Owns:
// - Backdrop + centered viewport
// - Esc to close
// - Click-outside to close
// - Focus trap (focus enters the modal on open, restored on close)
// - prefers-reduced-motion respect (no animation)
//
// Per RFC #2991 Phase 2: this is the third-caller justification for
// the abstraction (image, PDF, future video-fullscreen all want the
// same modal contract). Not invented for a single caller.
//
// Design choices:
//
// 1. Portals — we don't use ReactDOM.createPortal because the canvas
// chat surface already renders at a high z-index and the modal's
// fixed-position layout reaches the viewport regardless. Saves a
// portal mount in the common case + avoids the SSR warning (canvas
// is "use client" but the parent shell is server-rendered).
//
// 2. Focus handling — inline (not a 3rd-party dep). Currently this
//    is initial-focus-on-open + restore-on-close only; Tab is NOT
//    trapped inside the dialog. A small manual trap (or
//    focus-trap-react, ~12KB) can land if a11y review requires it.
//
// 3. Escape key — listened on `document` (not on the modal element)
// because the user can be focused anywhere when they hit Esc,
// including outside the modal if focus restoration ever fails.
// The cleanup runs on unmount so leaked listeners don't persist.
import { useEffect, useRef, useCallback, type ReactNode } from "react";
// Contract for the shared fullscreen modal. The caller owns the open
// state and supplies the fullscreen content; the lightbox owns the
// backdrop, close affordances, and focus management.
interface Props {
  /** Render the lightbox when true. Caller controls open state. */
  open: boolean;
  /** Caller's handler for "close" — Esc, click-outside, X button. */
  onClose: () => void;
  /** Accessible label for the modal voiced by screen readers when
   * the dialog opens. The caller knows what's inside (image alt
   * text, PDF filename) and supplies it. */
  ariaLabel: string;
  /** The thing being shown in fullscreen <img>, <embed>, etc.
   * Caller is responsible for sizing it to fit the viewport (we
   * give it max-w-full max-h-full via CSS). */
  children: ReactNode;
}
// Shared fullscreen modal: backdrop, centered content slot, Esc /
// click-outside / X-button close, initial focus + restore.
export function AttachmentLightbox({ open, onClose, ariaLabel, children }: Props) {
  // Ref to the close button (receives initial focus) and to whatever
  // element had focus before the dialog opened (restored on close).
  const closeButtonRef = useRef<HTMLButtonElement>(null);
  const previousFocusRef = useRef<HTMLElement | null>(null);
  // Focus enters the close button on open + restores to whatever had
  // focus when the modal closes. Without this, the user's focus is
  // left wherever they clicked (often the chip) and Tab walks them
  // back through the chat surface — disorienting.
  // NOTE(review): this is initial-focus + restore only; Tab is not
  // actually trapped within the dialog while it is open — confirm
  // whether a full trap is required before a11y sign-off.
  useEffect(() => {
    if (!open) return;
    previousFocusRef.current = document.activeElement as HTMLElement | null;
    closeButtonRef.current?.focus();
    return () => {
      previousFocusRef.current?.focus?.();
    };
  }, [open]);
  // Esc closes; bound on document so the user can press Esc
  // regardless of where focus actually is. The listener is added only
  // while open and removed on cleanup, so nothing leaks on unmount.
  useEffect(() => {
    if (!open) return;
    const onKey = (e: KeyboardEvent) => {
      if (e.key === "Escape") {
        e.preventDefault();
        onClose();
      }
    };
    document.addEventListener("keydown", onKey);
    return () => document.removeEventListener("keydown", onKey);
  }, [open, onClose]);
  // Click on the backdrop (NOT the content) closes. Content's own
  // onClick stops propagation so the user can interact (e.g. native
  // PDF viewer controls) without dismissing the modal.
  const onBackdropClick = useCallback(
    (e: React.MouseEvent) => {
      if (e.target === e.currentTarget) onClose();
    },
    [onClose],
  );
  if (!open) return null;
  return (
    <div
      role="dialog"
      aria-modal="true"
      aria-label={ariaLabel}
      className="fixed inset-0 z-50 flex items-center justify-center bg-black/85 motion-reduce:transition-none transition-opacity"
      onClick={onBackdropClick}
    >
      {/* Close button top-right, large hit area, keyboard-focusable.
          aria-label says "Close preview" so SR users hear the action,
          not just the X glyph. */}
      <button
        ref={closeButtonRef}
        onClick={onClose}
        aria-label="Close preview"
        className="absolute top-4 right-4 rounded-full bg-white/10 hover:bg-white/20 text-white p-2 focus:outline-none focus-visible:ring-2 focus-visible:ring-white"
      >
        <svg width="20" height="20" viewBox="0 0 24 24" fill="none" aria-hidden="true">
          <path d="M5 5l14 14M19 5l-14 14" stroke="currentColor" strokeWidth="2" strokeLinecap="round" />
        </svg>
      </button>
      {/* Content slot — clicks inside must not bubble to the backdrop
          handler, or interacting with the content would dismiss. */}
      <div
        className="max-w-[95vw] max-h-[90vh] flex items-center justify-center"
        onClick={(e) => e.stopPropagation()}
      >
        {children}
      </div>
    </div>
  );
}

View File

@ -1,189 +0,0 @@
"use client";
// AttachmentPDF — inline PDF preview using the browser's native viewer
// (RFC #2991, PR-3).
//
// Why browser-native (not PDF.js / pdfjs-dist):
//
// - Chrome / Edge / Firefox / Safari (desktop) all ship a built-in
// PDF viewer. <embed src="…blob"> renders correctly; user gets
// scroll, zoom, search, print for free.
// - PDF.js adds ~3 MB to the canvas bundle. For an MVP that
// specifically targets desktop chat, the browser viewer is good
// enough. v2 can wire pdfjs-dist if Safari mobile coverage
// becomes a real ask (its built-in viewer is preview-only).
//
// Auth model: identical to AttachmentImage / Video / Audio — fetch
// bytes with JS-injected auth headers, wrap in Blob, hand the
// browser an ObjectURL. <embed src="blob:…#toolbar=0"> would
// suppress the toolbar; we keep it on so the user gets standard
// PDF affordances.
//
// Fullscreen: AttachmentLightbox hosts the PDF at viewport size on
// click. Same shared modal as image — third caller justifies the
// abstraction (per RFC #2991 design).
//
// Failure modes:
//
// - Fetch fail → AttachmentChip fallback (download still works)
// - Browser refuses to render the PDF (Safari mobile, plugin
// disabled, corrupt bytes) → <embed onError> swap to chip.
// Note: <embed> doesn't fire onError reliably across browsers.
// Defensive fallback (planned, NOT implemented in this revision):
// if the blob load fires no onLoad within a short watchdog window,
// swap to the chip.
import { useState, useEffect, useRef } from "react";
import { platformAuthHeaders } from "@/lib/api";
import type { ChatAttachment } from "./types";
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
import { AttachmentLightbox } from "./AttachmentLightbox";
import { AttachmentChip } from "./AttachmentViews";
// Props contract shared with the other specialized renderers: the
// workspace scoping the download URL, the attachment, the caller's
// download handler (chip fallback), and the bubble tone.
interface Props {
  workspaceId: string;
  attachment: ChatAttachment;
  onDownload: (a: ChatAttachment) => void;
  tone: "user" | "agent";
}
// Fetch lifecycle for the PDF bytes. `ready.blobUrl` is either a
// minted ObjectURL (platform-auth URI) or the raw external href.
type FetchState =
  | { kind: "idle" }
  | { kind: "loading" }
  | { kind: "ready"; blobUrl: string }
  | { kind: "error" };
// PDF preview chip that opens the browser's native viewer inside the
// shared lightbox. Platform URIs are fetched with auth headers and
// embedded via an ObjectURL; any fetch failure falls back to the
// download chip.
export function AttachmentPDF({ workspaceId, attachment, onDownload, tone }: Props) {
  // Fetch lifecycle for the PDF bytes; `open` gates the lightbox.
  const [state, setState] = useState<FetchState>({ kind: "idle" });
  const [open, setOpen] = useState(false);
  // Holds the exact ObjectURL we minted so cleanup revokes that value
  // even if state has since moved on.
  const blobUrlRef = useRef<string | null>(null);
  useEffect(() => {
    let cancelled = false;
    setState({ kind: "loading" });
    // External (http/https) URIs skip the auth fetch — the browser can
    // load them directly via the raw href.
    if (!isPlatformAttachment(attachment.uri)) {
      const href = resolveAttachmentHref(workspaceId, attachment.uri);
      if (!cancelled) setState({ kind: "ready", blobUrl: href });
      return;
    }
    // Platform-auth path: fetch with JS-injected headers, wrap in a
    // Blob, hand the browser an ObjectURL.
    void (async () => {
      try {
        const href = resolveAttachmentHref(workspaceId, attachment.uri);
        const res = await fetch(href, {
          headers: platformAuthHeaders(),
          credentials: "include",
          // PDFs can be larger than images; 60s of headroom.
          signal: AbortSignal.timeout(60_000),
        });
        if (!res.ok) {
          if (!cancelled) setState({ kind: "error" });
          return;
        }
        const blob = await res.blob();
        const url = URL.createObjectURL(blob);
        blobUrlRef.current = url;
        if (cancelled) {
          // Cleanup already ran while fetching — release immediately.
          URL.revokeObjectURL(url);
          return;
        }
        setState({ kind: "ready", blobUrl: url });
      } catch {
        if (!cancelled) setState({ kind: "error" });
      }
    })();
    return () => {
      cancelled = true;
      // Free the ObjectURL on unmount — bounded memory across long
      // chat histories.
      if (blobUrlRef.current) {
        URL.revokeObjectURL(blobUrlRef.current);
        blobUrlRef.current = null;
      }
    };
  }, [workspaceId, attachment.uri]);
  // Fetch failed → chip fallback; download still works.
  if (state.kind === "error") {
    return <AttachmentChip attachment={attachment} onDownload={onDownload} tone={tone} />;
  }
  if (state.kind === "idle" || state.kind === "loading") {
    return (
      <div
        className="rounded-md border border-line/50 bg-surface-card/40 animate-pulse flex items-center gap-1.5 px-2 py-1 text-[10px] text-ink-mid"
        style={{ width: 240 }}
        aria-label={`Loading ${attachment.name}`}
      >
        <PdfGlyph />
        Loading {attachment.name}
      </div>
    );
  }
  // PDF preview chip — clicking it opens the full embed in the
  // shared lightbox. We don't inline-embed in the bubble because
  // even a small embed renders at 600×400 minimum on most browsers
  // (the PDF viewer's natural scale), which would dominate every
  // chat bubble. Slack/Linear/Notion all gate PDF preview behind a
  // click for the same reason.
  // NOTE(review): the <embed> below has no onError / load watchdog
  // wired, so a PDF the browser refuses to render shows an empty
  // lightbox rather than falling back to the chip — confirm whether
  // the planned watchdog should land before release.
  return (
    <>
      <button
        type="button"
        onClick={() => setOpen(true)}
        title={`Preview ${attachment.name}`}
        className={`inline-flex items-center gap-1.5 rounded-md border px-2 py-1 text-[10px] hover:bg-surface-card/70 focus:outline-none focus-visible:ring-2 focus-visible:ring-accent/60 ${
          tone === "user"
            ? "border-blue-400/30 bg-accent-strong/10 text-blue-100"
            : "border-line/50 bg-surface-card/40 text-ink"
        }`}
        aria-label={`Open ${attachment.name} preview`}
      >
        <PdfGlyph />
        <span className="truncate max-w-[200px]">{attachment.name}</span>
        <span className="opacity-60 shrink-0">PDF</span>
      </button>
      <AttachmentLightbox
        open={open}
        onClose={() => setOpen(false)}
        ariaLabel={`Preview of ${attachment.name}`}
      >
        <embed
          src={state.blobUrl}
          type="application/pdf"
          // The lightbox's content slot caps at 95vw / 90vh, so size
          // 100% within that and let the user scroll inside the PDF
          // viewer.
          style={{ width: "95vw", height: "90vh" }}
          aria-label={attachment.name}
        />
      </AttachmentLightbox>
    </>
  );
}
// Small document-with-folded-corner glyph, sized for the 10px chip
// text. Purely decorative — hidden from assistive tech.
function PdfGlyph() {
  const stroke = "currentColor";
  return (
    <svg
      className="shrink-0 opacity-70"
      width="11"
      height="11"
      viewBox="0 0 16 16"
      fill="none"
      aria-hidden="true"
    >
      {/* Page outline */}
      <path
        d="M4 2h5l3 3v9a1 1 0 0 1-1 1H4a1 1 0 0 1-1-1V3a1 1 0 0 1 1-1Z"
        stroke={stroke}
        strokeWidth="1.3"
      />
      {/* Folded corner */}
      <path d="M9 2v3h3" stroke={stroke} strokeWidth="1.3" />
      {/* Text lines */}
      <path
        d="M5.5 9.5h1m1 0h1m-3 2h2"
        stroke={stroke}
        strokeWidth="1.1"
        strokeLinecap="round"
      />
    </svg>
  );
}
// Local getTenantSlug() removed — auth-header construction now goes
// through platformAuthHeaders() from @/lib/api (#178).

View File

@ -1,90 +0,0 @@
"use client";
// AttachmentPreview — the SSOT dispatch point for chat-attachment
// rendering (RFC #2991, PR-1).
//
// Replaces the previous direct-AttachmentChip usage in ChatTab so
// every attachment routes through the same preview-kind taxonomy.
// Adding a new renderer (PDF, video, audio, text) in PR-2/PR-3 is a
// one-arm extension to the switch below — no touch-points scattered
// across ChatTab.tsx, AgentCommsPanel.tsx, or other chat consumers.
//
// Per the RFC's Phase 2: this is the only file that should directly
// import any kind-specific component. ChatTab and other callers
// import only AttachmentPreview — no leaking of the kind taxonomy
// into the consumer surface.
import type { ChatAttachment } from "./types";
import { getAttachmentPreviewKind } from "./preview-kind";
import { AttachmentImage } from "./AttachmentImage";
import { AttachmentVideo } from "./AttachmentVideo";
import { AttachmentAudio } from "./AttachmentAudio";
import { AttachmentPDF } from "./AttachmentPDF";
import { AttachmentTextPreview } from "./AttachmentTextPreview";
import { AttachmentChip } from "./AttachmentViews";
// Props for the dispatch component — the same shape every specialized
// renderer receives, so dispatch is a pure pass-through.
interface Props {
  workspaceId: string;
  attachment: ChatAttachment;
  /** Caller's download handler used for the kind=file fallback
   * and as the kind-specific renderers' fallback when their own
   * preview fails (e.g. image fetch errored). */
  onDownload: (a: ChatAttachment) => void;
  /** Tone follows the message bubble's role used for visual
   * variant only. */
  tone: "user" | "agent";
}
// SSOT dispatch point for chat-attachment rendering: resolve the
// preview kind once, route to the matching specialized renderer, and
// fall back to the plain download chip for kind=file (or any future
// kind with no renderer registered yet).
export function AttachmentPreview({ workspaceId, attachment, onDownload, tone }: Props) {
  const kind = getAttachmentPreviewKind(attachment.mimeType, attachment.uri, attachment.name);
  // Kind → renderer table. Every specialized renderer shares the same
  // props contract, so dispatch is a lookup rather than a switch.
  const renderers: Partial<Record<ReturnType<typeof getAttachmentPreviewKind>, typeof AttachmentImage>> = {
    image: AttachmentImage,
    video: AttachmentVideo,
    audio: AttachmentAudio,
    pdf: AttachmentPDF,
    text: AttachmentTextPreview,
  };
  const Renderer = renderers[kind];
  // kind=file — and anything unmapped — keeps the existing chip flow.
  if (!Renderer) {
    return <AttachmentChip attachment={attachment} onDownload={onDownload} tone={tone} />;
  }
  return (
    <Renderer
      workspaceId={workspaceId}
      attachment={attachment}
      onDownload={onDownload}
      tone={tone}
    />
  );
}

View File

@ -1,187 +0,0 @@
"use client";
// AttachmentTextPreview — inline preview for text/code/JSON/YAML/etc
// (RFC #2991, PR-3).
//
// Shape: render first N lines (~10) in monospace inside the bubble.
// Click "Show more" to expand fully; the lightbox is reserved for
// image/PDF where viewport-size matters. For text, the bubble itself
// can host the full content.
//
// Why no syntax highlighting (yet):
//
// - Pulling in shiki / highlight.js / prism adds 200-500KB to the
// bundle for a feature that's nice-to-have. MVP uses plain
// <pre><code>.
// - Future: lazy-load shiki on first text-attachment render. v2
// if the user reports the gap.
//
// Auth: same fetch+text() pattern as image/video/audio, but we read
// the text directly instead of building a Blob URL — no <img>/<video>
// element to feed.
//
// Memory: text files are usually small. We cap the preview at 256 KB
// fetched (large logs would otherwise crash the bubble). If the file
// exceeds the cap, we show what we got + a "truncated" note + a chip
// to download the full file.
import { useState, useEffect } from "react";
import { platformAuthHeaders } from "@/lib/api";
import type { ChatAttachment } from "./types";
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
import { AttachmentChip } from "./AttachmentViews";
// Props contract shared with the other specialized renderers.
interface Props {
  workspaceId: string;
  attachment: ChatAttachment;
  onDownload: (a: ChatAttachment) => void;
  tone: "user" | "agent";
}
// Fetch lifecycle. `ready` carries the decoded text plus whether it
// was cut off at MAX_FETCH_BYTES.
type FetchState =
  | { kind: "idle" }
  | { kind: "loading" }
  | { kind: "ready"; text: string; truncated: boolean }
  | { kind: "error" };
// Number of lines shown before the "Show all" expander.
const PREVIEW_LINE_COUNT = 10;
// Hard cap on fetched bytes so a huge log can't blow up the bubble.
const MAX_FETCH_BYTES = 256 * 1024; // 256 KB
export function AttachmentTextPreview({ workspaceId, attachment, onDownload, tone }: Props) {
const [state, setState] = useState<FetchState>({ kind: "idle" });
const [expanded, setExpanded] = useState(false);
useEffect(() => {
let cancelled = false;
setState({ kind: "loading" });
void (async () => {
try {
const href = resolveAttachmentHref(workspaceId, attachment.uri);
// Only attach platform auth headers for in-platform URIs —
// off-platform URLs (HTTP/HTTPS attachments) MUST NOT receive
// our bearer token (it would leak the admin token to a third
// party). The branch is preserved with the new shared helper.
const headers: Record<string, string> = isPlatformAttachment(attachment.uri)
? platformAuthHeaders()
: {};
const res = await fetch(href, {
headers,
credentials: "include",
signal: AbortSignal.timeout(30_000),
});
if (!res.ok) {
if (!cancelled) setState({ kind: "error" });
return;
}
// Read up to MAX_FETCH_BYTES. Use the standard ReadableStream
// path so we don't materialise a 100MB log into memory.
const reader = res.body?.getReader();
if (!reader) {
// Fallback: small text file, just .text() it.
const text = await res.text();
if (cancelled) return;
setState({
kind: "ready",
text: text.slice(0, MAX_FETCH_BYTES),
truncated: text.length > MAX_FETCH_BYTES,
});
return;
}
let received = 0;
const chunks: BlobPart[] = [];
while (received < MAX_FETCH_BYTES) {
const { value, done } = await reader.read();
if (done) break;
// Copy into a fresh ArrayBuffer-backed view — TS in lib.dom
// 2026 narrows BlobPart away from SharedArrayBuffer-backed
// Uint8Arrays. Blob() accepts the copy fine at runtime.
const copy = new Uint8Array(value.byteLength);
copy.set(value);
chunks.push(copy.buffer);
received += value.byteLength;
}
// If we hit the cap but the stream isn't done, mark truncated.
const truncated = received >= MAX_FETCH_BYTES;
if (truncated) reader.cancel();
const blob = new Blob(chunks);
const text = await blob.text();
if (cancelled) return;
setState({ kind: "ready", text, truncated });
} catch {
if (!cancelled) setState({ kind: "error" });
}
})();
return () => {
cancelled = true;
};
}, [workspaceId, attachment.uri]);
if (state.kind === "error") {
return <AttachmentChip attachment={attachment} onDownload={onDownload} tone={tone} />;
}
if (state.kind === "idle" || state.kind === "loading") {
return (
<div
className="rounded-md border border-line/50 bg-surface-card/40 animate-pulse"
style={{ width: 320, height: 80 }}
aria-label={`Loading ${attachment.name}`}
/>
);
}
const lines = state.text.split("\n");
const preview = expanded ? state.text : lines.slice(0, PREVIEW_LINE_COUNT).join("\n");
const showExpandButton = !expanded && lines.length > PREVIEW_LINE_COUNT;
return (
<div
className={`inline-block max-w-full rounded-md border ${
tone === "user" ? "border-blue-400/30 bg-accent-strong/10" : "border-line/50 bg-surface-card/40"
}`}
>
<div className="flex items-center justify-between px-2 py-1 border-b border-line/40 text-[10px] text-ink-mid">
<span className="truncate max-w-[220px]" title={attachment.name}>
{attachment.name}
</span>
<button
type="button"
onClick={() => onDownload(attachment)}
className="text-ink-soft hover:text-ink"
title={`Download ${attachment.name}`}
aria-label={`Download ${attachment.name}`}
>
</button>
</div>
<pre className="overflow-x-auto px-2 py-1.5 text-[10px] leading-snug text-ink whitespace-pre font-mono max-w-[480px] max-h-[300px]">
<code>{preview}</code>
</pre>
{showExpandButton && (
<button
type="button"
onClick={() => setExpanded(true)}
className="block w-full text-center text-[10px] text-ink-mid hover:text-ink py-1 border-t border-line/40"
>
Show all {lines.length} lines
</button>
)}
{state.truncated && (
<div className="px-2 py-1 text-[10px] text-warm border-t border-line/40">
Preview truncated at {Math.round(MAX_FETCH_BYTES / 1024)} KB {" "}
<button
type="button"
onClick={() => onDownload(attachment)}
className="underline"
>
download full file
</button>
</div>
)}
</div>
);
}
// Local getTenantSlug() removed — auth-header construction now goes
// through platformAuthHeaders() from @/lib/api (#178).

View File

@ -1,147 +0,0 @@
"use client";
// AttachmentVideo — inline native HTML5 <video controls> player for
// chat attachments (RFC #2991, PR-2).
//
// Why HTML5-native (vs custom JS player):
//
// - Browser vendors ship hardware-accelerated decoders, captions,
// and fullscreen UI. We get all of it for free.
// - Native fullscreen via the <video> element's built-in button
// (no AttachmentLightbox needed for video — the browser does it).
// - Mobile-friendly: iOS / Android Safari + Chrome handle the
// pinch + scrub UX the user already knows.
//
// Auth model — identical to AttachmentImage:
// platform-auth URIs need our cookie/token, so we fetch the bytes,
// wrap in a Blob, hand the browser an ObjectURL via <video src=>.
// External (http/https) URIs skip the fetch and use the raw URL.
//
// Memory caveat: a Blob holds the entire video in JS memory until
// the bubble unmounts. For multi-hundred-MB videos this is bad. The
// server caps single-file uploads at 25MB (chat_files.go), so we're
// bounded; if larger files become a real shape, switch to streaming
// via MediaSource or just `<video src=…>` with a credentials-aware
// fetch via service worker. v2 if measured-needed.
import { useState, useEffect, useRef } from "react";
import { platformAuthHeaders } from "@/lib/api";
import type { ChatAttachment } from "./types";
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
import { AttachmentChip } from "./AttachmentViews";
// Props contract shared with the other specialized renderers.
interface Props {
  workspaceId: string;
  attachment: ChatAttachment;
  onDownload: (a: ChatAttachment) => void;
  tone: "user" | "agent";
}
// Fetch lifecycle for the video bytes. `ready.src` is either a minted
// ObjectURL (platform-auth URI) or the raw external href the browser
// can stream natively.
type FetchState =
  | { kind: "idle" }
  | { kind: "loading" }
  | { kind: "ready"; src: string }
  | { kind: "error" };
// Inline HTML5 <video controls> player. Platform URIs are fetched with
// auth headers and served to <video> via an ObjectURL; external URLs
// stream natively. Any fetch/decode failure falls back to the chip so
// download always stays available.
export function AttachmentVideo({ workspaceId, attachment, onDownload, tone }: Props) {
  const [state, setState] = useState<FetchState>({ kind: "idle" });
  // Holds the exact ObjectURL we minted so cleanup revokes that value
  // even if state has since moved on.
  const blobUrlRef = useRef<string | null>(null);
  useEffect(() => {
    let cancelled = false;
    setState({ kind: "loading" });
    if (!isPlatformAttachment(attachment.uri)) {
      // External video (http/https) — let the browser stream it
      // natively without the JS-blob detour.
      const href = resolveAttachmentHref(workspaceId, attachment.uri);
      if (!cancelled) setState({ kind: "ready", src: href });
      return;
    }
    // Platform-auth path: fetch bytes with JS-injected headers, wrap
    // in a Blob, hand the browser an ObjectURL. The whole video sits
    // in memory until unmount (see file-header memory caveat).
    void (async () => {
      try {
        const href = resolveAttachmentHref(workspaceId, attachment.uri);
        const res = await fetch(href, {
          headers: platformAuthHeaders(),
          credentials: "include",
          // Videos are larger than images on average; give the request
          // more headroom. The server's per-request body cap (50MB) is
          // still the actual ceiling.
          signal: AbortSignal.timeout(120_000),
        });
        if (!res.ok) {
          if (!cancelled) setState({ kind: "error" });
          return;
        }
        const blob = await res.blob();
        const url = URL.createObjectURL(blob);
        blobUrlRef.current = url;
        if (cancelled) {
          // Cleanup already ran while fetching — release immediately.
          URL.revokeObjectURL(url);
          return;
        }
        setState({ kind: "ready", src: url });
      } catch {
        if (!cancelled) setState({ kind: "error" });
      }
    })();
    return () => {
      cancelled = true;
      // Free the ObjectURL on unmount — bounded memory across long
      // chat histories.
      if (blobUrlRef.current) {
        URL.revokeObjectURL(blobUrlRef.current);
        blobUrlRef.current = null;
      }
    };
  }, [workspaceId, attachment.uri]);
  // Fetch failed → chip fallback; download still works.
  if (state.kind === "error") {
    return <AttachmentChip attachment={attachment} onDownload={onDownload} tone={tone} />;
  }
  // Idle/loading → fixed-size placeholder so the bubble doesn't reflow.
  if (state.kind === "idle" || state.kind === "loading") {
    return (
      <div
        className="rounded-md border border-line/50 bg-surface-card/40 animate-pulse"
        style={{ width: 320, height: 180 }}
        aria-label={`Loading ${attachment.name}`}
      />
    );
  }
  return (
    <div
      className={`inline-block rounded-lg overflow-hidden border ${
        tone === "user" ? "border-blue-400/30" : "border-line/50"
      }`}
    >
      <video
        controls
        // preload="metadata" so the browser fetches just enough to
        // show duration + first frame thumbnail without streaming
        // the whole file before the user clicks play.
        preload="metadata"
        // playsInline keeps mobile Safari from auto-fullscreening
        // on play; the user can still hit the native fullscreen
        // button (or PiP on Chrome) if they want.
        playsInline
        // Native fullscreen via the <video> control bar; no
        // AttachmentLightbox needed for video.
        src={state.src}
        // Cap thumbnail / inline display so the bubble doesn't blow
        // up vertical layout for tall portrait clips. The native
        // fullscreen button uses the original aspect ratio.
        style={{ maxWidth: 320, maxHeight: 240, display: "block" }}
        // Bytes that aren't actually a valid video (corrupt blob,
        // wrong Content-Type) fail load → swap to chip.
        onError={() => setState({ kind: "error" })}
      >
        <track kind="captions" />
        {attachment.name}
      </video>
    </div>
  );
}
// Local getTenantSlug() removed — auth-header construction now goes
// through platformAuthHeaders() from @/lib/api (#178).

View File

@ -1,317 +0,0 @@
// @vitest-environment jsdom
//
// AttachmentPreview component tests — pin the dispatch contract:
// each kind goes to its dedicated renderer; kind=file falls back to
// the chip; failure modes don't strand the user without a download.
//
// Per RFC #2991 Phase 4: every test must be able to fail. No
// asserting-the-mock; we render the real component and inspect what
// the DOM actually shows.
import { describe, it, expect, vi, afterEach, beforeEach } from "vitest";
import { render, screen, fireEvent, cleanup, waitFor, act } from "@testing-library/react";
import React from "react";
// Unmount rendered trees between cases so queries never match stale DOM.
afterEach(cleanup);
// Mock the auth-token env var so AttachmentImage's fetch doesn't
// hit a real network. The fetch is itself mocked below.
vi.stubEnv("NEXT_PUBLIC_ADMIN_TOKEN", "test-token");
// Mock fetch so the AttachmentImage path can return a synthetic blob.
// Tests override per-case to simulate success / 404 / network fail.
const fetchMock = vi.fn();
beforeEach(() => {
  // Fresh mock per test so one case's queued responses can't leak
  // into the next.
  fetchMock.mockReset();
  vi.stubGlobal("fetch", fetchMock);
  // jsdom doesn't implement URL.createObjectURL — stub.
  global.URL.createObjectURL = vi.fn(() => "blob:test-url");
  global.URL.revokeObjectURL = vi.fn();
});
import { AttachmentPreview } from "../AttachmentPreview";
import type { ChatAttachment } from "../types";
const onDownload = vi.fn();
// Shared render helper — fixed workspace id, tone, and download spy so
// each case only varies the attachment under test.
function preview(att: ChatAttachment) {
  const fixedProps = {
    workspaceId: "ws-1",
    attachment: att,
    onDownload,
    tone: "agent" as const,
  };
  return render(<AttachmentPreview {...fixedProps} />);
}
// Dispatch-table integration tests for <AttachmentPreview>: one group
// per render kind (file / image / video / audio / pdf / text). Each
// kind pins (a) that the correct sub-component renders for its MIME
// or extension, and (b) that fetch failure falls back to the chip.
describe("AttachmentPreview dispatch", () => {
  it("kind=file → renders the AttachmentChip download button (existing fallback)", () => {
    preview({ uri: "workspace:/workspace/tmp/foo.zip", name: "foo.zip", mimeType: "application/zip" });
    // The chip's button title is `Download <name>`. Pre-fix this was
    // the only render path; now it's the kind=file fallback.
    expect(screen.getByTitle(/Download foo\.zip/i)).toBeTruthy();
  });

  it("kind=image (mime) → renders the AttachmentImage path (loading placeholder until fetch resolves)", async () => {
    // never-resolving fetch → component sits in loading state. Pin
    // the loading placeholder shape.
    fetchMock.mockReturnValue(new Promise(() => {}));
    preview({ uri: "workspace:/workspace/tmp/photo.png", name: "photo.png", mimeType: "image/png" });
    expect(await screen.findByLabelText(/Loading photo\.png/i)).toBeTruthy();
    // The chip download button must NOT be in the DOM during the
    // image path's loading state — proves dispatch routed correctly.
    expect(screen.queryByTitle(/Download photo\.png/i)).toBeNull();
  });

  it("kind=image (extension fallback when mime is empty) → image path", async () => {
    fetchMock.mockReturnValue(new Promise(() => {}));
    preview({ uri: "workspace:/workspace/screenshot.jpg", name: "screenshot.jpg" /* no mime */ });
    expect(await screen.findByLabelText(/Loading screenshot\.jpg/i)).toBeTruthy();
  });

  it("kind=image fetch fails (404) → falls back to AttachmentChip so the user can still download", async () => {
    fetchMock.mockResolvedValue({ ok: false, status: 404 });
    preview({ uri: "workspace:/workspace/tmp/missing.png", name: "missing.png", mimeType: "image/png" });
    // The fallback chip shows up on error.
    await waitFor(() => {
      expect(screen.getByTitle(/Download missing\.png/i)).toBeTruthy();
    });
  });

  it("kind=image fetch network error → falls back to chip", async () => {
    fetchMock.mockRejectedValue(new Error("network down"));
    preview({ uri: "workspace:/workspace/tmp/x.png", name: "x.png", mimeType: "image/png" });
    await waitFor(() => {
      expect(screen.getByTitle(/Download x\.png/i)).toBeTruthy();
    });
  });

  it("kind=image success → renders <img> + clicking opens the lightbox", async () => {
    fetchMock.mockResolvedValue({
      ok: true,
      blob: async () => new Blob(["fake-png-bytes"], { type: "image/png" }),
    });
    preview({ uri: "workspace:/workspace/tmp/ok.png", name: "ok.png", mimeType: "image/png" });
    // Image element shows up after the fetch resolves.
    const img = await screen.findByAltText(/ok\.png/);
    expect(img).toBeTruthy();
    // "blob:test-url" is the sentinel minted by the createObjectURL
    // stub in beforeEach — proves the blob path ran.
    expect((img as HTMLImageElement).src).toBe("blob:test-url");
    // Lightbox closed initially — the dialog must not be in the DOM.
    expect(screen.queryByRole("dialog")).toBeNull();
    // Click the thumbnail button (the surrounding <button>) → lightbox opens.
    const button = screen.getByLabelText(/Open ok\.png preview/i);
    fireEvent.click(button);
    expect(await screen.findByRole("dialog")).toBeTruthy();
    expect(screen.getByLabelText(/Close preview/i)).toBeTruthy();
  });

  it("kind=image lightbox closes on Esc keypress", async () => {
    fetchMock.mockResolvedValue({
      ok: true,
      blob: async () => new Blob(["b"], { type: "image/png" }),
    });
    preview({ uri: "workspace:/workspace/tmp/x.png", name: "x.png", mimeType: "image/png" });
    await screen.findByAltText(/x\.png/);
    fireEvent.click(screen.getByLabelText(/Open x\.png preview/i));
    expect(await screen.findByRole("dialog")).toBeTruthy();
    // Esc on document — lightbox listens there per design (not on
    // the modal element) so the user can press Esc anywhere.
    act(() => {
      const event = new KeyboardEvent("keydown", { key: "Escape", bubbles: true });
      document.dispatchEvent(event);
    });
    await waitFor(() => {
      expect(screen.queryByRole("dialog")).toBeNull();
    });
  });

  it("kind=image lightbox closes on backdrop click but not on inner content click", async () => {
    fetchMock.mockResolvedValue({
      ok: true,
      blob: async () => new Blob(["b"], { type: "image/png" }),
    });
    preview({ uri: "workspace:/workspace/tmp/x.png", name: "x.png", mimeType: "image/png" });
    await screen.findByAltText(/x\.png/);
    fireEvent.click(screen.getByLabelText(/Open x\.png preview/i));
    const dialog = await screen.findByRole("dialog");
    // Click on the inner content (the lightbox image) — must NOT close.
    const lightboxImg = dialog.querySelector("img");
    if (!lightboxImg) throw new Error("lightbox img missing");
    fireEvent.click(lightboxImg);
    expect(screen.queryByRole("dialog")).toBeTruthy();
    // Click on the backdrop (the dialog itself) — closes.
    fireEvent.click(dialog);
    await waitFor(() => {
      expect(screen.queryByRole("dialog")).toBeNull();
    });
  });

  // ─── PR-2: video / audio dispatch ───────────────────────────────
  it("kind=video → renders <video controls> after fetch resolves", async () => {
    fetchMock.mockResolvedValue({
      ok: true,
      blob: async () => new Blob(["fake-mp4"], { type: "video/mp4" }),
    });
    preview({ uri: "workspace:/workspace/clip.mp4", name: "clip.mp4", mimeType: "video/mp4" });
    // Loading placeholder first.
    expect(await screen.findByLabelText(/Loading clip\.mp4/i)).toBeTruthy();
    // After the blob resolves, a <video> element with controls=true
    // is in the DOM. Use a tag query — there's no built-in role for
    // <video>, but the element is unambiguous in the bubble.
    await waitFor(() => {
      const v = document.querySelector("video");
      expect(v).not.toBeNull();
      // controls attribute pinned — without it the user can't play.
      expect(v?.hasAttribute("controls")).toBe(true);
      // src is the blob URL we minted.
      expect((v as HTMLVideoElement).src).toBe("blob:test-url");
    });
    // Chip MUST NOT render — proves dispatch routed to video, not file.
    expect(screen.queryByTitle(/Download clip\.mp4/i)).toBeNull();
  });

  it("kind=video fetch fails → falls back to AttachmentChip", async () => {
    fetchMock.mockResolvedValue({ ok: false, status: 404 });
    preview({ uri: "workspace:/workspace/missing.mp4", name: "missing.mp4", mimeType: "video/mp4" });
    await waitFor(() => {
      expect(screen.getByTitle(/Download missing\.mp4/i)).toBeTruthy();
    });
  });

  it("kind=video by extension fallback (no mime) → video path", async () => {
    fetchMock.mockReturnValue(new Promise(() => {}));
    preview({ uri: "workspace:/workspace/recording.webm", name: "recording.webm" });
    expect(await screen.findByLabelText(/Loading recording\.webm/i)).toBeTruthy();
  });

  it("kind=audio → renders <audio controls> with filename label", async () => {
    fetchMock.mockResolvedValue({
      ok: true,
      blob: async () => new Blob(["fake-mp3"], { type: "audio/mpeg" }),
    });
    preview({ uri: "workspace:/workspace/song.mp3", name: "song.mp3", mimeType: "audio/mpeg" });
    await waitFor(() => {
      const a = document.querySelector("audio");
      expect(a).not.toBeNull();
      expect(a?.hasAttribute("controls")).toBe(true);
      expect((a as HTMLAudioElement).src).toBe("blob:test-url");
    });
    // Filename label pinned: helps the user know what they're hearing
    // BEFORE pressing play. Multiple matches — `<span>` text and the
    // `<audio>`'s fallback `{name}` text node — so getAllByText.
    expect(screen.getAllByText("song.mp3").length).toBeGreaterThan(0);
  });

  it("kind=audio fetch fails → falls back to chip", async () => {
    fetchMock.mockResolvedValue({ ok: false, status: 403 });
    preview({ uri: "workspace:/workspace/locked.wav", name: "locked.wav", mimeType: "audio/wav" });
    await waitFor(() => {
      expect(screen.getByTitle(/Download locked\.wav/i)).toBeTruthy();
    });
  });

  // ─── PR-3: PDF / text dispatch ─────────────────────────────────────
  it("kind=pdf → renders the PDF preview chip (click opens lightbox)", async () => {
    fetchMock.mockResolvedValue({
      ok: true,
      blob: async () => new Blob(["%PDF-1.4..."], { type: "application/pdf" }),
    });
    preview({ uri: "workspace:/workspace/doc.pdf", name: "doc.pdf", mimeType: "application/pdf" });
    // Loading placeholder first.
    expect(await screen.findByLabelText(/Loading doc\.pdf/i)).toBeTruthy();
    // After fetch, preview chip with "PDF" tag rendered.
    await waitFor(() => {
      // The button title is "Preview doc.pdf"; alongside is a "PDF" tag.
      expect(screen.getByLabelText(/Open doc\.pdf preview/i)).toBeTruthy();
    });
    // Click → lightbox opens with <embed> inside.
    fireEvent.click(screen.getByLabelText(/Open doc\.pdf preview/i));
    const dialog = await screen.findByRole("dialog");
    expect(dialog).toBeTruthy();
    expect(dialog.querySelector("embed[type='application/pdf']")).not.toBeNull();
  });

  it("kind=pdf fetch fails → falls back to chip", async () => {
    fetchMock.mockResolvedValue({ ok: false, status: 404 });
    preview({ uri: "workspace:/workspace/missing.pdf", name: "missing.pdf", mimeType: "application/pdf" });
    await waitFor(() => {
      expect(screen.getByTitle(/Download missing\.pdf/i)).toBeTruthy();
    });
  });

  it("kind=text (text/plain) → renders inline <pre><code> preview", async () => {
    const body = "line1\nline2\nline3";
    fetchMock.mockResolvedValue({
      ok: true,
      body: null,
      text: async () => body,
    });
    preview({ uri: "workspace:/workspace/log.txt", name: "log.txt", mimeType: "text/plain" });
    // testing-library normalizes whitespace by default. The <pre>
    // contains the literal text node, so query the DOM directly.
    await waitFor(() => {
      const code = document.querySelector("pre code");
      expect(code).not.toBeNull();
      expect(code?.textContent).toBe("line1\nline2\nline3");
    });
  });

  it("kind=text long content → shows 'Show all N lines' button when >10 lines", async () => {
    // 25 lines, default preview shows 10. Button labels with full count.
    const body = Array.from({ length: 25 }, (_, i) => `line ${i + 1}`).join("\n");
    fetchMock.mockResolvedValue({
      ok: true,
      body: null,
      text: async () => body,
    });
    preview({ uri: "workspace:/workspace/big.txt", name: "big.txt", mimeType: "text/plain" });
    await waitFor(() => {
      expect(screen.getByRole("button", { name: /Show all 25 lines/i })).toBeTruthy();
    });
    // Pre-expand: only first 10 lines in <code>; line 11+ absent.
    // NOTE(review): substring checks — "line 10" would also match a
    // hypothetical "line 100"; fine at N=25, revisit if N grows.
    let code = document.querySelector("pre code");
    expect(code?.textContent?.includes("line 10")).toBe(true);
    expect(code?.textContent?.includes("line 11")).toBe(false);
    // After clicking expand, all 25 lines present.
    fireEvent.click(screen.getByRole("button", { name: /Show all 25 lines/i }));
    await waitFor(() => {
      code = document.querySelector("pre code");
      expect(code?.textContent?.includes("line 25")).toBe(true);
    });
  });

  it("kind=text fetch fails → chip fallback", async () => {
    fetchMock.mockResolvedValue({ ok: false, status: 404 });
    preview({ uri: "workspace:/workspace/missing.json", name: "missing.json", mimeType: "application/json" });
    await waitFor(() => {
      expect(screen.getByTitle(/Download missing\.json/i)).toBeTruthy();
    });
  });

  // ─── universal-fallback regression ─────────────────────────────────
  it("kind=file is the universal fallback for unknown MIME (regression: don't try to preview a zip)", () => {
    // Critical safety: agent could attach a misnamed file. Pre-fix
    // the chip path was unconditional; we want unknown MIME to
    // STILL go to the chip even though the extension matches an
    // image kind.
    preview({ uri: "workspace:/workspace/tmp/x.docx", name: "x.docx", mimeType: "application/vnd.zip-disguised-as-doc" });
    expect(screen.getByTitle(/Download x\.docx/i)).toBeTruthy();
  });
});

View File

@ -64,54 +64,6 @@ describe("extractRequestText", () => {
};
expect(extractRequestText(body)).toBe("");
});
// Regression: delegation.go stores request_body as {"task": "...", "delegation_id": "..."}.
// extractRequestText was checking only the A2A params.message.parts path, so
// outbound delegation messages were rendered as blank bubbles.
// Fix: check body.task first (delegation format), then fall back to A2A.
it("extracts text from body.task (delegation format)", () => {
  const requestBody = {
    delegation_id: "delg_01jx8q4n3k",
    task: "Deploy the staging environment for this sprint's release",
  };
  const text = extractRequestText(requestBody);
  expect(text).toBe("Deploy the staging environment for this sprint's release");
});
it("prefers body.task over A2A params when both present", () => {
  // Delegation activities win: body.task is consulted before the A2A
  // params.message.parts path.
  const requestBody = {
    task: "Delegation text wins",
    params: {
      message: { parts: [{ kind: "text", text: "A2A text" }] },
    },
  };
  expect(extractRequestText(requestBody)).toBe("Delegation text wins");
});
it("falls back to A2A format when body.task is absent", () => {
  const requestBody = {
    params: {
      message: { parts: [{ kind: "text", text: "A2A fallback" }] },
    },
  };
  expect(extractRequestText(requestBody)).toBe("A2A fallback");
});
it("returns empty string when body.task is empty string", () => {
  expect(extractRequestText({ task: "" })).toBe("");
});
it("returns empty string when body.task is not a string", () => {
  expect(extractRequestText({ task: 42 })).toBe("");
});
});
describe("extractResponseText", () => {

View File

@ -1,112 +0,0 @@
// preview-kind unit tests — exhaustive table of MIME / extension
// combinations. The kind helper is a pure function; this is the
// regression line for "what renders as what" across the entire chat
// surface.
import { describe, it, expect } from "vitest";
import { getAttachmentPreviewKind } from "../preview-kind";
describe("getAttachmentPreviewKind", () => {
  describe("strict MIME match", () => {
    // One row per MIME we either render inline or deliberately send
    // to the generic file chip.
    const table: Array<[string, ReturnType<typeof getAttachmentPreviewKind>]> = [
      // images
      ["image/png", "image"],
      ["image/jpeg", "image"],
      ["image/gif", "image"],
      ["image/webp", "image"],
      ["image/svg+xml", "image"],
      ["image/avif", "image"],
      ["IMAGE/PNG", "image"], // normalization: case-insensitive
      [" image/png ", "image"], // normalization: trimmed
      // video
      ["video/mp4", "video"],
      ["video/webm", "video"],
      ["video/quicktime", "video"],
      // audio
      ["audio/mpeg", "audio"],
      ["audio/wav", "audio"],
      ["audio/ogg", "audio"],
      // pdf
      ["application/pdf", "pdf"],
      // text family
      ["text/plain", "text"],
      ["text/markdown", "text"],
      ["text/html", "text"],
      ["text/css", "text"],
      ["text/javascript", "text"],
      ["text/csv", "text"],
      ["application/json", "text"],
      ["application/yaml", "text"],
      ["application/x-yaml", "text"],
      ["application/javascript", "text"],
      ["application/typescript", "text"],
      // no renderer → generic file chip
      ["application/zip", "file"],
      ["application/octet-stream", "file"],
      ["application/x-tar", "file"],
      ["application/vnd.ms-excel", "file"],
      ["weird/unknown-thing", "file"],
    ];
    table.forEach(([mime, expected]) => {
      it(`mimeType=${JSON.stringify(mime)}${expected}`, () => {
        expect(getAttachmentPreviewKind(mime)).toBe(expected);
      });
    });
  });
  describe("extension fallback when MIME is missing or generic", () => {
    const table: Array<[string | undefined, string | undefined, string | undefined, ReturnType<typeof getAttachmentPreviewKind>]> = [
      // [mime, uri, name, expected]
      [undefined, "workspace:/tmp/screenshot.png", "screenshot.png", "image"],
      ["", "workspace:/tmp/photo.JPG", "photo.JPG", "image"],
      ["application/octet-stream", "workspace:/tmp/clip.mp4", "clip.mp4", "video"],
      [undefined, "workspace:/foo/song.mp3", "song.mp3", "audio"],
      [undefined, "workspace:/docs/report.pdf", "report.pdf", "pdf"],
      [undefined, "workspace:/code/main.py", "main.py", "text"],
      [undefined, "workspace:/data/notes.md", "notes.md", "text"],
      // no extension at all → file
      [undefined, "workspace:/tmp/Dockerfile", "Dockerfile", "file"],
      // trailing dot is not an extension → file
      [undefined, "workspace:/tmp/weird.", "weird.", "file"],
      // query string + fragment stripped before parsing
      [undefined, "https://example.com/foo.png?download=1#anchor", "", "image"],
      // extension with no renderer → file
      [undefined, "workspace:/tmp/something.xyz", "something.xyz", "file"],
      // degenerate inputs
      [undefined, "", "", "file"],
      [undefined, undefined, undefined, "file"],
    ];
    table.forEach(([mime, uri, name, expected]) => {
      it(`mime=${mime ?? "<undef>"} uri=${uri} name=${name}${expected}`, () => {
        expect(getAttachmentPreviewKind(mime, uri, name)).toBe(expected);
      });
    });
  });
  describe("MIME wins over extension", () => {
    it("explicit mime=application/zip + extension=.png → file (don't render zip as image)", () => {
      // A .png-named attachment that is actually a zip must render as
      // the chip — never an <img> pointed at broken bytes.
      const kind = getAttachmentPreviewKind("application/zip", "x.png", "x.png");
      expect(kind).toBe("file");
    });
    it("explicit mime=text/plain + extension=.png → text", () => {
      const kind = getAttachmentPreviewKind("text/plain", "log.png", "log.png");
      expect(kind).toBe("text");
    });
  });
  describe("regression: hostile-reviewer cases", () => {
    it("does NOT misclassify image/svg+xml as text (svg is image even though it has XML)", () => {
      expect(getAttachmentPreviewKind("image/svg+xml")).toBe("image");
    });
    it("application/octet-stream + extension=.docx → file (no renderer, don't try)", () => {
      const kind = getAttachmentPreviewKind("application/octet-stream", "f.docx", "f.docx");
      expect(kind).toBe("file");
    });
    it("non-canonical MIME application/json works", () => {
      expect(getAttachmentPreviewKind("application/json")).toBe("text");
    });
  });
});

View File

@ -114,15 +114,9 @@ function basename(uri: string): string {
return slash >= 0 ? cleaned.slice(slash + 1) : cleaned || "file";
}
/** Extract user message text from an activity log request_body.
*
* Delegation activities from delegation.go store the task text directly
* at `body.task` as a plain string: {"task": "...", "delegation_id": "..."}.
* Check this first before falling back to the A2A JSON-RPC format
* (`body.params.message.parts[].text`). */
/** Extract user message text from an activity log request_body */
export function extractRequestText(body: Record<string, unknown> | null): string {
if (!body) return "";
if (typeof body.task === "string" && body.task) return body.task;
const params = body.params as Record<string, unknown> | undefined;
const msg = params?.message as Record<string, unknown> | undefined;
const parts = msg?.parts as Array<Record<string, unknown>> | undefined;

View File

@ -1,154 +0,0 @@
// preview-kind.ts — single source of truth for "what renderer should
// this attachment use" (RFC #2991, PR-1).
//
// Per the RFC's Phase 2 design, MIME type is the dispatch axis. The
// wire shape (ChatAttachment.mimeType) already carries it end-to-end
// from the server's chat_files.go through agent_message_writer.go to
// the canvas hydrater — we just need to map it to a render kind.
//
// Why a separate file from AttachmentPreview.tsx: the kind helper is
// a pure function that's easier to unit-test in isolation than a
// React component, and unit tests across MIME families are the
// regression line for new types added later.
/** The render-kind taxonomy. Each kind has a dedicated component:
*
* image AttachmentImage (inline thumbnail + click lightbox)
* video AttachmentVideo (HTML5 <video controls>, native fullscreen)
* audio AttachmentAudio (HTML5 <audio controls>)
* pdf AttachmentPDF (browser-native <embed>, fullscreen modal)
* text AttachmentTextPreview (monospace, first N lines, expand)
* file AttachmentChip (existing fallback generic file pill)
*
* NB: `text` includes JSON, YAML, source code, plain text anything
* that renders sensibly as preformatted ASCII without a specialized
* viewer. PR-1 ships only `image` + `file`; PR-2 adds video/audio;
* PR-3 adds pdf + text. All routed through this same dispatch table
* so adding a new kind is a one-line registration. */
export type AttachmentPreviewKind =
  | "image"
  | "video"
  | "audio"
  | "pdf"
  | "text"
  | "file";
/** Maps a MIME type to the render kind. Falls back to "file" for
* any MIME we don't have a renderer for (current behavior the
* attachment chip is the universal fallback).
*
* Filename-based fallback: when mimeType is missing or generic
* (application/octet-stream), inspect the URI's extension. The
* workspace-server's chat_files.go derives Content-Type from the
* file extension, but agent-emitted attachments may not always
* set mimeType, and the canvas should still preview a file named
* `screenshot.png` even if the wire shape lacks the MIME.
*
* Strict MIME match always wins; extension fallback only applies
* to empty / generic. Unknown extension "file". */
/** Maps a MIME type to the render kind. Falls back to "file" for any
 * MIME we don't have a renderer for — the attachment chip remains the
 * universal fallback.
 *
 * A strict MIME match always wins. The filename-extension fallback
 * only applies when the MIME carries no information: missing/empty,
 * or the server's generic application/octet-stream default. A
 * specific-but-unrenderable MIME (e.g. application/zip) is never
 * second-guessed by the filename.
 *
 * @param mimeType wire MIME (may be absent on agent-emitted attachments)
 * @param uri      attachment URI, used only for extension fallback
 * @param name     attachment display name, preferred for extension fallback
 */
export function getAttachmentPreviewKind(
  mimeType: string | undefined,
  uri?: string,
  name?: string,
): AttachmentPreviewKind {
  const mime = (mimeType ?? "").toLowerCase().trim();

  // Phase 1 — strict MIME dispatch.
  if (mime.startsWith("image/")) return "image";
  if (mime.startsWith("video/")) return "video";
  if (mime.startsWith("audio/")) return "audio";
  if (mime === "application/pdf") return "pdf";
  // Exact non-"text/" MIMEs that still render as preformatted text.
  const textExact = new Set([
    "application/json",
    "application/yaml",
    "application/x-yaml",
    "application/javascript",
    "application/typescript",
  ]);
  if (mime.startsWith("text/") || textExact.has(mime)) return "text";

  // Phase 2 — extension fallback, only for missing/generic MIME.
  const mimeIsUninformative = mime === "" || mime === "application/octet-stream";
  if (mimeIsUninformative) {
    const ext = extractExtension(uri, name);
    const inferred = ext ? EXTENSION_KIND.get(ext) : undefined;
    if (inferred) return inferred;
  }
  return "file";
}
// Extension → kind table for the fallback branch. Keep this list
// short and curated — every entry is a UX commitment to render
// inline, and a wrong inference (e.g. .doc rendered as text) is
// worse than the generic file chip.
// Extension → kind table for the fallback branch. Keep this list
// short and curated — every entry is a UX commitment to render
// inline, and a wrong inference (e.g. .doc rendered as text) is
// worse than the generic file chip. Built via a tiny register helper
// so each kind's extensions read as one line.
const EXTENSION_KIND: ReadonlyMap<string, AttachmentPreviewKind> = (() => {
  const table = new Map<string, AttachmentPreviewKind>();
  const register = (kind: AttachmentPreviewKind, exts: readonly string[]) => {
    for (const ext of exts) table.set(ext, kind);
  };
  register("image", ["png", "jpg", "jpeg", "gif", "webp", "svg", "avif", "bmp"]);
  register("video", ["mp4", "webm", "mov", "mkv"]);
  register("audio", ["mp3", "wav", "ogg", "m4a", "flac"]);
  register("pdf", ["pdf"]);
  // "Text-ish": anything that renders sensibly as preformatted ASCII.
  register("text", [
    "txt", "md", "json", "yaml", "yml",
    "js", "ts", "tsx", "jsx",
    "py", "go", "rs", "java", "c", "cpp", "h", "hpp",
    "sh", "bash", "html", "css", "sql",
    "toml", "ini", "xml", "csv", "log",
  ]);
  return table;
})();
/** Extracts the lowercased extension from a uri or name, without
* the leading dot. Returns "" when no extension is present. */
/** Extracts the lowercased extension from a uri or name, without the
 * leading dot. Returns "" when no extension is present.
 *
 * Prefers `name` (always a leaf path); falls back to the uri's last
 * path segment. Query string and fragment are stripped before
 * parsing, so "https://example.com/foo.png?download=1" yields "png".
 * A trailing dot ("weird.") or no dot at all yields "". */
function extractExtension(uri: string | undefined, name: string | undefined): string {
  const source = name || uri || "";
  if (!source) return "";
  // Last segment of either slash flavor, then drop ?query / #fragment.
  const segments = source.split(/[\\/]/);
  const leaf = (segments.pop() || "").split(/[?#]/)[0];
  const dotIndex = leaf.lastIndexOf(".");
  const hasExtension = dotIndex >= 0 && dotIndex !== leaf.length - 1;
  return hasExtension ? leaf.slice(dotIndex + 1).toLowerCase() : "";
}

View File

@ -1,16 +1,12 @@
import { PLATFORM_URL, platformAuthHeaders } from "@/lib/api";
import { PLATFORM_URL } from "@/lib/api";
import { getTenantSlug } from "@/lib/tenant";
import type { ChatAttachment } from "./types";
/** Chat attachments are intentionally uploaded via a direct fetch()
* instead of the `api.post` helper `api.post` JSON-stringifies the
* body, which would 500 on a Blob. Auth headers (tenant slug, admin
* token, credentials) come from `platformAuthHeaders()` the same
* helper `request()` uses, so a missing bearer surfaces as a single
* fix site instead of N copies. We deliberately do NOT set
* Content-Type so the browser writes the multipart boundary into the
* header; setting it manually would yield a multipart body the server
* can't parse. See lib/api.ts platformAuthHeaders() for the full
* rationale on why this pair must stay matched. */
* body, which would 500 on a Blob. Mirrors the header plumbing
* (tenant slug, admin token, credentials) so SaaS + self-hosted
* callers work the same way. */
export async function uploadChatFiles(
workspaceId: string,
files: File[],
@ -20,12 +16,18 @@ export async function uploadChatFiles(
const form = new FormData();
for (const f of files) form.append("files", f, f.name);
const headers: Record<string, string> = {};
const slug = getTenantSlug();
if (slug) headers["X-Molecule-Org-Slug"] = slug;
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
// Uploads legitimately take a while on cold cache (tar write +
// docker cp into the container). 60s is comfortable for the 25MB/
// 50MB caps the server enforces.
const res = await fetch(`${PLATFORM_URL}/workspaces/${workspaceId}/chat/uploads`, {
method: "POST",
headers: platformAuthHeaders(),
headers,
body: form,
credentials: "include",
signal: AbortSignal.timeout(60_000),
@ -141,8 +143,14 @@ export async function downloadChatFile(
return;
}
const headers: Record<string, string> = {};
const slug = getTenantSlug();
if (slug) headers["X-Molecule-Org-Slug"] = slug;
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
const res = await fetch(href, {
headers: platformAuthHeaders(),
headers,
credentials: "include",
signal: AbortSignal.timeout(60_000),
});

View File

@ -1,130 +0,0 @@
// @vitest-environment node
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
// Tests for the boot-time matched-pair guard added to next.config.ts.
//
// Why this lives in src/lib/__tests__ even though the function is in
// canvas/next.config.ts:
// - next.config.ts runs as ESM-but-also-CJS depending on which
// consumer loads it (Next.js dev server vs Next.js build); we
// want the test to be a plain ESM module Vitest already handles.
// - Importing from "../../../next.config" pulls in the rest of the
// file (loadMonorepoEnv, the default export, etc.) which has
// side effects on module load (it runs loadMonorepoEnv()
// immediately). To keep the test hermetic we don't import — we
// duplicate the function under test.
//
// Sourcing the function from a shared module would be cleaner, but
// next.config.ts is required to be a single self-contained file by
// Next.js's loader on some host configurations. Pin invariant: the
// duplicated function below MUST stay byte-identical to the one in
// next.config.ts. If you change one, change the other and bump this
// comment.
// NOTE(review): per the pin invariant in this file's header comment,
// the function body below MUST stay byte-identical to the copy in
// canvas/next.config.ts — comments are added above it, never inside.
//
// Verdict table (`!!""` is false, so "" counts as unset):
//   neither set  → silent (no admin gate configured)
//   both set     → silent (matched pair)
//   exactly one  → console.error naming the broken side
function checkAdminTokenPair(): void {
  const serverSet = !!process.env.ADMIN_TOKEN;
  const clientSet = !!process.env.NEXT_PUBLIC_ADMIN_TOKEN;
  if (serverSet === clientSet) return;
  if (serverSet && !clientSet) {
    // eslint-disable-next-line no-console
    console.error(
      "[next.config] ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not — " +
        "canvas will 401 against workspace-server because the bearer header " +
        "is never attached. Set both to the same value, or unset both.",
    );
  } else {
    // eslint-disable-next-line no-console
    console.error(
      "[next.config] NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not — " +
        "workspace-server will reject the bearer because no AdminAuth gate " +
        "is configured. Set both to the same value, or unset both.",
    );
  }
}
// Covers all four cells of the set/unset matrix plus the ""-as-unset
// edge. Each test mutates process.env directly; beforeEach/afterEach
// snapshot and restore so mutations never leak between tests.
describe("checkAdminTokenPair", () => {
  // Snapshot env so individual tests can stomp on it without leaking.
  // Rebuild from snapshot in afterEach so the next test sees a known
  // baseline regardless of mutation pattern.
  let originalEnv: Record<string, string | undefined>;
  let errorSpy: ReturnType<typeof vi.spyOn>;
  beforeEach(() => {
    originalEnv = {
      ADMIN_TOKEN: process.env.ADMIN_TOKEN,
      NEXT_PUBLIC_ADMIN_TOKEN: process.env.NEXT_PUBLIC_ADMIN_TOKEN,
    };
    delete process.env.ADMIN_TOKEN;
    delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
    errorSpy = vi.spyOn(console, "error").mockImplementation(() => {});
  });
  afterEach(() => {
    // `undefined` in the snapshot means "was unset" — restore by
    // deleting the key rather than assigning.
    if (originalEnv.ADMIN_TOKEN === undefined) delete process.env.ADMIN_TOKEN;
    else process.env.ADMIN_TOKEN = originalEnv.ADMIN_TOKEN;
    if (originalEnv.NEXT_PUBLIC_ADMIN_TOKEN === undefined) delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
    else process.env.NEXT_PUBLIC_ADMIN_TOKEN = originalEnv.NEXT_PUBLIC_ADMIN_TOKEN;
    errorSpy.mockRestore();
  });
  it("emits no warning when both are unset", () => {
    checkAdminTokenPair();
    expect(errorSpy).not.toHaveBeenCalled();
  });
  it("emits no warning when both are set (matched pair, the happy path)", () => {
    process.env.ADMIN_TOKEN = "local-dev-admin";
    process.env.NEXT_PUBLIC_ADMIN_TOKEN = "local-dev-admin";
    checkAdminTokenPair();
    expect(errorSpy).not.toHaveBeenCalled();
  });
  it("warns when ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not", () => {
    process.env.ADMIN_TOKEN = "local-dev-admin";
    checkAdminTokenPair();
    expect(errorSpy).toHaveBeenCalledTimes(1);
    // Exact-string assertion — substring would also pass when the
    // function's branch logic is broken (e.g. emits both messages, or
    // emits the wrong one). Pin the exact message that operators will
    // see in their dev console so regressions are visible.
    expect(errorSpy).toHaveBeenCalledWith(
      "[next.config] ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not — " +
        "canvas will 401 against workspace-server because the bearer header " +
        "is never attached. Set both to the same value, or unset both.",
    );
  });
  it("warns when NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not", () => {
    process.env.NEXT_PUBLIC_ADMIN_TOKEN = "local-dev-admin";
    checkAdminTokenPair();
    expect(errorSpy).toHaveBeenCalledTimes(1);
    expect(errorSpy).toHaveBeenCalledWith(
      "[next.config] NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not — " +
        "workspace-server will reject the bearer because no AdminAuth gate " +
        "is configured. Set both to the same value, or unset both.",
    );
  });
  // Empty string in process.env is the JS-side representation of `KEY=`
  // (no value) in a .env file. Treating "" as unset makes the pair
  // invariant symmetric: `KEY=` and `unset KEY` produce the same
  // verdict. Without this branch, an operator who comments out the
  // value but leaves the line would get a false-positive warning.
  it("treats empty string as unset (so KEY= and unset KEY are equivalent)", () => {
    process.env.ADMIN_TOKEN = "";
    process.env.NEXT_PUBLIC_ADMIN_TOKEN = "";
    checkAdminTokenPair();
    expect(errorSpy).not.toHaveBeenCalled();
  });
  it("warns when ADMIN_TOKEN is set and NEXT_PUBLIC_ADMIN_TOKEN is empty string", () => {
    process.env.ADMIN_TOKEN = "local-dev-admin";
    process.env.NEXT_PUBLIC_ADMIN_TOKEN = "";
    checkAdminTokenPair();
    expect(errorSpy).toHaveBeenCalledTimes(1);
    // First branch — server set, client unset.
    expect(errorSpy).toHaveBeenCalledWith(
      expect.stringContaining("ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not"),
    );
  });
});

View File

@ -1,97 +0,0 @@
// @vitest-environment jsdom
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
// Tests for platformAuthHeaders — the shared helper extracted in #178
// to consolidate the bearer-token-attach + tenant-slug-attach pattern
// that was previously duplicated across 7 raw-fetch callsites in the
// canvas (uploads + 5 Attachment* components + the api.ts request()
// function).
//
// What we pin here:
// - Returns a fresh object each call (so callers can mutate without
// leaking into each other).
// - Empty result on a non-tenant host with no admin token (the
// localhost / self-hosted shape).
// - Bearer attached when NEXT_PUBLIC_ADMIN_TOKEN is set.
// - X-Molecule-Org-Slug attached when window.location.hostname is a
// tenant subdomain (<slug>.moleculesai.app).
// - Both attached when both apply (the production SaaS shape).
//
// Why jsdom: getTenantSlug() reads window.location.hostname. Node-only
// environment yields no window and getTenantSlug returns null
// unconditionally — wouldn't exercise the slug branch.
import { platformAuthHeaders } from "../api";
// Pins the four header shapes (none / bearer only / slug only / both)
// plus the fresh-object-per-call contract.
// NOTE(review): the Object.defineProperty calls below replace
// window.location for the remainder of a test file run unless the
// environment really does isolate per test — TODO confirm the
// isolation mode matches the afterEach comment's assumption.
describe("platformAuthHeaders", () => {
  let originalAdminToken: string | undefined;
  beforeEach(() => {
    // Snapshot + clear the bearer env so each test starts from the
    // "no admin token" baseline.
    originalAdminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
    delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
  });
  afterEach(() => {
    if (originalAdminToken === undefined) delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
    else process.env.NEXT_PUBLIC_ADMIN_TOKEN = originalAdminToken;
    // jsdom resets hostname between tests via the @vitest-environment
    // pragma's per-test isolation. No explicit reset needed.
  });
  it("returns an empty object on a non-tenant host with no admin token", () => {
    // jsdom default hostname is "localhost" — not a tenant slug, so
    // getTenantSlug() returns null and no X-Molecule-Org-Slug is added.
    const headers = platformAuthHeaders();
    expect(headers).toEqual({});
  });
  it("attaches Authorization when NEXT_PUBLIC_ADMIN_TOKEN is set", () => {
    process.env.NEXT_PUBLIC_ADMIN_TOKEN = "local-dev-admin";
    const headers = platformAuthHeaders();
    expect(headers).toEqual({ Authorization: "Bearer local-dev-admin" });
  });
  it("does NOT attach Authorization when NEXT_PUBLIC_ADMIN_TOKEN is empty string", () => {
    // Empty-string env is the JS-side shape of `KEY=` in .env.
    // Treating it as unset matches the matched-pair guard in
    // next.config.ts (admin-token-pair.test.ts) — symmetric semantics.
    process.env.NEXT_PUBLIC_ADMIN_TOKEN = "";
    const headers = platformAuthHeaders();
    expect(headers).toEqual({});
  });
  it("attaches X-Molecule-Org-Slug on a tenant subdomain", () => {
    Object.defineProperty(window, "location", {
      value: { hostname: "reno-stars.moleculesai.app" },
      writable: true,
    });
    const headers = platformAuthHeaders();
    expect(headers).toEqual({ "X-Molecule-Org-Slug": "reno-stars" });
  });
  it("attaches both when both apply (production SaaS shape)", () => {
    Object.defineProperty(window, "location", {
      value: { hostname: "reno-stars.moleculesai.app" },
      writable: true,
    });
    process.env.NEXT_PUBLIC_ADMIN_TOKEN = "tenant-bearer";
    const headers = platformAuthHeaders();
    // Pin exact-equality on the full shape — substring/contains
    // assertions would also pass for an extra-header bug.
    expect(headers).toEqual({
      "X-Molecule-Org-Slug": "reno-stars",
      Authorization: "Bearer tenant-bearer",
    });
  });
  it("returns a fresh object each call (callers can mutate safely)", () => {
    process.env.NEXT_PUBLIC_ADMIN_TOKEN = "tok";
    const a = platformAuthHeaders();
    const b = platformAuthHeaders();
    expect(a).not.toBe(b); // distinct refs
    expect(a).toEqual(b); // same content
    a["Content-Type"] = "application/json";
    // Mutation on `a` does not leak into `b`.
    expect(b["Content-Type"]).toBeUndefined();
  });
});

View File

@ -21,45 +21,6 @@ export interface RequestOptions {
timeoutMs?: number;
}
/**
* Build the platform auth header set used by every authenticated fetch
* from the canvas. Returns a fresh object so callers can mutate (e.g.
* append `Content-Type` for JSON requests, omit it for FormData).
*
* SaaS cross-origin shape:
* - `X-Molecule-Org-Slug` derived from `window.location.hostname`
* by `getTenantSlug()`. Control plane uses it for fly-replay
 * routing. Empty on localhost / non-tenant hosts — safe to omit.
 * - `Authorization: Bearer <token>` — `NEXT_PUBLIC_ADMIN_TOKEN` baked
* into the canvas build (see canvas/Dockerfile L8/L11). Required by
* the workspace-server when `ADMIN_TOKEN` is set on the server side
* (Tier-2b AdminAuth gate, wsauth_middleware.go ~L245). Empty when
 * no admin token was provisioned — the Tier-1 session-cookie path
* handles that case via `credentials:"include"`.
*
* Why a shared helper: the two-line "read env, attach bearer; read
* slug, attach header" pattern was duplicated across `request()` and
* 7 raw-fetch callsites (chat uploads/download + 5 Attachment*
* components) before this consolidation. A new poller or raw fetch
* that forgets one of the two headers silently 401s against
 * workspace-server when ADMIN_TOKEN is set — the exact bug shape
* called out in #178 / closes the post-#176 self-review gap.
*
* Callers that want JSON Content-Type should spread this and add it
* themselves; FormData callers should NOT add Content-Type (the
* browser sets the multipart boundary). Centralizing the auth pair
* but leaving Content-Type up to the caller is the minimum viable
* shared shape.
*/
export function platformAuthHeaders(): Record<string, string> {
const headers: Record<string, string> = {};
const slug = getTenantSlug();
if (slug) headers["X-Molecule-Org-Slug"] = slug;
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
return headers;
}
async function request<T>(
method: string,
path: string,
@ -67,16 +28,17 @@ async function request<T>(
retryCount = 0,
options?: RequestOptions,
): Promise<T> {
// JSON-bodied request — Content-Type is JSON. Auth pair comes from
// the shared helper; see its doc comment for the SaaS-shape rationale.
const headers: Record<string, string> = {
"Content-Type": "application/json",
...platformAuthHeaders(),
};
// Re-read slug locally for the 401 handler below — `headers` already
// has it, but the 401 branch needs the bare value to gate the
// session-probe + redirect logic on tenant context.
// SaaS cross-origin shape:
// - X-Molecule-Org-Slug: derived from window.location.hostname by
// getTenantSlug(). Control plane uses it for fly-replay routing.
// Empty on localhost / non-tenant hosts — safe to omit.
// - credentials:"include": sends the session cookie cross-origin.
// Cookie's Domain=.moleculesai.app attribute + cp's CORS allow this.
const headers: Record<string, string> = { "Content-Type": "application/json" };
const slug = getTenantSlug();
if (slug) headers["X-Molecule-Org-Slug"] = slug;
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
const res = await fetch(`${PLATFORM_URL}${path}`, {
method,

View File

@ -7,48 +7,6 @@ export default defineConfig({
test: {
environment: 'node',
exclude: ['e2e/**', 'node_modules/**', '**/dist/**'],
// Issue #22 / vitest pool investigation:
//
// The forks pool spawns one Node.js worker per concurrent slot.
// Each jsdom-environment worker bootstraps a full DOM (~30-50 MB resident
// set) at cold-start. With the default maxWorkers derived from CPU
// count, multiple jsdom workers can start simultaneously, exhausting
// memory on the 2-CPU Gitea Actions runner and causing pool workers to
// fail to respond with "[vitest-pool]: Timeout starting … runner."
//
// Fix: cap maxWorkers at 1 so only one worker is alive at any time.
// Tests still run in parallel within that single worker's process (via
// node's EventLoop) — this is the same parallelism as the `threads`
// pool but without the per-worker jsdom cold-start overhead. 51 test
    // files that previously took 50–70 s with 5 failures now run
// sequentially through one worker, eliminating the memory spike.
maxWorkers: 1,
// CI-conditional test timeout (issue #96).
//
// Vitest's 5000ms default is too tight for the first test in any
// file under our CI shape: `npx vitest run --coverage` on the
// self-hosted Gitea Actions Docker runner. The cold-start cost
// (v8 coverage instrumentation init + JSDOM bootstrap + module-
// graph import for @/components/* and @/lib/* + first React
// render) consistently consumes 5-7 seconds for the first
// synchronous test in heavyweight component files
// (ActivityTab.test.tsx, CreateWorkspaceDialog.test.tsx,
// ConfigTab.provider.test.tsx) — even though every subsequent
// test in the same file completes in 100-1500ms.
//
// Empirically the worst observed first-test was 6453ms in a
// single file (CreateWorkspaceDialog). 30000ms gives ~5x
// headroom over that on CI; we still keep 5000ms locally so
// genuine waitFor races / hung promises stay sensitive in dev.
//
// Same vitest pattern documented at:
// https://vitest.dev/config/testtimeout
// https://vitest.dev/guide/coverage#profiling-test-performance
//
// Per-test duration is still emitted to the CI log; if a test
// ever silently approaches 25-30s under this raised ceiling that
// will surface as a duration regression and we revisit.
testTimeout: process.env.CI ? 30000 : 5000,
// Coverage is instrumented but NOT yet a CI gate — first land
// observability so we can see the baseline, then dial in
// thresholds + a hard gate in a follow-up PR (#1815). Today's

View File

@ -1,43 +0,0 @@
# docker-compose.dev.yml — overlay over docker-compose.yml for local dev
# with air-driven live reload of the platform (workspace-server) service.
#
# Usage:
# docker compose -f docker-compose.yml -f docker-compose.dev.yml up
# (or `make dev` shorthand from repo root)
#
# What this overlay changes vs docker-compose.yml alone:
# - Platform service uses workspace-server/Dockerfile.dev (air on top of
# golang:1.25-alpine) instead of the multi-stage prod Dockerfile.
# - Platform service bind-mounts the host's workspace-server/ source
# into /app/workspace-server so air sees source edits live.
# - Other services (postgres, redis, langfuse, etc.) inherit unchanged
# from docker-compose.yml.
#
# What stays the same:
# - All env vars, volumes, depends_on, healthchecks from docker-compose.yml.
# - Network topology + ports.
# - Postgres/Redis as service containers (no in-process replacements).
services:
platform:
build:
context: .
dockerfile: workspace-server/Dockerfile.dev
# Rebind source: edits under host's workspace-server/ propagate live.
# The named volume on go-build-cache speeds up first build per container.
volumes:
- ./workspace-server:/app/workspace-server
- go-build-cache:/root/.cache/go-build
- go-mod-cache:/go/pkg/mod
# Air signals the running binary on rebuild; ensure shell stops cleanly.
init: true
# Mark the service as dev-mode so the platform can short-circuit any
# behavior that's incompatible with hot-reload (e.g. background
# cron-style watchers that don't survive process restart). No-op
# today; reserved for future flag use.
environment:
MOLECULE_DEV_HOT_RELOAD: "1"
volumes:
go-build-cache:
go-mod-cache:

View File

@ -119,7 +119,7 @@ services:
networks:
default:
name: molecule-core-net
name: molecule-monorepo-net
external: true
volumes:

View File

@ -1,7 +1,3 @@
# Include infra services (Temporal, Langfuse) so `docker compose up` starts the full stack.
include:
- docker-compose.infra.yml
services:
# --- Infrastructure ---
postgres:
@ -16,8 +12,7 @@ services:
volumes:
- pgdata:/var/lib/postgresql/data
networks:
- molecule-core-net
restart: unless-stopped
- molecule-monorepo-net
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-dev}"]
interval: 2s
@ -44,7 +39,7 @@ services:
psql -h postgres -U "$${POSTGRES_USER}" -d postgres -c "CREATE DATABASE langfuse"
fi
networks:
- molecule-core-net
- molecule-monorepo-net
redis:
image: redis:7-alpine
@ -54,8 +49,7 @@ services:
volumes:
- redisdata:/data
networks:
- molecule-core-net
restart: unless-stopped
- molecule-monorepo-net
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 2s
@ -72,7 +66,7 @@ services:
volumes:
- clickhousedata:/var/lib/clickhouse
networks:
- molecule-core-net
- molecule-monorepo-net
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://127.0.0.1:8123/ping || exit 1"]
interval: 5s
@ -101,7 +95,7 @@ services:
ports:
- "3001:3000"
networks:
- molecule-core-net
- molecule-monorepo-net
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/api/public/health || exit 1"]
interval: 10s
@ -132,10 +126,6 @@ services:
REDIS_URL: redis://redis:6379
PORT: "${PLATFORM_PORT:-8080}"
PLATFORM_URL: "http://platform:${PLATFORM_PORT:-8080}"
# Container network namespace is already isolated; "all interfaces"
# inside the container = the bridge interface only. The fail-open
# default (127.0.0.1) would block host-to-container access.
BIND_ADDR: "${BIND_ADDR:-0.0.0.0}"
# Default MOLECULE_ENV=development so the WorkspaceAuth / AdminAuth
# middleware fail-open path activates when ADMIN_TOKEN is unset —
# otherwise the canvas (which runs without a bearer in pure local
@ -205,28 +195,12 @@ services:
# App private key — read-only bind-mount. The host-side path is
# gitignored per .gitignore rules (/.secrets/ + *.pem).
- ./.secrets/github-app.pem:/secrets/github-app.pem:ro
# Per-role persona credentials (molecule-core#242 local surface).
# Sourced at workspace creation time by org_import.go::loadPersonaEnvFile
# when a workspace.yaml carries `role: <name>`. The host-side dir is
# populated by the operator-host bootstrap kit (28 dev-tree personas);
# /etc/molecule-bootstrap/personas is the in-container path the
# platform expects (matches the prod tenant-EC2 path so the same code
# works in both modes).
#
# Read-only mount — workspace-server only reads, never writes here.
# If the host dir is empty/missing the platform's loadPersonaEnvFile
# silently no-ops per its existing semantics, so this mount is safe
# even on a fresh machine that hasn't run the bootstrap kit yet.
- ${MOLECULE_PERSONA_ROOT_HOST:-${HOME}/.molecule-ai/personas}:/etc/molecule-bootstrap/personas:ro
ports:
- "${PLATFORM_PUBLISH_PORT:-8080}:${PLATFORM_PORT:-8080}"
networks:
- molecule-core-net
restart: unless-stopped
- molecule-monorepo-net
healthcheck:
# Plain GET — `--spider` would issue HEAD, which returns 404 because
# /health is registered as GET only.
test: ["CMD-SHELL", "wget -qO /dev/null --tries=1 http://localhost:${PLATFORM_PORT:-8080}/health || exit 1"]
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:${PLATFORM_PORT:-8080}/health || exit 1"]
interval: 5s
timeout: 5s
retries: 10
@ -238,8 +212,8 @@ services:
# docker compose pull canvas && docker compose up -d canvas
# First-time local setup or testing unreleased changes — build from source:
# docker compose build canvas && docker compose up -d canvas
# Note: ECR images require AWS auth — `aws ecr get-login-password --region us-east-2 | docker login --username AWS --password-stdin 153263036946.dkr.ecr.us-east-2.amazonaws.com` before pull.
image: 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/canvas:latest
# Note: GHCR images are private — `docker login ghcr.io` required before pull.
image: ghcr.io/molecule-ai/canvas:latest
build:
context: ./canvas
dockerfile: Dockerfile
@ -262,9 +236,9 @@ services:
ports:
- "${CANVAS_PUBLISH_PORT:-3000}:${CANVAS_PORT:-3000}"
networks:
- molecule-core-net
- molecule-monorepo-net
healthcheck:
test: ["CMD-SHELL", "wget -qO /dev/null --tries=1 http://127.0.0.1:${CANVAS_PORT:-3000} || exit 1"]
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://127.0.0.1:${CANVAS_PORT:-3000} || exit 1"]
interval: 10s
timeout: 5s
retries: 10
@ -295,7 +269,7 @@ services:
OPENROUTER_API_KEY: ${OPENROUTER_API_KEY:-}
LITELLM_MASTER_KEY: ${LITELLM_MASTER_KEY:-sk-molecule}
networks:
- molecule-core-net
- molecule-monorepo-net
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:4000/health || exit 1"]
@ -320,7 +294,7 @@ services:
volumes:
- ollamadata:/root/.ollama
networks:
- molecule-core-net
- molecule-monorepo-net
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "ollama list || exit 1"]
@ -330,8 +304,8 @@ services:
start_period: 20s
networks:
molecule-core-net:
name: molecule-core-net
molecule-monorepo-net:
name: molecule-monorepo-net
volumes:
pgdata:

View File

@ -1,74 +0,0 @@
# ADR-002: Local-build mode signalled by `MOLECULE_IMAGE_REGISTRY` presence
* Status: Accepted (2026-05-07)
* Issue: #63 (closes Task #194)
* Decision: Hongming (CTO) + Claude Opus 4.7 (implementation)
## Context
Pre-2026-05-06, every Molecule deployment — both production tenants and OSS contributor laptops — pulled workspace-template-* container images from `ghcr.io/molecule-ai/`. Production tenants additionally set `MOLECULE_IMAGE_REGISTRY` to an AWS ECR mirror via Railway env / EC2 user-data, but the OSS default was the upstream GHCR org.
On 2026-05-06 the `Molecule-AI` GitHub org was suspended (saved memory: `feedback_github_botring_fingerprint`). GHCR now returns **403 Forbidden** for every `molecule-ai/workspace-template-*` manifest. OSS contributors who clone `molecule-core` and run `go run ./workspace-server/cmd/server` cannot provision a workspace — every first provision fails with:
```
docker image "ghcr.io/molecule-ai/workspace-template-claude-code:latest" not found after pull attempt
```
Production tenants are unaffected (their `MOLECULE_IMAGE_REGISTRY` points at ECR, which we still control), but OSS onboarding is broken. Workspace template repos are intentionally separate from `molecule-core` (each runtime is OSS-shape and forkable), and they are mirrored to Gitea (`https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-<runtime>`) — but the provisioner has no path that consumes Gitea source directly.
## Decision
When `MOLECULE_IMAGE_REGISTRY` is **unset** (or empty), the provisioner switches to a **local-build mode** that:
1. Looks up the workspace-template repo's HEAD sha on Gitea via a single API call.
2. Checks whether a SHA-pinned local image (`molecule-local/workspace-template-<runtime>:<sha12>`) already exists; if so, reuses it.
3. Otherwise shallow-clones the repo into `~/.cache/molecule/workspace-template-build/<runtime>/<sha12>/` and runs `docker build --platform=linux/amd64 -t <tag> .`.
4. Hands the SHA-pinned tag to Docker for ContainerCreate, bypassing the registry-pull path entirely.
When `MOLECULE_IMAGE_REGISTRY` is **set**, behavior is unchanged: pull the image from that registry. Existing prod tenants and self-hosters who mirror to a private registry are not affected.
## Consequences
### Positive
* **Zero-config OSS onboarding**`git clone molecule-core && go run ./workspace-server/cmd/server` boots end-to-end without any registry credentials.
* **Production tenants protected** — same env var, same semantics in SaaS-mode. Migration is a no-op.
* **No new env var** — extending an existing var's semantics ("where to pull, OR build locally if absent") rather than introducing `MOLECULE_LOCAL_BUILD=1` keeps the surface small.
* **SHA-pinned cache** — repeat builds are O(API-call); only template-repo HEAD changes invalidate.
* **Production-parity image** — amd64 emulation on Apple Silicon honours `feedback_local_must_mimic_production`. The provisioner's existing `defaultImagePlatform()` already forces amd64 for parity; building amd64 locally lets that decision stay consistent.
### Negative
* **Conflates two concerns**`MOLECULE_IMAGE_REGISTRY` now signals BOTH "where to pull" AND "build locally if absent." A future operator who unsets it expecting a hard error will instead get a slow first-provision. Documented in the runbook.
* **First-provision is slow on Apple Silicon** — 5–10 min via QEMU emulation on the cold path. Mitigated by SHA-cache (subsequent runs are <1s lookup + 0s build).
* **Coverage gap** — only 4 of 9 runtimes are mirrored to Gitea today (`claude-code`, `hermes`, `langgraph`, `autogen`). The other 5 fail with an actionable "not mirrored" error. Mirroring those repos is a separate task.
* **Implicit trust boundary** — operator running `go run` implicitly trusts `molecule-ai/molecule-ai-workspace-template-*` repos on Gitea. This is the same trust they would extend to the GHCR images today; not a new attack surface.
## Alternatives considered
1. **New env var `MOLECULE_LOCAL_BUILD=1`** — explicit, but requires OSS contributors to know it exists. Violates the zero-config goal.
2. **Push pre-built images to a Gitea container registry, mirror tag from upstream** — operationally cleaner but: (a) Gitea's container-registry add-on isn't deployed on the operator host, (b) defeats the OSS-contributor goal of "hack on the source, see your changes," since they'd still pull a stale image.
3. **Embed Dockerfiles in molecule-core itself, drop the standalone template repos** — would work but breaks the OSS-shape principle; templates are intentionally separable, anyone-can-fork artifacts.
4. **Build native arch on Apple Silicon (arm64) and drop the platform pin in local-mode** — fast, but creates `linux/arm64` images that diverge from the amd64-only prod runtime. Local-vs-prod debug behavior would diverge. Rejected per `feedback_local_must_mimic_production`.
## Security review
* **Gitea repo URL allowlist** — runtime name must be in the `knownRuntimes` allowlist (defence-in-depth against a future code path that lets cfg.Runtime carry untrusted input). Repo prefix is hardcoded to `https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-`; forks can override via `MOLECULE_LOCAL_TEMPLATE_REPO_PREFIX` (opt-in, default off).
* **Token handling** — clones are anonymous over HTTPS by default (templates are public). `MOLECULE_GITEA_TOKEN`, if set, is passed via URL userinfo for the clone and as `Authorization: token` for the API call. The token is **masked in every log line** via `maskTokenInURL` / `maskTokenInString` and never appears in the cache dir path.
* **No silent fallback** — if Gitea is unreachable or the runtime isn't mirrored, we return a clear error mentioning the repo URL and the missing runtime. We **never** fall back to GHCR/ECR (that would be a confusing bug for an OSS contributor who happened to have stale ECR creds in their docker config).
* **Build-arg injection**`docker build` is invoked with NO `--build-arg` from external input. Dockerfile is consumed as-is.
* **Cache poisoning** — cache key is the Gitea HEAD sha + Dockerfile content; a force-push to the template repo's main branch regenerates the key on next run. Cache dir is per-user (`$HOME/.cache`), so cross-user attacks aren't relevant in single-user dev mode.
## Versioning + back-compat
* Existing prod tenants set `MOLECULE_IMAGE_REGISTRY=<ECR url>` → unchanged behavior.
* Existing local installs that set the var → unchanged behavior.
* Existing local installs that don't set it → switch to local-build path. Migration: none required (additive); first provision will take 5–10 min instead of failing.
* No deprecations.
## References
* Issue #63 — feat(workspace-server): local-dev provisioner builds from Gitea source
* Saved memory `feedback_local_must_mimic_production` — local docker must mimic prod, no bypasses
* Saved memory `reference_post_suspension_pipeline` — full post-2026-05-06 stack shape
* Saved memory `feedback_github_botring_fingerprint` — what got the org suspended

View File

@ -67,7 +67,7 @@ On-demand fits naturally with how agents work — an agent only needs to know ab
This is acceptable for MVP because:
- All workspaces are provisioned by the same platform on trusted infrastructure
- Docker network isolation (`molecule-core-net`) limits who can reach workspace endpoints
- Docker network isolation (`molecule-monorepo-net`) limits who can reach workspace endpoints
- The tool is self-hosted — the operator controls the network
**Known gap:** Once workspace A caches workspace B's URL, nothing stops A from calling B directly even after the hierarchy changes and A is no longer supposed to reach B. The cached URL remains valid until the container is restarted or the URL changes.

View File

@ -2,7 +2,7 @@
**Status:** living document — update when you ship a feature that touches one backend.
**Owner:** workspace-server + controlplane teams.
**Last audit:** 2026-05-07 (plugin install/uninstall closed for EC2 backend via EIC SSH push to the bind-mounted `/configs/plugins/<name>/`, mirroring the Files API PR #1702 pattern).
**Last audit:** 2026-05-05 (Claude agent — `provisionWorkspaceAuto` / `StopWorkspaceAuto` / `HasProvisioner` SoT pattern landed in PRs #2811 + #2824).
## Why this exists
@ -54,7 +54,7 @@ For "do we have any backend?", use `HasProvisioner()`, never bare `h.provisioner
| **Files API** | | | | |
| List / Read / Write / Replace / Delete | `container_files.go`, `template_import.go` | `docker exec` + tar `CopyToContainer` | SSH via EIC tunnel (PR #1702) | ✅ parity as of 2026-04-22 (previously docker-only) |
| **Plugins** | | | | |
| Install / uninstall / list | `plugins_install.go` + `plugins_install_eic.go` | `deliverToContainer()` → exec+`CopyToContainer` on local container | `instance_id` set → EIC SSH push of the staged tarball into the EC2's bind-mounted `/configs/plugins/<name>/` (per `workspaceFilePathPrefix`), `chown 1000:1000`, restart | ✅ parity |
| Install / uninstall / list | `plugins_install.go` | `deliverToContainer()` + volume rm | **gap — no live plugin delivery** | 🔴 **docker-only** |
| **Terminal (WebSocket)** | | | | |
| Dispatch | `terminal.go:90-105` | `instance_id=""``handleLocalConnect``docker attach` | `instance_id` set → `handleRemoteConnect` → EIC SSH + `docker exec` | ✅ parity (different implementations, same UX) |
| **A2A proxy** | | | | |

View File

@ -4,7 +4,7 @@ How a workspace-server code change reaches the prod tenant fleet — and how to
> **⚠️ State note (2026-04-22):** this doc describes the **intended design**. As of this write, the canary fleet described below is **not actually running** — no canary tenants are provisioned, `CANARY_TENANT_URLS` / `CANARY_ADMIN_TOKENS` / `CANARY_CP_SHARED_SECRET` are empty in repo secrets, and `canary-verify.yml` fails every run.
>
> Current merges gate on manual `promote-latest.yml` dispatches, not canary. See [molecule-controlplane/docs/canary-tenants.md](https://git.moleculesai.app/molecule-ai/molecule-controlplane/src/branch/main/docs/canary-tenants.md) for the Phase 1 code work that's already shipped + the Phase 2 plan for actually standing up the fleet + a "should we even do this now?" decision framework.
> Current merges gate on manual `promote-latest.yml` dispatches, not canary. See [molecule-controlplane/docs/canary-tenants.md](https://github.com/Molecule-AI/molecule-controlplane/blob/main/docs/canary-tenants.md) for the Phase 1 code work that's already shipped + the Phase 2 plan for actually standing up the fleet + a "should we even do this now?" decision framework.
>
> **Account-specific identifiers (AWS account ID, IAM role name) referenced below in the original design have been redacted from this public doc.** The actual values — if they exist — are in `Molecule-AI/internal/runbooks/canary-fleet.md`. If you're implementing Phase 2, start there.
>

View File

@ -1,7 +1,7 @@
# Molecule AI — Comprehensive Technical Documentation
> Definitive technical reference for the Molecule AI Agent Team platform.
> Based on a full non-invasive scan of the [molecule-monorepo](https://git.moleculesai.app/molecule-ai/molecule-monorepo) repository.
> Based on a full non-invasive scan of the [molecule-monorepo](https://github.com/Molecule-AI/molecule-monorepo) repository.
---
@ -124,7 +124,7 @@ Six runtime adapters ship production-ready on `main`: LangGraph, DeepAgents, Cla
| Platform ↔ Redis | TCP | Ephemeral state (liveness TTL), caching, pub/sub |
| Workspace ↔ Workspace | HTTP (A2A JSON-RPC 2.0) | Direct peer-to-peer, **platform not in data path** |
| Workspace → Langfuse | HTTP | Automatic OpenTelemetry tracing |
| Docker Network | `molecule-core-net` | Internal-only by default, no exposed DB/Redis ports |
| Docker Network | `molecule-monorepo-net` | Internal-only by default, no exposed DB/Redis ports |
### Core Components
@ -465,7 +465,7 @@ Unknown tier values default to T2 for safety. Applied via `provisioner.ApplyTier
### Docker Networking
- All containers join `molecule-core-net` private network
- All containers join `molecule-monorepo-net` private network
- Container naming: `ws-{workspace_id[:12]}`
- Ephemeral host port binding: `127.0.0.1:0→8000/tcp`
@ -1149,11 +1149,11 @@ Molecule AI's workspace abstraction is **runtime-agnostic by design**. A workspa
## Links
- **GitHub**: https://git.moleculesai.app/molecule-ai/molecule-monorepo
- **Architecture Docs**: https://git.moleculesai.app/molecule-ai/molecule-monorepo/src/branch/main/docs/architecture
- **API Protocol**: https://git.moleculesai.app/molecule-ai/molecule-monorepo/src/branch/main/docs/api-protocol
- **Agent Runtime**: https://git.moleculesai.app/molecule-ai/molecule-monorepo/src/branch/main/docs/agent-runtime
- **Product Docs**: https://git.moleculesai.app/molecule-ai/molecule-monorepo/src/branch/main/docs/product
- **GitHub**: https://github.com/Molecule-AI/molecule-monorepo
- **Architecture Docs**: https://github.com/Molecule-AI/molecule-monorepo/tree/main/docs/architecture
- **API Protocol**: https://github.com/Molecule-AI/molecule-monorepo/tree/main/docs/api-protocol
- **Agent Runtime**: https://github.com/Molecule-AI/molecule-monorepo/tree/main/docs/agent-runtime
- **Product Docs**: https://github.com/Molecule-AI/molecule-monorepo/tree/main/docs/product
---

View File

@ -19,7 +19,7 @@ The provisioner is the platform component that deploys workspace containers and
## Docker Networking (Tier 1-3, Tier 4 uses host)
All workspace containers join the `molecule-core-net` Docker network. Containers are named `ws-{id[:12]}` (first 12 chars of workspace UUID). Two exported helpers in `provisioner` package provide the canonical naming:
All workspace containers join the `molecule-monorepo-net` Docker network. Containers are named `ws-{id[:12]}` (first 12 chars of workspace UUID). Two exported helpers in `provisioner` package provide the canonical naming:
- `provisioner.ContainerName(workspaceID)``ws-{id[:12]}`
- `provisioner.InternalURL(workspaceID)``http://ws-{id[:12]}:8000`
@ -38,7 +38,7 @@ This URL is pre-stored in both Postgres and Redis before the agent registers. Wh
**Why not use Docker-internal URLs?** In local dev, the platform runs on the host (not in Docker), so it cannot resolve Docker container hostnames. The ephemeral port mapping lets the A2A proxy reach agents via localhost. In production (platform in Docker), the Docker-internal URL (`http://ws-{id}:8000`) would work directly.
**Workspace-to-workspace discovery:** When a workspace discovers another workspace (via `X-Workspace-ID` header on `GET /registry/discover/:id`), the platform returns the Docker-internal URL (`http://ws-{first12chars}:8000`) so containers can reach each other directly on `molecule-core-net`. The internal URL is cached in Redis at provision time and also synthesized as a fallback if the cache misses (only for online/degraded workspaces).
**Workspace-to-workspace discovery:** When a workspace discovers another workspace (via `X-Workspace-ID` header on `GET /registry/discover/:id`), the platform returns the Docker-internal URL (`http://ws-{first12chars}:8000`) so containers can reach each other directly on `molecule-monorepo-net`. The internal URL is cached in Redis at provision time and also synthesized as a fallback if the cache misses (only for online/degraded workspaces).
For external HTTPS access (multi-host mode), Nginx on the host handles TLS termination and proxies to the container.

View File

@ -79,7 +79,7 @@ For SOC2 / ISO 27001 / customer security questionnaires:
## Pointers
- KMS envelope code: [`molecule-controlplane/internal/crypto/kms.go`](https://git.moleculesai.app/molecule-ai/molecule-controlplane/src/branch/main/internal/crypto/kms.go)
- Static-key fallback: [`molecule-controlplane/internal/crypto/aes.go`](https://git.moleculesai.app/molecule-ai/molecule-controlplane/src/branch/main/internal/crypto/aes.go)
- KMS envelope code: [`molecule-controlplane/internal/crypto/kms.go`](https://github.com/Molecule-AI/molecule-controlplane/blob/main/internal/crypto/kms.go)
- Static-key fallback: [`molecule-controlplane/internal/crypto/aes.go`](https://github.com/Molecule-AI/molecule-controlplane/blob/main/internal/crypto/aes.go)
- Tenant secrets handler: [`workspace-server/internal/crypto/aes.go`](../../workspace-server/internal/crypto/aes.go)
- Tenant secrets schema: [database-schema.md](./database-schema.md#workspace_secrets)

View File

@ -1,28 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64">
<style>
.bg { fill: #0a1120; }
.accent { fill: #7fe8d6; }
.accent-stroke { stroke: #7fe8d6; }
@media (prefers-color-scheme: light) {
.bg { fill: #f5f7fa; }
.accent { fill: #1a8a72; }
.accent-stroke { stroke: #1a8a72; }
}
</style>
<rect class="bg" width="64" height="64" rx="14"/>
<g class="accent-stroke" stroke-width="2.4" stroke-linecap="round" fill="none">
<line x1="32" y1="32" x2="12" y2="14"/>
<line x1="32" y1="32" x2="52" y2="18"/>
<line x1="32" y1="32" x2="10" y2="40"/>
<line x1="32" y1="32" x2="54" y2="44"/>
<line x1="32" y1="32" x2="32" y2="56"/>
</g>
<g class="accent">
<circle cx="32" cy="32" r="6.5"/>
<circle cx="12" cy="14" r="3.5"/>
<circle cx="52" cy="18" r="3.5"/>
<circle cx="10" cy="40" r="3.5"/>
<circle cx="54" cy="44" r="3.5"/>
<circle cx="32" cy="56" r="3.5"/>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 957 B

View File

@ -1,17 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64" role="img" aria-label="Molecule AI">
<g stroke="#7fe8d6" stroke-width="2.6" stroke-linecap="round" fill="none">
<line x1="32" y1="32" x2="12" y2="14"/>
<line x1="32" y1="32" x2="52" y2="18"/>
<line x1="32" y1="32" x2="10" y2="40"/>
<line x1="32" y1="32" x2="54" y2="44"/>
<line x1="32" y1="32" x2="32" y2="56"/>
</g>
<g fill="#7fe8d6">
<circle cx="32" cy="32" r="7"/>
<circle cx="12" cy="14" r="3.6"/>
<circle cx="52" cy="18" r="3.6"/>
<circle cx="10" cy="40" r="3.6"/>
<circle cx="54" cy="44" r="3.6"/>
<circle cx="32" cy="56" r="3.6"/>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 662 B

View File

@ -10,7 +10,7 @@ tags: [platform, fly.io, deployment, infrastructure]
Your infrastructure choice just got decoupled from your agent platform choice. Molecule AI now ships three production-ready workspace backends — `docker`, `flyio`, and `controlplane` — and switching between them takes a single environment variable. Your agent code, model choices, and workspace topology stay exactly the same.
This post covers what shipped in [PR #501](https://git.moleculesai.app/molecule-ai/molecule-core/pull/501) (Fly Machines provisioner) and [PR #503](https://git.moleculesai.app/molecule-ai/molecule-core/pull/503) (control plane provisioner), and which backend fits your situation.
This post covers what shipped in [PR #501](https://github.com/Molecule-AI/molecule-core/pull/501) (Fly Machines provisioner) and [PR #503](https://github.com/Molecule-AI/molecule-core/pull/503) (control plane provisioner), and which backend fits your situation.
## Before: One Deployment Model for Every Use Case
@ -107,4 +107,4 @@ No changes to agent code, tool definitions, or orchestration logic. Swap `CONTAI
---
*[PR #501](https://git.moleculesai.app/molecule-ai/molecule-core/pull/501) (Fly Machines provisioner) and [PR #503](https://git.moleculesai.app/molecule-ai/molecule-core/pull/503) (control plane provisioner) are both merged to `main`. Molecule AI is open source — contributions welcome.*
*[PR #501](https://github.com/Molecule-AI/molecule-core/pull/501) (Fly Machines provisioner) and [PR #503](https://github.com/Molecule-AI/molecule-core/pull/503) (control plane provisioner) are both merged to `main`. Molecule AI is open source — contributions welcome.*

View File

@ -299,8 +299,8 @@ Or use the Canvas UI: Workspace → Config → MCP Servers → Add browser MCP s
**Try it free** — Molecule AI is open source and self-hostable. Get a workspace running in under 5 minutes.
→ [Get started on GitHub →](https://git.moleculesai.app/molecule-ai/molecule-core)
→ [Get started on GitHub →](https://github.com/Molecule-AI/molecule-core)
---
*Have a browser automation use case you want to see covered? File an issue with the `enhancement` label on the [molecule-core issue tracker](https://git.moleculesai.app/molecule-ai/molecule-core/issues).*
*Have a browser automation use case you want to see covered? Open a discussion on [GitHub Discussions](https://github.com/Molecule-AI/molecule-core/discussions) — or file an issue with the `enhancement` label.*

View File

@ -148,7 +148,7 @@ Then follow the [quick-start guide](/docs/guides/remote-workspaces.md).
Or run the annotated example directly:
```bash
git clone https://git.moleculesai.app/molecule-ai/molecule-sdk-python
git clone https://github.com/Molecule-AI/molecule-sdk-python
cd molecule-sdk-python/examples/remote-agent
# Create workspace with runtime:external, grab the ID, then:
WORKSPACE_ID=<your-id> PLATFORM_URL=https://acme.moleculesai.app python3 run.py
@ -160,6 +160,6 @@ The agent appears on the canvas within seconds.
→ [Remote Workspaces Guide →](/docs/guides/remote-workspaces.md)
→ [External Agent Registration Reference →](/docs/guides/external-agent-registration.md)
→ [molecule-sdk-python →](https://git.moleculesai.app/molecule-ai/molecule-sdk-python)
→ [molecule-sdk-python →](https://github.com/Molecule-AI/molecule-sdk-python)
*Phase 30 shipped in PRs #1075–#1083 and #1085–#1100 on `molecule-core`.*

View File

@ -27,7 +27,7 @@ The biggest user-facing change: every Molecule AI org can now mint named, revoca
→ [User guide: Organization API Keys](/docs/guides/org-api-keys.md)
→ [Architecture: Org API Keys](/docs/architecture/org-api-keys.md)
→ PRs: [#1105](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1105), [#1107](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1107), [#1109](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1109), [#1110](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1110)
→ PRs: [#1105](https://github.com/Molecule-AI/molecule-core/pull/1105), [#1107](https://github.com/Molecule-AI/molecule-core/pull/1107), [#1109](https://github.com/Molecule-AI/molecule-core/pull/1109), [#1110](https://github.com/Molecule-AI/molecule-core/pull/1110)
---
@ -48,7 +48,7 @@ AdminAuth now accepts a session-verification tier that runs **before** the beare
**Self-hosted / local dev:** `CP_UPSTREAM_URL` is unset → this feature is disabled, behaviour is unchanged.
→ [Guide: Same-Origin Canvas Fetches & Session Auth](/docs/guides/same-origin-canvas-fetches.md)
→ PRs: [#1099](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1099), [#1100](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1100)
→ PRs: [#1099](https://github.com/Molecule-AI/molecule-core/pull/1099), [#1100](https://github.com/Molecule-AI/molecule-core/pull/1100)
---
@ -87,7 +87,7 @@ The proxy is **fail-closed**: only an explicit allowlist of paths (`/cp/auth/`,
This is also the structural fix for the lateral-movement risk that session auth introduced: without the allowlist, a tenant-authed browser user could have proxied `/cp/admin/*` requests upstream and exploited the fact that those endpoints accept WorkOS session cookies. The allowlist makes that impossible by construction.
→ [Guide: Same-Origin Canvas Fetches & Session Auth](/docs/guides/same-origin-canvas-fetches.md)
→ PR: [#1095](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1095)
→ PR: [#1095](https://github.com/Molecule-AI/molecule-core/pull/1095)
---
@ -99,7 +99,7 @@ The waitlist itself is a Canvas-administered list with email hashing in audit lo
This is the operational surface that makes the above security work matter: the beta is invitation-only, credentials are scoped, and every admin action is auditable.
→ Control plane PRs [#145](https://git.moleculesai.app/molecule-ai/molecule-controlplane/pull/145), [#148](https://git.moleculesai.app/molecule-ai/molecule-controlplane/pull/148), [#150](https://git.moleculesai.app/molecule-ai/molecule-controlplane/pull/150)
→ Control plane PRs [#145](https://github.com/Molecule-AI/molecule-controlplane/pull/145), [#148](https://github.com/Molecule-AI/molecule-controlplane/pull/148), [#150](https://github.com/Molecule-AI/molecule-controlplane/pull/150)
---

View File

@ -12,7 +12,7 @@ Your team is in Discord. Your AI agents are in Molecule AI. Until today, those t
That's now one webhook URL.
Molecule AI workspaces can now connect to Discord. Here's what shipped in [PR #656](https://git.moleculesai.app/molecule-ai/molecule-core/pull/656).
Molecule AI workspaces can now connect to Discord. Here's what shipped in [PR #656](https://github.com/Molecule-AI/molecule-core/pull/656).
---
@ -70,7 +70,7 @@ For inbound slash commands, point your Discord app's **Interactions Endpoint URL
## Security: Webhook Tokens Don't Appear in Logs
Webhook URLs contain a token (`/webhooks/{id}/{token}`). If that token leaks into server logs, it's a rotation event. The Discord adapter is explicit about this: HTTP request errors are logged without the URL, and the adapter returns a generic error message. This was hardened in [PR #659](https://git.moleculesai.app/molecule-ai/molecule-core/pull/659).
Webhook URLs contain a token (`/webhooks/{id}/{token}`). If that token leaks into server logs, it's a rotation event. The Discord adapter is explicit about this: HTTP request errors are logged without the URL, and the adapter returns a generic error message. This was hardened in [PR #659](https://github.com/Molecule-AI/molecule-core/pull/659).
---
@ -97,4 +97,4 @@ Documentation: [Social Channels guide](/docs/agent-runtime/social-channels#disco
---
*Discord adapter shipped in [PR #656](https://git.moleculesai.app/molecule-ai/molecule-core/pull/656). Security hardening in [PR #659](https://git.moleculesai.app/molecule-ai/molecule-core/pull/659). Molecule AI is open source — contributions welcome.*
*Discord adapter shipped in [PR #656](https://github.com/Molecule-AI/molecule-core/pull/656). Security hardening in [PR #659](https://github.com/Molecule-AI/molecule-core/pull/659). Molecule AI is open source — contributions welcome.*

View File

@ -133,4 +133,4 @@ With protocol-native A2A, you get:
Molecule AI's external agent registration is production-ready. Documentation is live at [External Agent Registration Guide](https://docs.molecule.ai/docs/guides/external-agent-registration). The npm package for the MCP server is available at [`@molecule-ai/mcp-server`](https://www.npmjs.com/package/@molecule-ai/mcp-server).
Read the full [A2A v1.0 protocol spec](https://git.moleculesai.app/molecule-ai/molecule-core/src/branch/main/docs/api-protocol/a2a-protocol.md) on GitHub.
Read the full [A2A v1.0 protocol spec](https://github.com/Molecule-AI/molecule-core/blob/main/docs/api-protocol/a2a-protocol.md) on GitHub.

View File

@ -45,7 +45,7 @@ canonicalUrl: "https://docs.molecule.ai/blog/remote-workspaces"
"proficiencyLevel": "Expert",
"genre": ["technical documentation", "product announcement"],
"sameAs": [
"https://git.moleculesai.app/molecule-ai/molecule-core",
"https://github.com/Molecule-AI/molecule-core",
"https://molecule.ai"
]
}
@ -270,7 +270,7 @@ Configure it in your project's `.mcp.json` and any AI agent (Claude Code, Cursor
→ [External Agent Registration Guide](/docs/guides/external-agent-registration) — full step-by-step with Python and Node.js reference implementations
→ [GitHub: molecule-core](https://git.moleculesai.app/molecule-ai/molecule-core) — source and issues
→ [GitHub: molecule-core](https://github.com/Molecule-AI/molecule-core) — source and issues
→ [Phase 30 Launch Thread on X](https://x.com) — follow for updates

View File

@ -170,4 +170,4 @@ The `staging` branch is now on `a2a-sdk` 1.0.0. The `main` branch still carries
If you're running `a2a-sdk` 0.3.x and planning the 1.0.0 migration, this post is the reference. The four breaking changes are well-contained, the migration is a single PR, and the eight smoke scenarios above will tell you whether the upgrade is clean before you merge.
Questions? The [A2A protocol spec](https://github.com/google-a2a/a2a-specification) is the authoritative source. For Molecule AI's production A2A implementation, see [External Agent Registration](https://docs.molecule.ai/docs/guides/external-agent-registration) or open an issue in the [molecule-core](https://git.moleculesai.app/molecule-ai/molecule-core) repo.
Questions? The [A2A protocol spec](https://github.com/google-a2a/a2a-specification) is the authoritative source. For Molecule AI's production A2A implementation, see [External Agent Registration](https://docs.molecule.ai/docs/guides/external-agent-registration) or open an issue in the [molecule-core](https://github.com/Molecule-AI/molecule-core) repo.

Some files were not shown because too many files have changed in this diff Show More