Compare commits
No commits in common. "main" and "runtime-v0.0.34" have entirely different histories.
main
...
runtime-v0
@ -1,118 +0,0 @@
|
||||
#!/usr/bin/env bash
# audit-force-merge — detect a §SOP-6 force-merge after PR close, emit
# `incident.force_merge` to stdout as structured JSON.
#
# Vector's docker_logs source picks up runner stdout; the JSON gets
# shipped to Loki on molecule-canonical-obs, indexable by event_type.
# Query example:
#
#   {host="operator"} |= "event_type" |= "incident.force_merge" | json
#
# A force-merge is detected when a PR closed-with-merged=true had at
# least one of the repo's required-status-check contexts in a state
# other than "success" at the merge commit's SHA. That's exactly what
# the Gitea force_merge:true API call lets through, so it's a faithful
# detector of the override path.
#
# Triggers on `pull_request_target: closed` (loaded from base branch
# per §SOP-6 security model). No-op when merged=false.
#
# Required env (set by the workflow):
#   GITEA_TOKEN, GITEA_HOST, REPO, PR_NUMBER, REQUIRED_CHECKS
#
# REQUIRED_CHECKS is a newline-separated list of status-check context
# names that branch protection requires. Declared in the workflow YAML
# rather than fetched from /branch_protections (which needs admin
# scope — sop-tier-bot has read-only). Trade dynamism for simplicity:
# when the required-check set changes, update both branch protection
# AND this env. Keeping them in sync is less complexity than granting
# the audit bot admin perms on every repo.

set -euo pipefail

: "${GITEA_TOKEN:?required}"
: "${GITEA_HOST:?required}"
: "${REPO:?required}"
: "${PR_NUMBER:?required}"
: "${REQUIRED_CHECKS:?required (newline-separated context names)}"

OWNER="${REPO%%/*}"
NAME="${REPO##*/}"
API="https://${GITEA_HOST}/api/v1"
AUTH="Authorization: token ${GITEA_TOKEN}"

# 1. Fetch the PR. If not merged, no-op.
PR=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/pulls/${PR_NUMBER}")
MERGED=$(jq -r '.merged // false' <<<"$PR")
if [ "$MERGED" != "true" ]; then
  echo "::notice::PR #${PR_NUMBER} closed without merge — no audit emission."
  exit 0
fi

MERGE_SHA=$(jq -r '.merge_commit_sha // empty' <<<"$PR")
MERGED_BY=$(jq -r '.merged_by.login // "unknown"' <<<"$PR")
TITLE=$(jq -r '.title // ""' <<<"$PR")
BASE_BRANCH=$(jq -r '.base.ref // "main"' <<<"$PR")
HEAD_SHA=$(jq -r '.head.sha // empty' <<<"$PR")

if [ -z "$MERGE_SHA" ]; then
  echo "::warning::PR #${PR_NUMBER} merged=true but no merge_commit_sha — cannot evaluate force-merge."
  exit 0
fi

# FIX: head.sha is extracted with `// empty` but was never guarded.
# An empty HEAD_SHA produced a malformed `/commits//status` request,
# an empty CHECK_STATE map, and therefore every required check scored
# as "missing" — a false-positive force-merge report. Bail out the
# same way the MERGE_SHA guard does.
if [ -z "$HEAD_SHA" ]; then
  echo "::warning::PR #${PR_NUMBER} merged=true but no head.sha — cannot evaluate force-merge."
  exit 0
fi

# 2. Required status checks declared in the workflow env.
REQUIRED="$REQUIRED_CHECKS"
if [ -z "${REQUIRED//[[:space:]]/}" ]; then
  echo "::notice::REQUIRED_CHECKS empty — force-merge not applicable."
  exit 0
fi

# 3. Status-check state at the PR HEAD (where checks ran). The merge
#    commit doesn't get its own checks; we evaluate the PR's last
#    commit, which is what branch protection compared against.
#    NOTE(review): Gitea's CommitStatus object exposes the state in a
#    field named `status` (unlike GitHub's `state`) — the jq path below
#    relies on that; confirm if the Gitea API version changes.
STATUS=$(curl -sS -H "$AUTH" \
  "${API}/repos/${OWNER}/${NAME}/commits/${HEAD_SHA}/status")
declare -A CHECK_STATE
while IFS=$'\t' read -r ctx state; do
  [ -n "$ctx" ] && CHECK_STATE[$ctx]="$state"
done < <(jq -r '.statuses // [] | .[] | "\(.context)\t\(.status)"' <<<"$STATUS")

# 4. For each required check, was it green at merge? YAML block scalars
#    (`|`) leave a trailing newline; skip blank/whitespace-only lines.
FAILED_CHECKS=()
while IFS= read -r req; do
  trimmed="${req#"${req%%[![:space:]]*}"}"   # ltrim
  trimmed="${trimmed%"${trimmed##*[![:space:]]}"}" # rtrim
  [ -z "$trimmed" ] && continue
  state="${CHECK_STATE[$trimmed]:-missing}"
  if [ "$state" != "success" ]; then
    FAILED_CHECKS+=("${trimmed}=${state}")
  fi
done <<< "$REQUIRED"

if [ "${#FAILED_CHECKS[@]}" -eq 0 ]; then
  echo "::notice::PR #${PR_NUMBER} merged with all required checks green — not a force-merge."
  exit 0
fi

# 5. Emit structured audit event.
NOW=$(date -u +%Y-%m-%dT%H:%M:%SZ)
FAILED_JSON=$(printf '%s\n' "${FAILED_CHECKS[@]}" | jq -R . | jq -s .)

# Print as a single-line JSON so Vector's parse_json transform can pick
# it up cleanly from docker_logs.
jq -nc \
  --arg event_type "incident.force_merge" \
  --arg ts "$NOW" \
  --arg repo "$REPO" \
  --argjson pr "$PR_NUMBER" \
  --arg title "$TITLE" \
  --arg base "$BASE_BRANCH" \
  --arg merged_by "$MERGED_BY" \
  --arg merge_sha "$MERGE_SHA" \
  --argjson failed_checks "$FAILED_JSON" \
  '{event_type: $event_type, ts: $ts, repo: $repo, pr: $pr, title: $title,
    base_branch: $base, merged_by: $merged_by, merge_sha: $merge_sha,
    failed_checks: $failed_checks}'

echo "::warning::FORCE-MERGE detected on PR #${PR_NUMBER} by ${MERGED_BY}: ${#FAILED_CHECKS[@]} required check(s) not green at merge time."
|
||||
@ -1,149 +0,0 @@
|
||||
#!/usr/bin/env bash
# sop-tier-check — verify a Gitea PR satisfies the §SOP-6 approval gate.
#
# Reads the PR's tier label, walks approving reviewers, and checks each
# approver's Gitea team membership against the tier's eligible-team set.
# Marks pass only when at least one non-author approver is in an eligible
# team.
#
# Invoked from `.gitea/workflows/sop-tier-check.yml`. The workflow sets
# the env vars below; this script does no IO outside of stdout/stderr +
# the Gitea API.
#
# Required env:
#   GITEA_TOKEN — bot PAT with read:organization,read:user,
#                 read:issue,read:repository scopes
#   GITEA_HOST  — e.g. git.moleculesai.app
#   REPO        — owner/name (from github.repository)
#   PR_NUMBER   — int (from github.event.pull_request.number)
#   PR_AUTHOR   — login (from github.event.pull_request.user.login)
#
# Optional:
#   SOP_DEBUG=1 — print per-API-call diagnostic lines (HTTP codes,
#                 raw response bodies). Default: off.
#
# Stale-status caveat: Gitea Actions does not always re-fire workflows
# on `labeled` / `pull_request_review:submitted` events. If the
# sop-tier-check status is stale (e.g. red after labels/approvals were
# added), push an empty commit to the PR branch to force a synchronize
# event, OR re-request reviews. Tracked: internal#46.

set -euo pipefail

debug() {
  if [ "${SOP_DEBUG:-}" = "1" ]; then
    echo "  [debug] $*" >&2
  fi
}

# Validate env
: "${GITEA_TOKEN:?GITEA_TOKEN required}"
: "${GITEA_HOST:?GITEA_HOST required}"
: "${REPO:?REPO required (owner/name)}"
: "${PR_NUMBER:?PR_NUMBER required}"
: "${PR_AUTHOR:?PR_AUTHOR required}"

OWNER="${REPO%%/*}"
NAME="${REPO##*/}"
API="https://${GITEA_HOST}/api/v1"
AUTH="Authorization: token ${GITEA_TOKEN}"
echo "::notice::tier-check start: repo=$OWNER/$NAME pr=$PR_NUMBER author=$PR_AUTHOR"

# Sanity: token resolves to a user
WHOAMI=$(curl -sS -H "$AUTH" "${API}/user" | jq -r '.login // ""')
if [ -z "$WHOAMI" ]; then
  echo "::error::GITEA_TOKEN cannot resolve a user via /api/v1/user — check the token scope and that the secret is wired correctly."
  exit 1
fi
echo "::notice::token resolves to user: $WHOAMI"

# 1. Read tier label
# FIX: iterate with `while read` instead of `for L in $LABELS` — Gitea
# label names may contain spaces; word-splitting would turn one label
# into several bogus tokens. Tier labels themselves are space-free, but
# the loop must be safe for the whole label set.
LABELS=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/issues/${PR_NUMBER}/labels" | jq -r '.[].name')
TIER=""
while IFS= read -r L; do
  case "$L" in
    tier:low|tier:medium|tier:high)
      if [ -n "$TIER" ]; then
        echo "::error::Multiple tier labels: $TIER + $L. Apply exactly one."
        exit 1
      fi
      TIER="$L"
      ;;
  esac
done <<< "$LABELS"
if [ -z "$TIER" ]; then
  echo "::error::PR has no tier:low|tier:medium|tier:high label. Apply one before merge."
  exit 1
fi
debug "tier=$TIER"

# 2. Tier → eligible teams
case "$TIER" in
  tier:low)    ELIGIBLE="engineers managers ceo" ;;
  tier:medium) ELIGIBLE="managers ceo" ;;
  tier:high)   ELIGIBLE="ceo" ;;
esac
debug "eligible_teams=$ELIGIBLE"

# Resolve team-name → team-id once. /orgs/{org}/teams/{slug}/... endpoints
# don't exist on Gitea 1.22; we have to use /teams/{id}.
ORG_TEAMS_FILE=$(mktemp)
trap 'rm -f "$ORG_TEAMS_FILE"' EXIT
HTTP_CODE=$(curl -sS -o "$ORG_TEAMS_FILE" -w '%{http_code}' -H "$AUTH" \
  "${API}/orgs/${OWNER}/teams")
debug "teams-list HTTP=$HTTP_CODE size=$(wc -c <"$ORG_TEAMS_FILE")"
if [ "${SOP_DEBUG:-}" = "1" ]; then
  echo "  [debug] teams-list body (first 300 chars):" >&2
  head -c 300 "$ORG_TEAMS_FILE" >&2; echo >&2
fi
if [ "$HTTP_CODE" != "200" ]; then
  echo "::error::GET /orgs/${OWNER}/teams returned HTTP $HTTP_CODE — token likely lacks read:org scope. Add a SOP_TIER_CHECK_TOKEN secret with read:organization scope at the org level."
  exit 1
fi
declare -A TEAM_ID
# Team names in ELIGIBLE are fixed space-free words, so word-splitting
# here is intentional and safe.
for T in $ELIGIBLE; do
  ID=$(jq -r --arg t "$T" '.[] | select(.name==$t) | .id' <"$ORG_TEAMS_FILE" | head -1)
  if [ -z "$ID" ] || [ "$ID" = "null" ]; then
    VISIBLE=$(jq -r '.[]?.name? // empty' <"$ORG_TEAMS_FILE" 2>/dev/null | tr '\n' ' ')
    echo "::error::Team \"$T\" not found in org $OWNER. Teams visible: $VISIBLE"
    exit 1
  fi
  TEAM_ID[$T]="$ID"
  debug "team-id: $T → $ID"
done

# 3. Read approving reviewers
REVIEWS=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/pulls/${PR_NUMBER}/reviews")
APPROVERS=$(echo "$REVIEWS" | jq -r '[.[] | select(.state=="APPROVED") | .user.login] | unique | .[]')
if [ -z "$APPROVERS" ]; then
  echo "::error::No approving reviews. Tier $TIER requires approval from {$ELIGIBLE} (non-author)."
  exit 1
fi
debug "approvers: $(echo "$APPROVERS" | tr '\n' ' ')"

# 4. For each approver: check non-author + team membership (by id).
# FIX: same whitespace-safe iteration as the label loop above (logins
# can't contain spaces today, but the loop shouldn't rely on that).
OK=""
while IFS= read -r U; do
  [ -z "$U" ] && continue
  if [ "$U" = "$PR_AUTHOR" ]; then
    debug "skip self-review by $U"
    continue
  fi
  for T in $ELIGIBLE; do
    ID="${TEAM_ID[$T]}"
    CODE=$(curl -sS -o /dev/null -w '%{http_code}' -H "$AUTH" \
      "${API}/teams/${ID}/members/${U}")
    debug "probe: $U in team $T (id=$ID) → HTTP $CODE"
    if [ "$CODE" = "200" ] || [ "$CODE" = "204" ]; then
      echo "::notice::approver $U is in team $T (eligible for $TIER)"
      OK="yes"
      break
    fi
  done
  [ -n "$OK" ] && break
done <<< "$APPROVERS"

if [ -z "$OK" ]; then
  echo "::error::Tier $TIER requires approval from a non-author member of {$ELIGIBLE}. Got approvers: $APPROVERS — none of them satisfied team membership. Set SOP_DEBUG=1 to see per-probe HTTP codes."
  exit 1
fi
echo "::notice::sop-tier-check passed: $TIER, approver in {$ELIGIBLE}"
|
||||
@ -1,58 +0,0 @@
|
||||
# audit-force-merge — emit `incident.force_merge` to runner stdout when
# a PR is merged with required-status-checks not green. Vector picks
# the JSON line off docker_logs and ships to Loki on
# molecule-canonical-obs (per `reference_obs_stack_phase1`); query as:
#
#   {host="operator"} |= "event_type" |= "incident.force_merge" | json
#
# Closes the §SOP-6 audit gap (the doc says force-merges write to
# `structure_events`, but that table lives in the platform DB, not
# Gitea-side; Loki is the practical equivalent for Gitea Actions
# events). When the credential / observability stack converges later,
# this can sync into structure_events from Loki via a backfill job —
# the structured JSON shape is forward-compatible.
#
# Logic in `.gitea/scripts/audit-force-merge.sh` per the same script-
# extract pattern as sop-tier-check.

name: audit-force-merge

# pull_request_target loads from the base branch — same security model
# as sop-tier-check. Without this, an attacker could rewrite the
# workflow on a PR and skip the audit emission for their own
# force-merge. See `.gitea/workflows/sop-tier-check.yml` for the full
# rationale.
on:
  pull_request_target:
    types: [closed]

jobs:
  audit:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
    # Skip when PR is closed without merge — saves a runner. The script
    # re-checks merged=true via the API anyway, so this is an
    # optimisation, not the safety gate.
    if: github.event.pull_request.merged == true
    steps:
      - name: Check out base branch (for the script)
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          # base.sha, never head.sha — keeps the pull_request_target
          # trust boundary (no untrusted PR code executes here).
          ref: ${{ github.event.pull_request.base.sha }}
      - name: Detect force-merge + emit audit event
        env:
          # Same org-level secret the sop-tier-check workflow uses.
          GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}
          GITEA_HOST: git.moleculesai.app
          REPO: ${{ github.repository }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          # Required-status-check contexts to evaluate at merge time.
          # Newline-separated. Mirror this against branch protection
          # (settings → branches → protected branch → required checks).
          # Declared here rather than fetched from /branch_protections
          # because that endpoint requires admin write — sop-tier-bot is
          # read-only by design (least-privilege).
          REQUIRED_CHECKS: |
            sop-tier-check / tier-check (pull_request)
            Secret scan / Scan diff for credential-shaped strings (pull_request)
        run: bash .gitea/scripts/audit-force-merge.sh
|
||||
@ -1,191 +0,0 @@
|
||||
name: Secret scan

# Hard CI gate. Refuses any PR / push whose diff additions contain a
# recognisable credential. Defense-in-depth for the #2090-class incident
# (2026-04-24): GitHub's hosted Copilot Coding Agent leaked a ghs_*
# installation token into tenant-proxy/package.json via `npm init`
# slurping the URL from a token-embedded origin remote. We can't fix
# upstream's clone hygiene, so we gate here.
#
# Same regex set as the runtime's bundled pre-commit hook
# (molecule-ai-workspace-runtime: molecule_runtime/scripts/pre-commit-checks.sh).
# Keep the two sides aligned when adding patterns.
#
# Ported from .github/workflows/secret-scan.yml so the gate actually
# fires on Gitea Actions. Differences from the GitHub version:
#   - drops `merge_group` event (Gitea has no merge queue)
#   - drops `workflow_call` (no cross-repo reusable invocation on Gitea)
#   - SELF path updated to .gitea/workflows/secret-scan.yml
# The job name + step name are identical to the GitHub workflow so the
# status-check context (`Secret scan / Scan diff for credential-shaped
# strings (pull_request)`) matches branch protection on molecule-core/main.

on:
  pull_request:
    types: [opened, synchronize, reopened]
  push:
    branches: [main, staging]

jobs:
  scan:
    name: Scan diff for credential-shaped strings
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          fetch-depth: 2 # need previous commit to diff against on push events

      # For pull_request events the diff base may be many commits behind
      # HEAD and absent from the shallow clone. Fetch it explicitly.
      - name: Fetch PR base SHA (pull_request events only)
        if: github.event_name == 'pull_request'
        run: git fetch --depth=1 origin ${{ github.event.pull_request.base.sha }}

      - name: Refuse if credential-shaped strings appear in diff additions
        env:
          # Plumb event-specific SHAs through env so the script doesn't
          # need conditional `${{ ... }}` interpolation per event type.
          # github.event.before/after only exist on push events;
          # pull_request has pull_request.base.sha / pull_request.head.sha.
          PR_BASE_SHA: ${{ github.event.pull_request.base.sha }}
          PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
          PUSH_BEFORE: ${{ github.event.before }}
          PUSH_AFTER: ${{ github.event.after }}
        run: |
          # Pattern set covers GitHub family (the actual #2090 vector),
          # Anthropic / OpenAI / Slack / AWS. Anchored on prefixes with low
          # false-positive rates against agent-generated content. Mirror of
          # molecule-ai-workspace-runtime/molecule_runtime/scripts/pre-commit-checks.sh
          # — keep aligned.
          SECRET_PATTERNS=(
            'ghp_[A-Za-z0-9]{36,}'          # GitHub PAT (classic)
            'ghs_[A-Za-z0-9]{36,}'          # GitHub App installation token
            'gho_[A-Za-z0-9]{36,}'          # GitHub OAuth user-to-server
            'ghu_[A-Za-z0-9]{36,}'          # GitHub OAuth user
            'ghr_[A-Za-z0-9]{36,}'          # GitHub OAuth refresh
            'github_pat_[A-Za-z0-9_]{82,}'  # GitHub fine-grained PAT
            'sk-ant-[A-Za-z0-9_-]{40,}'     # Anthropic API key
            'sk-proj-[A-Za-z0-9_-]{40,}'    # OpenAI project key
            'sk-svcacct-[A-Za-z0-9_-]{40,}' # OpenAI service-account key
            'sk-cp-[A-Za-z0-9_-]{60,}'      # MiniMax API key (F1088 vector — caught only after the fact)
            'xox[baprs]-[A-Za-z0-9-]{20,}'  # Slack tokens
            'AKIA[0-9A-Z]{16}'              # AWS access key ID
            'ASIA[0-9A-Z]{16}'              # AWS STS temp access key ID
          )

          # Determine the diff base. Each event type stores its SHAs in
          # a different place — see the env block above.
          case "${{ github.event_name }}" in
            pull_request)
              BASE="$PR_BASE_SHA"
              HEAD="$PR_HEAD_SHA"
              ;;
            *)
              BASE="$PUSH_BEFORE"
              HEAD="$PUSH_AFTER"
              ;;
          esac

          # On push events with shallow clones, BASE may be present in
          # the event payload but absent from the local object DB
          # (fetch-depth=2 doesn't always reach the previous commit
          # across true merges). Try fetching it on demand. If the
          # fetch fails — e.g. the SHA was force-overwritten — we fall
          # through to the empty-BASE branch below, which scans the
          # entire tree as if every file were new. Correct, just slow.
          # (An all-zero BASE is git's "no previous ref" sentinel.)
          if [ -n "$BASE" ] && ! echo "$BASE" | grep -qE '^0+$'; then
            if ! git cat-file -e "$BASE" 2>/dev/null; then
              git fetch --depth=1 origin "$BASE" 2>/dev/null || true
            fi
          fi

          # Files added or modified in this change.
          if [ -z "$BASE" ] || echo "$BASE" | grep -qE '^0+$' || ! git cat-file -e "$BASE" 2>/dev/null; then
            # New branch / no previous SHA / BASE unreachable — check the
            # entire tree as added content. Slower, but correct on first
            # push.
            CHANGED=$(git ls-tree -r --name-only HEAD)
            DIFF_RANGE=""
          else
            CHANGED=$(git diff --name-only --diff-filter=AM "$BASE" "$HEAD")
            DIFF_RANGE="$BASE $HEAD"
          fi

          if [ -z "$CHANGED" ]; then
            echo "No changed files to inspect."
            exit 0
          fi

          # Self-exclude: this workflow file legitimately contains the
          # pattern strings as regex literals. Without an exclude it would
          # block its own merge. Both the .github/ original and this
          # .gitea/ port are excluded so a sync between them stays clean.
          SELF_GITHUB=".github/workflows/secret-scan.yml"
          SELF_GITEA=".gitea/workflows/secret-scan.yml"

          OFFENDING=""
          # `while IFS= read -r` (not `for f in $CHANGED`) so filenames
          # containing whitespace don't word-split silently — a path
          # with a space would otherwise produce two iterations on
          # tokens that aren't real filenames, breaking the
          # self-exclude + diff lookup.
          while IFS= read -r f; do
            [ -z "$f" ] && continue
            [ "$f" = "$SELF_GITHUB" ] && continue
            [ "$f" = "$SELF_GITEA" ] && continue
            if [ -n "$DIFF_RANGE" ]; then
              # Added lines only: `^\+[^+]` keeps additions, drops the
              # `+++` file header.
              ADDED=$(git diff --no-color --unified=0 "$BASE" "$HEAD" -- "$f" 2>/dev/null | grep -E '^\+[^+]' || true)
            else
              # No diff range (new branch first push) — scan the full file
              # contents as if every line were new.
              ADDED=$(cat "$f" 2>/dev/null || true)
            fi
            [ -z "$ADDED" ] && continue
            for pattern in "${SECRET_PATTERNS[@]}"; do
              if echo "$ADDED" | grep -qE "$pattern"; then
                OFFENDING="${OFFENDING}${f} (matched: ${pattern})\n"
                break
              fi
            done
          done <<< "$CHANGED"

          if [ -n "$OFFENDING" ]; then
            echo "::error::Credential-shaped strings detected in diff additions:"
            # `printf '%b' "$OFFENDING"` interprets backslash escapes
            # (the literal `\n` we appended above becomes a newline)
            # WITHOUT treating OFFENDING as a format string. Plain
            # `printf "$OFFENDING"` is a format-string sink: a filename
            # containing `%` would be interpreted as a conversion
            # specifier, corrupting the error message (or printing
            # `%(missing)` artifacts).
            printf '%b' "$OFFENDING"
            echo ""
            echo "The actual matched values are NOT echoed here, deliberately —"
            echo "round-tripping a leaked credential into CI logs widens the blast"
            echo "radius (logs are searchable + retained)."
            echo ""
            echo "Recovery:"
            echo "  1. Remove the secret from the file. Replace with an env var"
            echo "     reference (e.g. \${{ secrets.GITHUB_TOKEN }} in workflows,"
            echo "     process.env.X in code)."
            echo "  2. If the credential was already pushed (this PR's commit"
            echo "     history reaches a public ref), treat it as compromised —"
            echo "     ROTATE it immediately, do not just remove it. The token"
            echo "     remains valid in git history forever and may be in any"
            echo "     log/cache that consumed this branch."
            echo "  3. Force-push the cleaned commit (or stack a revert) and"
            echo "     re-run CI."
            echo ""
            echo "If the match is a false positive (test fixture, docs example,"
            echo "or this workflow's own regex literals): use a clearly-fake"
            echo "placeholder like ghs_EXAMPLE_DO_NOT_USE that doesn't satisfy"
            echo "the length suffix, OR add the file path to the SELF exclude"
            echo "list in this workflow with a short reason."
            echo ""
            echo "Mirror of the regex set lives in the runtime's bundled"
            echo "pre-commit hook (molecule-ai-workspace-runtime:"
            echo "molecule_runtime/scripts/pre-commit-checks.sh) — keep aligned."
            exit 1
          fi

          echo "✓ No credential-shaped strings in this change."
|
||||
@ -1,81 +0,0 @@
|
||||
# sop-tier-check — canonical Gitea Actions workflow for §SOP-6 enforcement.
#
# Logic lives in `.gitea/scripts/sop-tier-check.sh` (extracted 2026-05-09
# from the previous inline-bash version). The script is the single source
# of truth; this workflow file just sets env + invokes it.
#
# Copy BOTH files (`.gitea/workflows/sop-tier-check.yml` +
# `.gitea/scripts/sop-tier-check.sh`) into any repo that wants the
# §SOP-6 PR gate enforced. Pair with branch protection on the protected
# branch:
#   required_status_checks: ["sop-tier-check / tier-check (pull_request)"]
#   required_approving_reviews: 1
#   approving_review_teams: ["ceo", "managers", "engineers"]
#
# Tier → eligible-team mapping (mirror of dev-sop §SOP-6):
#   tier:low    → engineers, managers, ceo
#   tier:medium → managers, ceo
#   tier:high   → ceo
#
# Force-merge: Owners-team override remains available out-of-band via
# the Gitea merge API; force-merge writes `incident.force_merge` to
# `structure_events` per §Persistent structured logging gate (Phase 3).
#
# Set `SOP_DEBUG: '1'` in the env block to enable per-API-call diagnostic
# lines — useful when diagnosing token-scope or team-id-resolution
# issues. Default off.

name: sop-tier-check

# SECURITY: triggers MUST use `pull_request_target`, not `pull_request`.
# `pull_request_target` loads the workflow definition from the BASE
# branch (i.e. `main`), not the PR's HEAD. With `pull_request`, anyone
# with write access to a feature branch could rewrite this file in
# their PR to dump SOP_TIER_CHECK_TOKEN (org-read scope) to logs and
# exfiltrate it. Verified 2026-05-09 against Gitea 1.22.6 —
# `pull_request_target` (added in Gitea 1.21 via go-gitea/gitea#25229)
# is the documented mitigation.
#
# This workflow does NOT call `actions/checkout` of PR HEAD code, so no
# untrusted code is ever executed in the runner — we only HTTP-call the
# Gitea API. If a future change adds a checkout step, it MUST pin to
# `${{ github.event.pull_request.base.sha }}` (NOT `head.sha`) to keep
# the trust boundary.
on:
  pull_request_target:
    types: [opened, edited, synchronize, reopened, labeled, unlabeled]
  pull_request_review:
    types: [submitted, dismissed, edited]

jobs:
  tier-check:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
    steps:
      - name: Check out base branch (for the script)
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          # Pin to base.sha — pull_request_target's protection only
          # works if we never check out PR HEAD. Same SHA the workflow
          # itself was loaded from.
          ref: ${{ github.event.pull_request.base.sha }}
      - name: Verify tier label + reviewer team membership
        env:
          # SOP_TIER_CHECK_TOKEN is the org-level secret for the
          # sop-tier-bot PAT (read:organization,read:user,read:issue,
          # read:repository). Stored at the org level
          # (/api/v1/orgs/molecule-ai/actions/secrets) so per-repo
          # configuration is unnecessary — every repo in the org
          # picks it up automatically.
          # Falls back to GITHUB_TOKEN with a clear error if missing.
          GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}
          GITEA_HOST: git.moleculesai.app
          REPO: ${{ github.repository }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
          # Set to '1' for diagnostic per-API-call output. Off by default
          # so production logs aren't noisy.
          SOP_DEBUG: '0'
        run: bash .gitea/scripts/sop-tier-check.sh
|
||||
2
.github/scripts/lint_secret_pattern_drift.py
vendored
2
.github/scripts/lint_secret_pattern_drift.py
vendored
@ -37,7 +37,7 @@ CANONICAL_FILE = Path(".github/workflows/secret-scan.yml")
|
||||
# Each consumer is (local mirror path, Gitea raw URL, GitHub raw URL).
# The annotation is corrected to a 3-tuple — the entry below visibly
# carries three strings, but the previous `tuple[str, str]` annotation
# claimed two. NOTE(review): confirm downstream unpacking sites expect
# three elements.
CONSUMERS: list[tuple[str, str, str]] = [
    (
        "molecule-ai-workspace-runtime/molecule_runtime/scripts/pre-commit-checks.sh",
        "https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-runtime/raw/branch/main/molecule_runtime/scripts/pre-commit-checks.sh",
        "https://raw.githubusercontent.com/Molecule-AI/molecule-ai-workspace-runtime/main/molecule_runtime/scripts/pre-commit-checks.sh",
    ),
]
|
||||
|
||||
|
||||
429
.github/workflows/auto-promote-on-e2e.yml
vendored
Normal file
429
.github/workflows/auto-promote-on-e2e.yml
vendored
Normal file
@ -0,0 +1,429 @@
|
||||
name: Auto-promote :latest after main image build
|
||||
|
||||
# Retags `ghcr.io/molecule-ai/{platform,platform-tenant}:staging-<sha>`
|
||||
# → `:latest` after either the image build or E2E completes on a `main`
|
||||
# push, gated on E2E Staging SaaS not being red for that SHA.
|
||||
#
|
||||
# Why two triggers:
|
||||
#
|
||||
# `publish-workspace-server-image` and `e2e-staging-saas` are both
|
||||
# paths-filtered, but with DIFFERENT path sets:
|
||||
#
|
||||
# publish-workspace-server-image:
|
||||
# workspace-server/**, canvas/**, manifest.json
|
||||
#
|
||||
# e2e-staging-saas (full lifecycle):
|
||||
# workspace-server/internal/handlers/{registry,workspace_provision,
|
||||
# a2a_proxy}.go, workspace-server/internal/middleware/**,
|
||||
# workspace-server/internal/provisioner/**, tests/e2e/test_staging_full_saas.sh
|
||||
#
|
||||
# The E2E set is a strict SUBSET of the publish set. So:
|
||||
# - canvas/** changes → publish fires, E2E does not
|
||||
# - workspace-server/cmd/** changes → publish fires, E2E does not
|
||||
# - workspace-server/internal/sweep/** → publish fires, E2E does not
|
||||
#
|
||||
# The previous version triggered ONLY on E2E completion, which meant
|
||||
# non-E2E-path changes (canvas, cmd, sweep, etc.) rebuilt the image
|
||||
# but never advanced `:latest`. Result: as of 2026-04-28 this workflow
|
||||
# had run zero times since merge despite eight main pushes — `:latest`
|
||||
# was ~7 hours / 9 PRs behind main with no human realising. See
|
||||
# `molecule-core` Slack discussion 2026-04-28.
|
||||
#
|
||||
# Adding `publish-workspace-server-image` as a second trigger closes
|
||||
# the gap: any image rebuild on main eligibly advances `:latest`.
|
||||
#
|
||||
# Why E2E remains a kill-switch (not the trigger):
|
||||
#
|
||||
# When E2E DID run for this SHA and ended red, we abort — `:latest`
|
||||
# stays on the prior known-good digest. When E2E didn't run (paths
|
||||
# filtered out), we proceed: pre-merge gates already validated this
|
||||
# SHA on staging via auto-promote-staging requiring CI + E2E Canvas +
|
||||
# E2E API + CodeQL all green. Image content for non-E2E-paths
|
||||
# (canvas, cmd, sweep) is exercised by those staging gates.
|
||||
#
|
||||
# Why `main` only:
|
||||
#
|
||||
# `:latest` is what prod tenants pull. We only want SHAs that have
|
||||
# reached main (via auto-promote-staging) to advance `:latest`.
|
||||
# Triggering on staging would let a staging-only revert advance
|
||||
# `:latest` to a SHA that never reaches main, breaking the "production
|
||||
# runs what's on main" invariant.
|
||||
#
|
||||
# Idempotency:
|
||||
#
|
||||
# When a SHA touches paths that match BOTH publish and E2E, both
|
||||
# workflows fire and complete. Both trigger this workflow on
|
||||
# completion → two runs race. Both retag `:staging-<sha>` →
|
||||
# `:latest`. crane tag is idempotent (re-tagging the same digest is a
|
||||
# no-op), so the second run is harmless. concurrency group serializes
|
||||
# them anyway.
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows:
|
||||
- 'E2E Staging SaaS (full lifecycle)'
|
||||
- 'publish-workspace-server-image'
|
||||
types: [completed]
|
||||
branches: [main]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
sha:
|
||||
description: 'Short sha to promote (override; defaults to upstream workflow_run head_sha)'
|
||||
required: false
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
concurrency:
|
||||
# Serialize promotes per-SHA so the publish+E2E both-fired race lands
|
||||
# cleanly. Different SHAs can promote in parallel.
|
||||
group: auto-promote-latest-${{ github.event.workflow_run.head_sha || github.event.inputs.sha || github.sha }}
|
||||
cancel-in-progress: false
|
||||
|
||||
env:
|
||||
IMAGE_NAME: ghcr.io/molecule-ai/platform
|
||||
TENANT_IMAGE_NAME: ghcr.io/molecule-ai/platform-tenant
|
||||
|
||||
jobs:
|
||||
promote:
|
||||
# Proceed if upstream succeeded OR manual dispatch. Upstream-failure
|
||||
# paths are filtered here; the E2E-was-red kill-switch lives in the
|
||||
# gate-check step below (covers the case where upstream is publish
|
||||
# success but E2E for the same SHA failed).
|
||||
if: |
|
||||
github.event_name == 'workflow_dispatch' ||
|
||||
(github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'success')
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Compute short sha
|
||||
id: sha
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if [ -n "${{ github.event.inputs.sha }}" ]; then
|
||||
FULL="${{ github.event.inputs.sha }}"
|
||||
else
|
||||
FULL="${{ github.event.workflow_run.head_sha }}"
|
||||
fi
|
||||
echo "short=${FULL:0:7}" >> "$GITHUB_OUTPUT"
|
||||
echo "full=${FULL}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Gate — E2E Staging SaaS state for this SHA
|
||||
# When upstream IS E2E success, we know it's green (filtered by
|
||||
# the job-level `if` already). When upstream is publish, look up
|
||||
# E2E state for the same SHA. Four buckets:
|
||||
#
|
||||
# - completed/success: E2E confirmed safe → proceed
|
||||
# - completed/failure|cancelled|timed_out: E2E found a
|
||||
# regression → ABORT (exit 1), `:latest` stays put
|
||||
# - in_progress|queued|requested: E2E is RACING with publish
|
||||
# for a runtime-touching SHA. publish typically completes
|
||||
# ~5-10min before E2E (~10-15min). If we promote on the
|
||||
# publish signal here, a later E2E failure can't roll back
|
||||
# `:latest` — it'd already be wrongly advanced. So we DEFER:
|
||||
# skip subsequent steps (proceed=false) and let E2E's own
|
||||
# completion event re-fire this workflow, which then takes
|
||||
# the upstream-is-E2E path. exit 0 so the run shows as
|
||||
# success rather than a noisy fake-failure.
|
||||
# - none/none: E2E was paths-filtered out for this SHA (the
|
||||
# change touched canvas/cmd/sweep/etc. — paths covered by
|
||||
# publish but not by E2E). pre-merge gates on staging
|
||||
# already validated this SHA → proceed.
|
||||
#
|
||||
# Manual dispatch skips this check — operator override.
|
||||
id: gate
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
REPO: ${{ github.repository }}
|
||||
SHA: ${{ steps.sha.outputs.full }}
|
||||
UPSTREAM_NAME: ${{ github.event.workflow_run.name }}
|
||||
EVENT_NAME: ${{ github.event_name }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
|
||||
echo "proceed=true" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::Manual dispatch — skipping E2E gate (operator override)"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$UPSTREAM_NAME" = "E2E Staging SaaS (full lifecycle)" ]; then
|
||||
echo "proceed=true" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::Upstream is E2E itself (success per job-level if) — gate trivially satisfied"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Upstream is publish-workspace-server-image. Check E2E state.
|
||||
# The jq filter must defend against TWO empty cases that gh
|
||||
# CLI emits indistinguishably:
|
||||
# 1. gh exits non-zero (network blip, auth issue) → handled
|
||||
# by the `|| echo "none/none"` fallback below.
|
||||
# 2. gh exits zero but returns `[]` (no E2E run on this
|
||||
# main SHA — the common case for canvas-only / cmd-only
|
||||
# / sweep-only changes whose paths don't trigger E2E).
|
||||
# Without `(.[0] // {})`, jq sees `null` and emits
|
||||
# "null/none" — which the case statement below has no
|
||||
# branch for, so it falls into *) → exit 1.
|
||||
# Surfaced 2026-04-30 the first time the App-token chain
|
||||
# (#2389) actually fired auto-promote-on-e2e from a publish
|
||||
# upstream — every prior run was E2E-upstream which
|
||||
# short-circuits before this gate.
|
||||
RESULT=$(gh run list \
|
||||
--repo "$REPO" \
|
||||
--workflow e2e-staging-saas.yml \
|
||||
--branch main \
|
||||
--commit "$SHA" \
|
||||
--limit 1 \
|
||||
--json status,conclusion \
|
||||
--jq '(.[0] // {}) | "\(.status // "none")/\(.conclusion // "none")"' \
|
||||
2>/dev/null || echo "none/none")
|
||||
|
||||
echo "E2E Staging SaaS for ${SHA:0:7}: $RESULT"
|
||||
|
||||
case "$RESULT" in
|
||||
completed/success)
|
||||
echo "proceed=true" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::E2E green for this SHA — proceeding with promote"
|
||||
;;
|
||||
completed/failure|completed/timed_out)
|
||||
echo "proceed=false" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ❌ Auto-promote aborted — E2E Staging SaaS failed"
|
||||
echo
|
||||
echo "E2E Staging SaaS for \`${SHA:0:7}\`: \`$RESULT\`"
|
||||
echo "\`:latest\` stays on the prior known-good digest."
|
||||
echo
|
||||
echo "If the failure was a flake, manually dispatch this workflow with the same sha to override."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
exit 1
|
||||
;;
|
||||
completed/cancelled)
|
||||
# cancelled ≠ failure. Per-SHA concurrency cancels older E2E
|
||||
# runs when a newer push lands (memory:
|
||||
# feedback_concurrency_group_per_sha) — the newer SHA will
|
||||
# have its own E2E + promote chain. Treat the same as
|
||||
# in_progress: defer without aborting, let the next E2E run
|
||||
# promote when it lands.
|
||||
#
|
||||
# Caught 2026-05-05 02:03 on sha 31f9a5e — auto-promote
|
||||
# blocked the whole chain because this case fell through to
|
||||
# exit 1 instead of clean defer.
|
||||
echo "proceed=false" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ⏭ Auto-promote deferred — E2E Staging SaaS was cancelled"
|
||||
echo
|
||||
echo "E2E Staging SaaS for \`${SHA:0:7}\`: \`$RESULT\`"
|
||||
echo "Likely per-SHA concurrency (newer push superseded this E2E run)."
|
||||
echo "The newer SHA's E2E will fire its own promote when it lands."
|
||||
echo "If you need this specific SHA promoted, manually dispatch."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
;;
|
||||
in_progress/*|queued/*|requested/*|waiting/*|pending/*)
|
||||
echo "proceed=false" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ⏳ Auto-promote deferred — E2E Staging SaaS still running"
|
||||
echo
|
||||
echo "Publish completed before E2E for \`${SHA:0:7}\` (state: \`$RESULT\`)."
|
||||
echo "Skipping retag here — E2E's own completion event will re-fire this workflow."
|
||||
echo "If E2E ends green, that run promotes \`:latest\`. If red, it aborts."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
;;
|
||||
none/none)
|
||||
echo "proceed=true" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::E2E paths-filtered out for this SHA — pre-merge staging gates carry"
|
||||
;;
|
||||
*)
|
||||
echo "proceed=false" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ❓ Auto-promote aborted — unexpected E2E state"
|
||||
echo
|
||||
echo "E2E Staging SaaS for \`${SHA:0:7}\`: \`$RESULT\` (unhandled)"
|
||||
echo "Manual investigation needed; re-dispatch with the same sha once resolved."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
- if: steps.gate.outputs.proceed == 'true'
|
||||
uses: imjasonh/setup-crane@6da1ae018866400525525ce74ff892880c099987 # v0.5
|
||||
|
||||
- name: GHCR login
|
||||
if: steps.gate.outputs.proceed == 'true'
|
||||
run: |
|
||||
echo "${{ secrets.GITHUB_TOKEN }}" | \
|
||||
crane auth login ghcr.io -u "${{ github.actor }}" --password-stdin
|
||||
|
||||
- name: Verify :staging-<sha> exists for both images
|
||||
# Better to fail fast with a clear message than to half-tag
|
||||
# (platform retagged but platform-tenant missing → tenants pull
|
||||
# a stale image).
|
||||
if: steps.gate.outputs.proceed == 'true'
|
||||
run: |
|
||||
set -euo pipefail
|
||||
for img in "${IMAGE_NAME}" "${TENANT_IMAGE_NAME}"; do
|
||||
tag="${img}:staging-${{ steps.sha.outputs.short }}"
|
||||
if ! crane manifest "$tag" >/dev/null 2>&1; then
|
||||
echo "::error::Missing tag: $tag"
|
||||
echo "::error::publish-workspace-server-image must complete on this SHA before auto-promote can retag :latest."
|
||||
exit 1
|
||||
fi
|
||||
echo " ok: $tag exists"
|
||||
done
|
||||
|
||||
- name: Ancestry check — refuse to promote :latest backwards
|
||||
# #2244: workflow_run completions arrive in arbitrary order. If
|
||||
# SHA-A and SHA-B both reach main within ~10 min and SHA-B's E2E
|
||||
# completes before SHA-A's, this workflow can fire for SHA-A
|
||||
# AFTER it already promoted SHA-B → :latest goes backwards. The
|
||||
# orphan-reconciler "next run corrects it" doesn't apply: there's
|
||||
# no auto-corrective re-promote, :latest stays wrong until the
|
||||
# next main push lands.
|
||||
#
|
||||
# Detection: read current :latest's `org.opencontainers.image.revision`
|
||||
# label (set by publish-workspace-server-image.yml at build time)
|
||||
# and ask the GitHub compare API whether the candidate SHA is
|
||||
# ahead-of / identical-to / behind / diverged-from current.
|
||||
# Hard-fail on `behind` and `diverged` per the approved design —
|
||||
# silent-bypass is the class we're moving away from. Workflow
|
||||
# goes red, oncall sees it, operator decides how to recover
|
||||
# (manual dispatch with the right SHA, force-promote, etc.).
|
||||
#
|
||||
# Manual dispatch skips this check — operator override semantics
|
||||
# match the gate-check step above.
|
||||
#
|
||||
# Backward-compat: when current :latest carries no revision
|
||||
# label (legacy image pre-publish-with-label), skip-with-warning.
|
||||
# All :latest images on main are post-label as of 2026-04-29, so
|
||||
# this branch will be dead within 90 days; remove then.
|
||||
if: steps.gate.outputs.proceed == 'true' && github.event_name != 'workflow_dispatch'
|
||||
id: ancestry
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
REPO: ${{ github.repository }}
|
||||
TARGET_SHA: ${{ steps.sha.outputs.full }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
# Read the current :latest config and pull the revision label.
|
||||
# `crane config` returns the OCI image config blob (not the manifest);
|
||||
# labels live under `.config.Labels`. `// empty` makes jq return ""
|
||||
# rather than the literal "null" so the test below works.
|
||||
CURRENT_REVISION=$(crane config "${IMAGE_NAME}:latest" 2>/dev/null \
|
||||
| jq -r '.config.Labels["org.opencontainers.image.revision"] // empty' \
|
||||
|| true)
|
||||
|
||||
if [ -z "$CURRENT_REVISION" ]; then
|
||||
echo "decision=skip-no-label" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ⚠ Ancestry check skipped — current :latest has no revision label"
|
||||
echo
|
||||
echo "Likely a legacy image built before \`org.opencontainers.image.revision\` was set."
|
||||
echo "Falling through to retag. After all \`:latest\` images are post-label (TODO 90 days), this branch is dead and should be removed."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
echo "::warning::Current :latest carries no revision label — skipping ancestry check (legacy image)"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$CURRENT_REVISION" = "$TARGET_SHA" ]; then
|
||||
echo "decision=identical" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice:::latest already at ${TARGET_SHA:0:7} — retag will be a no-op"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Ask GitHub which side of the merge graph TARGET_SHA sits on
|
||||
# relative to CURRENT_REVISION. Returns one of: ahead | identical
|
||||
# | behind | diverged. Network or auth errors collapse to "error"
|
||||
# via the explicit fallback so the case below always matches.
|
||||
STATUS=$(gh api \
|
||||
"repos/${REPO}/compare/${CURRENT_REVISION}...${TARGET_SHA}" \
|
||||
--jq '.status' 2>/dev/null || echo "error")
|
||||
|
||||
echo "ancestry compare ${CURRENT_REVISION:0:7} → ${TARGET_SHA:0:7}: $STATUS"
|
||||
|
||||
case "$STATUS" in
|
||||
ahead)
|
||||
echo "decision=ahead" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::Target ${TARGET_SHA:0:7} is ahead of current :latest (${CURRENT_REVISION:0:7}) — proceeding with retag"
|
||||
;;
|
||||
identical)
|
||||
echo "decision=identical" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::Target identical to :latest — retag will be a no-op"
|
||||
;;
|
||||
behind)
|
||||
echo "decision=behind" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ❌ Auto-promote refused — target is BEHIND current :latest"
|
||||
echo
|
||||
echo "| Field | Value |"
|
||||
echo "|---|---|"
|
||||
echo "| Target SHA | \`$TARGET_SHA\` |"
|
||||
echo "| Current :latest revision | \`$CURRENT_REVISION\` |"
|
||||
echo "| GitHub compare status | \`behind\` |"
|
||||
echo
|
||||
echo "This guard catches the workflow_run-completion-order race (#2244):"
|
||||
echo "two rapid main pushes whose E2Es complete out-of-order can otherwise"
|
||||
echo "promote \`:latest\` backwards. \`:latest\` stays on \`${CURRENT_REVISION:0:7}\`."
|
||||
echo
|
||||
echo "**Recovery:** if this is a legitimate revert that should land on \`:latest\`,"
|
||||
echo "manually dispatch this workflow with the target sha as input — the manual-dispatch"
|
||||
echo "path skips the ancestry check (operator override)."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
exit 1
|
||||
;;
|
||||
diverged)
|
||||
echo "decision=diverged" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ❓ Auto-promote refused — history diverged"
|
||||
echo
|
||||
echo "| Field | Value |"
|
||||
echo "|---|---|"
|
||||
echo "| Target SHA | \`$TARGET_SHA\` |"
|
||||
echo "| Current :latest revision | \`$CURRENT_REVISION\` |"
|
||||
echo "| GitHub compare status | \`diverged\` |"
|
||||
echo
|
||||
echo "Likely cause: force-push rewrote main's history, leaving the previous"
|
||||
echo "\`:latest\` revision orphaned. Needs human review before \`:latest\` advances."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
exit 1
|
||||
;;
|
||||
error|*)
|
||||
echo "decision=error" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ❌ Auto-promote aborted — ancestry-check API error"
|
||||
echo
|
||||
echo "\`gh api repos/${REPO}/compare/${CURRENT_REVISION}...${TARGET_SHA}\` returned unexpected status: \`$STATUS\`"
|
||||
echo
|
||||
echo "Manual dispatch with the target sha bypasses this check."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
- name: Retag platform :staging-<sha> → :latest
|
||||
if: steps.gate.outputs.proceed == 'true'
|
||||
run: |
|
||||
crane tag "${IMAGE_NAME}:staging-${{ steps.sha.outputs.short }}" latest
|
||||
|
||||
- name: Retag tenant :staging-<sha> → :latest
|
||||
if: steps.gate.outputs.proceed == 'true'
|
||||
run: |
|
||||
crane tag "${TENANT_IMAGE_NAME}:staging-${{ steps.sha.outputs.short }}" latest
|
||||
|
||||
- name: Summary
|
||||
if: steps.gate.outputs.proceed == 'true'
|
||||
run: |
|
||||
{
|
||||
echo "## :latest promoted to ${{ steps.sha.outputs.short }}"
|
||||
echo
|
||||
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||
echo "- Trigger: manual dispatch"
|
||||
else
|
||||
echo "- Upstream: \`${{ github.event.workflow_run.name }}\` ([run](${{ github.event.workflow_run.html_url }}))"
|
||||
fi
|
||||
echo "- platform:staging-${{ steps.sha.outputs.short }} → :latest"
|
||||
echo "- platform-tenant:staging-${{ steps.sha.outputs.short }} → :latest"
|
||||
echo
|
||||
echo "Tenant fleet auto-pulls within 5 min via IMAGE_AUTO_REFRESH=true."
|
||||
echo "Force immediate fanout: dispatch redeploy-tenants-on-main.yml."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
492
.github/workflows/auto-promote-staging.yml
vendored
Normal file
492
.github/workflows/auto-promote-staging.yml
vendored
Normal file
@ -0,0 +1,492 @@
|
||||
name: Auto-promote staging → main
|
||||
|
||||
# Fires after any of the staging-branch quality gates complete. When ALL
|
||||
# required gates are green on the same staging SHA, opens (or re-uses)
|
||||
# a PR `staging → main` and schedules Gitea auto-merge so the PR lands
|
||||
# automatically once approval + status checks are satisfied.
|
||||
#
|
||||
# ============================================================
|
||||
# What this workflow does
|
||||
# ============================================================
|
||||
#
|
||||
# 1. On a workflow_run completion event for one of the staging gate
|
||||
# workflows (CI, E2E Staging Canvas, E2E API Smoke, CodeQL),
|
||||
# checks if the combined status on the staging head SHA is green.
|
||||
# 2. If green, opens (or re-uses) a PR `head: staging → base: main`
|
||||
# via Gitea REST `POST /api/v1/repos/.../pulls`.
|
||||
# 3. Schedules auto-merge via `POST /api/v1/repos/.../pulls/{index}/merge`
|
||||
# with `merge_when_checks_succeed: true`. Gitea waits for the
|
||||
# approval requirement on `main` (`required_approvals: 1`) and
|
||||
# the status-check gates, then merges.
|
||||
# 4. The merge commit lands on `main` and fires
|
||||
# `publish-workspace-server-image.yml` naturally via its
|
||||
# `on: push: branches: [main]` trigger — no explicit dispatch
|
||||
# needed (see "Why no workflow_dispatch tail" below).
|
||||
#
|
||||
# `auto-sync-main-to-staging.yml` is the reverse-direction
|
||||
# counterpart (main → staging, fast-forward push). Together they
|
||||
# keep the staging-superset-of-main invariant tight.
|
||||
#
|
||||
# ============================================================
|
||||
# Why Gitea REST (and not `gh pr create`)
|
||||
# ============================================================
|
||||
#
|
||||
# Pre-2026-05-06 this workflow used `gh pr create`, `gh pr merge --auto`,
|
||||
# `gh run list`, and `gh workflow run` against GitHub. After the
|
||||
# GitHub→Gitea cutover those calls fail because:
|
||||
#
|
||||
# - `gh pr create / merge / view / list` route to GitHub GraphQL
|
||||
# (`/api/graphql`). Gitea does not expose a GraphQL endpoint;
|
||||
# every call returns `HTTP 405 Method Not Allowed` — same root
|
||||
# cause as #65 (auto-sync) which PR #66 fixed by dropping `gh`
|
||||
# entirely.
|
||||
# - `gh run list --workflow=...` GitHub-shape; Gitea has the
|
||||
# simpler `GET /repos/.../commits/{ref}/status` combined-status
|
||||
# endpoint instead.
|
||||
# - `gh workflow run X.yml` calls `POST /repos/.../actions/workflows/{id}/dispatches`,
|
||||
# which does NOT exist on Gitea 1.22.6 (verified via swagger.v1.json).
|
||||
#
|
||||
# So this workflow uses direct `curl` calls to Gitea REST. No `gh`
|
||||
# CLI dependency, no GraphQL, no missing-endpoint footgun.
|
||||
#
|
||||
# ============================================================
|
||||
# Why no workflow_dispatch tail (was load-bearing on GitHub, dead on Gitea)
|
||||
# ============================================================
|
||||
#
|
||||
# The GitHub-era version had a 60-line polling step that waited for
|
||||
# the promote PR to merge, then explicitly dispatched
|
||||
# `publish-workspace-server-image.yml` on `--ref main`. That step
|
||||
# existed because GitHub's GITHUB_TOKEN-initiated merges suppress
|
||||
# downstream `on: push` workflows (the documented "no recursion" rule
|
||||
# — https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow).
|
||||
# The explicit dispatch was the workaround.
|
||||
#
|
||||
# Gitea Actions does NOT have this no-recursion rule. PR #66's auto-
|
||||
# sync merge to main fired `auto-promote-staging` on the next push
|
||||
# trigger naturally. So the cascade fires on the natural push event;
|
||||
# the explicit dispatch is dead code. (And even if we wanted to
|
||||
# preserve it, Gitea has no `workflow_dispatch` REST endpoint.)
|
||||
#
|
||||
# Removed in this rewrite. If we ever observe the cascade misfire,
|
||||
# operator can push an empty commit to `main` to wake it.
|
||||
#
|
||||
# ============================================================
|
||||
# Why open a PR (and not direct push)
|
||||
# ============================================================
|
||||
#
|
||||
# `main` branch protection has `enable_push: false` with NO
|
||||
# `push_whitelist_usernames`. Direct push is impossible for any
|
||||
# persona, including admins. PR-mediated merge is the only path,
|
||||
# which is intentional: prod state mutations (and staging→main IS a
|
||||
# prod mutation, since the next deploy fans out to tenants) require
|
||||
# Hongming's approval per `feedback_prod_apply_needs_hongming_chat_go`.
|
||||
#
|
||||
# The auto-merge schedule preserves this gate: `merge_when_checks_succeed`
|
||||
# does NOT bypass `required_approvals: 1`. Gitea waits for BOTH
|
||||
# approval AND green checks before merging. Hongming reviews via the
|
||||
# canvas/chat-handle of the PR notification, approves, and Gitea
|
||||
# auto-merges within seconds.
|
||||
#
|
||||
# ============================================================
|
||||
# Identity + token (anti-bot-ring per saved-memory
|
||||
# `feedback_per_agent_gitea_identity_default`)
|
||||
# ============================================================
|
||||
#
|
||||
# This workflow uses `secrets.AUTO_SYNC_TOKEN` — a personal access
|
||||
# token issued to the `devops-engineer` Gitea persona. NOT the
|
||||
# founder PAT. The bot-ring fingerprint that triggered the GitHub
|
||||
# org suspension on 2026-05-06 was characterised by founder PAT
|
||||
# acting as CI at machine speed.
|
||||
#
|
||||
# Token scope: `push: true` (read+write) on this repo. The persona
|
||||
# can: open PRs, comment on PRs, schedule auto-merge. The persona
|
||||
# CANNOT bypass main's branch protection (`required_approvals: 1`
|
||||
# still applies — only Hongming's review unblocks merge).
|
||||
#
|
||||
# Authorship: the PR is opened by `devops-engineer`; the merge
|
||||
# commit credits Hongming-as-approver and `devops-engineer` as
|
||||
# the merger.
|
||||
#
|
||||
# ============================================================
|
||||
# Failure modes & operational notes
|
||||
# ============================================================
|
||||
#
|
||||
# A — staging gates not all green at trigger time:
|
||||
# - The combined-status check returns `state: pending|failure`.
|
||||
# Workflow exits 0 with a step-summary "not all green; staying
|
||||
# on current main". Re-fires on the next gate completion.
|
||||
#
|
||||
# B — Gitea PR-create returns non-201 (e.g. 422 already-exists):
|
||||
# - Idempotent: the workflow first GETs the existing open
|
||||
# staging→main PR. If found, reuse it; if not, POST a new one.
|
||||
# 422 should never surface; if it does (race), step summary
|
||||
# captures the body and the next workflow_run picks up.
|
||||
#
|
||||
# C — `merge_when_checks_succeed` schedule fails:
|
||||
# - 422 with "Pull request is not mergeable" if there are
|
||||
# conflicts or stale base. Step summary surfaces it; operator
|
||||
# (or `auto-sync-main-to-staging`) needs to bring staging up
|
||||
# to date with main first. Workflow exits 1 to surface red.
|
||||
#
|
||||
# D — `AUTO_SYNC_TOKEN` rotated / wrong scope:
|
||||
# - 401/403 on first REST call. Step summary surfaces it.
|
||||
# Re-issue the token from `~/.molecule-ai/personas/` on the
|
||||
# operator host and update the repo Actions secret.
|
||||
#
|
||||
# ============================================================
|
||||
# Loop safety
|
||||
# ============================================================
|
||||
#
|
||||
# When the promote PR merges to main, `auto-sync-main-to-staging.yml`
|
||||
# fires (on:push:main) and pushes the merge commit back to staging.
|
||||
# That push to staging is by `devops-engineer`, NOT this workflow's
|
||||
# token, and triggers the staging gate workflows. When they all
|
||||
# complete, we end up back here — but the tree-diff guard catches
|
||||
# it: staging tree == main tree (the merge commit changes nothing),
|
||||
# so we skip and the cycle terminates.
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows:
|
||||
- CI
|
||||
- E2E Staging Canvas (Playwright)
|
||||
- E2E API Smoke Test
|
||||
- CodeQL
|
||||
types: [completed]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
force:
|
||||
description: "Force promote even when AUTO_PROMOTE_ENABLED is unset (manual override)"
|
||||
required: false
|
||||
default: "false"
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
|
||||
# Serialize auto-promote runs. Multiple staging gate completions can land
|
||||
# in quick succession (CI + E2E + CodeQL all finish within seconds of
|
||||
# each other on a green PR) — without this, two parallel runs both:
|
||||
# 1. Would race the GET-or-POST PR step.
|
||||
# 2. Would both call merge-schedule (idempotent — fine on Gitea).
|
||||
# cancel-in-progress: false because the second run on a fresh staging
|
||||
# tip should NOT kill the first which has already opened the PR.
|
||||
concurrency:
|
||||
group: auto-promote-staging
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
check-all-gates-green:
|
||||
# Only consider staging pushes. PRs into staging don't promote.
|
||||
if: >
|
||||
(github.event_name == 'workflow_run' &&
|
||||
github.event.workflow_run.head_branch == 'staging' &&
|
||||
github.event.workflow_run.event == 'push')
|
||||
|| github.event_name == 'workflow_dispatch'
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
all_green: ${{ steps.gates.outputs.all_green }}
|
||||
head_sha: ${{ steps.gates.outputs.head_sha }}
|
||||
steps:
|
||||
# Skip empty-tree promotes (the perpetual auto-promote↔auto-sync
|
||||
# cycle observed pre-cutover on GitHub). On Gitea the cycle shape
|
||||
# is different (auto-sync uses fast-forward, no merge commit),
|
||||
# but the tree-diff guard is cheap insurance and protects against
|
||||
# any future merge-style regression.
|
||||
- name: Checkout for tree-diff check
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: staging
|
||||
|
||||
- name: Skip if staging tree == main tree (cycle-break safety)
|
||||
id: tree-diff
|
||||
env:
|
||||
HEAD_SHA: ${{ github.event.workflow_run.head_sha || github.sha }}
|
||||
run: |
|
||||
set -eu
|
||||
git fetch origin main --depth=50 || { echo "::warning::git fetch main failed — proceeding (fail-open)"; exit 0; }
|
||||
if git diff --quiet origin/main "$HEAD_SHA" -- 2>/dev/null; then
|
||||
{
|
||||
echo "## Skipped — no code to promote"
|
||||
echo
|
||||
echo "staging tip (\`${HEAD_SHA:0:8}\`) and \`main\` have identical trees."
|
||||
echo "Skipping to avoid opening an empty promote PR."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
echo "::notice::auto-promote: staging tree == main tree — no code to promote, skipping"
|
||||
echo "skip=true" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "skip=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- name: Check combined status on staging head
|
||||
if: steps.tree-diff.outputs.skip != 'true'
|
||||
id: gates
|
||||
env:
|
||||
GITEA_TOKEN: ${{ secrets.AUTO_SYNC_TOKEN }}
|
||||
HEAD_SHA: ${{ github.event.workflow_run.head_sha || github.sha }}
|
||||
REPO: ${{ github.repository }}
|
||||
GITEA_HOST: ${{ vars.GITEA_HOST || 'https://git.moleculesai.app' }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
# Gitea-native combined-status endpoint aggregates every
|
||||
# check context attached to a SHA. This is structurally
|
||||
# cleaner than the GitHub-era per-workflow `gh run list`
|
||||
# loop because:
|
||||
#
|
||||
# 1. There's no risk of "workflow name collision" (the
|
||||
# GitHub-era code had to switch from `--workflow=NAME`
|
||||
# to `--workflow=FILE.YML` to disambiguate "CodeQL"
|
||||
# between the explicit workflow and GitHub's UI-
|
||||
# configured default setup; Gitea has no such
|
||||
# duplicate-name surface).
|
||||
# 2. Gitea's combined state already encodes the AND
|
||||
# across all contexts: success only if EVERY context
|
||||
# is success. Pending or failure on any context
|
||||
# produces non-success state.
|
||||
#
|
||||
# See https://docs.gitea.com/api/1.22 for the schema —
|
||||
# `state` is one of: success, pending, failure, error.
|
||||
|
||||
echo "head_sha=${HEAD_SHA}" >> "$GITHUB_OUTPUT"
|
||||
echo "Checking combined status on SHA ${HEAD_SHA}"
|
||||
|
||||
# `set +o pipefail` for the http-code capture pattern; restore
|
||||
# immediately. Pattern hardened per `feedback_curl_status_capture_pollution`.
|
||||
BODY_FILE=$(mktemp)
|
||||
set +e
|
||||
STATUS=$(curl -sS \
|
||||
-H "Authorization: token ${GITEA_TOKEN}" \
|
||||
-H "Accept: application/json" \
|
||||
-o "${BODY_FILE}" \
|
||||
-w "%{http_code}" \
|
||||
"${GITEA_HOST}/api/v1/repos/${REPO}/commits/${HEAD_SHA}/status")
|
||||
CURL_RC=$?
|
||||
set -e
|
||||
|
||||
if [ "${CURL_RC}" -ne 0 ] || [ "${STATUS}" != "200" ]; then
|
||||
echo "::error::combined-status fetch failed: curl=${CURL_RC} http=${STATUS}"
|
||||
cat "${BODY_FILE}" | head -c 500 || true
|
||||
rm -f "${BODY_FILE}"
|
||||
echo "all_green=false" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
STATE=$(jq -r '.state // "missing"' < "${BODY_FILE}")
|
||||
TOTAL=$(jq -r '.total_count // 0' < "${BODY_FILE}")
|
||||
rm -f "${BODY_FILE}"
|
||||
|
||||
echo "Combined status: state=${STATE} total_count=${TOTAL}"
|
||||
|
||||
if [ "${STATE}" = "success" ] && [ "${TOTAL}" -gt 0 ]; then
|
||||
echo "all_green=true" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::All gates green on ${HEAD_SHA} (${TOTAL} contexts)"
|
||||
else
|
||||
echo "all_green=false" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## Not promoting — combined status not green"
|
||||
echo
|
||||
echo "- SHA: \`${HEAD_SHA:0:8}\`"
|
||||
echo "- Combined state: \`${STATE}\`"
|
||||
echo "- Context count: ${TOTAL}"
|
||||
echo
|
||||
echo "Will re-fire on the next gate completion. Investigate any red gate via the Actions UI."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
echo "::notice::auto-promote: combined status is ${STATE} on ${HEAD_SHA} — staying on current main"
|
||||
fi
|
||||
|
||||
promote:
|
||||
needs: check-all-gates-green
|
||||
if: needs.check-all-gates-green.outputs.all_green == 'true'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check rollout gate
|
||||
env:
|
||||
AUTO_PROMOTE_ENABLED: ${{ vars.AUTO_PROMOTE_ENABLED }}
|
||||
FORCE_INPUT: ${{ github.event.inputs.force }}
|
||||
run: |
|
||||
set -eu
|
||||
# Repo variable AUTO_PROMOTE_ENABLED=true flips this on. While
|
||||
# it's unset, the workflow dry-runs (logs what it would have
|
||||
# done) but doesn't open the promote PR. Set the variable in
|
||||
# Settings → Actions → Variables.
|
||||
if [ "${AUTO_PROMOTE_ENABLED:-}" != "true" ] && [ "${FORCE_INPUT:-false}" != "true" ]; then
|
||||
{
|
||||
echo "## Auto-promote disabled"
|
||||
echo
|
||||
echo "Repo variable \`AUTO_PROMOTE_ENABLED\` is not set to \`true\`."
|
||||
echo "All gates are green on staging; would have opened a promote PR to \`main\`."
|
||||
echo
|
||||
echo "To enable: Settings → Actions → Variables → \`AUTO_PROMOTE_ENABLED=true\`."
|
||||
echo "To test once manually: workflow_dispatch with \`force=true\`."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
echo "::notice::auto-promote disabled — dry run only"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
- name: Open or reuse promote PR + schedule auto-merge
|
||||
if: ${{ vars.AUTO_PROMOTE_ENABLED == 'true' || github.event.inputs.force == 'true' }}
|
||||
env:
|
||||
GITEA_TOKEN: ${{ secrets.AUTO_SYNC_TOKEN }}
|
||||
REPO: ${{ github.repository }}
|
||||
TARGET_SHA: ${{ needs.check-all-gates-green.outputs.head_sha }}
|
||||
GITEA_HOST: ${{ vars.GITEA_HOST || 'https://git.moleculesai.app' }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
API="${GITEA_HOST}/api/v1/repos/${REPO}"
|
||||
AUTH=(-H "Authorization: token ${GITEA_TOKEN}" -H "Accept: application/json")
|
||||
|
||||
# http_status_get RESULT_VAR URL
|
||||
# Sets RESULT_VAR to "<http_code>:<body_file>". Curl status
|
||||
# capture pattern per `feedback_curl_status_capture_pollution`:
|
||||
# http_code goes to its own tempfile-equivalent (-w), body to
|
||||
# another tempfile, set +e/-e bracket protects pipeline state.
|
||||
http_get() {
|
||||
local body_file="$1"; shift
|
||||
local url="$1"; shift
|
||||
set +e
|
||||
local code
|
||||
code=$(curl -sS "${AUTH[@]}" -o "${body_file}" -w "%{http_code}" "${url}")
|
||||
local rc=$?
|
||||
set -e
|
||||
if [ "${rc}" -ne 0 ]; then
|
||||
echo "::error::curl GET failed (rc=${rc}) on ${url}"
|
||||
return 99
|
||||
fi
|
||||
echo "${code}"
|
||||
}
|
||||
http_post_json() {
|
||||
local body_file="$1"; shift
|
||||
local data="$1"; shift
|
||||
local url="$1"; shift
|
||||
set +e
|
||||
local code
|
||||
code=$(curl -sS "${AUTH[@]}" -H "Content-Type: application/json" \
|
||||
-X POST -d "${data}" -o "${body_file}" -w "%{http_code}" "${url}")
|
||||
local rc=$?
|
||||
set -e
|
||||
if [ "${rc}" -ne 0 ]; then
|
||||
echo "::error::curl POST failed (rc=${rc}) on ${url}"
|
||||
return 99
|
||||
fi
|
||||
echo "${code}"
|
||||
}
|
||||
|
||||
# Step 1: look for an existing open staging→main promote PR
|
||||
# (idempotent on workflow re-run). Gitea doesn't have a
|
||||
# head/base filter on the list endpoint that's as ergonomic
|
||||
# as gh's, but the dedicated `/pulls/{base}/{head}` lookup
|
||||
# works.
|
||||
BODY=$(mktemp)
|
||||
STATUS=$(http_get "${BODY}" "${API}/pulls/main/staging") || true
|
||||
|
||||
PR_NUM=""
|
||||
if [ "${STATUS}" = "200" ]; then
|
||||
STATE=$(jq -r '.state // "missing"' < "${BODY}")
|
||||
if [ "${STATE}" = "open" ]; then
|
||||
PR_NUM=$(jq -r '.number // ""' < "${BODY}")
|
||||
echo "::notice::Re-using existing open promote PR #${PR_NUM}"
|
||||
fi
|
||||
fi
|
||||
rm -f "${BODY}"
|
||||
|
||||
# Step 2: if no open PR, create one.
|
||||
if [ -z "${PR_NUM}" ]; then
|
||||
TITLE="staging → main: auto-promote ${TARGET_SHA:0:7}"
|
||||
BODY_TEXT=$(cat <<EOFBODY
|
||||
Automated promotion of \`staging\` (\`${TARGET_SHA:0:8}\`) to \`main\`. All required staging gates are green at this SHA (combined status reported success).
|
||||
|
||||
This PR is auto-generated by \`.github/workflows/auto-promote-staging.yml\` whenever every required gate completes green on the same staging SHA.
|
||||
|
||||
**Approval gate:** \`main\` branch protection requires 1 approval before this can land. Once approved, Gitea will auto-merge (the workflow scheduled \`merge_when_checks_succeed: true\` immediately after open).
|
||||
|
||||
The reverse-direction sync (the merge commit on \`main\` → \`staging\`) is handled automatically by \`auto-sync-main-to-staging.yml\` after this PR lands.
|
||||
|
||||
---
|
||||
- Source: staging at \`${TARGET_SHA}\`
|
||||
- Opened by: \`devops-engineer\` persona (anti-bot-ring; never founder PAT)
|
||||
- Refs: #65, #73, #195
|
||||
EOFBODY
|
||||
)
|
||||
REQ=$(jq -n \
|
||||
--arg title "${TITLE}" \
|
||||
--arg body "${BODY_TEXT}" \
|
||||
--arg base "main" \
|
||||
--arg head "staging" \
|
||||
'{title:$title, body:$body, base:$base, head:$head}')
|
||||
|
||||
BODY=$(mktemp)
|
||||
STATUS=$(http_post_json "${BODY}" "${REQ}" "${API}/pulls")
|
||||
|
||||
if [ "${STATUS}" = "201" ]; then
|
||||
PR_NUM=$(jq -r '.number // ""' < "${BODY}")
|
||||
echo "::notice::Opened promote PR #${PR_NUM}"
|
||||
else
|
||||
echo "::error::Failed to create promote PR: HTTP ${STATUS}"
|
||||
jq -r '.message // .' < "${BODY}" | head -c 500
|
||||
rm -f "${BODY}"
|
||||
exit 1
|
||||
fi
|
||||
rm -f "${BODY}"
|
||||
fi
|
||||
|
||||
# Step 3: schedule auto-merge. merge_when_checks_succeed
|
||||
# tells Gitea to wait for both:
|
||||
# - all required status checks to pass
|
||||
# - the required-approvals gate (1 approval on main)
|
||||
# before merging. On approval+green, Gitea merges within
|
||||
# seconds. On any check failing or approval being denied,
|
||||
# the schedule stays armed but doesn't fire.
|
||||
#
|
||||
# Idempotent: re-arming on an already-armed PR is a no-op.
|
||||
REQ=$(jq -n '{Do:"merge", merge_when_checks_succeed:true}')
|
||||
BODY=$(mktemp)
|
||||
STATUS=$(http_post_json "${BODY}" "${REQ}" "${API}/pulls/${PR_NUM}/merge")
|
||||
|
||||
# Gitea returns:
|
||||
# - 200/204 on successful immediate merge (gates already green AND approved)
|
||||
# - 405 "Please try again later" when scheduled successfully but waiting
|
||||
# - 422 on "Pull request is not mergeable" (conflict, stale base, etc.)
|
||||
#
|
||||
# 405 here is benign — Gitea's way of saying "scheduled, not merging now".
|
||||
# We treat 200/204/405 as success, anything else as failure.
|
||||
case "${STATUS}" in
|
||||
200|204)
|
||||
MERGE_OUTCOME="merged-immediately"
|
||||
echo "::notice::Promote PR #${PR_NUM} merged immediately (gates+approval already green)"
|
||||
;;
|
||||
405)
|
||||
MERGE_OUTCOME="auto-merge-scheduled"
|
||||
echo "::notice::Promote PR #${PR_NUM}: auto-merge scheduled (Gitea will land on approval+green)"
|
||||
;;
|
||||
422)
|
||||
MERGE_OUTCOME="not-mergeable"
|
||||
echo "::warning::Promote PR #${PR_NUM}: not mergeable (conflict, stale base, or already merging)."
|
||||
jq -r '.message // .' < "${BODY}" | head -c 500
|
||||
;;
|
||||
*)
|
||||
echo "::error::Unexpected status ${STATUS} on merge schedule"
|
||||
jq -r '.message // .' < "${BODY}" | head -c 500
|
||||
rm -f "${BODY}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
rm -f "${BODY}"
|
||||
|
||||
{
|
||||
echo "## Auto-promote PR opened"
|
||||
echo
|
||||
echo "- Source: staging at \`${TARGET_SHA:0:8}\`"
|
||||
echo "- PR: #${PR_NUM}"
|
||||
echo "- Outcome: \`${MERGE_OUTCOME}\`"
|
||||
echo
|
||||
if [ "${MERGE_OUTCOME}" = "auto-merge-scheduled" ]; then
|
||||
echo "Gitea will auto-merge once Hongming approves and all checks are green. No human action needed beyond approval."
|
||||
elif [ "${MERGE_OUTCOME}" = "merged-immediately" ]; then
|
||||
echo "Merged immediately. \`publish-workspace-server-image.yml\` will fire naturally on the resulting \`main\` push."
|
||||
else
|
||||
echo "PR is not auto-merging. Operator may need to bring staging up to date with main, then re-trigger this workflow via workflow_dispatch."
|
||||
fi
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
83
.github/workflows/auto-promote-stale-alarm.yml
vendored
Normal file
83
.github/workflows/auto-promote-stale-alarm.yml
vendored
Normal file
@ -0,0 +1,83 @@
|
||||
name: auto-promote-stale-alarm
|
||||
|
||||
# Hourly cron + on-demand alarm for the silent-block failure mode that
|
||||
# motivated issue #2975:
|
||||
# - The auto-promote-staging.yml workflow opened a PR + armed
|
||||
# auto-merge, but main's branch protection requires a human review
|
||||
# (reviewDecision=REVIEW_REQUIRED). The PR sat BLOCKED with no
|
||||
# surface-up-the-stack for 12+ hours, holding 25 commits hostage
|
||||
# including the Memory v2 redesign and a reno-stars data-loss fix.
|
||||
#
|
||||
# This workflow runs `scripts/check-stale-promote-pr.sh` against the
|
||||
# repo's open auto-promote PRs (base=main head=staging). When a PR has
|
||||
# been BLOCKED on REVIEW_REQUIRED for >4h, it:
|
||||
# 1. Emits a workflow-level warning (visible in run summary + the
|
||||
# Actions UI feed).
|
||||
# 2. Posts a comment on the PR (idempotent — one alarm per PR).
|
||||
#
|
||||
# The detection logic lives in scripts/check-stale-promote-pr.sh so
|
||||
# it's unit-testable with stubbed `gh` (see test-check-stale-promote-pr.sh).
|
||||
# This file is the schedule + invocation surface only — SSOT for the
|
||||
# detector itself.
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Hourly. Cheap (one `gh pr list` + jq), and 1h granularity is
|
||||
# plenty for a 4h staleness threshold — operators see the alarm
|
||||
# within at most 1h of crossing the threshold.
|
||||
- cron: "27 * * * *" # at :27 to dodge the cron herd at :00
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
stale_hours:
|
||||
description: "Hours after which a BLOCKED+REVIEW_REQUIRED PR is stale (default 4)"
|
||||
required: false
|
||||
default: "4"
|
||||
post_comment:
|
||||
description: "Post a comment on stale PRs (default true)"
|
||||
required: false
|
||||
default: "true"
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write # post comments on stale PRs
|
||||
|
||||
# Serialize so the on-demand and scheduled runs don't double-comment
|
||||
# the same PR. cancel-in-progress=false because the script is idempotent
|
||||
# (existing comment marker prevents dupes), but a scheduled run firing
|
||||
# while a manual one runs would just re-list the same PR set.
|
||||
concurrency:
|
||||
group: auto-promote-stale-alarm
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
scan:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout (need scripts/ only)
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
sparse-checkout: |
|
||||
scripts/check-stale-promote-pr.sh
|
||||
sparse-checkout-cone-mode: false
|
||||
- name: Run stale-PR detector
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
STALE_HOURS: ${{ inputs.stale_hours || '4' }}
|
||||
POST_COMMENT: ${{ inputs.post_comment || 'true' }}
|
||||
run: |
|
||||
# The script's exit code reflects the count of stale PRs.
|
||||
# We don't want a stale finding to fail the workflow run —
|
||||
# the warning + comment are the signal, the green/red is
|
||||
# noise. So convert any non-zero exit to a workflow notice
|
||||
# and exit 0.
|
||||
set +e
|
||||
bash scripts/check-stale-promote-pr.sh
|
||||
rc=$?
|
||||
set -e
|
||||
if [ "$rc" -ne 0 ]; then
|
||||
echo "::notice::Stale PR detector found $rc PR(s) needing attention. See warnings above + comments on the PRs."
|
||||
fi
|
||||
# Always succeed — operator-facing surface is the warning,
|
||||
# not the workflow status.
|
||||
exit 0
|
||||
255
.github/workflows/auto-sync-main-to-staging.yml
vendored
Normal file
255
.github/workflows/auto-sync-main-to-staging.yml
vendored
Normal file
@ -0,0 +1,255 @@
|
||||
name: Auto-sync main → staging
|
||||
|
||||
# Reflects every push to `main` back onto `staging` so the
|
||||
# staging-as-superset-of-main invariant holds.
|
||||
#
|
||||
# ============================================================
|
||||
# What this workflow does
|
||||
# ============================================================
|
||||
#
|
||||
# On every push to `main`:
|
||||
# 1. Checks if staging already contains main → no-op.
|
||||
# 2. Fetches both branches, merges main into staging in the
|
||||
# runner workspace (fast-forward if possible, else
|
||||
# `--no-ff` merge commit).
|
||||
# 3. Pushes staging directly to origin via the
|
||||
# `devops-engineer` persona's `AUTO_SYNC_TOKEN`.
|
||||
#
|
||||
# Authoritative path: a single `git push origin staging` from
|
||||
# inside this workflow is the SSOT for advancing staging after
|
||||
# a main push. No PR, no merge queue, no human approval —
|
||||
# staging is mechanically maintained as a superset of main.
|
||||
#
|
||||
# `auto-promote-staging.yml` is the reverse-direction
|
||||
# counterpart (staging → main, gated on green CI). Together
|
||||
# they keep the staging-superset-of-main invariant tight.
|
||||
#
|
||||
# ============================================================
|
||||
# Why direct push (and not "open a PR")
|
||||
# ============================================================
|
||||
#
|
||||
# Pre-2026-05-06 the canonical SCM was GitHub.com, where:
|
||||
# - The `staging` branch had a `merge_queue` ruleset that
|
||||
# blocked ALL direct pushes (no bypass even for org
|
||||
# admins or the GitHub Actions integration).
|
||||
# - Therefore this workflow opened a PR via `gh pr create`
|
||||
# and let auto-merge land it through the queue.
|
||||
#
|
||||
# Post-2026-05-06 the canonical SCM is Gitea
|
||||
# (`git.moleculesai.app/molecule-ai/molecule-core`). Gitea:
|
||||
# - Has no `merge_queue` concept.
|
||||
# - Allows direct push to protected branches via per-user
|
||||
# `push_whitelist_usernames` on the branch protection.
|
||||
# - Does not expose a GraphQL endpoint, so `gh pr create`
|
||||
# returns `HTTP 405 Method Not Allowed
|
||||
# (https://git.moleculesai.app/api/graphql)` — the
|
||||
# pre-suspension architecture cannot work on Gitea.
|
||||
#
|
||||
# The molecule-ai/molecule-core staging branch protection
|
||||
# (verified via `GET /api/v1/repos/.../branch_protections`)
|
||||
# whitelists `devops-engineer` for direct push. So the
|
||||
# correct Gitea-shape architecture is: authenticate as
|
||||
# `devops-engineer`, merge locally, push staging directly.
|
||||
#
|
||||
# This is structurally simpler than the GitHub-era PR dance
|
||||
# and removes the dependence on `gh` CLI / GraphQL entirely.
|
||||
#
|
||||
# ============================================================
|
||||
# Identity + token (anti-bot-ring per saved-memory
|
||||
# `feedback_per_agent_gitea_identity_default`)
|
||||
# ============================================================
|
||||
#
|
||||
# This workflow uses `secrets.AUTO_SYNC_TOKEN`, which is a
|
||||
# personal access token issued to the `devops-engineer`
|
||||
# persona on Gitea — NOT the founder PAT. The bot-ring
|
||||
# fingerprint that triggered the GitHub org suspension on
|
||||
# 2026-05-06 was characterised by founder PAT acting as CI
|
||||
# at machine speed; per-persona identities split the
|
||||
# attribution honestly.
|
||||
#
|
||||
# Token scope on Gitea: repo write. Push target restricted
|
||||
# to `staging` (this workflow is the only writer; main is
|
||||
# untouched). Compromise blast radius: bounded to staging
|
||||
# branch + this repo's read surface.
|
||||
#
|
||||
# Commits are authored by the persona email
|
||||
# `devops-engineer@agents.moleculesai.app` so commit history
|
||||
# reflects which automation produced the merge.
|
||||
#
|
||||
# ============================================================
|
||||
# Failure modes & operational notes
|
||||
# ============================================================
|
||||
#
|
||||
# A — staging has commits main doesn't, and the merge
|
||||
# conflicts:
|
||||
# - The `--no-ff` merge step exits non-zero. Workflow
|
||||
# fails red. Operator (devops-engineer or human)
|
||||
# resolves manually:
|
||||
# git fetch origin
|
||||
# git checkout staging
|
||||
# git merge --no-ff origin/main
|
||||
# # resolve conflicts
|
||||
# git push origin staging
|
||||
# - Step summary surfaces the conflict so the failed run
|
||||
# is self-explanatory.
|
||||
#
|
||||
# B — `AUTO_SYNC_TOKEN` rotated / wrong scope:
|
||||
# - `git push` step exits non-zero with `HTTP 401` /
|
||||
# `403`. Step summary surfaces the failed push.
|
||||
# - Re-issue the token from `~/.molecule-ai/personas/`
|
||||
# on the operator host and update the repo Actions
|
||||
# secret. Re-run the workflow.
|
||||
#
|
||||
# C — staging branch protection no longer whitelists
|
||||
# `devops-engineer`:
|
||||
# - `git push` exits non-zero with a Gitea protected-
|
||||
# branch rejection. Step summary surfaces it.
|
||||
# - Re-add `devops-engineer` to
|
||||
# `push_whitelist_usernames` on the staging
|
||||
# protection (Settings → Branches → staging).
|
||||
#
|
||||
# D — concurrent push to main while a sync is in flight:
|
||||
# - The `concurrency` group below serialises runs.
|
||||
# The second waits for the first; if main advances
|
||||
# again while we're syncing, the second run picks
|
||||
# up the new tip on its own fetch.
|
||||
#
|
||||
# ============================================================
|
||||
# Loop safety
|
||||
# ============================================================
|
||||
#
|
||||
# The push to staging from this workflow does NOT itself
|
||||
# fire a `push: branches: [main]` event (different branch),
|
||||
# so there's no risk of self-recursion. `auto-promote-staging.yml`
|
||||
# fires on `workflow_run` of CI etc. — it sees the new
|
||||
# staging tip on its next gate-completion event, NOT on this
|
||||
# push directly. No loop.
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
# workflow_dispatch lets operators manually backfill a
|
||||
# missed sync (e.g. if AUTO_SYNC_TOKEN was rotated and a
|
||||
# main push slipped through while the secret was stale).
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
concurrency:
|
||||
group: auto-sync-main-to-staging
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
sync-staging:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout staging (with devops-engineer push token)
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: staging
|
||||
# AUTO_SYNC_TOKEN authenticates as the
|
||||
# `devops-engineer` Gitea persona — the only
|
||||
# identity whitelisted for direct push to
|
||||
# staging. See header comment for context.
|
||||
token: ${{ secrets.AUTO_SYNC_TOKEN }}
|
||||
|
||||
- name: Configure git author
|
||||
run: |
|
||||
# Per-persona identity, NOT founder PAT.
|
||||
# `feedback_per_agent_gitea_identity_default`.
|
||||
git config user.name "devops-engineer"
|
||||
git config user.email "devops-engineer@agents.moleculesai.app"
|
||||
|
||||
- name: Check if staging already contains main
|
||||
id: check
|
||||
run: |
|
||||
set -euo pipefail
|
||||
git fetch origin main
|
||||
if git merge-base --is-ancestor origin/main HEAD; then
|
||||
echo "needs_sync=false" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## No-op"
|
||||
echo
|
||||
echo "staging already contains \`origin/main\` ($(git rev-parse --short=8 origin/main))."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
else
|
||||
echo "needs_sync=true" >> "$GITHUB_OUTPUT"
|
||||
MAIN_SHORT=$(git rev-parse --short=8 origin/main)
|
||||
echo "main_short=${MAIN_SHORT}" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::staging is missing main's tip (${MAIN_SHORT}) — merging in-runner and pushing"
|
||||
fi
|
||||
|
||||
- name: Merge main into staging (in-runner)
|
||||
if: steps.check.outputs.needs_sync == 'true'
|
||||
id: merge
|
||||
run: |
|
||||
set -euo pipefail
|
||||
# Already on staging from checkout. Try fast-forward
|
||||
# first (cleanest history); fall back to merge commit
|
||||
# if staging has commits main doesn't.
|
||||
if git merge --ff-only origin/main; then
|
||||
echo "did_ff=true" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::Fast-forwarded staging to origin/main"
|
||||
else
|
||||
echo "did_ff=false" >> "$GITHUB_OUTPUT"
|
||||
if ! git merge --no-ff origin/main \
|
||||
-m "chore: sync main → staging (auto, ${{ steps.check.outputs.main_short }})"; then
|
||||
# Hygiene: leave the work tree clean before failing.
|
||||
git merge --abort || true
|
||||
{
|
||||
echo "## Conflict"
|
||||
echo
|
||||
echo "Auto-merge \`main → staging\` failed with conflicts."
|
||||
echo "A human (or devops-engineer persona) needs to resolve manually:"
|
||||
echo
|
||||
echo '```'
|
||||
echo "git fetch origin"
|
||||
echo "git checkout staging"
|
||||
echo "git merge --no-ff origin/main"
|
||||
echo "# resolve conflicts"
|
||||
echo "git push origin staging"
|
||||
echo '```'
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
- name: Push staging to origin
|
||||
if: steps.check.outputs.needs_sync == 'true'
|
||||
run: |
|
||||
set -euo pipefail
|
||||
# Direct push to staging. devops-engineer persona is
|
||||
# whitelisted for direct push on the staging branch
|
||||
# protection (Settings → Branches → staging).
|
||||
#
|
||||
# No --force / --force-with-lease: a fast-forward or
|
||||
# legitimate merge commit on top of current staging
|
||||
# is the only thing we'd ever push. If origin/staging
|
||||
# advanced under us (concurrent merge), the push
|
||||
# legitimately rejects and the next run picks up the
|
||||
# new state.
|
||||
if ! git push origin staging; then
|
||||
{
|
||||
echo "## Push rejected"
|
||||
echo
|
||||
echo "Direct push to \`staging\` failed. Likely causes:"
|
||||
echo "- \`AUTO_SYNC_TOKEN\` rotated / wrong scope (HTTP 401/403)"
|
||||
echo "- \`devops-engineer\` no longer in"
|
||||
echo " \`push_whitelist_usernames\` on the staging"
|
||||
echo " branch protection (HTTP 422)"
|
||||
echo "- staging advanced concurrently — re-running this"
|
||||
echo " workflow on the new main tip will pick it up"
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
{
|
||||
echo "## Auto-sync succeeded"
|
||||
echo
|
||||
echo "- staging advanced to: \`$(git rev-parse --short=8 HEAD)\`"
|
||||
echo "- main tip: \`${{ steps.check.outputs.main_short }}\`"
|
||||
echo "- Strategy: $([ "${{ steps.merge.outputs.did_ff }}" = "true" ] && echo "fast-forward" || echo "merge commit")"
|
||||
echo "- Pushed by: \`devops-engineer\` (per-agent persona, anti-bot-ring)"
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
82
.github/workflows/canary-staging.yml
vendored
82
.github/workflows/canary-staging.yml
vendored
@ -20,19 +20,6 @@ on:
|
||||
# a few minutes under load — that's fine for a canary.
|
||||
- cron: '*/30 * * * *'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
keep_on_failure:
|
||||
description: >-
|
||||
Skip teardown when the canary fails (debugging only). The
|
||||
tenant org + EC2 + CF tunnel + DNS stay alive so an operator
|
||||
can SSM into the workspace EC2 and capture docker logs of the
|
||||
failing claude-code container. REMEMBER to manually delete
|
||||
via DELETE /cp/admin/tenants/<slug> when done so the org
|
||||
doesn't accumulate cost. Only honored on workflow_dispatch;
|
||||
cron runs always tear down (we don't want unattended cron
|
||||
to leak resources).
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
# Serialise with the full-SaaS workflow so they don't contend for the
|
||||
# same org-create quota on staging. Different group key from
|
||||
@ -93,14 +80,6 @@ jobs:
|
||||
# is "Token Plan only" but cheap-per-token and fast.
|
||||
E2E_MODEL_SLUG: MiniMax-M2.7-highspeed
|
||||
E2E_RUN_ID: "canary-${{ github.run_id }}"
|
||||
# Debug-only: when an operator dispatches with keep_on_failure=true,
|
||||
# the canary script's E2E_KEEP_ORG=1 path skips teardown so the
|
||||
# tenant org + EC2 stay alive for SSM-based log capture. Cron runs
|
||||
# never set this (the input only exists on workflow_dispatch) so
|
||||
# unattended cron always tears down. See molecule-core#129
|
||||
# failure mode #1 — capturing the actual exception requires
|
||||
# docker logs from the live container.
|
||||
E2E_KEEP_ORG: ${{ github.event.inputs.keep_on_failure == 'true' && '1' || '0' }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
@ -158,28 +137,27 @@ jobs:
|
||||
id: canary
|
||||
run: bash tests/e2e/test_staging_full_saas.sh
|
||||
|
||||
# Alerting: open a sticky issue on the FIRST failure; comment on
|
||||
# subsequent failures; auto-close on next green. Comment-on-existing
|
||||
# de-duplicates so a single open issue accumulates the streak —
|
||||
# ops sees one issue with N comments rather than N issues.
|
||||
# Alerting: open an issue only after THREE consecutive failures so
|
||||
# transient flakes (Cloudflare DNS hiccup, AWS API blip) don't spam
|
||||
# the issue list. If an issue is already open, we still comment on
|
||||
# every failure so ops sees the streak. Auto-close on next green.
|
||||
#
|
||||
# Why no consecutive-failures threshold (e.g., wait 3 runs before
|
||||
# filing): the prior threshold check used
|
||||
# `github.rest.actions.listWorkflowRuns()` which Gitea 1.22.6 does
|
||||
# not expose (returns 404). On Gitea Actions the threshold call
|
||||
# ALWAYS failed, breaking the entire alerting step and going days
|
||||
# silent on real regressions (38h+ chronic red on 2026-05-07/08
|
||||
# before this fix; tracked in molecule-core#129). Filing on first
|
||||
# failure is also better UX — we want to know about the first red,
|
||||
# not wait 90 min for it to "count." Real flakes get one issue +
|
||||
# a quick close-on-green; persistent reds accumulate comments.
|
||||
# Threshold rationale: canary fires every 30 min, so 3 failures =
|
||||
# ~90 min of consecutive red — well past any single-run flake but
|
||||
# still tight enough that a real outage gets surfaced before the
|
||||
# next deploy window.
|
||||
- name: Open issue on failure
|
||||
if: failure()
|
||||
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
|
||||
env:
|
||||
# Inject the workflow path explicitly — context.workflow is
|
||||
# the *name*, not the file path the actions API needs.
|
||||
WORKFLOW_PATH: '.github/workflows/canary-staging.yml'
|
||||
CONSECUTIVE_THRESHOLD: '3'
|
||||
with:
|
||||
script: |
|
||||
const title = '🔴 Canary failing: staging SaaS smoke';
|
||||
const runURL = `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
|
||||
const runURL = `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
|
||||
|
||||
// Find an existing open canary issue (stable title match).
|
||||
// If one exists, this isn't a "first failure" — comment and exit.
|
||||
@ -199,12 +177,32 @@ jobs:
|
||||
return;
|
||||
}
|
||||
|
||||
// No open issue yet — file one on this first failure. The
|
||||
// comment-on-existing branch above means subsequent failures
|
||||
// accumulate as comments on this same issue, so we don't
|
||||
// spam new issues per run.
|
||||
// No open issue yet — check the last N-1 runs' conclusions.
|
||||
// We open the issue only if the last (THRESHOLD-1) runs ALSO
|
||||
// failed (so this is the 3rd consecutive red).
|
||||
const threshold = parseInt(process.env.CONSECUTIVE_THRESHOLD, 10);
|
||||
const { data: runs } = await github.rest.actions.listWorkflowRuns({
|
||||
owner: context.repo.owner, repo: context.repo.repo,
|
||||
workflow_id: process.env.WORKFLOW_PATH,
|
||||
status: 'completed',
|
||||
per_page: threshold,
|
||||
// Skip the current in-progress run; it isn't 'completed' yet.
|
||||
});
|
||||
// listWorkflowRuns returns recent first. We need (threshold-1)
|
||||
// prior failures (current run is the threshold-th).
|
||||
const priorFailures = (runs.workflow_runs || [])
|
||||
.slice(0, threshold - 1)
|
||||
.filter(r => r.id !== context.runId)
|
||||
.filter(r => r.conclusion === 'failure')
|
||||
.length;
|
||||
if (priorFailures < threshold - 1) {
|
||||
core.info(`Below threshold: ${priorFailures + 1}/${threshold} consecutive failures — not filing yet`);
|
||||
return;
|
||||
}
|
||||
|
||||
const body =
|
||||
`Canary run failed at ${new Date().toISOString()}.\n\n` +
|
||||
`Canary run failed at ${new Date().toISOString()}, ` +
|
||||
`${threshold} consecutive runs red.\n\n` +
|
||||
`Run: ${runURL}\n\n` +
|
||||
`This issue auto-closes on the next green canary run. ` +
|
||||
`Consecutive failures add a comment here rather than a new issue.`;
|
||||
@ -213,7 +211,7 @@ jobs:
|
||||
title, body,
|
||||
labels: ['canary-staging', 'bug'],
|
||||
});
|
||||
core.info('Opened canary failure issue (first red)');
|
||||
core.info(`Opened canary failure issue (${threshold} consecutive reds)`);
|
||||
|
||||
- name: Auto-close canary issue on success
|
||||
if: success()
|
||||
|
||||
2
.github/workflows/canary-verify.yml
vendored
2
.github/workflows/canary-verify.yml
vendored
@ -108,7 +108,7 @@ jobs:
|
||||
echo
|
||||
echo "One or more canary secrets are unset (\`CANARY_TENANT_URLS\`, \`CANARY_ADMIN_TOKENS\`, \`CANARY_CP_SHARED_SECRET\`)."
|
||||
echo "Phase 2 canary fleet has not been stood up yet —"
|
||||
echo "see [canary-tenants.md](https://git.moleculesai.app/molecule-ai/molecule-controlplane/blob/main/docs/canary-tenants.md)."
|
||||
echo "see [canary-tenants.md](https://github.com/molecule-ai/molecule-controlplane/blob/main/docs/canary-tenants.md)."
|
||||
echo
|
||||
echo "**Skipped — promote-to-latest will NOT auto-fire.** Dispatch \`promote-latest.yml\` manually when ready."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
|
||||
8
.github/workflows/ci.yml
vendored
8
.github/workflows/ci.yml
vendored
@ -235,13 +235,7 @@ jobs:
|
||||
run: npx vitest run --coverage
|
||||
- name: Upload coverage summary as artifact
|
||||
if: needs.changes.outputs.canvas == 'true' && always()
|
||||
# Pinned to v3 for Gitea act_runner v0.6 compatibility — v4+ uses
|
||||
# the GHES 3.10+ artifact protocol that Gitea 1.22.x does NOT
|
||||
# implement, surfacing as `GHESNotSupportedError: @actions/artifact
|
||||
# v2.0.0+, upload-artifact@v4+ and download-artifact@v4+ are not
|
||||
# currently supported on GHES`. Drop this pin when Gitea ships
|
||||
# the v4 protocol (tracked: post-Gitea-1.23 followup).
|
||||
uses: actions/upload-artifact@c6a366c94c3e0affe28c06c8df20a878f24da3cf # v3.2.2
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: canvas-coverage-${{ github.run_id }}
|
||||
path: canvas/coverage/
|
||||
|
||||
130
.github/workflows/e2e-api.yml
vendored
130
.github/workflows/e2e-api.yml
vendored
@ -12,59 +12,6 @@ name: E2E API Smoke Test
|
||||
# spending CI cycles. See the in-job comment on the `e2e-api` job for
|
||||
# why this is one job (not two-jobs-sharing-name) and the 2026-04-29
|
||||
# PR #2264 incident that drove the consolidation.
|
||||
#
|
||||
# Parallel-safety (Class B Hongming-owned CICD red sweep, 2026-05-08)
|
||||
# -------------------------------------------------------------------
|
||||
# Same substrate hazard as PR #98 (handlers-postgres-integration). Our
|
||||
# Gitea act_runner runs with `container.network: host` (operator host
|
||||
# `/opt/molecule/runners/config.yaml`), which means:
|
||||
#
|
||||
# * Two concurrent runs both try to bind their `-p 15432:5432` /
|
||||
# `-p 16379:6379` host ports — the second postgres/redis FATALs
|
||||
# with `Address in use` and `docker run` returns exit 125 with
|
||||
# `Conflict. The container name "/molecule-ci-postgres" is already
|
||||
# in use by container ...`. Verified in run a7/2727 on 2026-05-07.
|
||||
# * The fixed container names `molecule-ci-postgres` / `-redis` (the
|
||||
# pre-fix shape) collide on name AS WELL AS port. The cleanup-with-
|
||||
# `docker rm -f` at the start of the second job KILLS the first
|
||||
# job's still-running postgres/redis.
|
||||
#
|
||||
# Fix shape (mirrors PR #98's bridge-net pattern, adapted because
|
||||
# platform-server is a Go binary on the host, not a containerised
|
||||
# step):
|
||||
#
|
||||
# 1. Unique container names per run:
|
||||
# pg-e2e-api-${RUN_ID}-${RUN_ATTEMPT}
|
||||
# redis-e2e-api-${RUN_ID}-${RUN_ATTEMPT}
|
||||
# `${RUN_ID}-${RUN_ATTEMPT}` is unique even across reruns of the
|
||||
# same run_id.
|
||||
# 2. Ephemeral host port per run (`-p 0:5432`), then read the actual
|
||||
# bound port via `docker port` and export DATABASE_URL/REDIS_URL
|
||||
# pointing at it. No fixed host-port → no port collision.
|
||||
# 3. `127.0.0.1` (NOT `localhost`) in URLs — IPv6 first-resolve was
|
||||
# the original flake fixed in #92 and the script's still IPv6-
|
||||
# enabled.
|
||||
# 4. `if: always()` cleanup so containers don't leak when test steps
|
||||
# fail.
|
||||
#
|
||||
# Issue #94 items #2 + #3 (also fixed here):
|
||||
# * Pre-pull `alpine:latest` so the platform-server's provisioner
|
||||
# (`internal/handlers/container_files.go`) can stand up its
|
||||
# ephemeral token-write helper without a daemon.io round-trip.
|
||||
# * Create `molecule-core-net` bridge network if missing so the
|
||||
# provisioner's container.HostConfig {NetworkMode: ...} attach
|
||||
# succeeds.
|
||||
# Item #1 (timeouts) — evidence on recent runs (77/3191, ae/4270, 0e/
|
||||
# 2318) shows Postgres ready in 3s, Redis in 1s, Platform in 1s when
|
||||
# they DO come up. Timeouts are not the bottleneck; not bumped.
|
||||
#
|
||||
# Item explicitly NOT fixed here: failing test `Status back online`
|
||||
# fails because the platform's langgraph workspace template image
|
||||
# (ghcr.io/molecule-ai/workspace-template-langgraph:latest) returns
|
||||
# 403 Forbidden post-2026-05-06 GitHub org suspension. That is a
|
||||
# template-registry resolution issue (ADR-002 / local-build mode) and
|
||||
# belongs in a separate change that touches workspace-server, not
|
||||
# this workflow file.
|
||||
|
||||
on:
|
||||
push:
|
||||
@ -131,14 +78,11 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
env:
|
||||
# Unique per-run container names so concurrent runs on the host-
|
||||
# network act_runner don't collide on name OR port.
|
||||
# `${RUN_ID}-${RUN_ATTEMPT}` stays unique across reruns of the
|
||||
# same run_id. PORT is set later (after docker port lookup) since
|
||||
# we let Docker assign an ephemeral host port.
|
||||
PG_CONTAINER: pg-e2e-api-${{ github.run_id }}-${{ github.run_attempt }}
|
||||
REDIS_CONTAINER: redis-e2e-api-${{ github.run_id }}-${{ github.run_attempt }}
|
||||
DATABASE_URL: postgres://dev:dev@localhost:15432/molecule?sslmode=disable
|
||||
REDIS_URL: redis://localhost:16379
|
||||
PORT: "8080"
|
||||
PG_CONTAINER: molecule-ci-postgres
|
||||
REDIS_CONTAINER: molecule-ci-redis
|
||||
steps:
|
||||
- name: No-op pass (paths filter excluded this commit)
|
||||
if: needs.detect-changes.outputs.api != 'true'
|
||||
@ -153,53 +97,11 @@ jobs:
|
||||
go-version: 'stable'
|
||||
cache: true
|
||||
cache-dependency-path: workspace-server/go.sum
|
||||
- name: Pre-pull alpine + ensure provisioner network (Issue #94 items #2 + #3)
|
||||
if: needs.detect-changes.outputs.api == 'true'
|
||||
run: |
|
||||
# Provisioner uses alpine:latest for ephemeral token-write
|
||||
# containers (workspace-server/internal/handlers/container_files.go).
|
||||
# Pre-pull so the first provision in test_api.sh doesn't race
|
||||
# the daemon's pull cache. Idempotent — `docker pull` is a no-op
|
||||
# when the image is already present.
|
||||
docker pull alpine:latest >/dev/null
|
||||
# Provisioner attaches workspace containers to
|
||||
# molecule-core-net (workspace-server/internal/provisioner/
|
||||
# provisioner.go::DefaultNetwork). The bridge already exists on
|
||||
# the operator host's docker daemon — `network create` is
|
||||
# idempotent via `|| true`.
|
||||
docker network create molecule-core-net >/dev/null 2>&1 || true
|
||||
echo "alpine:latest pre-pulled; molecule-core-net ensured."
|
||||
- name: Start Postgres (docker)
|
||||
if: needs.detect-changes.outputs.api == 'true'
|
||||
run: |
|
||||
# Defensive cleanup — only matches THIS run's container name,
|
||||
# so it cannot kill a sibling run's postgres. (Pre-fix the
|
||||
# name was static and this rm hit other runs' containers.)
|
||||
docker rm -f "$PG_CONTAINER" 2>/dev/null || true
|
||||
# `-p 0:5432` requests an ephemeral host port; we read it back
|
||||
# below and export DATABASE_URL.
|
||||
docker run -d --name "$PG_CONTAINER" \
|
||||
-e POSTGRES_USER=dev -e POSTGRES_PASSWORD=dev -e POSTGRES_DB=molecule \
|
||||
-p 0:5432 postgres:16 >/dev/null
|
||||
# Resolve the host-side port assignment. `docker port` prints
|
||||
# `0.0.0.0:NNNN` (and on host-net runners may also print an
|
||||
# IPv6 line — take the first IPv4 line).
|
||||
PG_PORT=$(docker port "$PG_CONTAINER" 5432/tcp | awk -F: '/^0\.0\.0\.0:/ {print $2; exit}')
|
||||
if [ -z "$PG_PORT" ]; then
|
||||
# Fallback: any first line. Some Docker versions print only
|
||||
# one line.
|
||||
PG_PORT=$(docker port "$PG_CONTAINER" 5432/tcp | head -1 | awk -F: '{print $NF}')
|
||||
fi
|
||||
if [ -z "$PG_PORT" ]; then
|
||||
echo "::error::Could not resolve host port for $PG_CONTAINER"
|
||||
docker port "$PG_CONTAINER" 5432/tcp || true
|
||||
docker logs "$PG_CONTAINER" || true
|
||||
exit 1
|
||||
fi
|
||||
# 127.0.0.1 (NOT localhost) — IPv6 first-resolve flake (#92).
|
||||
echo "PG_PORT=${PG_PORT}" >> "$GITHUB_ENV"
|
||||
echo "DATABASE_URL=postgres://dev:dev@127.0.0.1:${PG_PORT}/molecule?sslmode=disable" >> "$GITHUB_ENV"
|
||||
echo "Postgres host port: ${PG_PORT}"
|
||||
docker run -d --name "$PG_CONTAINER" -e POSTGRES_USER=dev -e POSTGRES_PASSWORD=dev -e POSTGRES_DB=molecule -p 15432:5432 postgres:16
|
||||
for i in $(seq 1 30); do
|
||||
if docker exec "$PG_CONTAINER" pg_isready -U dev >/dev/null 2>&1; then
|
||||
echo "Postgres ready after ${i}s"
|
||||
@ -214,20 +116,7 @@ jobs:
|
||||
if: needs.detect-changes.outputs.api == 'true'
|
||||
run: |
|
||||
docker rm -f "$REDIS_CONTAINER" 2>/dev/null || true
|
||||
docker run -d --name "$REDIS_CONTAINER" -p 0:6379 redis:7 >/dev/null
|
||||
REDIS_PORT=$(docker port "$REDIS_CONTAINER" 6379/tcp | awk -F: '/^0\.0\.0\.0:/ {print $2; exit}')
|
||||
if [ -z "$REDIS_PORT" ]; then
|
||||
REDIS_PORT=$(docker port "$REDIS_CONTAINER" 6379/tcp | head -1 | awk -F: '{print $NF}')
|
||||
fi
|
||||
if [ -z "$REDIS_PORT" ]; then
|
||||
echo "::error::Could not resolve host port for $REDIS_CONTAINER"
|
||||
docker port "$REDIS_CONTAINER" 6379/tcp || true
|
||||
docker logs "$REDIS_CONTAINER" || true
|
||||
exit 1
|
||||
fi
|
||||
echo "REDIS_PORT=${REDIS_PORT}" >> "$GITHUB_ENV"
|
||||
echo "REDIS_URL=redis://127.0.0.1:${REDIS_PORT}" >> "$GITHUB_ENV"
|
||||
echo "Redis host port: ${REDIS_PORT}"
|
||||
docker run -d --name "$REDIS_CONTAINER" -p 16379:6379 redis:7
|
||||
for i in $(seq 1 15); do
|
||||
if docker exec "$REDIS_CONTAINER" redis-cli ping 2>/dev/null | grep -q PONG; then
|
||||
echo "Redis ready after ${i}s"
|
||||
@ -246,15 +135,13 @@ jobs:
|
||||
if: needs.detect-changes.outputs.api == 'true'
|
||||
working-directory: workspace-server
|
||||
run: |
|
||||
# DATABASE_URL + REDIS_URL exported by the start-postgres /
|
||||
# start-redis steps point at this run's per-run host ports.
|
||||
./platform-server > platform.log 2>&1 &
|
||||
echo $! > platform.pid
|
||||
- name: Wait for /health
|
||||
if: needs.detect-changes.outputs.api == 'true'
|
||||
run: |
|
||||
for i in $(seq 1 30); do
|
||||
if curl -sf http://127.0.0.1:8080/health > /dev/null; then
|
||||
if curl -sf http://localhost:8080/health > /dev/null; then
|
||||
echo "Platform up after ${i}s"
|
||||
exit 0
|
||||
fi
|
||||
@ -298,9 +185,6 @@ jobs:
|
||||
kill "$(cat workspace-server/platform.pid)" 2>/dev/null || true
|
||||
fi
|
||||
- name: Stop service containers
|
||||
# always() so containers don't leak when test steps fail. The
|
||||
# cleanup is best-effort: if the container is already gone
|
||||
# (e.g. concurrent rerun race), don't fail the job.
|
||||
if: always() && needs.detect-changes.outputs.api == 'true'
|
||||
run: |
|
||||
docker rm -f "$PG_CONTAINER" 2>/dev/null || true
|
||||
|
||||
13
.github/workflows/e2e-staging-canvas.yml
vendored
13
.github/workflows/e2e-staging-canvas.yml
vendored
@ -22,9 +22,9 @@ on:
|
||||
# spending CI cycles. See e2e-api.yml for the rationale on why this
|
||||
# is a single job rather than two-jobs-sharing-name.
|
||||
push:
|
||||
branches: [main]
|
||||
branches: [main, staging]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
branches: [main, staging]
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
# Weekly on Sunday 08:00 UTC — catches Chrome / Playwright / Next.js
|
||||
@ -139,11 +139,7 @@ jobs:
|
||||
|
||||
- name: Upload Playwright report on failure
|
||||
if: failure() && needs.detect-changes.outputs.canvas == 'true'
|
||||
# Pinned to v3 for Gitea act_runner v0.6 compatibility — v4+ uses
|
||||
# the GHES 3.10+ artifact protocol that Gitea 1.22.x does NOT
|
||||
# implement (see ci.yml upload step for the canonical error
|
||||
# cite). Drop this pin when Gitea ships the v4 protocol.
|
||||
uses: actions/upload-artifact@c6a366c94c3e0affe28c06c8df20a878f24da3cf # v3.2.2
|
||||
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
|
||||
with:
|
||||
name: playwright-report-staging
|
||||
path: canvas/playwright-report-staging/
|
||||
@ -151,8 +147,7 @@ jobs:
|
||||
|
||||
- name: Upload screenshots on failure
|
||||
if: failure() && needs.detect-changes.outputs.canvas == 'true'
|
||||
# Pinned to v3 for Gitea act_runner v0.6 compatibility (see above).
|
||||
uses: actions/upload-artifact@c6a366c94c3e0affe28c06c8df20a878f24da3cf # v3.2.2
|
||||
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
|
||||
with:
|
||||
name: playwright-screenshots
|
||||
path: canvas/test-results/
|
||||
|
||||
4
.github/workflows/e2e-staging-external.yml
vendored
4
.github/workflows/e2e-staging-external.yml
vendored
@ -32,7 +32,7 @@ name: E2E Staging External Runtime
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
branches: [staging, main]
|
||||
paths:
|
||||
- 'workspace-server/internal/handlers/workspace.go'
|
||||
- 'workspace-server/internal/handlers/registry.go'
|
||||
@ -44,7 +44,7 @@ on:
|
||||
- 'tests/e2e/test_staging_external_runtime.sh'
|
||||
- '.github/workflows/e2e-staging-external.yml'
|
||||
pull_request:
|
||||
branches: [main]
|
||||
branches: [staging, main]
|
||||
paths:
|
||||
- 'workspace-server/internal/handlers/workspace.go'
|
||||
- 'workspace-server/internal/handlers/registry.go'
|
||||
|
||||
13
.github/workflows/e2e-staging-saas.yml
vendored
13
.github/workflows/e2e-staging-saas.yml
vendored
@ -20,12 +20,13 @@ name: E2E Staging SaaS (full lifecycle)
|
||||
# via the same paths watcher that e2e-api.yml uses)
|
||||
|
||||
on:
|
||||
# Trunk-based (Phase 3 of internal#81): main is the only branch.
|
||||
# Previously this fired on staging push too because staging was a
|
||||
# superset of main and ran the gate ahead of auto-promote; with no
|
||||
# staging branch, main is where E2E gates the deploy.
|
||||
# Fire on staging push too — previously this only ran on main, which
|
||||
# meant the most thorough end-to-end test caught regressions AFTER
|
||||
# they shipped to staging (and then to the auto-promote PR). Running
|
||||
# on staging push catches them BEFORE the staging→main promotion
|
||||
# opens, so a green canary into auto-promote is more meaningful.
|
||||
push:
|
||||
branches: [main]
|
||||
branches: [staging, main]
|
||||
paths:
|
||||
- 'workspace-server/internal/handlers/registry.go'
|
||||
- 'workspace-server/internal/handlers/workspace_provision.go'
|
||||
@ -35,7 +36,7 @@ on:
|
||||
- 'tests/e2e/test_staging_full_saas.sh'
|
||||
- '.github/workflows/e2e-staging-saas.yml'
|
||||
pull_request:
|
||||
branches: [main]
|
||||
branches: [staging, main]
|
||||
paths:
|
||||
- 'workspace-server/internal/handlers/registry.go'
|
||||
- 'workspace-server/internal/handlers/workspace_provision.go'
|
||||
|
||||
139
.github/workflows/handlers-postgres-integration.yml
vendored
139
.github/workflows/handlers-postgres-integration.yml
vendored
@ -14,42 +14,12 @@ name: Handlers Postgres Integration
|
||||
# self-review caught it took 2 minutes to set up and would have caught
|
||||
# the bug at PR-time.
|
||||
#
|
||||
# Why this workflow does NOT use `services: postgres:` (Class B fix)
|
||||
# ------------------------------------------------------------------
|
||||
# Our act_runner config has `container.network: host` (operator host
|
||||
# /opt/molecule/runners/config.yaml), which act_runner applies to BOTH
|
||||
# the job container AND every service container. With host-net, two
|
||||
# concurrent runs of this workflow both try to bind 0.0.0.0:5432 — the
|
||||
# second postgres FATALs with `could not create any TCP/IP sockets:
|
||||
# Address in use`, and Docker auto-removes it (act_runner sets
|
||||
# AutoRemove:true on service containers). By the time the migrations
|
||||
# step runs `psql`, the postgres container is gone, hence
|
||||
# `Connection refused` then `failed to remove container: No such
|
||||
# container` at cleanup time.
|
||||
# This job spins a Postgres service container, applies the migration,
|
||||
# and runs `go test -tags=integration` against a live DB. Required
|
||||
# check on staging branch protection — backend handler PRs cannot
|
||||
# merge without a real-DB regression gate.
|
||||
#
|
||||
# Per-job `container.network` override is silently ignored by
|
||||
# act_runner — `--network and --net in the options will be ignored.`
|
||||
# appears in the runner log. Documented constraint.
|
||||
#
|
||||
# So we sidestep `services:` entirely. The job container still uses
|
||||
# host-net (inherited from runner config; required for cache server
|
||||
# discovery on the bridge IP 172.18.0.17:42631). We launch a sibling
|
||||
# postgres on the existing `molecule-core-net` bridge with a
|
||||
# UNIQUE name per run — `pg-handlers-${RUN_ID}-${RUN_ATTEMPT}` — and
|
||||
# read its bridge IP via `docker inspect`. A host-net job container
|
||||
# can reach a bridge-net container directly via the bridge IP (verified
|
||||
# manually on operator host 2026-05-08).
|
||||
#
|
||||
# Trade-offs vs. the original `services:` shape:
|
||||
# + No host-port collision; N parallel runs share the bridge cleanly
|
||||
# + `if: always()` cleanup runs even on test-step failure
|
||||
# - One more step in the workflow (+~3 lines)
|
||||
# - Requires `molecule-core-net` to exist on the operator host
|
||||
# (it does; declared in docker-compose.yml + docker-compose.infra.yml)
|
||||
#
|
||||
# Class B Hongming-owned CICD red sweep, 2026-05-08.
|
||||
#
|
||||
# Cost: ~30s job (postgres pull from cache + go build + 4 tests).
|
||||
# Cost: ~30s job (postgres pull from GH cache + go build + 4 tests).
|
||||
|
||||
on:
|
||||
push:
|
||||
@ -89,14 +59,20 @@ jobs:
|
||||
name: Handlers Postgres Integration
|
||||
needs: detect-changes
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
# Unique name per run so concurrent jobs don't collide on the
|
||||
# bridge network. ${RUN_ID}-${RUN_ATTEMPT} is unique even across
|
||||
# workflow_dispatch reruns of the same run_id.
|
||||
PG_NAME: pg-handlers-${{ github.run_id }}-${{ github.run_attempt }}
|
||||
# Bridge network already exists on the operator host (declared
|
||||
# in docker-compose.yml + docker-compose.infra.yml).
|
||||
PG_NETWORK: molecule-core-net
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:15-alpine
|
||||
env:
|
||||
POSTGRES_PASSWORD: test
|
||||
POSTGRES_DB: molecule
|
||||
ports:
|
||||
- 5432:5432
|
||||
# GHA spins this with --health-cmd built in for postgres images.
|
||||
options: >-
|
||||
--health-cmd pg_isready
|
||||
--health-interval 5s
|
||||
--health-timeout 5s
|
||||
--health-retries 10
|
||||
defaults:
|
||||
run:
|
||||
working-directory: workspace-server
|
||||
@ -113,57 +89,16 @@ jobs:
|
||||
with:
|
||||
go-version: 'stable'
|
||||
|
||||
- if: needs.detect-changes.outputs.handlers == 'true'
|
||||
name: Start sibling Postgres on bridge network
|
||||
working-directory: .
|
||||
run: |
|
||||
# Sanity: the bridge network must exist on the operator host.
|
||||
# Hard-fail loud if it doesn't — easier to spot than a silent
|
||||
# auto-create that diverges from the rest of the stack.
|
||||
if ! docker network inspect "${PG_NETWORK}" >/dev/null 2>&1; then
|
||||
echo "::error::Bridge network '${PG_NETWORK}' missing on operator host. Re-run docker-compose.infra.yml or check ops handbook."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# If a stale container with the same name exists (rerun on
|
||||
# the same run_id), wipe it first.
|
||||
docker rm -f "${PG_NAME}" >/dev/null 2>&1 || true
|
||||
|
||||
docker run -d \
|
||||
--name "${PG_NAME}" \
|
||||
--network "${PG_NETWORK}" \
|
||||
--health-cmd "pg_isready -U postgres" \
|
||||
--health-interval 5s \
|
||||
--health-timeout 5s \
|
||||
--health-retries 10 \
|
||||
-e POSTGRES_PASSWORD=test \
|
||||
-e POSTGRES_DB=molecule \
|
||||
postgres:15-alpine >/dev/null
|
||||
|
||||
# Read back the bridge IP. Always present immediately after
|
||||
# `docker run -d` for bridge networks.
|
||||
PG_HOST=$(docker inspect "${PG_NAME}" \
|
||||
--format "{{(index .NetworkSettings.Networks \"${PG_NETWORK}\").IPAddress}}")
|
||||
if [ -z "${PG_HOST}" ]; then
|
||||
echo "::error::Could not resolve PG_HOST for ${PG_NAME} on ${PG_NETWORK}"
|
||||
docker logs "${PG_NAME}" || true
|
||||
exit 1
|
||||
fi
|
||||
echo "PG_HOST=${PG_HOST}" >> "$GITHUB_ENV"
|
||||
echo "INTEGRATION_DB_URL=postgres://postgres:test@${PG_HOST}:5432/molecule?sslmode=disable" >> "$GITHUB_ENV"
|
||||
echo "Started ${PG_NAME} at ${PG_HOST}:5432"
|
||||
|
||||
- if: needs.detect-changes.outputs.handlers == 'true'
|
||||
name: Apply migrations to Postgres service
|
||||
env:
|
||||
PGPASSWORD: test
|
||||
run: |
|
||||
# Wait for postgres to actually accept connections. Docker's
|
||||
# health-cmd handles container-side readiness, but the wire
|
||||
# to the bridge IP is best-tested with pg_isready directly.
|
||||
# Wait for postgres to actually accept connections (the
|
||||
# GHA --health-cmd is best-effort but psql can still race).
|
||||
for i in {1..15}; do
|
||||
if pg_isready -h "${PG_HOST}" -p 5432 -U postgres -q; then break; fi
|
||||
echo "waiting for postgres at ${PG_HOST}:5432..."; sleep 2
|
||||
if pg_isready -h localhost -p 5432 -U postgres -q; then break; fi
|
||||
echo "waiting for postgres..."; sleep 2
|
||||
done
|
||||
|
||||
# Apply every .up.sql in lexicographic order with
|
||||
@ -196,7 +131,7 @@ jobs:
|
||||
# not fine once a cross-table atomicity test came in.
|
||||
set +e
|
||||
for migration in $(ls migrations/*.sql 2>/dev/null | grep -v '\.down\.sql$' | sort); do
|
||||
if psql -h "${PG_HOST}" -U postgres -d molecule -v ON_ERROR_STOP=1 \
|
||||
if psql -h localhost -U postgres -d molecule -v ON_ERROR_STOP=1 \
|
||||
-f "$migration" >/dev/null 2>&1; then
|
||||
echo "✓ $(basename "$migration")"
|
||||
else
|
||||
@ -210,7 +145,7 @@ jobs:
|
||||
# fail if any didn't land — that would be a real regression we
|
||||
# want loud.
|
||||
for tbl in delegations workspaces activity_logs pending_uploads; do
|
||||
if ! psql -h "${PG_HOST}" -U postgres -d molecule -tA \
|
||||
if ! psql -h localhost -U postgres -d molecule -tA \
|
||||
-c "SELECT 1 FROM information_schema.tables WHERE table_name = '$tbl'" \
|
||||
| grep -q 1; then
|
||||
echo "::error::$tbl table missing after migration replay — handler integration tests would be meaningless"
|
||||
@ -221,32 +156,16 @@ jobs:
|
||||
|
||||
- if: needs.detect-changes.outputs.handlers == 'true'
|
||||
name: Run integration tests
|
||||
env:
|
||||
INTEGRATION_DB_URL: postgres://postgres:test@localhost:5432/molecule?sslmode=disable
|
||||
run: |
|
||||
# INTEGRATION_DB_URL is exported by the start-postgres step;
|
||||
# points at the per-run bridge IP, not 127.0.0.1, so concurrent
|
||||
# workflow runs don't fight over a host-net 5432 port.
|
||||
go test -tags=integration -timeout 5m -v ./internal/handlers/ -run "^TestIntegration_"
|
||||
|
||||
- if: failure() && needs.detect-changes.outputs.handlers == 'true'
|
||||
- if: needs.detect-changes.outputs.handlers == 'true' && failure()
|
||||
name: Diagnostic dump on failure
|
||||
env:
|
||||
PGPASSWORD: test
|
||||
run: |
|
||||
echo "::group::postgres container status"
|
||||
docker ps -a --filter "name=${PG_NAME}" --format '{{.Status}} {{.Names}}' || true
|
||||
docker logs "${PG_NAME}" 2>&1 | tail -50 || true
|
||||
echo "::endgroup::"
|
||||
echo "::group::delegations table state"
|
||||
psql -h "${PG_HOST}" -U postgres -d molecule -c "SELECT * FROM delegations LIMIT 50;" || true
|
||||
psql -h localhost -U postgres -d molecule -c "SELECT * FROM delegations LIMIT 50;" || true
|
||||
echo "::endgroup::"
|
||||
|
||||
- if: always() && needs.detect-changes.outputs.handlers == 'true'
|
||||
name: Stop sibling Postgres
|
||||
working-directory: .
|
||||
run: |
|
||||
# always() so containers don't leak when migrations or tests
|
||||
# fail. The cleanup is best-effort: if the container is
|
||||
# already gone (e.g. concurrent rerun race), don't fail the job.
|
||||
docker rm -f "${PG_NAME}" >/dev/null 2>&1 || true
|
||||
echo "Cleaned up ${PG_NAME}"
|
||||
|
||||
|
||||
11
.github/workflows/harness-replays.yml
vendored
11
.github/workflows/harness-replays.yml
vendored
@ -119,17 +119,6 @@ jobs:
|
||||
# symptom, different root cause: staging still has the in-image
|
||||
# clone path, hits the auth error directly).
|
||||
#
|
||||
# 2026-05-08 sub-finding (#192): the clone step ALSO fails when
|
||||
# any referenced workspace-template repo is private and the
|
||||
# AUTO_SYNC_TOKEN bearer (devops-engineer persona) lacks read
|
||||
# access. Root cause: 5 of 9 workspace-template repos
|
||||
# (openclaw, codex, crewai, deepagents, gemini-cli) had been
|
||||
# marked private with no team grant. Resolution: flipped them
|
||||
# to public per `feedback_oss_first_repo_visibility_default`
|
||||
# (the OSS surface should be public). Layer-3 (customer-private +
|
||||
# marketplace third-party repos) tracked separately in
|
||||
# internal#102.
|
||||
#
|
||||
# Token shape matches publish-workspace-server-image.yml: AUTO_SYNC_TOKEN
|
||||
# is the devops-engineer persona PAT, NOT the founder PAT (per
|
||||
# `feedback_per_agent_gitea_identity_default`). clone-manifest.sh
|
||||
|
||||
59
.github/workflows/pr-guards.yml
vendored
59
.github/workflows/pr-guards.yml
vendored
@ -1,25 +1,14 @@
|
||||
name: pr-guards
|
||||
|
||||
# PR-time guards. Today the only guard is "disable auto-merge when a
|
||||
# new commit is pushed after auto-merge was enabled" — added 2026-04-27
|
||||
# after PR #2174 auto-merged with only its first commit because the
|
||||
# second commit was pushed after the merge queue had locked the PR's
|
||||
# SHA.
|
||||
# Thin caller that delegates to the molecule-ci reusable guard. Today
|
||||
# the guard is just "disable auto-merge when a new commit is pushed
|
||||
# after auto-merge was enabled" — added 2026-04-27 after PR #2174
|
||||
# auto-merged with only its first commit because the second commit
|
||||
# was pushed after the merge queue had locked the PR's SHA.
|
||||
#
|
||||
# Why this is inlined (not delegated to molecule-ci's reusable
|
||||
# workflow): the reusable workflow uses `gh pr merge --disable-auto`,
|
||||
# which calls GitHub's GraphQL API. Gitea has no GraphQL endpoint and
|
||||
# returns HTTP 405 on /api/graphql, so the job failed on every Gitea
|
||||
# PR push since the 2026-05-06 migration. Gitea also has no `--auto`
|
||||
# merge primitive that this job could be acting on, so the right
|
||||
# behaviour on Gitea is "no-op + green status" — not a 405.
|
||||
#
|
||||
# Inlining (vs. an `if:` on the `uses:` line) keeps the job ALWAYS
|
||||
# running, which matters for branch protection: required-check names
|
||||
# need a job that emits SUCCESS terminal state, not SKIPPED. See
|
||||
# `feedback_branch_protection_check_name_parity` and `feedback_pr_merge_safety_guards`.
|
||||
#
|
||||
# Issue #88 item 1.
|
||||
# When more PR-time guards land in molecule-ci, add them here as
|
||||
# additional jobs that share the same pull_request:synchronize
|
||||
# trigger.
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
@ -30,34 +19,4 @@ permissions:
|
||||
|
||||
jobs:
|
||||
disable-auto-merge-on-push:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# Detect Gitea Actions. act_runner sets GITEA_ACTIONS=true in the
|
||||
# step env on every job. Belt-and-suspenders: also check the repo
|
||||
# url's host, which is independent of any runner-side env config
|
||||
# (covers a future Gitea host where the env var is forgotten).
|
||||
- name: Detect runner host
|
||||
id: host
|
||||
run: |
|
||||
if [[ "${GITEA_ACTIONS:-}" == "true" ]] || [[ "${{ github.server_url }}" == *moleculesai.app* ]] || [[ "${{ github.event.repository.html_url }}" == *moleculesai.app* ]]; then
|
||||
echo "is_gitea=true" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::Gitea Actions detected — auto-merge gating is not applicable here (Gitea has no --auto merge primitive). Job will no-op."
|
||||
else
|
||||
echo "is_gitea=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- name: Disable auto-merge (GitHub only)
|
||||
if: steps.host.outputs.is_gitea != 'true'
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
PR: ${{ github.event.pull_request.number }}
|
||||
REPO: ${{ github.repository }}
|
||||
NEW_SHA: ${{ github.sha }}
|
||||
run: |
|
||||
set -eu
|
||||
gh pr merge "$PR" --disable-auto -R "$REPO" || true
|
||||
gh pr comment "$PR" -R "$REPO" --body "🔒 Auto-merge disabled — new commit (\`${NEW_SHA:0:7}\`) pushed after auto-merge was enabled. The merge queue locks SHAs at entry, so subsequent pushes can race. Verify the new commit and re-enable with \`gh pr merge --auto\`."
|
||||
|
||||
- name: Gitea no-op
|
||||
if: steps.host.outputs.is_gitea == 'true'
|
||||
run: echo "Gitea Actions — auto-merge gating not applicable; no-op (job intentionally green so branch protection's required-check name lands SUCCESS)."
|
||||
uses: molecule-ai/molecule-ci/.github/workflows/disable-auto-merge-on-push.yml@main
|
||||
|
||||
177
.github/workflows/publish-runtime.yml
vendored
177
.github/workflows/publish-runtime.yml
vendored
@ -282,33 +282,42 @@ jobs:
|
||||
echo "::error::Refusing to fan out cascade against stale or corrupt PyPI surfaces."
|
||||
exit 1
|
||||
|
||||
- name: Fan out via push to .runtime-version
|
||||
- name: Fan out repository_dispatch
|
||||
env:
|
||||
# Gitea PAT with write:repository scope on the 8 cascade-active
|
||||
# template repos. Used here for `git push` (NOT for an API
|
||||
# dispatch — Gitea 1.22.6 has no repository_dispatch endpoint;
|
||||
# empirically verified across 6 candidate paths in molecule-
|
||||
# core#20 issuecomment-913). The push trips each template's
|
||||
# existing `on: push: branches: [main]` trigger on
|
||||
# publish-image.yml, which then reads the updated
|
||||
# .runtime-version via its resolve-version job.
|
||||
DISPATCH_TOKEN: ${{ secrets.DISPATCH_TOKEN }}
|
||||
# Fine-grained PAT with `actions:write` on the 8 template repos.
|
||||
# GITHUB_TOKEN can't fire dispatches across repos — needs an explicit
|
||||
# token. Stored as a repo secret; rotate per the standard schedule.
|
||||
DISPATCH_TOKEN: ${{ secrets.TEMPLATE_DISPATCH_TOKEN }}
|
||||
# Single source of truth: the publish job's output, which handles
|
||||
# tag/manual-input/auto-bump uniformly. The previous fallback
|
||||
# (`steps.version.outputs.version` from inside the cascade job)
|
||||
# was a dead reference — different job, no shared step scope.
|
||||
RUNTIME_VERSION: ${{ needs.publish.outputs.version }}
|
||||
run: |
|
||||
set +e # don't abort on a single repo failure — collect them all
|
||||
|
||||
# Soft-skip on workflow_dispatch when the token is missing
|
||||
# (operator ad-hoc test); hard-fail on push so unattended
|
||||
# publishes can't silently skip the cascade. Same shape as
|
||||
# the original v1, intentional split per the schedule-vs-
|
||||
# dispatch hardening 2026-04-28.
|
||||
# Schedule-vs-dispatch behaviour split (hardened 2026-04-28
|
||||
# after the sweep-cf-orphans soft-skip incident — same class
|
||||
# of bug):
|
||||
#
|
||||
# The earlier "skipping cascade. templates will pick up the
|
||||
# new version on their own next rebuild" message was wrong —
|
||||
# templates only build on this dispatch trigger; without it
|
||||
# they stay pinned to whatever runtime version they last saw.
|
||||
# A silent skip here means "PyPI is current, templates are
|
||||
# not" and the gap is invisible until someone notices a
|
||||
# template still on the old version weeks later.
|
||||
#
|
||||
# - push → exit 1 (red CI surfaces the gap)
|
||||
# - workflow_dispatch → exit 0 with a warning (operator
|
||||
# ran this ad-hoc; let them rerun
|
||||
# after fixing the secret)
|
||||
if [ -z "$DISPATCH_TOKEN" ]; then
|
||||
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||
echo "::warning::DISPATCH_TOKEN secret not set — skipping cascade."
|
||||
echo "::warning::TEMPLATE_DISPATCH_TOKEN secret not set — skipping cascade."
|
||||
echo "::warning::set it at Settings → Secrets and Variables → Actions, then rerun. Templates will stay on the prior runtime version until either this token is set or each template is rebuilt manually."
|
||||
exit 0
|
||||
fi
|
||||
echo "::error::DISPATCH_TOKEN secret missing — cascade cannot fan out."
|
||||
echo "::error::TEMPLATE_DISPATCH_TOKEN secret missing — cascade cannot fan out."
|
||||
echo "::error::PyPI was published, but the 8 template repos will NOT pick up the new version until this token is restored and a republish dispatches the cascade."
|
||||
echo "::error::set it at Settings → Secrets and Variables → Actions; then re-trigger publish-runtime via workflow_dispatch."
|
||||
exit 1
|
||||
@ -318,119 +327,37 @@ jobs:
|
||||
echo "::error::publish job did not expose a version output — cascade cannot fan out"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# All 9 workspace templates declared in manifest.json. The list
|
||||
# MUST stay aligned with manifest.json's workspace_templates —
|
||||
# cascade-list-drift-gate.yml enforces this in CI per the
|
||||
# codex-stuck-on-stale-runtime invariant from PR #2556.
|
||||
# Long-term goal: derive this list from manifest.json so it
|
||||
# can't drift even on a manifest edit (RFC #388 Phase-1).
|
||||
#
|
||||
# Per-template publish-image.yml presence is checked at
|
||||
# cascade-time below: codex doesn't ship one today, so the
|
||||
# cascade soft-skips it with an informational message rather
|
||||
# than dropping it from this list (which would re-introduce
|
||||
# the drift the gate exists to catch).
|
||||
GITEA_URL="${GITEA_URL:-https://git.moleculesai.app}"
|
||||
# All 9 active workspace template repos. The PR #2536 pruning
|
||||
# ("deprecated, no shipping images") was empirically wrong:
|
||||
# continuous-synth-e2e.yml defaults to langgraph as its primary
|
||||
# canary (line 44), and every excluded template had successful
|
||||
# publish-image runs as of 2026-05-03 — none were dormant.
|
||||
# Symptom of the prune: today's a2a-sdk strict-mode fix
|
||||
# (#2566 / commit e1628c4) cascaded to 4 templates but never
|
||||
# reached langgraph, so the synth-E2E correctly canary'd a fix
|
||||
# that had landed but not deployed. Re-added the 5 templates.
|
||||
# Long-term: derive this list from manifest.json so cascade
|
||||
# scope can't drift from E2E scope — tracked in RFC #388 as a
|
||||
# Phase-1 invariant.
|
||||
TEMPLATES="claude-code hermes openclaw codex langgraph crewai autogen deepagents gemini-cli"
|
||||
FAILED=""
|
||||
SKIPPED=""
|
||||
|
||||
# Configure git identity once. The persona owning DISPATCH_TOKEN
|
||||
# is the same identity that authored this commit on each
|
||||
# template; using a generic "publish-runtime cascade" co-author
|
||||
# trailer in the message keeps the audit trail honest about the
|
||||
# workflow-driven origin.
|
||||
git config --global user.name "publish-runtime cascade"
|
||||
git config --global user.email "publish-runtime@moleculesai.app"
|
||||
|
||||
WORKDIR="$(mktemp -d)"
|
||||
for tpl in $TEMPLATES; do
|
||||
REPO="molecule-ai/molecule-ai-workspace-template-$tpl"
|
||||
CLONE="$WORKDIR/$tpl"
|
||||
|
||||
# Pre-check: skip templates without a publish-image.yml.
|
||||
# The cascade's job is to trip the template's on-push
|
||||
# rebuild — if there's no rebuild workflow, pushing a
|
||||
# .runtime-version commit is just noise on the target
|
||||
# repo. Use the Gitea contents API (no clone required for
|
||||
# the probe). 200 = present; 404 = absent.
|
||||
HTTP=$(curl -sS -o /dev/null -w "%{http_code}" \
|
||||
-H "Authorization: token $DISPATCH_TOKEN" \
|
||||
"$GITEA_URL/api/v1/repos/$REPO/contents/.github/workflows/publish-image.yml")
|
||||
if [ "$HTTP" = "404" ]; then
|
||||
echo "↷ $tpl has no publish-image.yml — soft-skip (informational; manifest still tracks it)"
|
||||
SKIPPED="$SKIPPED $tpl"
|
||||
continue
|
||||
fi
|
||||
if [ "$HTTP" != "200" ]; then
|
||||
echo "::warning::$tpl publish-image.yml probe returned HTTP $HTTP — proceeding anyway, push will surface the real failure if any"
|
||||
fi
|
||||
|
||||
# Use a per-template attempt loop so a transient race (e.g.
|
||||
# human pushing to the same template at the same instant)
|
||||
# doesn't lose the cascade. Bounded retries (3) — beyond
|
||||
# that we surface the failure and let the operator retry.
|
||||
attempt=0
|
||||
success=false
|
||||
while [ $attempt -lt 3 ]; do
|
||||
attempt=$((attempt + 1))
|
||||
rm -rf "$CLONE"
|
||||
if ! git clone --depth=1 \
|
||||
"https://x-access-token:${DISPATCH_TOKEN}@${GITEA_URL#https://}/$REPO.git" \
|
||||
"$CLONE" >/tmp/clone.log 2>&1; then
|
||||
echo "::warning::clone $tpl attempt $attempt failed: $(tail -n3 /tmp/clone.log)"
|
||||
sleep 2
|
||||
continue
|
||||
fi
|
||||
|
||||
cd "$CLONE"
|
||||
echo "$VERSION" > .runtime-version
|
||||
|
||||
# Idempotency guard: if the file already matches, this
|
||||
# publish is a re-run for a version already cascaded.
|
||||
# Don't push a no-op commit (would spuriously re-trip the
|
||||
# template's on-push and rebuild for nothing).
|
||||
if git diff --quiet -- .runtime-version; then
|
||||
echo "✓ $tpl already at $VERSION — no commit needed (idempotent)"
|
||||
success=true
|
||||
cd - >/dev/null
|
||||
break
|
||||
fi
|
||||
|
||||
git add .runtime-version
|
||||
git commit -m "chore: pin runtime to $VERSION (publish-runtime cascade)" \
|
||||
-m "Co-Authored-By: publish-runtime cascade <publish-runtime@moleculesai.app>" \
|
||||
>/dev/null
|
||||
|
||||
if git push origin HEAD:main >/tmp/push.log 2>&1; then
|
||||
echo "✓ $tpl pushed $VERSION on attempt $attempt"
|
||||
success=true
|
||||
cd - >/dev/null
|
||||
break
|
||||
fi
|
||||
|
||||
# Likely a non-fast-forward — pull-rebase and retry.
|
||||
# Don't force-push: that would silently overwrite a racing
|
||||
# human/cascade commit.
|
||||
echo "::warning::push $tpl attempt $attempt failed, pull-rebasing: $(tail -n3 /tmp/push.log)"
|
||||
git pull --rebase origin main >/tmp/rebase.log 2>&1 || true
|
||||
cd - >/dev/null
|
||||
done
|
||||
|
||||
if [ "$success" != "true" ]; then
|
||||
STATUS=$(curl -sS -o /tmp/dispatch.out -w "%{http_code}" \
|
||||
-X POST "https://api.github.com/repos/$REPO/dispatches" \
|
||||
-H "Authorization: Bearer $DISPATCH_TOKEN" \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||
-d "{\"event_type\":\"runtime-published\",\"client_payload\":{\"runtime_version\":\"$VERSION\"}}")
|
||||
if [ "$STATUS" = "204" ]; then
|
||||
echo "✓ dispatched $tpl ($VERSION)"
|
||||
else
|
||||
echo "::warning::✗ failed to dispatch $tpl: HTTP $STATUS — $(cat /tmp/dispatch.out)"
|
||||
FAILED="$FAILED $tpl"
|
||||
fi
|
||||
done
|
||||
rm -rf "$WORKDIR"
|
||||
|
||||
if [ -n "$FAILED" ]; then
|
||||
echo "::error::Cascade incomplete after 3 retries each. Failed templates:$FAILED"
|
||||
echo "::error::PyPI publish succeeded; failed templates lag the new version. Re-run this workflow_dispatch with the same version to retry only the laggers (idempotent — already-cascaded templates skip)."
|
||||
exit 1
|
||||
fi
|
||||
if [ -n "$SKIPPED" ]; then
|
||||
echo "Cascade complete: pinned $VERSION on cascade-active templates. Soft-skipped (no publish-image.yml):$SKIPPED"
|
||||
else
|
||||
echo "Cascade complete: $VERSION pinned across all manifest workspace_templates."
|
||||
echo "::warning::Cascade incomplete. Failed templates:$FAILED"
|
||||
# Don't fail the whole job — PyPI publish already succeeded;
|
||||
# operators can retry the failed templates manually.
|
||||
fi
|
||||
|
||||
@ -36,7 +36,7 @@ on:
|
||||
workflow_run:
|
||||
workflows: ['publish-workspace-server-image']
|
||||
types: [completed]
|
||||
branches: [main]
|
||||
branches: [staging]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
target_tag:
|
||||
|
||||
105
.github/workflows/retarget-main-to-staging.yml
vendored
Normal file
105
.github/workflows/retarget-main-to-staging.yml
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
name: Retarget main PRs to staging
|
||||
|
||||
# Mechanical enforcement of SHARED_RULES rule 8 ("Staging-first workflow, no
|
||||
# exceptions"). When a bot opens a PR against main, retarget it to staging
|
||||
# automatically and leave an explanatory comment. Human CEO-authored PRs (the
|
||||
# staging→main promotion PR, etc.) are left alone — they're the authorised
|
||||
# exception to the rule.
|
||||
#
|
||||
# Why an Action instead of only a prompt rule: prompt rules depend on every
|
||||
# role's system-prompt.md staying in sync. Today 5 of 8 engineer roles
|
||||
# (core-be, core-fe, app-fe, app-qa, devops-engineer) don't have the
|
||||
# staging-first section — the bot keeps opening PRs to main. An Action
|
||||
# enforces the invariant regardless of prompt drift.
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened, reopened]
|
||||
branches: [main]
|
||||
|
||||
permissions:
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
retarget:
|
||||
name: Retarget to staging
|
||||
runs-on: ubuntu-latest
|
||||
# Only fire for bot-authored PRs. Human CEO PRs (staging→main promotion)
|
||||
# are intentional and pass through.
|
||||
#
|
||||
# Head-ref guard: never retarget a PR whose head IS `staging` — those
|
||||
# are the auto-promote staging→main PRs (opened by molecule-ai[bot]
|
||||
# since #2586 switched to an App token, which now passes the bot
|
||||
# filter below). Retargeting head=staging onto base=staging fails
|
||||
# with HTTP 422 "no new commits between base 'staging' and head
|
||||
# 'staging'", which used to surface as a noisy red workflow run on
|
||||
# every auto-promote (caught 2026-05-03 on PR #2588).
|
||||
if: >-
|
||||
github.event.pull_request.head.ref != 'staging'
|
||||
&& (
|
||||
github.event.pull_request.user.type == 'Bot'
|
||||
|| endsWith(github.event.pull_request.user.login, '[bot]')
|
||||
|| github.event.pull_request.user.login == 'app/molecule-ai'
|
||||
|| github.event.pull_request.user.login == 'molecule-ai[bot]'
|
||||
)
|
||||
steps:
|
||||
- name: Retarget PR base to staging
|
||||
id: retarget
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
|
||||
# Issue #1884: when the bot opens a PR against main and there's
|
||||
# already another PR on the same head branch targeting staging,
|
||||
# GitHub's PATCH /pulls returns 422 with
|
||||
# "A pull request already exists for base branch 'staging' …".
|
||||
# The retarget can't proceed — but the right response is to
|
||||
# close the now-redundant main-PR, not to fail the workflow
|
||||
# noisily. Detect that specific 422 and close instead.
|
||||
run: |
|
||||
set +e
|
||||
echo "Retargeting PR #${PR_NUMBER} (author: ${PR_AUTHOR}) from main → staging"
|
||||
PATCH_OUTPUT=$(gh api -X PATCH \
|
||||
"repos/${{ github.repository }}/pulls/${PR_NUMBER}" \
|
||||
-f base=staging \
|
||||
--jq '.base.ref' 2>&1)
|
||||
PATCH_EXIT=$?
|
||||
set -e
|
||||
if [ "$PATCH_EXIT" -eq 0 ]; then
|
||||
echo "::notice::Retargeted PR #${PR_NUMBER} → staging"
|
||||
echo "outcome=retargeted" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
# Specifically match the 422 duplicate-base/head error so
|
||||
# any OTHER PATCH failure (auth, deleted PR, etc.) still
|
||||
# surfaces as a real workflow failure.
|
||||
if echo "$PATCH_OUTPUT" | grep -q "pull request already exists for base branch 'staging'"; then
|
||||
echo "::notice::PR #${PR_NUMBER}: duplicate target-staging PR exists on same head — closing this main-PR as redundant."
|
||||
gh pr close "$PR_NUMBER" \
|
||||
--repo "${{ github.repository }}" \
|
||||
--comment "[retarget-bot] Closing — another PR on the same head branch already targets \`staging\`. This PR is redundant. See issue #1884 for the rationale."
|
||||
echo "outcome=closed-as-duplicate" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
echo "::error::Retarget PATCH failed and was NOT a duplicate-base error:"
|
||||
echo "$PATCH_OUTPUT" >&2
|
||||
exit 1
|
||||
|
||||
- name: Post explainer comment
|
||||
if: steps.retarget.outputs.outcome == 'retargeted'
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
run: |
|
||||
gh pr comment "$PR_NUMBER" \
|
||||
--repo "${{ github.repository }}" \
|
||||
--body "$(cat <<'BODY'
|
||||
[retarget-bot] This PR was opened against `main` and has been retargeted to `staging` automatically.
|
||||
|
||||
**Why:** per [SHARED_RULES rule 8](https://github.com/molecule-ai/molecule-ai-org-template-molecule-dev/blob/main/SHARED_RULES.md), all feature work targets `staging` first; the CEO promotes `staging → main` separately.
|
||||
|
||||
**What changed:** just the base branch — no code change. CI will re-run against `staging`. If you get merge conflicts, rebase on `staging`.
|
||||
|
||||
**If this PR is the CEO's staging→main promotion:** the Action skipped you (only bot-authored PRs are retargeted). If you see this comment on your CEO PR, that's a bug — please tag @HongmingWang-Rabbit.
|
||||
BODY
|
||||
)"
|
||||
@ -22,7 +22,7 @@ development workflow, conventions, and how to get your changes merged.
|
||||
|
||||
```bash
|
||||
# Clone the repo
|
||||
git clone https://git.moleculesai.app/molecule-ai/molecule-core.git
|
||||
git clone https://github.com/Molecule-AI/molecule-core.git
|
||||
cd molecule-core
|
||||
|
||||
# Install git hooks
|
||||
@ -57,7 +57,7 @@ See `CLAUDE.md` for a full list of environment variables and their purposes.
|
||||
|
||||
This repo is scoped to **code** (canvas, workspace, workspace-server, related
|
||||
infra). Public content (blog posts, marketing copy, OG images, SEO briefs,
|
||||
DevRel demos) lives in [`Molecule-AI/docs`](https://git.moleculesai.app/molecule-ai/docs).
|
||||
DevRel demos) lives in [`Molecule-AI/docs`](https://github.com/Molecule-AI/docs).
|
||||
The `Block forbidden paths` CI gate fails any PR that writes to `marketing/`
|
||||
or other removed paths — open against `Molecule-AI/docs` instead.
|
||||
|
||||
@ -110,7 +110,7 @@ causing a render loop when any node position changed.
|
||||
|
||||
1. **Repo-wide:** "Automatically delete head branches" is on. Once a PR merges, the branch is deleted server-side. Any subsequent `git push` to that branch fails with `remote rejected — no such branch`.
|
||||
|
||||
2. **CI:** the `pr-guards` workflow (calling [molecule-ci `disable-auto-merge-on-push`](https://git.moleculesai.app/molecule-ai/molecule-ci/src/branch/main/.github/workflows/disable-auto-merge-on-push.yml)) fires on every push to an open PR. If auto-merge was already enabled, it's disabled and a comment is posted. You must explicitly re-enable after verifying the new commit.
|
||||
2. **CI:** the `pr-guards` workflow (calling [molecule-ci `disable-auto-merge-on-push`](https://github.com/Molecule-AI/molecule-ci/blob/main/.github/workflows/disable-auto-merge-on-push.yml)) fires on every push to an open PR. If auto-merge was already enabled, it's disabled and a comment is posted. You must explicitly re-enable after verifying the new commit.
|
||||
|
||||
**Workflow rules that follow from the guards:**
|
||||
- Push **all** commits before running `gh pr merge --auto`.
|
||||
@ -180,9 +180,9 @@ and run CI manually.
|
||||
Code in this repo lands in molecule-core. Some related runtime artifacts
|
||||
live in their own repos:
|
||||
|
||||
- [`Molecule-AI/molecule-ai-workspace-runtime`](https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-runtime) — Python adapter SDK (`molecule_runtime`) that runs inside containerized Molecule workspaces. Bridges Claude Code SDK / hermes / langgraph / etc. → A2A queue.
|
||||
- [`Molecule-AI/molecule-sdk-python`](https://git.moleculesai.app/molecule-ai/molecule-sdk-python) — `A2AServer` + `RemoteAgentClient` for external agents that register over the public `/registry/register` flow.
|
||||
- [`Molecule-AI/molecule-mcp-claude-channel`](https://git.moleculesai.app/molecule-ai/molecule-mcp-claude-channel) — Claude Code channel plugin. Bridges A2A traffic into a running Claude Code session via MCP `notifications/claude/channel`. Polling-based (no tunnel required); install with `claude --channels plugin:molecule@Molecule-AI/molecule-mcp-claude-channel`.
|
||||
- [`Molecule-AI/molecule-ai-workspace-runtime`](https://github.com/Molecule-AI/molecule-ai-workspace-runtime) — Python adapter SDK (`molecule_runtime`) that runs inside containerized Molecule workspaces. Bridges Claude Code SDK / hermes / langgraph / etc. → A2A queue.
|
||||
- [`Molecule-AI/molecule-sdk-python`](https://github.com/Molecule-AI/molecule-sdk-python) — `A2AServer` + `RemoteAgentClient` for external agents that register over the public `/registry/register` flow.
|
||||
- [`Molecule-AI/molecule-mcp-claude-channel`](https://github.com/Molecule-AI/molecule-mcp-claude-channel) — Claude Code channel plugin. Bridges A2A traffic into a running Claude Code session via MCP `notifications/claude/channel`. Polling-based (no tunnel required); install with `claude --channels plugin:molecule@Molecule-AI/molecule-mcp-claude-channel`.
|
||||
|
||||
When extending the **A2A surface** in molecule-core (`workspace-server/internal/handlers/a2a_proxy.go` etc.), consider whether the change has a downstream impact on the runtime SDK or the channel plugin — they're versioned independently but share the wire shape.
|
||||
|
||||
|
||||
28
Makefile
28
Makefile
@ -1,28 +0,0 @@
|
||||
# Top-level Makefile — convenience wrappers around docker compose.
|
||||
#
|
||||
# Most molecule-core dev work happens via these shortcuts. CI doesn't
|
||||
# use this Makefile; CI calls docker compose / go test directly so the
|
||||
# Makefile can evolve without breaking the build.
|
||||
|
||||
.PHONY: help dev up down logs build test
|
||||
|
||||
help: ## Show this help.
|
||||
@grep -E '^[a-zA-Z_-]+:.*?## ' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-12s\033[0m %s\n", $$1, $$2}'
|
||||
|
||||
dev: ## Start the full stack with air hot-reload for the platform service.
|
||||
docker compose -f docker-compose.yml -f docker-compose.dev.yml up
|
||||
|
||||
up: ## Start the full stack in production-shape mode (no air, normal Dockerfile).
|
||||
docker compose up
|
||||
|
||||
down: ## Stop the stack and remove containers (volumes preserved).
|
||||
docker compose down
|
||||
|
||||
logs: ## Tail logs from all services (Ctrl-C to detach).
|
||||
docker compose logs -f
|
||||
|
||||
build: ## Force a fresh build of the platform image (no cache).
|
||||
docker compose build --no-cache platform
|
||||
|
||||
test: ## Run Go unit tests in workspace-server/.
|
||||
cd workspace-server && go test -race ./...
|
||||
77
README.md
77
README.md
@ -1,7 +1,7 @@
|
||||
<div align="center">
|
||||
|
||||
<p>
|
||||
<img src="./docs/assets/branding/molecule-icon.svg" alt="Molecule AI" width="160" />
|
||||
<img src="./docs/assets/branding/molecule-icon.png" alt="Molecule AI Icon Logo" width="160" />
|
||||
</p>
|
||||
|
||||
<p>
|
||||
@ -39,8 +39,8 @@
|
||||
<a href="./docs/agent-runtime/workspace-runtime.md"><strong>Workspace Runtime</strong></a>
|
||||
</p>
|
||||
|
||||
[](https://railway.app/new/template?template=https://git.moleculesai.app/molecule-ai/molecule-core)
|
||||
[](https://render.com/deploy?repo=https://git.moleculesai.app/molecule-ai/molecule-core)
|
||||
[](https://railway.app/new/template?template=https://github.com/Molecule-AI/molecule-monorepo)
|
||||
[](https://render.com/deploy?repo=https://github.com/Molecule-AI/molecule-monorepo)
|
||||
|
||||
</div>
|
||||
|
||||
@ -53,8 +53,8 @@ Molecule AI is the most powerful way to govern an AI agent organization in produ
|
||||
It combines the parts that are usually scattered across demos, internal glue code, and framework-specific tooling into one product:
|
||||
|
||||
- one org-native control plane for teams, roles, hierarchy, and lifecycle
|
||||
- one runtime layer that lets **eight** agent runtimes — LangGraph, DeepAgents, Claude Code, CrewAI, AutoGen, **Hermes**, **Gemini CLI**, and OpenClaw — run side by side behind one workspace contract
|
||||
- one memory model that keeps recall, sharing, and skill evolution aligned with organizational boundaries (Memory v2 backed by pgvector for semantic recall)
|
||||
- one runtime layer that lets LangGraph, DeepAgents, Claude Code, CrewAI, AutoGen, and OpenClaw run side by side
|
||||
- one memory model that keeps recall, sharing, and skill evolution aligned with organizational boundaries
|
||||
- one operational surface for observing, pausing, restarting, inspecting, and improving live workspaces
|
||||
|
||||
Most teams can build a workflow, a strong single agent, a coding agent, or a custom multi-agent graph.
|
||||
@ -75,7 +75,7 @@ You do not wire collaboration paths by hand. Hierarchy defines the default commu
|
||||
|
||||
### 3. Runtime choice stops being a dead-end decision
|
||||
|
||||
LangGraph, DeepAgents, Claude Code, CrewAI, AutoGen, Hermes, Gemini CLI, and OpenClaw can all plug into the same workspace abstraction. Teams can standardize governance without forcing every group onto one runtime.
|
||||
LangGraph, DeepAgents, Claude Code, CrewAI, AutoGen, and OpenClaw can all plug into the same workspace abstraction. Teams can standardize governance without forcing every group onto one runtime.
|
||||
|
||||
### 4. Memory is treated like infrastructure
|
||||
|
||||
@ -117,8 +117,6 @@ Molecule AI is not trying to replace the frameworks below. It is the system that
|
||||
| **Claude Code** | Shipping on `main` | Real coding workflows, CLI-native continuity | Secure workspace abstraction, A2A delegation, org boundaries, shared control plane |
|
||||
| **CrewAI** | Shipping on `main` | Role-based crews | Persistent workspace identity, policy consistency, shared canvas and registry |
|
||||
| **AutoGen** | Shipping on `main` | Assistant/tool orchestration | Standardized deployment, hierarchy-aware collaboration, shared ops plane |
|
||||
| **Hermes 4** | Shipping on `main` | Hybrid reasoning, native tools, json_schema (NousResearch/hermes-agent) | Option B upstream hook, A2A bridge to OpenAI-compat API, multi-provider provider derivation |
|
||||
| **Gemini CLI** | Shipping on `main` | Google Gemini CLI continuity | Workspace lifecycle, A2A, hierarchy-aware collaboration, shared ops plane |
|
||||
| **OpenClaw** | Shipping on `main` | CLI-native runtime with its own session model | Workspace lifecycle, templates, activity logs, topology-aware collaboration |
|
||||
| **NemoClaw** | WIP on `feat/nemoclaw-t4-docker` | NVIDIA-oriented runtime path | Planned to join the same abstraction once merged; not yet part of `main` |
|
||||
|
||||
@ -184,10 +182,9 @@ The result is not just “an agent that learns.” It is **an organization that
|
||||
|
||||
## What Ships In `main`
|
||||
|
||||
### Canvas (v4)
|
||||
### Canvas
|
||||
|
||||
- Next.js 15 + React Flow + Zustand
|
||||
- **warm-paper theme system** — light / dark / follow-system, SSR cookie + nonce'd boot script + ThemeProvider; terminal + code surfaces stay dark unconditionally
|
||||
- drag-to-nest team building
|
||||
- empty-state deployment + onboarding wizard
|
||||
- template palette
|
||||
@ -196,9 +193,8 @@ The result is not just “an agent that learns.” It is **an organization that
|
||||
|
||||
### Platform
|
||||
|
||||
- Go 1.25 / Gin control plane (80+ HTTP endpoints + Gorilla WebSocket fanout)
|
||||
- workspace CRUD and provisioning (pluggable Provisioner — Docker locally, EC2 + SSM in production)
|
||||
- **A2A response path is a typed discriminated union (RFC #2967)** — frozen dataclasses + total parser; 100% unit + adversarial fuzz coverage
|
||||
- Go/Gin control plane
|
||||
- workspace CRUD and provisioning
|
||||
- registry and heartbeats
|
||||
- browser-safe A2A proxy
|
||||
- team expansion/collapse
|
||||
@ -208,10 +204,10 @@ The result is not just “an agent that learns.” It is **an organization that
|
||||
|
||||
### Runtime
|
||||
|
||||
- unified `workspace/` image; thin AMI in production (us-east-2)
|
||||
- adapter-driven execution across **8 runtimes** (Claude Code, Hermes, Gemini CLI, LangGraph, DeepAgents, CrewAI, AutoGen, OpenClaw)
|
||||
- unified `workspace/` image
|
||||
- adapter-driven execution
|
||||
- Agent Card registration
|
||||
- awareness-backed memory integration; **Memory v2 backed by pgvector** for semantic recall
|
||||
- awareness-backed memory integration
|
||||
- plugin-mounted shared rules/skills
|
||||
- hot-reloadable local skills
|
||||
- coordinator-only delegation path
|
||||
@ -225,21 +221,6 @@ The result is not just “an agent that learns.” It is **an organization that
|
||||
- runtime tiers
|
||||
- direct workspace inspection through terminal and files
|
||||
|
||||
### SaaS (via [`molecule-controlplane`](https://git.moleculesai.app/molecule-ai/molecule-controlplane))
|
||||
|
||||
- multi-tenant on AWS EC2 + Neon (per-tenant Postgres branch) + Cloudflare Tunnels (per-tenant, no public ports)
|
||||
- WorkOS AuthKit + Stripe Checkout + Customer Portal
|
||||
- AWS KMS envelope encryption (DB / Redis connection strings); AWS Secrets Manager for tenant bootstrap
|
||||
- `tenant_resources` audit table + 30-min boot-event-aware reconciler — every CF / AWS lifecycle event recorded, claim vs live state diffed
|
||||
|
||||
### Bring your own Claude Code session (via [`molecule-mcp-claude-channel`](https://git.moleculesai.app/molecule-ai/molecule-mcp-claude-channel))
|
||||
|
||||
- Claude Code plugin that bridges Molecule A2A traffic into a local Claude Code session via MCP
|
||||
- subscribe to one or more workspaces; peer messages surface as conversation turns; replies route back through Molecule's A2A
|
||||
- no tunnel, no public endpoint — the plugin self-registers each watched workspace as `delivery_mode=poll` and long-polls `/activity?since_id=…`
|
||||
- multi-tenant friendly: one plugin install can watch workspaces across multiple Molecule tenants (`MOLECULE_PLATFORM_URLS` per-workspace)
|
||||
- install via the standard marketplace flow: `/plugin marketplace add Molecule-AI/molecule-mcp-claude-channel` → `/plugin install molecule-channel@molecule-mcp-claude-channel`
|
||||
|
||||
## Built For Teams That Need More Than A Demo
|
||||
|
||||
Molecule AI is especially strong when you need to run:
|
||||
@ -252,30 +233,24 @@ Molecule AI is especially strong when you need to run:
|
||||
## Architecture
|
||||
|
||||
```text
|
||||
Canvas (Next.js 15, warm-paper :3000) <--HTTP / WS--> Platform (Go 1.25 :8080) <---> Postgres + Redis
|
||||
| |
|
||||
| +--> Provisioner: Docker (local) / EC2 + SSM (prod)
|
||||
| +--> bundles · templates · secrets · KMS
|
||||
Canvas (Next.js :3000) <--HTTP / WS--> Platform (Go :8080) <---> Postgres + Redis
|
||||
| |
|
||||
| +--> Docker provisioner / bundles / templates / secrets
|
||||
|
|
||||
+------------------------- shows ------------------------> workspaces, teams, tasks, traces, events
|
||||
+-------------------- shows --------------------> workspaces, teams, tasks, traces, events
|
||||
|
||||
Workspace Runtime (Python ≥3.11, image with adapters)
|
||||
- 8 adapters: LangGraph / DeepAgents / Claude Code / CrewAI / AutoGen / Hermes / Gemini CLI / OpenClaw
|
||||
- Agent Card + A2A server (typed-SSOT response path, RFC #2967)
|
||||
- heartbeat + activity + awareness-backed memory (Memory v2 — pgvector semantic recall)
|
||||
Workspace Runtime (Python image with adapters)
|
||||
- LangGraph / DeepAgents / Claude Code / CrewAI / AutoGen / OpenClaw
|
||||
- Agent Card + A2A server
|
||||
- heartbeat + activity + awareness-backed memory
|
||||
- skills + plugins + hot reload
|
||||
|
||||
SaaS Control Plane (molecule-controlplane, private)
|
||||
- per-tenant EC2 + Neon (Postgres branch) + Cloudflare Tunnel
|
||||
- WorkOS · Stripe · KMS · AWS Secrets Manager
|
||||
- tenant_resources audit + 30-min reconciler
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
git clone https://git.moleculesai.app/molecule-ai/molecule-core.git
|
||||
cd molecule-core
|
||||
git clone https://github.com/Molecule-AI/molecule-monorepo.git
|
||||
cd molecule-monorepo
|
||||
|
||||
cp .env.example .env
|
||||
# Defaults boot the stack locally out of the box. See .env.example for
|
||||
@ -284,7 +259,7 @@ cp .env.example .env
|
||||
./infra/scripts/setup.sh
|
||||
# Boots Postgres (:5432), Redis (:6379), Langfuse (:3001),
|
||||
# and Temporal (:7233 gRPC, :8233 UI) on the shared
|
||||
# `molecule-core-net` Docker network. Temporal runs with
|
||||
# `molecule-monorepo-net` Docker network. Temporal runs with
|
||||
# no auth on localhost — dev-only; production must gate it.
|
||||
#
|
||||
# Also populates the template/plugin registry by cloning every repo
|
||||
@ -328,11 +303,7 @@ Then open `http://localhost:3000`:
|
||||
|
||||
## Current Scope
|
||||
|
||||
The current `main` branch ships the core platform, Canvas v4 (warm-paper themed), Memory v2 (pgvector semantic recall), the typed-SSOT A2A response path (RFC #2967), **eight production adapters** (Claude Code, Hermes, Gemini CLI, LangGraph, DeepAgents, CrewAI, AutoGen, OpenClaw), skill lifecycle, and operational surfaces.
|
||||
|
||||
The companion private repo [`molecule-controlplane`](https://git.moleculesai.app/molecule-ai/molecule-controlplane) provides the SaaS surface — multi-tenant orchestration on EC2 + Neon + Cloudflare Tunnels, KMS envelope encryption, WorkOS auth, Stripe billing, and a `tenant_resources` audit table with a 30-min reconciler.
|
||||
|
||||
Adjacent runtime work such as **NemoClaw** remains branch-level until merged, and this README keeps that distinction explicit on purpose.
|
||||
The current `main` branch already includes the core platform, canvas, memory model, six production adapters, skill lifecycle, and operational surfaces. Adjacent runtime work such as **NemoClaw** remains branch-level until merged, and this README keeps that distinction explicit on purpose.
|
||||
|
||||
## License
|
||||
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
<div align="center">
|
||||
|
||||
<p>
|
||||
<img src="./docs/assets/branding/molecule-icon.svg" alt="Molecule AI" width="160" />
|
||||
<img src="./docs/assets/branding/molecule-icon.png" alt="Molecule AI 图案 Logo" width="160" />
|
||||
</p>
|
||||
|
||||
<p>
|
||||
@ -38,8 +38,8 @@
|
||||
<a href="./docs/agent-runtime/workspace-runtime.md"><strong>Workspace Runtime</strong></a>
|
||||
</p>
|
||||
|
||||
[](https://railway.app/new/template?template=https://git.moleculesai.app/molecule-ai/molecule-core)
|
||||
[](https://render.com/deploy?repo=https://git.moleculesai.app/molecule-ai/molecule-core)
|
||||
[](https://railway.app/new/template?template=https://github.com/Molecule-AI/molecule-core)
|
||||
[](https://render.com/deploy?repo=https://github.com/Molecule-AI/molecule-core)
|
||||
|
||||
</div>
|
||||
|
||||
@ -52,8 +52,8 @@ Molecule AI 是目前最强的 AI Agent 组织治理方案之一,用来把 age
|
||||
它把过去分散在 demo、内部胶水代码和各类 framework 私有工具里的关键能力,收敛成一个产品:
|
||||
|
||||
- 一套组织原生 control plane,管理团队、角色、层级和生命周期
|
||||
- 一套 runtime abstraction,让 **8 个** agent runtime —— LangGraph、DeepAgents、Claude Code、CrewAI、AutoGen、**Hermes**、**Gemini CLI**、OpenClaw —— 共用一套 workspace 契约
|
||||
- 一套与组织边界对齐的 memory 模型,把 recall、sharing 和 skill evolution 放进同一体系(Memory v2 由 pgvector 支撑语义召回)
|
||||
- 一套 runtime abstraction,让 LangGraph、DeepAgents、Claude Code、CrewAI、AutoGen、OpenClaw 并存运行
|
||||
- 一套与组织边界对齐的 memory 模型,把 recall、sharing 和 skill evolution 放进同一体系
|
||||
- 一套面向线上 workspace 的运维面,统一完成观测、暂停、重启、检查和持续改进
|
||||
|
||||
今天很多团队能做好 workflow、单 agent、coding agent,或者自定义 multi-agent graph 中的一种。
|
||||
@ -74,7 +74,7 @@ Molecule AI 填的就是这个空白。
|
||||
|
||||
### 3. Runtime 选择不再是死路
|
||||
|
||||
LangGraph、DeepAgents、Claude Code、CrewAI、AutoGen、Hermes、Gemini CLI、OpenClaw 都可以挂到同一个 workspace abstraction 下。团队可以统一治理方式,而不必统一到底层 runtime。
|
||||
LangGraph、DeepAgents、Claude Code、CrewAI、AutoGen、OpenClaw 都可以挂到同一个 workspace abstraction 下。团队可以统一治理方式,而不必统一到底层 runtime。
|
||||
|
||||
### 4. Memory 被当成基础设施来做
|
||||
|
||||
@ -116,8 +116,6 @@ Molecule AI 并不是要替代下面这些 framework,而是把它们纳入更
|
||||
| **Claude Code** | `main` 已支持 | 真实编码工作流、CLI-native continuity | 安全 workspace 抽象、A2A delegation、组织边界、共享 control plane |
|
||||
| **CrewAI** | `main` 已支持 | 角色型 crew 模式清晰 | 持久 workspace 身份、统一策略、共享 Canvas 和 registry |
|
||||
| **AutoGen** | `main` 已支持 | assistant/tool orchestration | 统一部署、层级协作、共享运维平面 |
|
||||
| **Hermes 4** | `main` 已支持 | 混合推理、原生工具调用、json_schema 输出(NousResearch/hermes-agent) | Option B 上游 hook、A2A 桥接 OpenAI 兼容 API、多 provider 自动派生 |
|
||||
| **Gemini CLI** | `main` 已支持 | Google Gemini CLI 持续会话 | workspace 生命周期、A2A、层级感知协作、共享运维平面 |
|
||||
| **OpenClaw** | `main` 已支持 | CLI-native runtime,自有 session 模型 | workspace 生命周期、templates、activity logs、拓扑感知协作 |
|
||||
| **NemoClaw** | `feat/nemoclaw-t4-docker` 分支 WIP | NVIDIA 方向 runtime 路线 | 计划并入同一抽象层,但当前还不是 `main` 已合并能力 |
|
||||
|
||||
@ -183,10 +181,9 @@ Molecule AI 并不是要替代下面这些 framework,而是把它们纳入更
|
||||
|
||||
## `main` 分支已经具备什么
|
||||
|
||||
### Canvas(v4)
|
||||
### Canvas
|
||||
|
||||
- Next.js 15 + React Flow + Zustand
|
||||
- **warm-paper 主题系统** —— light / dark / 跟随系统;SSR cookie + nonce'd boot 脚本 + ThemeProvider;终端与代码面板始终保持深色
|
||||
- drag-to-nest 团队构建
|
||||
- empty state + onboarding wizard
|
||||
- template palette
|
||||
@ -195,9 +192,8 @@ Molecule AI 并不是要替代下面这些 framework,而是把它们纳入更
|
||||
|
||||
### Platform
|
||||
|
||||
- Go 1.25 / Gin control plane(80+ HTTP 端点 + Gorilla WebSocket fanout)
|
||||
- workspace CRUD 和 provisioning(可插拔 Provisioner —— 本地 Docker、生产 EC2 + SSM)
|
||||
- **A2A 响应路径已收敛为类型化的判别联合(RFC #2967)** —— 冻结 dataclass + 全量 parser;100% 单元测试 + 对抗性 fuzz 覆盖
|
||||
- Go/Gin control plane
|
||||
- workspace CRUD 和 provisioning
|
||||
- registry 与 heartbeat
|
||||
- 浏览器安全的 A2A proxy
|
||||
- team expansion/collapse
|
||||
@ -207,10 +203,10 @@ Molecule AI 并不是要替代下面这些 framework,而是把它们纳入更
|
||||
|
||||
### Runtime
|
||||
|
||||
- 统一 `workspace/` 镜像;生产环境采用 thin AMI(us-east-2)
|
||||
- adapter 驱动执行,覆盖 **8 个 runtime**(Claude Code、Hermes、Gemini CLI、LangGraph、DeepAgents、CrewAI、AutoGen、OpenClaw)
|
||||
- 统一 `workspace/` 镜像
|
||||
- adapter 驱动执行
|
||||
- Agent Card 注册
|
||||
- awareness-backed memory;**Memory v2 由 pgvector 支撑**语义召回
|
||||
- awareness-backed memory
|
||||
- plugin 挂载共享 rules/skills
|
||||
- 本地 skills 热加载
|
||||
- coordinator-only delegation 路径
|
||||
@ -224,21 +220,6 @@ Molecule AI 并不是要替代下面这些 framework,而是把它们纳入更
|
||||
- runtime tiers
|
||||
- 终端与文件层面的 workspace 直接排障
|
||||
|
||||
### SaaS(由 [`molecule-controlplane`](https://git.moleculesai.app/molecule-ai/molecule-controlplane) 提供)
|
||||
|
||||
- 多租户运行在 AWS EC2 + Neon(每租户一个 Postgres branch)+ Cloudflare Tunnels(每租户一条隧道,对外不开任何端口)
|
||||
- WorkOS AuthKit + Stripe Checkout + Customer Portal
|
||||
- AWS KMS 信封加密(DB / Redis 连接串);AWS Secrets Manager 负责租户 bootstrap
|
||||
- `tenant_resources` 审计表 + 30 分钟 boot-event-aware reconciler —— 每个 CF / AWS lifecycle 事件都有记录,每 30 分钟比对 claim 与实际状态
|
||||
|
||||
### 在 Claude Code 里直接接入(由 [`molecule-mcp-claude-channel`](https://git.moleculesai.app/molecule-ai/molecule-mcp-claude-channel) 提供)
|
||||
|
||||
- 把 Molecule A2A 流量桥接到本地 Claude Code 会话的 MCP 插件
|
||||
- 订阅一个或多个 workspace;peer 的消息会以 user-turn 出现,回复会经 Molecule A2A 路由出去
|
||||
- 无需公网隧道、无需公开端点 —— 插件启动时自动把每个 watched workspace 注册成 `delivery_mode=poll`,长轮询 `/activity?since_id=…`
|
||||
- 多租户友好:单次安装即可同时 watch 跨多个 Molecule 租户的 workspace(`MOLECULE_PLATFORM_URLS` 按 workspace 配置)
|
||||
- 通过标准 marketplace 流程安装:`/plugin marketplace add Molecule-AI/molecule-mcp-claude-channel` → `/plugin install molecule-channel@molecule-mcp-claude-channel`
|
||||
|
||||
## 适合什么团队
|
||||
|
||||
Molecule AI 特别适合下面这些场景:
|
||||
@ -251,29 +232,23 @@ Molecule AI 特别适合下面这些场景:
|
||||
## 架构总览
|
||||
|
||||
```text
|
||||
Canvas (Next.js 15, warm-paper :3000) <--HTTP / WS--> Platform (Go 1.25 :8080) <---> Postgres + Redis
|
||||
| |
|
||||
| +--> Provisioner: Docker (本地) / EC2 + SSM (生产)
|
||||
| +--> bundles · templates · secrets · KMS
|
||||
Canvas (Next.js :3000) <--HTTP / WS--> Platform (Go :8080) <---> Postgres + Redis
|
||||
| |
|
||||
| +--> Docker provisioner / bundles / templates / secrets
|
||||
|
|
||||
+------------------------- 展示 ------------------------> workspaces, teams, tasks, traces, events
|
||||
+-------------------- 展示 --------------------> workspaces, teams, tasks, traces, events
|
||||
|
||||
Workspace Runtime (Python ≥3.11,含 adapter 集合的镜像)
|
||||
- 8 个 adapter: LangGraph / DeepAgents / Claude Code / CrewAI / AutoGen / Hermes / Gemini CLI / OpenClaw
|
||||
- Agent Card + A2A server(typed-SSOT 响应路径,RFC #2967)
|
||||
- heartbeat + activity + awareness-backed memory(Memory v2 —— pgvector 语义召回)
|
||||
Workspace Runtime (Python image with adapters)
|
||||
- LangGraph / DeepAgents / Claude Code / CrewAI / AutoGen / OpenClaw
|
||||
- Agent Card + A2A server
|
||||
- heartbeat + activity + awareness-backed memory
|
||||
- skills + plugins + hot reload
|
||||
|
||||
SaaS Control Plane (molecule-controlplane,私有)
|
||||
- 每租户 EC2 + Neon (Postgres branch) + Cloudflare Tunnel
|
||||
- WorkOS · Stripe · KMS · AWS Secrets Manager
|
||||
- tenant_resources 审计 + 30 分钟 reconciler
|
||||
```
|
||||
|
||||
## 快速开始
|
||||
|
||||
```bash
|
||||
git clone https://git.moleculesai.app/molecule-ai/molecule-core.git
|
||||
git clone https://github.com/Molecule-AI/molecule-core.git
|
||||
cd molecule-core
|
||||
|
||||
cp .env.example .env
|
||||
@ -283,7 +258,7 @@ cp .env.example .env
|
||||
./infra/scripts/setup.sh
|
||||
# 启动 Postgres (:5432)、Redis (:6379)、Langfuse (:3001)
|
||||
# 以及 Temporal (:7233 gRPC, :8233 UI),全部挂在共享的
|
||||
# `molecule-core-net` Docker 网络上。Temporal 默认无鉴权,
|
||||
# `molecule-monorepo-net` Docker 网络上。Temporal 默认无鉴权,
|
||||
# 仅用于本地开发;生产环境必须加 mTLS / API Key。
|
||||
#
|
||||
# 同时会根据 manifest.json 拉取所有模板/插件仓库到
|
||||
@ -321,11 +296,7 @@ npm run dev
|
||||
|
||||
## 当前范围说明
|
||||
|
||||
当前 `main` 已经包含核心平台、Canvas v4(warm-paper 主题)、Memory v2(pgvector 语义召回)、typed-SSOT A2A 响应路径(RFC #2967)、**8 个正式 adapter**(Claude Code、Hermes、Gemini CLI、LangGraph、DeepAgents、CrewAI、AutoGen、OpenClaw)、skill lifecycle,以及主要运维面。
|
||||
|
||||
配套的私有仓库 [`molecule-controlplane`](https://git.moleculesai.app/molecule-ai/molecule-controlplane) 提供 SaaS 层 —— 多租户编排(EC2 + Neon + Cloudflare Tunnels)、KMS 信封加密、WorkOS 鉴权、Stripe 计费,以及 `tenant_resources` 审计表加 30 分钟 reconciler。
|
||||
|
||||
像 **NemoClaw** 这样的相邻 runtime 路线仍然属于分支级工作,只有合并后才会进入正式支持列表,这里会明确区分。
|
||||
当前 `main` 已经包含核心平台、Canvas、memory model、6 个正式 adapter、skill lifecycle 和主要运维面。像 **NemoClaw** 这样的相邻 runtime 路线仍然属于分支级工作,只有合并后才会进入正式支持列表,这里会明确区分。
|
||||
|
||||
## License
|
||||
|
||||
|
||||
@ -1,10 +0,0 @@
|
||||
# Excluded from `docker build` context. Without this, the COPY . . step in
|
||||
# canvas/Dockerfile clobbers the freshly-installed node_modules with the
|
||||
# host's (potentially broken / wrong-arch) copy — the @tailwindcss/oxide
|
||||
# native binary disagreed and broke `next build`.
|
||||
node_modules
|
||||
.next
|
||||
.git
|
||||
*.log
|
||||
.env*
|
||||
!.env.example
|
||||
@ -1,11 +1,7 @@
|
||||
FROM node:22-alpine AS builder
|
||||
WORKDIR /app
|
||||
COPY package.json package-lock.json* ./
|
||||
# `npm ci` (not `install`) for lockfile-exact reproducibility.
|
||||
# `--include=optional` ensures the platform-specific @tailwindcss/oxide
|
||||
# native binary lands — without it, postcss fails with "Cannot read
|
||||
# properties of undefined (reading 'All')" at build time.
|
||||
RUN npm ci --include=optional
|
||||
RUN npm install
|
||||
COPY . .
|
||||
ARG NEXT_PUBLIC_PLATFORM_URL=http://localhost:8080
|
||||
ARG NEXT_PUBLIC_WS_URL=ws://localhost:8080/ws
|
||||
|
||||
@ -17,24 +17,6 @@ import { dirname, join } from "node:path";
|
||||
// update one heuristic. Production is unaffected: `output: "standalone"`
|
||||
// bakes resolved env into the build, and the marker file isn't shipped.
|
||||
loadMonorepoEnv();
|
||||
// Boot-time matched-pair guard for ADMIN_TOKEN / NEXT_PUBLIC_ADMIN_TOKEN.
|
||||
// When ADMIN_TOKEN is set on the workspace-server (server-side bearer
|
||||
// gate, wsauth_middleware.go ~L245), the canvas MUST send the matching
|
||||
// NEXT_PUBLIC_ADMIN_TOKEN as `Authorization: Bearer ...` on every API
|
||||
// call. If only one is set, every workspace API call 401s silently —
|
||||
// the canvas hydrates with empty data and the user sees a broken page
|
||||
// with no console hint about the auth-config mismatch.
|
||||
//
|
||||
// Pre-fix the matched-pair contract was descriptive only (a comment in
|
||||
// .env): future devs/agents could re-misconfigure with one of the two
|
||||
// unset and silently 401. Closes the post-PR-#174 self-review gap.
|
||||
//
|
||||
// Warn-only (not exit) — production canvas Docker images bake these
|
||||
// vars into the build at image-build time, and a missed pair there
|
||||
// would still emit the warning at runtime via the standalone server's
|
||||
// startup. Killing the process on misconfiguration would turn a
|
||||
// recoverable auth issue into a hard crashloop.
|
||||
checkAdminTokenPair();
|
||||
|
||||
const nextConfig: NextConfig = {
|
||||
output: "standalone",
|
||||
@ -75,43 +57,6 @@ function loadMonorepoEnv() {
|
||||
);
|
||||
}
|
||||
|
||||
// Boot-time matched-pair guard. Runs after .env has been loaded so the
|
||||
// check sees the post-load state. The two env vars must be set or
|
||||
// unset together; one-without-the-other is the silent-401 footgun.
|
||||
//
|
||||
// Treats empty string ("") as unset. An explicitly-empty `KEY=` in
|
||||
// .env counts as set-to-empty in `process.env`, but for auth purposes
|
||||
// an empty bearer token is equivalent to no token — so both
|
||||
// `ADMIN_TOKEN=` and an unset ADMIN_TOKEN are equivalent relative to
|
||||
// the matched-pair invariant.
|
||||
//
|
||||
// Returns void; side effect is the console.error warning. Kept as a
|
||||
// separate function (exported) so a future test can reset env, call
|
||||
// this, and assert on captured stderr.
|
||||
export function checkAdminTokenPair(): void {
|
||||
const serverSet = !!process.env.ADMIN_TOKEN;
|
||||
const clientSet = !!process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (serverSet === clientSet) return;
|
||||
// Distinct messages so the operator can tell which half is missing
|
||||
// — the fix is symmetric (set the other one) but the diagnostic
|
||||
// mentions which side is currently set so they don't have to grep.
|
||||
if (serverSet && !clientSet) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
"[next.config] ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not — " +
|
||||
"canvas will 401 against workspace-server because the bearer header " +
|
||||
"is never attached. Set both to the same value, or unset both.",
|
||||
);
|
||||
} else {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
"[next.config] NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not — " +
|
||||
"workspace-server will reject the bearer because no AdminAuth gate " +
|
||||
"is configured. Set both to the same value, or unset both.",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
function findMonorepoRoot(start: string): string | null {
|
||||
let dir = start;
|
||||
for (let i = 0; i < 6; i++) {
|
||||
|
||||
@ -41,7 +41,7 @@ export default function PricingPage() {
|
||||
<p className="mt-2 text-ink-mid">
|
||||
We publish the{" "}
|
||||
<a
|
||||
href="https://git.moleculesai.app/molecule-ai/molecule-monorepo"
|
||||
href="https://github.com/Molecule-AI/molecule-monorepo"
|
||||
className="text-accent underline hover:text-accent"
|
||||
>
|
||||
full source on GitHub
|
||||
|
||||
@ -13,6 +13,7 @@ import { AttachmentPreview } from "./chat/AttachmentPreview";
|
||||
import { extractFilesFromTask } from "./chat/message-parser";
|
||||
import { AgentCommsPanel } from "./chat/AgentCommsPanel";
|
||||
import { appendActivityLine } from "./chat/activityLog";
|
||||
import { activityRowToMessages, type ActivityRowForHydration } from "./chat/historyHydration";
|
||||
import { runtimeDisplayName } from "@/lib/runtime-names";
|
||||
import { ConfirmDialog } from "@/components/ConfirmDialog";
|
||||
|
||||
@ -49,12 +50,38 @@ interface A2AResponse {
|
||||
};
|
||||
}
|
||||
|
||||
// Internal-self-message filtering moved server-side in RFC #2945
|
||||
// PR-C/D — the platform's /chat-history endpoint applies the
|
||||
// IsInternalSelfMessage predicate before returning rows, so the
|
||||
// client no longer needs the local backstop on the history path.
|
||||
// The proper fix is still X-Workspace-ID header (source_id=workspace_id);
|
||||
// the platform-side prefix filter handles the residual cases.
|
||||
/** Detect activity-log rows that the workspace's own runtime fired
|
||||
* against itself but were misclassified as canvas-source. The proper
|
||||
* fix is the X-Workspace-ID header from `self_source_headers()` in
|
||||
* workspace/platform_auth.py, which makes the platform record
|
||||
* source_id = workspace_id. But three failure modes still leak a
|
||||
* self-message into "My Chat":
|
||||
*
|
||||
* 1. Historical rows already in the DB with source_id=NULL.
|
||||
* 2. Workspace containers running pre-fix heartbeat.py / main.py
|
||||
* (the fix only takes effect after an image rebuild + redeploy).
|
||||
* 3. Future internal triggers added without the helper.
|
||||
*
|
||||
* This client-side filter recognises the heartbeat trigger by its
|
||||
* exact prefix — the heartbeat assembles
|
||||
*
|
||||
* "Delegation results are ready. Review them and take appropriate
|
||||
* action:\n" + summary_lines + report_instruction
|
||||
*
|
||||
* in workspace/heartbeat.py. The prefix is template-fixed so a
|
||||
* string match is reliable. If the heartbeat copy ever changes,
|
||||
* update this constant in the same commit.
|
||||
*
|
||||
* This is a backstop, not the primary defence — the X-Workspace-ID
|
||||
* header is. Filtering content is fragile to copy edits, so keep
|
||||
* the list narrow. */
|
||||
const INTERNAL_SELF_MESSAGE_PREFIXES = [
|
||||
"Delegation results are ready. Review them and take appropriate action",
|
||||
];
|
||||
|
||||
function isInternalSelfMessage(text: string): boolean {
|
||||
return INTERNAL_SELF_MESSAGE_PREFIXES.some((p) => text.startsWith(p));
|
||||
}
|
||||
|
||||
// extractReplyText pulls the agent's text reply out of an A2A response.
|
||||
// Concatenates ALL text parts (joined with "\n") rather than returning
|
||||
@ -107,19 +134,8 @@ const INITIAL_HISTORY_LIMIT = 10;
|
||||
const OLDER_HISTORY_BATCH = 20;
|
||||
|
||||
/**
|
||||
* Load chat history from the platform's typed /chat-history endpoint.
|
||||
*
|
||||
* Server-side rendering of activity_logs rows into ChatMessage shape
|
||||
* lives in workspace-server/internal/messagestore/postgres_store.go
|
||||
* (RFC #2945 PR-C/D). The server already applies the canvas-source
|
||||
* filter, the internal-self-message predicate, the role decision
|
||||
* (status=error vs agent-error prefix → system), and the v0/v1
|
||||
* file-shape extraction. Canvas just renders what it receives.
|
||||
*
|
||||
* Wire shape (mirrors ChatMessage exactly, no per-row mapping needed):
|
||||
*
|
||||
* GET /workspaces/:id/chat-history?limit=N&before_ts=T
|
||||
* 200 → {"messages": ChatMessage[], "reached_end": boolean}
|
||||
* Load chat history from the activity_logs database via the platform API.
|
||||
* Uses source=canvas to only get user-initiated messages (not agent-to-agent).
|
||||
*
|
||||
* Pagination:
|
||||
* - Pass `limit` to bound the page size (newest-first from server).
|
||||
@ -127,10 +143,10 @@ const OLDER_HISTORY_BATCH = 20;
|
||||
* timestamp. Combined with limit, this yields the next-older page
|
||||
* when scrolling backward through history.
|
||||
*
|
||||
* `reachedEnd` is propagated from the server. The server computes it
|
||||
* by comparing rowCount vs limit so a partial last page is correctly
|
||||
* detected even when the row→bubble fan-out is non-1:1 (each row
|
||||
* produces 1-2 bubbles).
|
||||
* `reachedEnd` is true when the server returned fewer rows than asked
|
||||
* for — caller uses this to disable further older-batch fetches.
|
||||
* (Counts row-level returns, not chat-bubble count: each row may
|
||||
* produce 1-2 bubbles.)
|
||||
*/
|
||||
async function loadMessagesFromDB(
|
||||
workspaceId: string,
|
||||
@ -138,23 +154,25 @@ async function loadMessagesFromDB(
|
||||
beforeTs?: string,
|
||||
): Promise<{ messages: ChatMessage[]; error: string | null; reachedEnd: boolean }> {
|
||||
try {
|
||||
const params = new URLSearchParams({ limit: String(limit) });
|
||||
const params = new URLSearchParams({
|
||||
type: "a2a_receive",
|
||||
source: "canvas",
|
||||
limit: String(limit),
|
||||
});
|
||||
if (beforeTs) params.set("before_ts", beforeTs);
|
||||
const resp = await api.get<{ messages: ChatMessage[]; reached_end: boolean }>(
|
||||
`/workspaces/${workspaceId}/chat-history?${params.toString()}`,
|
||||
const activities = await api.get<ActivityRowForHydration[]>(
|
||||
`/workspaces/${workspaceId}/activity?${params.toString()}`,
|
||||
);
|
||||
|
||||
// Server emits oldest-first within the page (RFC #2945 PR-C-2
|
||||
// post-fix: server reverses row-aware before returning so the
|
||||
// wire is display-ready). Canvas appends/prepends without
|
||||
// reordering — this avoids the pair-flip bug a naive flat
|
||||
// reverse causes when each row produces a (user, agent) pair
|
||||
// with the same timestamp.
|
||||
return {
|
||||
messages: resp.messages ?? [],
|
||||
error: null,
|
||||
reachedEnd: resp.reached_end,
|
||||
};
|
||||
const messages: ChatMessage[] = [];
|
||||
// Activities are newest-first, reverse for chronological order.
|
||||
// Per-row mapping lives in chat/historyHydration.ts so it can be
|
||||
// unit-tested without spinning up the full ChatTab component
|
||||
// (regression cover for the timestamp-collapse bug).
|
||||
for (const a of [...activities].reverse()) {
|
||||
messages.push(...activityRowToMessages(a, isInternalSelfMessage));
|
||||
}
|
||||
return { messages, error: null, reachedEnd: activities.length < limit };
|
||||
} catch (err) {
|
||||
return {
|
||||
messages: [],
|
||||
|
||||
@ -21,39 +21,20 @@ interface Props {
|
||||
// --- Agent Card Section ---
|
||||
|
||||
function AgentCardSection({ workspaceId }: { workspaceId: string }) {
|
||||
// Initial card value comes from the canvas store — node.data.agentCard
|
||||
// is hydrated by the platform stream when the workspace appears in the
|
||||
// graph, so reading it here avoids a duplicate `GET /workspaces/${id}`
|
||||
// (the parent ConfigTab.loadConfig already fetches workspace metadata,
|
||||
// and refetching here adds a serialised RTT to the panel-open path —
|
||||
// contributed to the ~20s detail-panel load reported in core#11).
|
||||
// Local state still tracks the edited/saved value so the editor flow
|
||||
// is unchanged.
|
||||
const storeCard = useCanvasStore((s) => {
|
||||
// Defensive against test mocks that omit `nodes` (some test files
|
||||
// stub the store with a minimal shape). In production `nodes` is
|
||||
// always an array — empty or not — so the optional chaining only
|
||||
// matters for the test path.
|
||||
const node = s.nodes?.find?.((n) => n.id === workspaceId);
|
||||
return (node?.data.agentCard as
|
||||
| Record<string, unknown>
|
||||
| null
|
||||
| undefined) ?? null;
|
||||
});
|
||||
const [card, setCard] = useState<Record<string, unknown> | null>(storeCard);
|
||||
const [card, setCard] = useState<Record<string, unknown> | null>(null);
|
||||
const [loading, setLoading] = useState(true);
|
||||
const [editing, setEditing] = useState(false);
|
||||
const [draft, setDraft] = useState("");
|
||||
const [saving, setSaving] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const [success, setSuccess] = useState(false);
|
||||
|
||||
// If the store updates while this section is mounted (another tab
|
||||
// pushed an update via the platform event stream), reflect that —
|
||||
// unless the user is mid-edit, in which case we don't clobber their
|
||||
// unsaved draft.
|
||||
useEffect(() => {
|
||||
if (!editing) setCard(storeCard);
|
||||
}, [storeCard, editing]);
|
||||
api.get<Record<string, unknown>>(`/workspaces/${workspaceId}`)
|
||||
.then((ws) => setCard((ws.agent_card as Record<string, unknown>) || null))
|
||||
.catch(() => {})
|
||||
.finally(() => setLoading(false));
|
||||
}, [workspaceId]);
|
||||
|
||||
const handleSave = async () => {
|
||||
setError(null);
|
||||
@ -72,7 +53,9 @@ function AgentCardSection({ workspaceId }: { workspaceId: string }) {
|
||||
|
||||
return (
|
||||
<Section title="Agent Card" defaultOpen={false}>
|
||||
{editing ? (
|
||||
{loading ? (
|
||||
<div className="text-[10px] text-ink-soft">Loading...</div>
|
||||
) : editing ? (
|
||||
<div className="space-y-2">
|
||||
<textarea
|
||||
aria-label="Agent card JSON editor"
|
||||
@ -238,51 +221,47 @@ export function ConfigTab({ workspaceId }: Props) {
|
||||
setLoading(true);
|
||||
setError(null);
|
||||
|
||||
// Load workspace metadata (runtime + model + provider) in parallel.
|
||||
// These are independent GETs against three workspace-server endpoints
|
||||
// and used to be awaited serially — for SaaS workspaces each call
|
||||
// round-trips through an EIC SSH tunnel, so the previous serial
|
||||
// pattern stacked 3-5s of tunnel-setup latency per call (core#11).
|
||||
// Promise.all overlaps them; the per-call cost stays the same but
|
||||
// wall time drops to max() instead of sum().
|
||||
//
|
||||
// Each leg has its own .catch handler that yields a sentinel value,
|
||||
// matching the previous semantics:
|
||||
// - /workspaces/${id}: required source-of-truth for runtime+tier;
|
||||
// fall back to YAML if the GET fails (rare, network-class only).
|
||||
// - /workspaces/${id}/model: non-fatal; empty model lets the form
|
||||
// fall through to YAML runtime_config.model.
|
||||
// - /workspaces/${id}/provider: non-fatal; old workspace-servers
|
||||
// return 404, in which case provider="" and Save skips the PUT.
|
||||
//
|
||||
// See GH #1894 for the workspace-row-as-source-of-truth rationale
|
||||
// that motivated splitting from a single config.yaml read.
|
||||
const [wsRes, modelRes, providerRes] = await Promise.all([
|
||||
api.get<{ runtime?: string; tier?: number }>(`/workspaces/${workspaceId}`)
|
||||
.catch(() => ({} as { runtime?: string; tier?: number })),
|
||||
api.get<{ model?: string }>(`/workspaces/${workspaceId}/model`)
|
||||
.catch(() => ({} as { model?: string })),
|
||||
api.get<{ provider?: string }>(`/workspaces/${workspaceId}/provider`)
|
||||
.catch(() => null),
|
||||
]);
|
||||
const wsMetadataRuntime = (wsRes.runtime || "").trim();
|
||||
const wsMetadataModel = (modelRes.model || "").trim();
|
||||
const wsMetadataTier: number | null =
|
||||
typeof wsRes.tier === "number" ? wsRes.tier : null;
|
||||
if (providerRes !== null) {
|
||||
const loadedProvider = (providerRes.provider || "").trim();
|
||||
setProvider(loadedProvider);
|
||||
setOriginalProvider(loadedProvider);
|
||||
} else {
|
||||
setProvider("");
|
||||
setOriginalProvider("");
|
||||
}
|
||||
// ALWAYS load workspace metadata first (runtime + model). These are the
|
||||
// source of truth regardless of whether the runtime uses our config.yaml
|
||||
// template. Without this the form falls back to empty/default values on
|
||||
// a hermes workspace (which doesn't use our template), creating the
|
||||
// appearance that the saved runtime is unset — and worse, clicking Save
|
||||
// would silently flip `runtime` from `hermes` back to the dropdown
|
||||
// default `LangGraph`. See GH #1894.
|
||||
let wsMetadataRuntime = "";
|
||||
let wsMetadataModel = "";
|
||||
let wsMetadataTier: number | null = null;
|
||||
try {
|
||||
const ws = await api.get<{ runtime?: string; tier?: number }>(`/workspaces/${workspaceId}`);
|
||||
wsMetadataRuntime = (ws.runtime || "").trim();
|
||||
if (typeof ws.tier === "number") wsMetadataTier = ws.tier;
|
||||
} catch { /* fall back to config.yaml */ }
|
||||
try {
|
||||
const m = await api.get<{ model?: string }>(`/workspaces/${workspaceId}/model`);
|
||||
wsMetadataModel = (m.model || "").trim();
|
||||
} catch { /* non-fatal */ }
|
||||
// originalModel is set further down once the YAML has been parsed —
|
||||
// we want it to reflect what the form ACTUALLY rendered, which may
|
||||
// be the YAML's runtime_config.model fallback when MODEL_PROVIDER
|
||||
// is empty. Setting it here from wsMetadataModel alone would be
|
||||
// wrong for hermes/pre-#240 workspaces.
|
||||
|
||||
// Load explicit provider override (Option B PR-5). Endpoint returns
|
||||
// {provider: "", source: "default"} when no override is set, so the
|
||||
// empty string is the legitimate "auto-derive" signal — don't treat
|
||||
// it as a load error. Non-fatal: an older workspace-server that
|
||||
// predates PR-2 returns 404 here; the form falls back to "" and
|
||||
// Save just won't PUT the provider field.
|
||||
try {
|
||||
const p = await api.get<{ provider?: string }>(`/workspaces/${workspaceId}/provider`);
|
||||
const loadedProvider = (p.provider || "").trim();
|
||||
setProvider(loadedProvider);
|
||||
setOriginalProvider(loadedProvider);
|
||||
} catch {
|
||||
setProvider("");
|
||||
setOriginalProvider("");
|
||||
}
|
||||
|
||||
// Skip the config.yaml fetch entirely for runtimes that manage
|
||||
// their own config (external, hermes, etc.) — they don't have a
|
||||
// platform-side template, so the GET would 404. The catch block
|
||||
|
||||
@ -1,11 +1,13 @@
|
||||
// @vitest-environment jsdom
|
||||
//
|
||||
// Pins the lazy-loading chat-history pagination.
|
||||
// Pins the lazy-loading chat-history pagination added 2026-05-05.
|
||||
//
|
||||
// PR-C-2 (RFC #2945): canvas was migrated from /activity?type=a2a_receive
|
||||
// to /chat-history. Server now returns typed ChatMessage[] in
|
||||
// display-ready oldest-first order. These tests guard the canvas-side
|
||||
// pagination invariants against the new endpoint surface.
|
||||
// Pre-fix: ChatTab fetched the newest 50 messages on every mount and
|
||||
// scrolled to bottom, paying full DOM cost up-front even when the user
|
||||
// only wanted to read the last few bubbles. Post-fix: initial load is
|
||||
// bounded to 10 newest, and an IntersectionObserver on a top sentinel
|
||||
// triggers loadOlder() (batch of 20 with `before_ts` cursor) when the
|
||||
// user scrolls up.
|
||||
//
|
||||
// Pinned branches:
|
||||
// 1. Initial fetch carries `limit=10` and NO before_ts (newest-first
|
||||
@ -18,10 +20,11 @@
|
||||
// asserting the rendered bubble count matches the full page).
|
||||
// 4. The retry button after a failed initial load uses the same
|
||||
// INITIAL_HISTORY_LIMIT (10), not the legacy 50.
|
||||
// 5. before_ts cursor is the OLDEST timestamp from the current page,
|
||||
// passed verbatim to walk backward.
|
||||
// 6. Inflight guard rejects duplicate IO triggers while a loadOlder
|
||||
// fetch is in flight.
|
||||
//
|
||||
// IntersectionObserver / scroll-anchor restoration is exercised by the
|
||||
// E2E synth-canary suite — pinning it in jsdom would require mocking
|
||||
// the observer and faking layout, which is brittler than trusting a
|
||||
// live-DOM canary against the staging tenant.
|
||||
|
||||
import { describe, it, expect, vi, afterEach, beforeEach } from "vitest";
|
||||
import { render, screen, cleanup, waitFor, fireEvent } from "@testing-library/react";
|
||||
@ -30,31 +33,24 @@ import React from "react";
|
||||
afterEach(cleanup);
|
||||
|
||||
// Both ChatTab sub-panels (MyChat + AgentComms) mount simultaneously so
|
||||
// keyboard tab order and aria-controls land on a real DOM. MyChat's
|
||||
// loadMessagesFromDB hits /chat-history; AgentComms's polling hits a
|
||||
// different URL. Route the mock by URL so each gets a sensible default
|
||||
// and only MyChat's calls land in the assertion array.
|
||||
const myChatHistoryCalls: string[] = [];
|
||||
let myChatNextResponse:
|
||||
| { ok: true; messages: unknown[]; reachedEnd?: boolean }
|
||||
| { ok: false; err: Error } = { ok: true, messages: [] };
|
||||
|
||||
// keyboard tab order and aria-controls land on a real DOM. Both fire
|
||||
// /activity GETs on mount: MyChat's hits `type=a2a_receive&source=canvas`,
|
||||
// AgentComms's hits a different filter. Route the mock by URL so each
|
||||
// gets a sensible default and only MyChat's call is what the assertions
|
||||
// scrutinise.
|
||||
const myChatActivityCalls: string[] = [];
|
||||
let myChatNextResponse: { ok: true; rows: unknown[] } | { ok: false; err: Error } = {
|
||||
ok: true,
|
||||
rows: [],
|
||||
};
|
||||
const apiGet = vi.fn((path: string): Promise<unknown> => {
|
||||
if (path.includes("/chat-history")) {
|
||||
myChatHistoryCalls.push(path);
|
||||
if (myChatNextResponse.ok) {
|
||||
const reached_end =
|
||||
myChatNextResponse.reachedEnd !== undefined
|
||||
? myChatNextResponse.reachedEnd
|
||||
: myChatNextResponse.messages.length < 10;
|
||||
return Promise.resolve({
|
||||
messages: myChatNextResponse.messages,
|
||||
reached_end,
|
||||
});
|
||||
}
|
||||
if (path.includes("type=a2a_receive") && path.includes("source=canvas")) {
|
||||
myChatActivityCalls.push(path);
|
||||
if (myChatNextResponse.ok) return Promise.resolve(myChatNextResponse.rows);
|
||||
return Promise.reject(myChatNextResponse.err);
|
||||
}
|
||||
// AgentComms / heartbeat / anything else — empty array safe default.
|
||||
// AgentComms / heartbeat / anything else — empty array is a safe
|
||||
// default that won't blow up the corresponding component's .then().
|
||||
return Promise.resolve([]);
|
||||
});
|
||||
const apiPost = vi.fn();
|
||||
@ -88,8 +84,8 @@ const ioInstances: IOInstance[] = [];
|
||||
beforeEach(() => {
|
||||
apiGet.mockClear();
|
||||
apiPost.mockReset();
|
||||
myChatHistoryCalls.length = 0;
|
||||
myChatNextResponse = { ok: true, messages: [] };
|
||||
myChatActivityCalls.length = 0;
|
||||
myChatNextResponse = { ok: true, rows: [] };
|
||||
ioInstances.length = 0;
|
||||
class FakeIO {
|
||||
private inst: IOInstance;
|
||||
@ -105,12 +101,20 @@ beforeEach(() => {
|
||||
this.inst.disconnected = true;
|
||||
}
|
||||
}
|
||||
// Install on every reachable global — different bundlers / module
|
||||
// graphs can resolve `IntersectionObserver` via `window`, `globalThis`,
|
||||
// or the bare global. Without all three, jsdom's own (pre-existing)
|
||||
// stub silently wins and ioInstances stays empty.
|
||||
(window as unknown as { IntersectionObserver: unknown }).IntersectionObserver = FakeIO;
|
||||
(globalThis as unknown as { IntersectionObserver: unknown }).IntersectionObserver = FakeIO;
|
||||
// jsdom doesn't implement scrollIntoView; ChatTab calls it after every
|
||||
// messages update.
|
||||
Element.prototype.scrollIntoView = vi.fn();
|
||||
});
|
||||
|
||||
function triggerIntersection(instanceIdx = -1) {
|
||||
// -1 → the latest observer (the live one). Tests targeting an old
|
||||
// (disconnected) instance pass a positive index.
|
||||
const inst = ioInstances.at(instanceIdx);
|
||||
if (!inst) throw new Error(`no IO instance at ${instanceIdx}`);
|
||||
inst.callback(
|
||||
@ -121,30 +125,25 @@ function triggerIntersection(instanceIdx = -1) {
|
||||
|
||||
import { ChatTab } from "../ChatTab";
|
||||
|
||||
// makeMessagePair returns a (user, agent) pair sharing a timestamp,
|
||||
// matching the wire shape /chat-history emits per activity_logs row.
|
||||
// Server-side reverseRowChunks ensures the wire is oldest-first across
|
||||
// rows but [user, agent] within each row.
|
||||
function makeMessagePair(seq: number): unknown[] {
|
||||
// Zero-pad seq into the minute slot so seq=10 produces a valid
|
||||
// timestamp (00:10:00Z, not 00:010:00Z).
|
||||
function makeActivityRow(seq: number): Record<string, unknown> {
|
||||
// Zero-pad seq into the minute slot so "seq=10" doesn't produce
|
||||
// the invalid timestamp "00:010:00Z" (caught by the loadOlder URL
|
||||
// assertion below — first version of the helper used `0${seq}` and
|
||||
// the test failed on `before_ts` having an extra digit).
|
||||
const mm = String(seq).padStart(2, "0");
|
||||
const ts = `2026-05-05T00:${mm}:00Z`;
|
||||
return [
|
||||
{ id: `u-${seq}`, role: "user", content: `user msg ${seq}`, timestamp: ts },
|
||||
{ id: `a-${seq}`, role: "agent", content: `agent reply ${seq}`, timestamp: ts },
|
||||
];
|
||||
return {
|
||||
activity_type: "a2a_receive",
|
||||
status: "ok",
|
||||
created_at: `2026-05-05T00:${mm}:00Z`,
|
||||
request_body: { params: { message: { parts: [{ kind: "text", text: `user msg ${seq}` }] } } },
|
||||
response_body: { result: `agent reply ${seq}` },
|
||||
};
|
||||
}
|
||||
|
||||
// pageOldestFirst builds a wire-shape page (oldest-first within page)
|
||||
// of `count` row-pairs starting at seq=`start`. Mirrors the server's
|
||||
// post-reverseRowChunks emission order.
|
||||
function pageOldestFirst(start: number, count: number): unknown[] {
|
||||
const out: unknown[] = [];
|
||||
for (let i = 0; i < count; i++) {
|
||||
out.push(...makeMessagePair(start + i));
|
||||
}
|
||||
return out;
|
||||
// Server returns newest-first; the helper builds a server-shape page
|
||||
// so the order in the rendered messages array matches production.
|
||||
function newestFirstPage(start: number, count: number): unknown[] {
|
||||
return Array.from({ length: count }, (_, i) => makeActivityRow(start + count - 1 - i));
|
||||
}
|
||||
|
||||
const minimalData = {
|
||||
@ -154,30 +153,28 @@ const minimalData = {
|
||||
} as unknown as Parameters<typeof ChatTab>[0]["data"];
|
||||
|
||||
describe("ChatTab lazy history pagination", () => {
|
||||
it("initial fetch carries limit=10 (not the legacy 50) and hits /chat-history", async () => {
|
||||
myChatNextResponse = { ok: true, messages: makeMessagePair(1) };
|
||||
it("initial fetch carries limit=10 (not the legacy 50)", async () => {
|
||||
myChatNextResponse = { ok: true, rows: [makeActivityRow(1)] };
|
||||
render(<ChatTab workspaceId="ws-1" data={minimalData} />);
|
||||
await waitFor(() => expect(myChatHistoryCalls.length).toBe(1));
|
||||
const url = myChatHistoryCalls[0];
|
||||
expect(url).toContain("/chat-history");
|
||||
await waitFor(() => expect(myChatActivityCalls.length).toBe(1));
|
||||
const url = myChatActivityCalls[0];
|
||||
expect(url).toContain("limit=10");
|
||||
expect(url).not.toContain("limit=50");
|
||||
// before_ts should NOT be set on the initial fetch — that's the
|
||||
// newest-first slice the user lands on.
|
||||
expect(url).not.toContain("before_ts");
|
||||
// /chat-history filters source-canvas server-side; client should
|
||||
// NOT pass type/source params (they belonged to /activity).
|
||||
expect(url).not.toContain("type=a2a_receive");
|
||||
expect(url).not.toContain("source=canvas");
|
||||
});
|
||||
|
||||
it("hides the top sentinel when initial fetch returns fewer than the limit", async () => {
|
||||
// 3 < 10 → server says "no more older history exists"; sentinel
|
||||
// should NOT mount and the "Loading older messages…" line should
|
||||
// never appear.
|
||||
myChatNextResponse = { ok: true, messages: pageOldestFirst(1, 3) };
|
||||
// never appear (it can't, since the sentinel is what triggers it).
|
||||
myChatNextResponse = {
|
||||
ok: true,
|
||||
rows: [makeActivityRow(1), makeActivityRow(2), makeActivityRow(3)],
|
||||
};
|
||||
render(<ChatTab workspaceId="ws-2" data={minimalData} />);
|
||||
await waitFor(() => expect(myChatHistoryCalls.length).toBe(1));
|
||||
await waitFor(() => expect(myChatActivityCalls.length).toBe(1));
|
||||
await waitFor(() => {
|
||||
expect(screen.queryByText(/Loading chat history/i)).toBeNull();
|
||||
});
|
||||
@ -185,15 +182,15 @@ describe("ChatTab lazy history pagination", () => {
|
||||
});
|
||||
|
||||
it("renders all messages when initial fetch returns exactly the limit", async () => {
|
||||
// limit=10 row-pairs → 20 ChatMessages. reachedEnd should be FALSE
|
||||
// so the sentinel mounts. Verified by bubble counts.
|
||||
myChatNextResponse = {
|
||||
ok: true,
|
||||
messages: pageOldestFirst(1, 10),
|
||||
reachedEnd: false,
|
||||
};
|
||||
// 10 == limit → server might have more older rows; sentinel SHOULD
|
||||
// mount so the IO observer can fire loadOlder() on scroll-up. We
|
||||
// verify by checking the rendered bubble count — if hasMore stayed
|
||||
// true the sentinel render path doesn't crash and all 10 rows
|
||||
// produced their pair of bubbles.
|
||||
const fullPage = Array.from({ length: 10 }, (_, i) => makeActivityRow(i + 1));
|
||||
myChatNextResponse = { ok: true, rows: fullPage };
|
||||
render(<ChatTab workspaceId="ws-3" data={minimalData} />);
|
||||
await waitFor(() => expect(myChatHistoryCalls.length).toBe(1));
|
||||
await waitFor(() => expect(myChatActivityCalls.length).toBe(1));
|
||||
await waitFor(() => {
|
||||
expect(screen.queryByText(/Loading chat history/i)).toBeNull();
|
||||
});
|
||||
@ -205,67 +202,54 @@ describe("ChatTab lazy history pagination", () => {
|
||||
myChatNextResponse = { ok: false, err: new Error("network down") };
|
||||
render(<ChatTab workspaceId="ws-4" data={minimalData} />);
|
||||
const retry = await screen.findByText(/Retry/);
|
||||
myChatNextResponse = { ok: true, messages: makeMessagePair(1) };
|
||||
myChatNextResponse = { ok: true, rows: [makeActivityRow(1)] };
|
||||
fireEvent.click(retry);
|
||||
await waitFor(() => expect(myChatHistoryCalls.length).toBe(2));
|
||||
const retryUrl = myChatHistoryCalls[1];
|
||||
expect(retryUrl).toContain("/chat-history");
|
||||
await waitFor(() => expect(myChatActivityCalls.length).toBe(2));
|
||||
const retryUrl = myChatActivityCalls[1];
|
||||
expect(retryUrl).toContain("limit=10");
|
||||
expect(retryUrl).not.toContain("limit=50");
|
||||
});
|
||||
|
||||
it("loadOlder fetches limit=20 with before_ts=oldest.timestamp", async () => {
|
||||
// Initial page = 10 row-pairs in oldest-first order (seq 1..10).
|
||||
// The oldest (and so the cursor for loadOlder) is seq=1's
|
||||
// timestamp 2026-05-05T00:01:00Z.
|
||||
myChatNextResponse = {
|
||||
ok: true,
|
||||
messages: pageOldestFirst(1, 10),
|
||||
reachedEnd: false,
|
||||
};
|
||||
// Initial page = 10 rows in newest-first order (seq 10..1). After
|
||||
// the component reverses to oldest-first for display, messages[0]
|
||||
// is built from seq=1 — the oldest — and its timestamp is what
|
||||
// before_ts should carry.
|
||||
myChatNextResponse = { ok: true, rows: newestFirstPage(1, 10) };
|
||||
render(<ChatTab workspaceId="ws-load-older" data={minimalData} />);
|
||||
await waitFor(() => expect(myChatHistoryCalls.length).toBe(1));
|
||||
await waitFor(() => expect(myChatActivityCalls.length).toBe(1));
|
||||
await waitFor(() => expect(ioInstances.length).toBeGreaterThan(0));
|
||||
|
||||
// Stage older-batch response, then fire IO callback.
|
||||
myChatNextResponse = {
|
||||
ok: true,
|
||||
messages: pageOldestFirst(0, 1),
|
||||
reachedEnd: true,
|
||||
};
|
||||
// Stage the older-batch response, then fire the IO callback.
|
||||
myChatNextResponse = { ok: true, rows: newestFirstPage(0, 1) };
|
||||
triggerIntersection();
|
||||
|
||||
await waitFor(() => expect(myChatHistoryCalls.length).toBe(2));
|
||||
const olderUrl = myChatHistoryCalls[1];
|
||||
expect(olderUrl).toContain("/chat-history");
|
||||
await waitFor(() => expect(myChatActivityCalls.length).toBe(2));
|
||||
const olderUrl = myChatActivityCalls[1];
|
||||
expect(olderUrl).toContain("limit=20");
|
||||
expect(olderUrl).toContain("before_ts=");
|
||||
expect(decodeURIComponent(olderUrl)).toContain("before_ts=2026-05-05T00:01:00Z");
|
||||
});
|
||||
|
||||
it("inflight guard rejects a second IO trigger while first loadOlder is in flight", async () => {
|
||||
myChatNextResponse = {
|
||||
ok: true,
|
||||
messages: pageOldestFirst(1, 10),
|
||||
reachedEnd: false,
|
||||
};
|
||||
myChatNextResponse = { ok: true, rows: newestFirstPage(1, 10) };
|
||||
render(<ChatTab workspaceId="ws-inflight" data={minimalData} />);
|
||||
await waitFor(() => expect(myChatHistoryCalls.length).toBe(1));
|
||||
await waitFor(() => expect(myChatActivityCalls.length).toBe(1));
|
||||
await waitFor(() => expect(ioInstances.length).toBeGreaterThan(0));
|
||||
|
||||
// Hold the next loadOlder fetch open with a manual deferred so we
|
||||
// can fire the second trigger while the first is in-flight.
|
||||
let release!: (resp: unknown) => void;
|
||||
const deferred = new Promise<unknown>((res) => {
|
||||
let release!: (rows: unknown[]) => void;
|
||||
const deferred = new Promise<unknown[]>((res) => {
|
||||
release = res;
|
||||
});
|
||||
apiGet.mockImplementationOnce((path: string): Promise<unknown> => {
|
||||
myChatHistoryCalls.push(path);
|
||||
myChatActivityCalls.push(path);
|
||||
return deferred;
|
||||
});
|
||||
|
||||
triggerIntersection(); // start loadOlder #1
|
||||
await waitFor(() => expect(myChatHistoryCalls.length).toBe(2));
|
||||
await waitFor(() => expect(myChatActivityCalls.length).toBe(2));
|
||||
|
||||
// Second IO trigger lands while #1 is still pending.
|
||||
triggerIntersection();
|
||||
@ -274,62 +258,79 @@ describe("ChatTab lazy history pagination", () => {
|
||||
// Without the inflight guard, each of these would have started a
|
||||
// new fetch. With the guard, none of them do — call count stays 2.
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
expect(myChatHistoryCalls.length).toBe(2);
|
||||
expect(myChatActivityCalls.length).toBe(2);
|
||||
|
||||
// Release the first fetch with a valid wire response shape.
|
||||
release({ messages: [], reached_end: true });
|
||||
await waitFor(() => expect(myChatHistoryCalls.length).toBe(2));
|
||||
// Release the first fetch. Inflight clears in the finally block;
|
||||
// a subsequent IO trigger is permitted again (verified by checking
|
||||
// we can fire a follow-up after release without hanging the test).
|
||||
release([]);
|
||||
await waitFor(() => expect(myChatActivityCalls.length).toBe(2));
|
||||
});
|
||||
|
||||
it("empty older response clears the scroll anchor and unmounts the sentinel", async () => {
|
||||
myChatNextResponse = {
|
||||
ok: true,
|
||||
messages: pageOldestFirst(1, 10),
|
||||
reachedEnd: false,
|
||||
};
|
||||
// The bug we're pinning: if loadOlder returns 0 rows, the
|
||||
// scrollAnchorRef must be cleared so the next paint doesn't try to
|
||||
// restore against a no-op prepend (which would fight the natural
|
||||
// bottom-pin for any subsequent live message). hasMore flipping to
|
||||
// false is the same flag-flip path; sentinel disappearing is the
|
||||
// observable proxy.
|
||||
myChatNextResponse = { ok: true, rows: newestFirstPage(1, 10) };
|
||||
render(<ChatTab workspaceId="ws-anchor" data={minimalData} />);
|
||||
await waitFor(() => expect(myChatHistoryCalls.length).toBe(1));
|
||||
await waitFor(() => expect(myChatActivityCalls.length).toBe(1));
|
||||
await waitFor(() => expect(ioInstances.length).toBeGreaterThan(0));
|
||||
|
||||
myChatNextResponse = {
|
||||
ok: true,
|
||||
messages: [],
|
||||
reachedEnd: true,
|
||||
};
|
||||
myChatNextResponse = { ok: true, rows: [] }; // empty → reachedEnd
|
||||
triggerIntersection();
|
||||
await waitFor(() => expect(myChatHistoryCalls.length).toBe(2));
|
||||
await waitFor(() => expect(myChatActivityCalls.length).toBe(2));
|
||||
|
||||
// After reachedEnd the sentinel unmounts (hasMore=false). We can't
|
||||
// peek scrollAnchorRef directly, but we can assert the consequence:
|
||||
// scrollIntoView (the bottom-pin for live appends) is not blocked
|
||||
// by a stale anchor. Trigger a re-render via an unrelated state
|
||||
// change… in practice the safest assertion here is that the
|
||||
// sentinel disappeared (proving the empty response propagated to
|
||||
// hasMore correctly, which is the same flag-flip path as anchor
|
||||
// clearing).
|
||||
await waitFor(() => {
|
||||
expect(screen.queryByText(/Loading older messages/i)).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
it("IntersectionObserver does not churn when older messages prepend", async () => {
|
||||
myChatNextResponse = {
|
||||
ok: true,
|
||||
messages: pageOldestFirst(1, 10),
|
||||
reachedEnd: false,
|
||||
};
|
||||
// Whole-PR perf invariant: prepending older history (the load-bearing
|
||||
// user gesture) must NOT tear down + re-arm the IO observer.
|
||||
// Triggering loadOlder is the cleanest way to drive a messages
|
||||
// mutation from inside the test, since live agent push goes through
|
||||
// a Zustand store that's harder to drive reliably from jsdom.
|
||||
//
|
||||
// Pre-fix, loadOlder depended on `messages`, so every prepend
|
||||
// recreated loadOlder → re-ran the IO effect → new observer. Each
|
||||
// call to triggerIntersection() produced a fresh disconnected
|
||||
// observer + a new live one. Post-fix, the observer survives.
|
||||
myChatNextResponse = { ok: true, rows: newestFirstPage(1, 10) };
|
||||
render(<ChatTab workspaceId="ws-stable-io" data={minimalData} />);
|
||||
await waitFor(() => expect(myChatHistoryCalls.length).toBe(1));
|
||||
await waitFor(() => expect(myChatActivityCalls.length).toBe(1));
|
||||
await waitFor(() => expect(ioInstances.length).toBeGreaterThan(0));
|
||||
|
||||
// Snapshot the observer instance after first paint stabilises.
|
||||
const observerBefore = ioInstances.at(-1);
|
||||
expect(observerBefore).toBeDefined();
|
||||
expect(observerBefore!.disconnected).toBe(false);
|
||||
|
||||
// Trigger three older-batch prepends. Each batch returns the full
|
||||
// OLDER_HISTORY_BATCH (20 row-pairs = 40 messages) so reachedEnd
|
||||
// stays false and the sentinel keeps mounting.
|
||||
// OLDER_HISTORY_BATCH (20 rows) so reachedEnd stays false and the
|
||||
// sentinel keeps mounting. Pre-fix, each prepend mutated `messages`
|
||||
// → recreated loadOlder → re-ran the IO effect → new observer.
|
||||
for (let batch = 0; batch < 3; batch++) {
|
||||
myChatNextResponse = {
|
||||
ok: true,
|
||||
messages: pageOldestFirst(-(batch + 1) * 20, 20),
|
||||
reachedEnd: false,
|
||||
rows: newestFirstPage(-(batch + 1) * 20, 20),
|
||||
};
|
||||
const callsBefore = myChatHistoryCalls.length;
|
||||
const callsBefore = myChatActivityCalls.length;
|
||||
triggerIntersection();
|
||||
await waitFor(() => expect(myChatHistoryCalls.length).toBe(callsBefore + 1));
|
||||
await waitFor(() =>
|
||||
expect(myChatActivityCalls.length).toBe(callsBefore + 1),
|
||||
);
|
||||
}
|
||||
|
||||
// The original observer is still the live one — no churn.
|
||||
|
||||
@ -9,7 +9,6 @@
|
||||
// AttachmentLightbox).
|
||||
|
||||
import { useState, useEffect, useRef } from "react";
|
||||
import { platformAuthHeaders } from "@/lib/api";
|
||||
import type { ChatAttachment } from "./types";
|
||||
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
|
||||
import { AttachmentChip } from "./AttachmentViews";
|
||||
@ -44,8 +43,13 @@ export function AttachmentAudio({ workspaceId, attachment, onDownload, tone }: P
|
||||
void (async () => {
|
||||
try {
|
||||
const href = resolveAttachmentHref(workspaceId, attachment.uri);
|
||||
const headers: Record<string, string> = {};
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const res = await fetch(href, {
|
||||
headers: platformAuthHeaders(),
|
||||
headers,
|
||||
credentials: "include",
|
||||
signal: AbortSignal.timeout(60_000),
|
||||
});
|
||||
@ -112,5 +116,9 @@ export function AttachmentAudio({ workspaceId, attachment, onDownload, tone }: P
|
||||
);
|
||||
}
|
||||
|
||||
// Local getTenantSlug() removed — auth-header construction now goes
|
||||
// through platformAuthHeaders() from @/lib/api (#178).
|
||||
function getTenantSlug(): string | null {
|
||||
if (typeof window === "undefined") return null;
|
||||
const host = window.location.hostname;
|
||||
const m = host.match(/^([^.]+)\.moleculesai\.app$/);
|
||||
return m ? m[1] : null;
|
||||
}
|
||||
|
||||
@ -35,7 +35,6 @@
|
||||
// downscale via canvas, but defer that to v2.
|
||||
|
||||
import { useState, useEffect, useRef } from "react";
|
||||
import { platformAuthHeaders } from "@/lib/api";
|
||||
import type { ChatAttachment } from "./types";
|
||||
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
|
||||
import { AttachmentLightbox } from "./AttachmentLightbox";
|
||||
@ -76,14 +75,22 @@ export function AttachmentImage({ workspaceId, attachment, onDownload, tone }: P
|
||||
}
|
||||
|
||||
// Platform-auth path: identical to downloadChatFile but we keep
|
||||
// the blob (don't trigger a Save-As). Auth headers come from the
|
||||
// shared `platformAuthHeaders()` helper — one source of truth for
|
||||
// every authenticated raw fetch in the canvas (#178).
|
||||
// the blob (don't trigger a Save-As). Use the same headers it does
|
||||
// by going through it indirectly — no, downloadChatFile triggers a
|
||||
// Save-As. Need a separate fetch.
|
||||
void (async () => {
|
||||
try {
|
||||
const href = resolveAttachmentHref(workspaceId, attachment.uri);
|
||||
const headers: Record<string, string> = {};
|
||||
// Read the same env var downloadChatFile reads — single source
|
||||
// of truth would be cleaner; refactor opportunity for PR-2 if
|
||||
// we add the same path to AttachmentVideo.
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const res = await fetch(href, {
|
||||
headers: platformAuthHeaders(),
|
||||
headers,
|
||||
credentials: "include",
|
||||
signal: AbortSignal.timeout(30_000),
|
||||
});
|
||||
@ -177,7 +184,15 @@ export function AttachmentImage({ workspaceId, attachment, onDownload, tone }: P
|
||||
);
|
||||
}
|
||||
|
||||
// Local getTenantSlug() removed — auth-header construction now goes
|
||||
// through platformAuthHeaders() from @/lib/api which uses the canonical
|
||||
// getTenantSlug() from @/lib/tenant. This eliminates the duplicate
|
||||
// hostname-regex + the duplicate bearer-token-attach pattern (#178).
|
||||
// Internal helper — duplicated from uploads.ts (it's not exported
|
||||
// there). Kept local so this component doesn't reach into private
|
||||
// surface; if AttachmentVideo / AttachmentPDF in PR-2/PR-3 also need
|
||||
// it, lift to an exported helper at that point (the third-caller
|
||||
// rule).
|
||||
function getTenantSlug(): string | null {
|
||||
if (typeof window === "undefined") return null;
|
||||
const host = window.location.hostname;
|
||||
// Tenant subdomain shape: <slug>.moleculesai.app
|
||||
const m = host.match(/^([^.]+)\.moleculesai\.app$/);
|
||||
return m ? m[1] : null;
|
||||
}
|
||||
|
||||
@ -33,7 +33,6 @@
|
||||
// timeout, swap to chip. Implemented as a 3-second watchdog.
|
||||
|
||||
import { useState, useEffect, useRef } from "react";
|
||||
import { platformAuthHeaders } from "@/lib/api";
|
||||
import type { ChatAttachment } from "./types";
|
||||
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
|
||||
import { AttachmentLightbox } from "./AttachmentLightbox";
|
||||
@ -70,8 +69,13 @@ export function AttachmentPDF({ workspaceId, attachment, onDownload, tone }: Pro
|
||||
void (async () => {
|
||||
try {
|
||||
const href = resolveAttachmentHref(workspaceId, attachment.uri);
|
||||
const headers: Record<string, string> = {};
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const res = await fetch(href, {
|
||||
headers: platformAuthHeaders(),
|
||||
headers,
|
||||
credentials: "include",
|
||||
signal: AbortSignal.timeout(60_000),
|
||||
});
|
||||
@ -185,5 +189,9 @@ function PdfGlyph() {
|
||||
);
|
||||
}
|
||||
|
||||
// Local getTenantSlug() removed — auth-header construction now goes
|
||||
// through platformAuthHeaders() from @/lib/api (#178).
|
||||
function getTenantSlug(): string | null {
|
||||
if (typeof window === "undefined") return null;
|
||||
const host = window.location.hostname;
|
||||
const m = host.match(/^([^.]+)\.moleculesai\.app$/);
|
||||
return m ? m[1] : null;
|
||||
}
|
||||
|
||||
@ -26,7 +26,6 @@
|
||||
// to download the full file.
|
||||
|
||||
import { useState, useEffect } from "react";
|
||||
import { platformAuthHeaders } from "@/lib/api";
|
||||
import type { ChatAttachment } from "./types";
|
||||
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
|
||||
import { AttachmentChip } from "./AttachmentViews";
|
||||
@ -58,13 +57,13 @@ export function AttachmentTextPreview({ workspaceId, attachment, onDownload, ton
|
||||
void (async () => {
|
||||
try {
|
||||
const href = resolveAttachmentHref(workspaceId, attachment.uri);
|
||||
// Only attach platform auth headers for in-platform URIs —
|
||||
// off-platform URLs (HTTP/HTTPS attachments) MUST NOT receive
|
||||
// our bearer token (it would leak the admin token to a third
|
||||
// party). The branch is preserved with the new shared helper.
|
||||
const headers: Record<string, string> = isPlatformAttachment(attachment.uri)
|
||||
? platformAuthHeaders()
|
||||
: {};
|
||||
const headers: Record<string, string> = {};
|
||||
if (isPlatformAttachment(attachment.uri)) {
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
}
|
||||
const res = await fetch(href, {
|
||||
headers,
|
||||
credentials: "include",
|
||||
@ -183,5 +182,9 @@ export function AttachmentTextPreview({ workspaceId, attachment, onDownload, ton
|
||||
);
|
||||
}
|
||||
|
||||
// Local getTenantSlug() removed — auth-header construction now goes
|
||||
// through platformAuthHeaders() from @/lib/api (#178).
|
||||
function getTenantSlug(): string | null {
|
||||
if (typeof window === "undefined") return null;
|
||||
const host = window.location.hostname;
|
||||
const m = host.match(/^([^.]+)\.moleculesai\.app$/);
|
||||
return m ? m[1] : null;
|
||||
}
|
||||
|
||||
@ -25,7 +25,6 @@
|
||||
// fetch via service worker. v2 if measured-needed.
|
||||
|
||||
import { useState, useEffect, useRef } from "react";
|
||||
import { platformAuthHeaders } from "@/lib/api";
|
||||
import type { ChatAttachment } from "./types";
|
||||
import { isPlatformAttachment, resolveAttachmentHref } from "./uploads";
|
||||
import { AttachmentChip } from "./AttachmentViews";
|
||||
@ -62,8 +61,13 @@ export function AttachmentVideo({ workspaceId, attachment, onDownload, tone }: P
|
||||
void (async () => {
|
||||
try {
|
||||
const href = resolveAttachmentHref(workspaceId, attachment.uri);
|
||||
const headers: Record<string, string> = {};
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const res = await fetch(href, {
|
||||
headers: platformAuthHeaders(),
|
||||
headers,
|
||||
credentials: "include",
|
||||
// Videos are larger than images on average; give the request
|
||||
// more headroom. The server's per-request body cap (50MB) is
|
||||
@ -143,5 +147,11 @@ export function AttachmentVideo({ workspaceId, attachment, onDownload, tone }: P
|
||||
);
|
||||
}
|
||||
|
||||
// Local getTenantSlug() removed — auth-header construction now goes
|
||||
// through platformAuthHeaders() from @/lib/api (#178).
|
||||
// Internal helper — same shape as AttachmentImage's. Lifted to a
|
||||
// shared util in PR-2.5 if a third caller needs it (PDF, audio).
|
||||
function getTenantSlug(): string | null {
|
||||
if (typeof window === "undefined") return null;
|
||||
const host = window.location.hostname;
|
||||
const m = host.match(/^([^.]+)\.moleculesai\.app$/);
|
||||
return m ? m[1] : null;
|
||||
}
|
||||
|
||||
@ -64,54 +64,6 @@ describe("extractRequestText", () => {
|
||||
};
|
||||
expect(extractRequestText(body)).toBe("");
|
||||
});
|
||||
|
||||
// Regression: delegation.go stores request_body as {"task": "...", "delegation_id": "..."}.
|
||||
// extractRequestText was checking only the A2A params.message.parts path, so
|
||||
// outbound delegation messages were rendered as blank bubbles.
|
||||
// Fix: check body.task first (delegation format), then fall back to A2A.
|
||||
it("extracts text from body.task (delegation format)", () => {
|
||||
const body = {
|
||||
task: "Deploy the staging environment for this sprint's release",
|
||||
delegation_id: "delg_01jx8q4n3k",
|
||||
};
|
||||
expect(extractRequestText(body)).toBe(
|
||||
"Deploy the staging environment for this sprint's release"
|
||||
);
|
||||
});
|
||||
|
||||
it("prefers body.task over A2A params when both present", () => {
|
||||
const body = {
|
||||
task: "Delegation text wins",
|
||||
params: {
|
||||
message: {
|
||||
parts: [{ kind: "text", text: "A2A text" }],
|
||||
},
|
||||
},
|
||||
};
|
||||
// body.task is checked first; delegation wins for delegation activities.
|
||||
expect(extractRequestText(body)).toBe("Delegation text wins");
|
||||
});
|
||||
|
||||
it("falls back to A2A format when body.task is absent", () => {
|
||||
const body = {
|
||||
params: {
|
||||
message: {
|
||||
parts: [{ kind: "text", text: "A2A fallback" }],
|
||||
},
|
||||
},
|
||||
};
|
||||
expect(extractRequestText(body)).toBe("A2A fallback");
|
||||
});
|
||||
|
||||
it("returns empty string when body.task is empty string", () => {
|
||||
const body = { task: "" };
|
||||
expect(extractRequestText(body)).toBe("");
|
||||
});
|
||||
|
||||
it("returns empty string when body.task is not a string", () => {
|
||||
const body = { task: 42 };
|
||||
expect(extractRequestText(body)).toBe("");
|
||||
});
|
||||
});
|
||||
|
||||
describe("extractResponseText", () => {
|
||||
|
||||
@ -114,15 +114,9 @@ function basename(uri: string): string {
|
||||
return slash >= 0 ? cleaned.slice(slash + 1) : cleaned || "file";
|
||||
}
|
||||
|
||||
/** Extract user message text from an activity log request_body.
|
||||
*
|
||||
* Delegation activities from delegation.go store the task text directly
|
||||
* at `body.task` as a plain string: {"task": "...", "delegation_id": "..."}.
|
||||
* Check this first before falling back to the A2A JSON-RPC format
|
||||
* (`body.params.message.parts[].text`). */
|
||||
/** Extract user message text from an activity log request_body */
|
||||
export function extractRequestText(body: Record<string, unknown> | null): string {
|
||||
if (!body) return "";
|
||||
if (typeof body.task === "string" && body.task) return body.task;
|
||||
const params = body.params as Record<string, unknown> | undefined;
|
||||
const msg = params?.message as Record<string, unknown> | undefined;
|
||||
const parts = msg?.parts as Array<Record<string, unknown>> | undefined;
|
||||
|
||||
@ -1,16 +1,12 @@
|
||||
import { PLATFORM_URL, platformAuthHeaders } from "@/lib/api";
|
||||
import { PLATFORM_URL } from "@/lib/api";
|
||||
import { getTenantSlug } from "@/lib/tenant";
|
||||
import type { ChatAttachment } from "./types";
|
||||
|
||||
/** Chat attachments are intentionally uploaded via a direct fetch()
|
||||
* instead of the `api.post` helper — `api.post` JSON-stringifies the
|
||||
* body, which would 500 on a Blob. Auth headers (tenant slug, admin
|
||||
* token, credentials) come from `platformAuthHeaders()` — the same
|
||||
* helper `request()` uses, so a missing bearer surfaces as a single
|
||||
* fix site instead of N copies. We deliberately do NOT set
|
||||
* Content-Type so the browser writes the multipart boundary into the
|
||||
* header; setting it manually would yield a multipart body the server
|
||||
* can't parse. See lib/api.ts platformAuthHeaders() for the full
|
||||
* rationale on why this pair must stay matched. */
|
||||
* body, which would 500 on a Blob. Mirrors the header plumbing
|
||||
* (tenant slug, admin token, credentials) so SaaS + self-hosted
|
||||
* callers work the same way. */
|
||||
export async function uploadChatFiles(
|
||||
workspaceId: string,
|
||||
files: File[],
|
||||
@ -20,12 +16,18 @@ export async function uploadChatFiles(
|
||||
const form = new FormData();
|
||||
for (const f of files) form.append("files", f, f.name);
|
||||
|
||||
const headers: Record<string, string> = {};
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
|
||||
// Uploads legitimately take a while on cold cache (tar write +
|
||||
// docker cp into the container). 60s is comfortable for the 25MB/
|
||||
// 50MB caps the server enforces.
|
||||
const res = await fetch(`${PLATFORM_URL}/workspaces/${workspaceId}/chat/uploads`, {
|
||||
method: "POST",
|
||||
headers: platformAuthHeaders(),
|
||||
headers,
|
||||
body: form,
|
||||
credentials: "include",
|
||||
signal: AbortSignal.timeout(60_000),
|
||||
@ -141,8 +143,14 @@ export async function downloadChatFile(
|
||||
return;
|
||||
}
|
||||
|
||||
const headers: Record<string, string> = {};
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
|
||||
const res = await fetch(href, {
|
||||
headers: platformAuthHeaders(),
|
||||
headers,
|
||||
credentials: "include",
|
||||
signal: AbortSignal.timeout(60_000),
|
||||
});
|
||||
|
||||
@ -1,130 +0,0 @@
|
||||
// @vitest-environment node
|
||||
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
|
||||
|
||||
// Tests for the boot-time matched-pair guard added to next.config.ts.
|
||||
//
|
||||
// Why this lives in src/lib/__tests__ even though the function is in
|
||||
// canvas/next.config.ts:
|
||||
// - next.config.ts runs as ESM-but-also-CJS depending on which
|
||||
// consumer loads it (Next.js dev server vs Next.js build); we
|
||||
// want the test to be a plain ESM module Vitest already handles.
|
||||
// - Importing from "../../../next.config" pulls in the rest of the
|
||||
// file (loadMonorepoEnv, the default export, etc.) which has
|
||||
// side effects on module load (it runs loadMonorepoEnv()
|
||||
// immediately). To keep the test hermetic we don't import — we
|
||||
// duplicate the function under test.
|
||||
//
|
||||
// Sourcing the function from a shared module would be cleaner, but
|
||||
// next.config.ts is required to be a single self-contained file by
|
||||
// Next.js's loader on some host configurations. Pin invariant: the
|
||||
// duplicated function below MUST stay byte-identical to the one in
|
||||
// next.config.ts. If you change one, change the other and bump this
|
||||
// comment.
|
||||
|
||||
function checkAdminTokenPair(): void {
|
||||
const serverSet = !!process.env.ADMIN_TOKEN;
|
||||
const clientSet = !!process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (serverSet === clientSet) return;
|
||||
if (serverSet && !clientSet) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
"[next.config] ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not — " +
|
||||
"canvas will 401 against workspace-server because the bearer header " +
|
||||
"is never attached. Set both to the same value, or unset both.",
|
||||
);
|
||||
} else {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
"[next.config] NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not — " +
|
||||
"workspace-server will reject the bearer because no AdminAuth gate " +
|
||||
"is configured. Set both to the same value, or unset both.",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
describe("checkAdminTokenPair", () => {
|
||||
// Snapshot env so individual tests can stomp on it without leaking.
|
||||
// Rebuild from snapshot in afterEach so the next test sees a known
|
||||
// baseline regardless of mutation pattern.
|
||||
let originalEnv: Record<string, string | undefined>;
|
||||
let errorSpy: ReturnType<typeof vi.spyOn>;
|
||||
|
||||
beforeEach(() => {
|
||||
originalEnv = {
|
||||
ADMIN_TOKEN: process.env.ADMIN_TOKEN,
|
||||
NEXT_PUBLIC_ADMIN_TOKEN: process.env.NEXT_PUBLIC_ADMIN_TOKEN,
|
||||
};
|
||||
delete process.env.ADMIN_TOKEN;
|
||||
delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
errorSpy = vi.spyOn(console, "error").mockImplementation(() => {});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (originalEnv.ADMIN_TOKEN === undefined) delete process.env.ADMIN_TOKEN;
|
||||
else process.env.ADMIN_TOKEN = originalEnv.ADMIN_TOKEN;
|
||||
if (originalEnv.NEXT_PUBLIC_ADMIN_TOKEN === undefined) delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
else process.env.NEXT_PUBLIC_ADMIN_TOKEN = originalEnv.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
errorSpy.mockRestore();
|
||||
});
|
||||
|
||||
it("emits no warning when both are unset", () => {
|
||||
checkAdminTokenPair();
|
||||
expect(errorSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("emits no warning when both are set (matched pair, the happy path)", () => {
|
||||
process.env.ADMIN_TOKEN = "local-dev-admin";
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "local-dev-admin";
|
||||
checkAdminTokenPair();
|
||||
expect(errorSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("warns when ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not", () => {
|
||||
process.env.ADMIN_TOKEN = "local-dev-admin";
|
||||
checkAdminTokenPair();
|
||||
expect(errorSpy).toHaveBeenCalledTimes(1);
|
||||
// Exact-string assertion — substring would also pass when the
|
||||
// function's branch logic is broken (e.g. emits both messages, or
|
||||
// emits the wrong one). Pin the exact message that operators will
|
||||
// see in their dev console so regressions are visible.
|
||||
expect(errorSpy).toHaveBeenCalledWith(
|
||||
"[next.config] ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not — " +
|
||||
"canvas will 401 against workspace-server because the bearer header " +
|
||||
"is never attached. Set both to the same value, or unset both.",
|
||||
);
|
||||
});
|
||||
|
||||
it("warns when NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not", () => {
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "local-dev-admin";
|
||||
checkAdminTokenPair();
|
||||
expect(errorSpy).toHaveBeenCalledTimes(1);
|
||||
expect(errorSpy).toHaveBeenCalledWith(
|
||||
"[next.config] NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not — " +
|
||||
"workspace-server will reject the bearer because no AdminAuth gate " +
|
||||
"is configured. Set both to the same value, or unset both.",
|
||||
);
|
||||
});
|
||||
|
||||
// Empty string in process.env is the JS-side representation of `KEY=`
|
||||
// (no value) in a .env file. Treating "" as unset makes the pair
|
||||
// invariant symmetric: `KEY=` and `unset KEY` produce the same
|
||||
// verdict. Without this branch, an operator who comments out the
|
||||
// value but leaves the line would get a false-positive warning.
|
||||
it("treats empty string as unset (so KEY= and unset KEY are equivalent)", () => {
|
||||
process.env.ADMIN_TOKEN = "";
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "";
|
||||
checkAdminTokenPair();
|
||||
expect(errorSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("warns when ADMIN_TOKEN is set and NEXT_PUBLIC_ADMIN_TOKEN is empty string", () => {
|
||||
process.env.ADMIN_TOKEN = "local-dev-admin";
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "";
|
||||
checkAdminTokenPair();
|
||||
expect(errorSpy).toHaveBeenCalledTimes(1);
|
||||
// First branch — server set, client unset.
|
||||
expect(errorSpy).toHaveBeenCalledWith(
|
||||
expect.stringContaining("ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not"),
|
||||
);
|
||||
});
|
||||
});
|
||||
@ -1,97 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
|
||||
|
||||
// Tests for platformAuthHeaders — the shared helper extracted in #178
|
||||
// to consolidate the bearer-token-attach + tenant-slug-attach pattern
|
||||
// that was previously duplicated across 7 raw-fetch callsites in the
|
||||
// canvas (uploads + 5 Attachment* components + the api.ts request()
|
||||
// function).
|
||||
//
|
||||
// What we pin here:
|
||||
// - Returns a fresh object each call (so callers can mutate without
|
||||
// leaking into each other).
|
||||
// - Empty result on a non-tenant host with no admin token (the
|
||||
// localhost / self-hosted shape).
|
||||
// - Bearer attached when NEXT_PUBLIC_ADMIN_TOKEN is set.
|
||||
// - X-Molecule-Org-Slug attached when window.location.hostname is a
|
||||
// tenant subdomain (<slug>.moleculesai.app).
|
||||
// - Both attached when both apply (the production SaaS shape).
|
||||
//
|
||||
// Why jsdom: getTenantSlug() reads window.location.hostname. Node-only
|
||||
// environment yields no window and getTenantSlug returns null
|
||||
// unconditionally — wouldn't exercise the slug branch.
|
||||
|
||||
import { platformAuthHeaders } from "../api";
|
||||
|
||||
describe("platformAuthHeaders", () => {
|
||||
let originalAdminToken: string | undefined;
|
||||
|
||||
beforeEach(() => {
|
||||
originalAdminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (originalAdminToken === undefined) delete process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
else process.env.NEXT_PUBLIC_ADMIN_TOKEN = originalAdminToken;
|
||||
// jsdom resets hostname between tests via the @vitest-environment
|
||||
// pragma's per-test isolation. No explicit reset needed.
|
||||
});
|
||||
|
||||
it("returns an empty object on a non-tenant host with no admin token", () => {
|
||||
// jsdom default hostname is "localhost" — not a tenant slug, so
|
||||
// getTenantSlug() returns null and no X-Molecule-Org-Slug is added.
|
||||
const headers = platformAuthHeaders();
|
||||
expect(headers).toEqual({});
|
||||
});
|
||||
|
||||
it("attaches Authorization when NEXT_PUBLIC_ADMIN_TOKEN is set", () => {
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "local-dev-admin";
|
||||
const headers = platformAuthHeaders();
|
||||
expect(headers).toEqual({ Authorization: "Bearer local-dev-admin" });
|
||||
});
|
||||
|
||||
it("does NOT attach Authorization when NEXT_PUBLIC_ADMIN_TOKEN is empty string", () => {
|
||||
// Empty-string env is the JS-side shape of `KEY=` in .env.
|
||||
// Treating it as unset matches the matched-pair guard in
|
||||
// next.config.ts (admin-token-pair.test.ts) — symmetric semantics.
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "";
|
||||
const headers = platformAuthHeaders();
|
||||
expect(headers).toEqual({});
|
||||
});
|
||||
|
||||
it("attaches X-Molecule-Org-Slug on a tenant subdomain", () => {
|
||||
Object.defineProperty(window, "location", {
|
||||
value: { hostname: "reno-stars.moleculesai.app" },
|
||||
writable: true,
|
||||
});
|
||||
const headers = platformAuthHeaders();
|
||||
expect(headers).toEqual({ "X-Molecule-Org-Slug": "reno-stars" });
|
||||
});
|
||||
|
||||
it("attaches both when both apply (production SaaS shape)", () => {
|
||||
Object.defineProperty(window, "location", {
|
||||
value: { hostname: "reno-stars.moleculesai.app" },
|
||||
writable: true,
|
||||
});
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "tenant-bearer";
|
||||
const headers = platformAuthHeaders();
|
||||
// Pin exact-equality on the full shape — substring/contains
|
||||
// assertions would also pass for an extra-header bug.
|
||||
expect(headers).toEqual({
|
||||
"X-Molecule-Org-Slug": "reno-stars",
|
||||
Authorization: "Bearer tenant-bearer",
|
||||
});
|
||||
});
|
||||
|
||||
it("returns a fresh object each call (callers can mutate safely)", () => {
|
||||
process.env.NEXT_PUBLIC_ADMIN_TOKEN = "tok";
|
||||
const a = platformAuthHeaders();
|
||||
const b = platformAuthHeaders();
|
||||
expect(a).not.toBe(b); // distinct refs
|
||||
expect(a).toEqual(b); // same content
|
||||
a["Content-Type"] = "application/json";
|
||||
// Mutation on `a` does not leak into `b`.
|
||||
expect(b["Content-Type"]).toBeUndefined();
|
||||
});
|
||||
});
|
||||
@ -21,45 +21,6 @@ export interface RequestOptions {
|
||||
timeoutMs?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the platform auth header set used by every authenticated fetch
|
||||
* from the canvas. Returns a fresh object so callers can mutate (e.g.
|
||||
* append `Content-Type` for JSON requests, omit it for FormData).
|
||||
*
|
||||
* SaaS cross-origin shape:
|
||||
* - `X-Molecule-Org-Slug` — derived from `window.location.hostname`
|
||||
* by `getTenantSlug()`. Control plane uses it for fly-replay
|
||||
* routing. Empty on localhost / non-tenant hosts — safe to omit.
|
||||
* - `Authorization: Bearer <token>` — `NEXT_PUBLIC_ADMIN_TOKEN` baked
|
||||
* into the canvas build (see canvas/Dockerfile L8/L11). Required by
|
||||
* the workspace-server when `ADMIN_TOKEN` is set on the server side
|
||||
* (Tier-2b AdminAuth gate, wsauth_middleware.go ~L245). Empty when
|
||||
* no admin token was provisioned — the Tier-1 session-cookie path
|
||||
* handles that case via `credentials:"include"`.
|
||||
*
|
||||
* Why a shared helper: the two-line "read env, attach bearer; read
|
||||
* slug, attach header" pattern was duplicated across `request()` and
|
||||
* 7 raw-fetch callsites (chat uploads/download + 5 Attachment*
|
||||
* components) before this consolidation. A new poller or raw fetch
|
||||
* that forgets one of the two headers silently 401s against
|
||||
* workspace-server when ADMIN_TOKEN is set — the exact bug shape
|
||||
* called out in #178 / closes the post-#176 self-review gap.
|
||||
*
|
||||
* Callers that want JSON Content-Type should spread this and add it
|
||||
* themselves; FormData callers should NOT add Content-Type (the
|
||||
* browser sets the multipart boundary). Centralizing the auth pair
|
||||
* but leaving Content-Type up to the caller is the minimum viable
|
||||
* shared shape.
|
||||
*/
|
||||
export function platformAuthHeaders(): Record<string, string> {
|
||||
const headers: Record<string, string> = {};
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
return headers;
|
||||
}
|
||||
|
||||
async function request<T>(
|
||||
method: string,
|
||||
path: string,
|
||||
@ -67,16 +28,17 @@ async function request<T>(
|
||||
retryCount = 0,
|
||||
options?: RequestOptions,
|
||||
): Promise<T> {
|
||||
// JSON-bodied request — Content-Type is JSON. Auth pair comes from
|
||||
// the shared helper; see its doc comment for the SaaS-shape rationale.
|
||||
const headers: Record<string, string> = {
|
||||
"Content-Type": "application/json",
|
||||
...platformAuthHeaders(),
|
||||
};
|
||||
// Re-read slug locally for the 401 handler below — `headers` already
|
||||
// has it, but the 401 branch needs the bare value to gate the
|
||||
// session-probe + redirect logic on tenant context.
|
||||
// SaaS cross-origin shape:
|
||||
// - X-Molecule-Org-Slug: derived from window.location.hostname by
|
||||
// getTenantSlug(). Control plane uses it for fly-replay routing.
|
||||
// Empty on localhost / non-tenant hosts — safe to omit.
|
||||
// - credentials:"include": sends the session cookie cross-origin.
|
||||
// Cookie's Domain=.moleculesai.app attribute + cp's CORS allow this.
|
||||
const headers: Record<string, string> = { "Content-Type": "application/json" };
|
||||
const slug = getTenantSlug();
|
||||
if (slug) headers["X-Molecule-Org-Slug"] = slug;
|
||||
const adminToken = process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (adminToken) headers["Authorization"] = `Bearer ${adminToken}`;
|
||||
|
||||
const res = await fetch(`${PLATFORM_URL}${path}`, {
|
||||
method,
|
||||
|
||||
@ -7,48 +7,6 @@ export default defineConfig({
|
||||
test: {
|
||||
environment: 'node',
|
||||
exclude: ['e2e/**', 'node_modules/**', '**/dist/**'],
|
||||
// Issue #22 / vitest pool investigation:
|
||||
//
|
||||
// The forks pool spawns one Node.js worker per concurrent slot.
|
||||
// Each jsdom-environment worker bootstraps a full DOM (~30-50 MB resident
|
||||
// set) at cold-start. With the default maxWorkers derived from CPU
|
||||
// count, multiple jsdom workers can start simultaneously, exhausting
|
||||
// memory on the 2-CPU Gitea Actions runner and causing pool workers to
|
||||
// fail to respond with "[vitest-pool]: Timeout starting … runner."
|
||||
//
|
||||
// Fix: cap maxWorkers at 1 so only one worker is alive at any time.
|
||||
// Tests still run in parallel within that single worker's process (via
|
||||
// node's EventLoop) — this is the same parallelism as the `threads`
|
||||
// pool but without the per-worker jsdom cold-start overhead. 51 test
|
||||
// files that previously took 5070 s with 5 failures now run
|
||||
// sequentially through one worker, eliminating the memory spike.
|
||||
maxWorkers: 1,
|
||||
// CI-conditional test timeout (issue #96).
|
||||
//
|
||||
// Vitest's 5000ms default is too tight for the first test in any
|
||||
// file under our CI shape: `npx vitest run --coverage` on the
|
||||
// self-hosted Gitea Actions Docker runner. The cold-start cost
|
||||
// (v8 coverage instrumentation init + JSDOM bootstrap + module-
|
||||
// graph import for @/components/* and @/lib/* + first React
|
||||
// render) consistently consumes 5-7 seconds for the first
|
||||
// synchronous test in heavyweight component files
|
||||
// (ActivityTab.test.tsx, CreateWorkspaceDialog.test.tsx,
|
||||
// ConfigTab.provider.test.tsx) — even though every subsequent
|
||||
// test in the same file completes in 100-1500ms.
|
||||
//
|
||||
// Empirically the worst observed first-test was 6453ms in a
|
||||
// single file (CreateWorkspaceDialog). 30000ms gives ~5x
|
||||
// headroom over that on CI; we still keep 5000ms locally so
|
||||
// genuine waitFor races / hung promises stay sensitive in dev.
|
||||
//
|
||||
// Same vitest pattern documented at:
|
||||
// https://vitest.dev/config/testtimeout
|
||||
// https://vitest.dev/guide/coverage#profiling-test-performance
|
||||
//
|
||||
// Per-test duration is still emitted to the CI log; if a test
|
||||
// ever silently approaches 25-30s under this raised ceiling that
|
||||
// will surface as a duration regression and we revisit.
|
||||
testTimeout: process.env.CI ? 30000 : 5000,
|
||||
// Coverage is instrumented but NOT yet a CI gate — first land
|
||||
// observability so we can see the baseline, then dial in
|
||||
// thresholds + a hard gate in a follow-up PR (#1815). Today's
|
||||
|
||||
@ -1,43 +0,0 @@
|
||||
# docker-compose.dev.yml — overlay over docker-compose.yml for local dev
|
||||
# with air-driven live reload of the platform (workspace-server) service.
|
||||
#
|
||||
# Usage:
|
||||
# docker compose -f docker-compose.yml -f docker-compose.dev.yml up
|
||||
# (or `make dev` shorthand from repo root)
|
||||
#
|
||||
# What this overlay changes vs docker-compose.yml alone:
|
||||
# - Platform service uses workspace-server/Dockerfile.dev (air on top of
|
||||
# golang:1.25-alpine) instead of the multi-stage prod Dockerfile.
|
||||
# - Platform service bind-mounts the host's workspace-server/ source
|
||||
# into /app/workspace-server so air sees source edits live.
|
||||
# - Other services (postgres, redis, langfuse, etc.) inherit unchanged
|
||||
# from docker-compose.yml.
|
||||
#
|
||||
# What stays the same:
|
||||
# - All env vars, volumes, depends_on, healthchecks from docker-compose.yml.
|
||||
# - Network topology + ports.
|
||||
# - Postgres/Redis as service containers (no in-process replacements).
|
||||
|
||||
services:
|
||||
platform:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: workspace-server/Dockerfile.dev
|
||||
# Rebind source: edits under host's workspace-server/ propagate live.
|
||||
# The named volume on go-build-cache speeds up first build per container.
|
||||
volumes:
|
||||
- ./workspace-server:/app/workspace-server
|
||||
- go-build-cache:/root/.cache/go-build
|
||||
- go-mod-cache:/go/pkg/mod
|
||||
# Air signals the running binary on rebuild; ensure shell stops cleanly.
|
||||
init: true
|
||||
# Mark the service as dev-mode so the platform can short-circuit any
|
||||
# behavior that's incompatible with hot-reload (e.g. background
|
||||
# cron-style watchers that don't survive process restart). No-op
|
||||
# today; reserved for future flag use.
|
||||
environment:
|
||||
MOLECULE_DEV_HOT_RELOAD: "1"
|
||||
|
||||
volumes:
|
||||
go-build-cache:
|
||||
go-mod-cache:
|
||||
@ -119,7 +119,7 @@ services:
|
||||
|
||||
networks:
|
||||
default:
|
||||
name: molecule-core-net
|
||||
name: molecule-monorepo-net
|
||||
external: true
|
||||
|
||||
volumes:
|
||||
|
||||
@ -1,7 +1,3 @@
|
||||
# Include infra services (Temporal, Langfuse) so `docker compose up` starts the full stack.
|
||||
include:
|
||||
- docker-compose.infra.yml
|
||||
|
||||
services:
|
||||
# --- Infrastructure ---
|
||||
postgres:
|
||||
@ -16,8 +12,7 @@ services:
|
||||
volumes:
|
||||
- pgdata:/var/lib/postgresql/data
|
||||
networks:
|
||||
- molecule-core-net
|
||||
restart: unless-stopped
|
||||
- molecule-monorepo-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-dev}"]
|
||||
interval: 2s
|
||||
@ -44,7 +39,7 @@ services:
|
||||
psql -h postgres -U "$${POSTGRES_USER}" -d postgres -c "CREATE DATABASE langfuse"
|
||||
fi
|
||||
networks:
|
||||
- molecule-core-net
|
||||
- molecule-monorepo-net
|
||||
|
||||
redis:
|
||||
image: redis:7-alpine
|
||||
@ -54,8 +49,7 @@ services:
|
||||
volumes:
|
||||
- redisdata:/data
|
||||
networks:
|
||||
- molecule-core-net
|
||||
restart: unless-stopped
|
||||
- molecule-monorepo-net
|
||||
healthcheck:
|
||||
test: ["CMD", "redis-cli", "ping"]
|
||||
interval: 2s
|
||||
@ -72,7 +66,7 @@ services:
|
||||
volumes:
|
||||
- clickhousedata:/var/lib/clickhouse
|
||||
networks:
|
||||
- molecule-core-net
|
||||
- molecule-monorepo-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://127.0.0.1:8123/ping || exit 1"]
|
||||
interval: 5s
|
||||
@ -101,7 +95,7 @@ services:
|
||||
ports:
|
||||
- "3001:3000"
|
||||
networks:
|
||||
- molecule-core-net
|
||||
- molecule-monorepo-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/api/public/health || exit 1"]
|
||||
interval: 10s
|
||||
@ -132,10 +126,6 @@ services:
|
||||
REDIS_URL: redis://redis:6379
|
||||
PORT: "${PLATFORM_PORT:-8080}"
|
||||
PLATFORM_URL: "http://platform:${PLATFORM_PORT:-8080}"
|
||||
# Container network namespace is already isolated; "all interfaces"
|
||||
# inside the container = the bridge interface only. The fail-open
|
||||
# default (127.0.0.1) would block host-to-container access.
|
||||
BIND_ADDR: "${BIND_ADDR:-0.0.0.0}"
|
||||
# Default MOLECULE_ENV=development so the WorkspaceAuth / AdminAuth
|
||||
# middleware fail-open path activates when ADMIN_TOKEN is unset —
|
||||
# otherwise the canvas (which runs without a bearer in pure local
|
||||
@ -205,28 +195,12 @@ services:
|
||||
# App private key — read-only bind-mount. The host-side path is
|
||||
# gitignored per .gitignore rules (/.secrets/ + *.pem).
|
||||
- ./.secrets/github-app.pem:/secrets/github-app.pem:ro
|
||||
# Per-role persona credentials (molecule-core#242 local surface).
|
||||
# Sourced at workspace creation time by org_import.go::loadPersonaEnvFile
|
||||
# when a workspace.yaml carries `role: <name>`. The host-side dir is
|
||||
# populated by the operator-host bootstrap kit (28 dev-tree personas);
|
||||
# /etc/molecule-bootstrap/personas is the in-container path the
|
||||
# platform expects (matches the prod tenant-EC2 path so the same code
|
||||
# works in both modes).
|
||||
#
|
||||
# Read-only mount — workspace-server only reads, never writes here.
|
||||
# If the host dir is empty/missing the platform's loadPersonaEnvFile
|
||||
# silently no-ops per its existing semantics, so this mount is safe
|
||||
# even on a fresh machine that hasn't run the bootstrap kit yet.
|
||||
- ${MOLECULE_PERSONA_ROOT_HOST:-${HOME}/.molecule-ai/personas}:/etc/molecule-bootstrap/personas:ro
|
||||
ports:
|
||||
- "${PLATFORM_PUBLISH_PORT:-8080}:${PLATFORM_PORT:-8080}"
|
||||
networks:
|
||||
- molecule-core-net
|
||||
restart: unless-stopped
|
||||
- molecule-monorepo-net
|
||||
healthcheck:
|
||||
# Plain GET — `--spider` would issue HEAD, which returns 404 because
|
||||
# /health is registered as GET only.
|
||||
test: ["CMD-SHELL", "wget -qO /dev/null --tries=1 http://localhost:${PLATFORM_PORT:-8080}/health || exit 1"]
|
||||
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:${PLATFORM_PORT:-8080}/health || exit 1"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
@ -238,8 +212,8 @@ services:
|
||||
# docker compose pull canvas && docker compose up -d canvas
|
||||
# First-time local setup or testing unreleased changes — build from source:
|
||||
# docker compose build canvas && docker compose up -d canvas
|
||||
# Note: ECR images require AWS auth — `aws ecr get-login-password --region us-east-2 | docker login --username AWS --password-stdin 153263036946.dkr.ecr.us-east-2.amazonaws.com` before pull.
|
||||
image: 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/canvas:latest
|
||||
# Note: GHCR images are private — `docker login ghcr.io` required before pull.
|
||||
image: ghcr.io/molecule-ai/canvas:latest
|
||||
build:
|
||||
context: ./canvas
|
||||
dockerfile: Dockerfile
|
||||
@ -262,9 +236,9 @@ services:
|
||||
ports:
|
||||
- "${CANVAS_PUBLISH_PORT:-3000}:${CANVAS_PORT:-3000}"
|
||||
networks:
|
||||
- molecule-core-net
|
||||
- molecule-monorepo-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget -qO /dev/null --tries=1 http://127.0.0.1:${CANVAS_PORT:-3000} || exit 1"]
|
||||
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://127.0.0.1:${CANVAS_PORT:-3000} || exit 1"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
@ -295,7 +269,7 @@ services:
|
||||
OPENROUTER_API_KEY: ${OPENROUTER_API_KEY:-}
|
||||
LITELLM_MASTER_KEY: ${LITELLM_MASTER_KEY:-sk-molecule}
|
||||
networks:
|
||||
- molecule-core-net
|
||||
- molecule-monorepo-net
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:4000/health || exit 1"]
|
||||
@ -320,7 +294,7 @@ services:
|
||||
volumes:
|
||||
- ollamadata:/root/.ollama
|
||||
networks:
|
||||
- molecule-core-net
|
||||
- molecule-monorepo-net
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "ollama list || exit 1"]
|
||||
@ -330,8 +304,8 @@ services:
|
||||
start_period: 20s
|
||||
|
||||
networks:
|
||||
molecule-core-net:
|
||||
name: molecule-core-net
|
||||
molecule-monorepo-net:
|
||||
name: molecule-monorepo-net
|
||||
|
||||
volumes:
|
||||
pgdata:
|
||||
|
||||
@ -67,7 +67,7 @@ On-demand fits naturally with how agents work — an agent only needs to know ab
|
||||
|
||||
This is acceptable for MVP because:
|
||||
- All workspaces are provisioned by the same platform on trusted infrastructure
|
||||
- Docker network isolation (`molecule-core-net`) limits who can reach workspace endpoints
|
||||
- Docker network isolation (`molecule-monorepo-net`) limits who can reach workspace endpoints
|
||||
- The tool is self-hosted — the operator controls the network
|
||||
|
||||
**Known gap:** Once workspace A caches workspace B's URL, nothing stops A from calling B directly even after the hierarchy changes and A is no longer supposed to reach B. The cached URL remains valid until the container is restarted or the URL changes.
|
||||
|
||||
@ -2,7 +2,7 @@
|
||||
|
||||
**Status:** living document — update when you ship a feature that touches one backend.
|
||||
**Owner:** workspace-server + controlplane teams.
|
||||
**Last audit:** 2026-05-07 (plugin install/uninstall closed for EC2 backend via EIC SSH push to the bind-mounted `/configs/plugins/<name>/`, mirroring the Files API PR #1702 pattern).
|
||||
**Last audit:** 2026-05-05 (Claude agent — `provisionWorkspaceAuto` / `StopWorkspaceAuto` / `HasProvisioner` SoT pattern landed in PRs #2811 + #2824).
|
||||
|
||||
## Why this exists
|
||||
|
||||
@ -54,7 +54,7 @@ For "do we have any backend?", use `HasProvisioner()`, never bare `h.provisioner
|
||||
| **Files API** | | | | |
|
||||
| List / Read / Write / Replace / Delete | `container_files.go`, `template_import.go` | `docker exec` + tar `CopyToContainer` | SSH via EIC tunnel (PR #1702) | ✅ parity as of 2026-04-22 (previously docker-only) |
|
||||
| **Plugins** | | | | |
|
||||
| Install / uninstall / list | `plugins_install.go` + `plugins_install_eic.go` | `deliverToContainer()` → exec+`CopyToContainer` on local container | `instance_id` set → EIC SSH push of the staged tarball into the EC2's bind-mounted `/configs/plugins/<name>/` (per `workspaceFilePathPrefix`), `chown 1000:1000`, restart | ✅ parity |
|
||||
| Install / uninstall / list | `plugins_install.go` | `deliverToContainer()` + volume rm | **gap — no live plugin delivery** | 🔴 **docker-only** |
|
||||
| **Terminal (WebSocket)** | | | | |
|
||||
| Dispatch | `terminal.go:90-105` | `instance_id=""` → `handleLocalConnect` → `docker attach` | `instance_id` set → `handleRemoteConnect` → EIC SSH + `docker exec` | ✅ parity (different implementations, same UX) |
|
||||
| **A2A proxy** | | | | |
|
||||
|
||||
@ -4,7 +4,7 @@ How a workspace-server code change reaches the prod tenant fleet — and how to
|
||||
|
||||
> **⚠️ State note (2026-04-22):** this doc describes the **intended design**. As of this write, the canary fleet described below is **not actually running** — no canary tenants are provisioned, `CANARY_TENANT_URLS` / `CANARY_ADMIN_TOKENS` / `CANARY_CP_SHARED_SECRET` are empty in repo secrets, and `canary-verify.yml` fails every run.
|
||||
>
|
||||
> Current merges gate on manual `promote-latest.yml` dispatches, not canary. See [molecule-controlplane/docs/canary-tenants.md](https://git.moleculesai.app/molecule-ai/molecule-controlplane/src/branch/main/docs/canary-tenants.md) for the Phase 1 code work that's already shipped + the Phase 2 plan for actually standing up the fleet + a "should we even do this now?" decision framework.
|
||||
> Current merges gate on manual `promote-latest.yml` dispatches, not canary. See [molecule-controlplane/docs/canary-tenants.md](https://github.com/Molecule-AI/molecule-controlplane/blob/main/docs/canary-tenants.md) for the Phase 1 code work that's already shipped + the Phase 2 plan for actually standing up the fleet + a "should we even do this now?" decision framework.
|
||||
>
|
||||
> **Account-specific identifiers (AWS account ID, IAM role name) referenced below in the original design have been redacted from this public doc.** The actual values — if they exist — are in `Molecule-AI/internal/runbooks/canary-fleet.md`. If you're implementing Phase 2, start there.
|
||||
>
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
# Molecule AI — Comprehensive Technical Documentation
|
||||
|
||||
> Definitive technical reference for the Molecule AI Agent Team platform.
|
||||
> Based on a full non-invasive scan of the [molecule-monorepo](https://git.moleculesai.app/molecule-ai/molecule-monorepo) repository.
|
||||
> Based on a full non-invasive scan of the [molecule-monorepo](https://github.com/Molecule-AI/molecule-monorepo) repository.
|
||||
|
||||
---
|
||||
|
||||
@ -124,7 +124,7 @@ Six runtime adapters ship production-ready on `main`: LangGraph, DeepAgents, Cla
|
||||
| Platform ↔ Redis | TCP | Ephemeral state (liveness TTL), caching, pub/sub |
|
||||
| Workspace ↔ Workspace | HTTP (A2A JSON-RPC 2.0) | Direct peer-to-peer, **platform not in data path** |
|
||||
| Workspace → Langfuse | HTTP | Automatic OpenTelemetry tracing |
|
||||
| Docker Network | `molecule-core-net` | Internal-only by default, no exposed DB/Redis ports |
|
||||
| Docker Network | `molecule-monorepo-net` | Internal-only by default, no exposed DB/Redis ports |
|
||||
|
||||
### Core Components
|
||||
|
||||
@ -465,7 +465,7 @@ Unknown tier values default to T2 for safety. Applied via `provisioner.ApplyTier
|
||||
|
||||
### Docker Networking
|
||||
|
||||
- All containers join `molecule-core-net` private network
|
||||
- All containers join `molecule-monorepo-net` private network
|
||||
- Container naming: `ws-{workspace_id[:12]}`
|
||||
- Ephemeral host port binding: `127.0.0.1:0→8000/tcp`
|
||||
|
||||
@ -1149,11 +1149,11 @@ Molecule AI's workspace abstraction is **runtime-agnostic by design**. A workspa
|
||||
|
||||
## Links
|
||||
|
||||
- **GitHub**: https://git.moleculesai.app/molecule-ai/molecule-monorepo
|
||||
- **Architecture Docs**: https://git.moleculesai.app/molecule-ai/molecule-monorepo/src/branch/main/docs/architecture
|
||||
- **API Protocol**: https://git.moleculesai.app/molecule-ai/molecule-monorepo/src/branch/main/docs/api-protocol
|
||||
- **Agent Runtime**: https://git.moleculesai.app/molecule-ai/molecule-monorepo/src/branch/main/docs/agent-runtime
|
||||
- **Product Docs**: https://git.moleculesai.app/molecule-ai/molecule-monorepo/src/branch/main/docs/product
|
||||
- **GitHub**: https://github.com/Molecule-AI/molecule-monorepo
|
||||
- **Architecture Docs**: https://github.com/Molecule-AI/molecule-monorepo/tree/main/docs/architecture
|
||||
- **API Protocol**: https://github.com/Molecule-AI/molecule-monorepo/tree/main/docs/api-protocol
|
||||
- **Agent Runtime**: https://github.com/Molecule-AI/molecule-monorepo/tree/main/docs/agent-runtime
|
||||
- **Product Docs**: https://github.com/Molecule-AI/molecule-monorepo/tree/main/docs/product
|
||||
|
||||
---
|
||||
|
||||
|
||||
@ -19,7 +19,7 @@ The provisioner is the platform component that deploys workspace containers and
|
||||
|
||||
## Docker Networking (Tier 1-3, Tier 4 uses host)
|
||||
|
||||
All workspace containers join the `molecule-core-net` Docker network. Containers are named `ws-{id[:12]}` (first 12 chars of workspace UUID). Two exported helpers in `provisioner` package provide the canonical naming:
|
||||
All workspace containers join the `molecule-monorepo-net` Docker network. Containers are named `ws-{id[:12]}` (first 12 chars of workspace UUID). Two exported helpers in `provisioner` package provide the canonical naming:
|
||||
|
||||
- `provisioner.ContainerName(workspaceID)` → `ws-{id[:12]}`
|
||||
- `provisioner.InternalURL(workspaceID)` → `http://ws-{id[:12]}:8000`
|
||||
@ -38,7 +38,7 @@ This URL is pre-stored in both Postgres and Redis before the agent registers. Wh
|
||||
|
||||
**Why not use Docker-internal URLs?** In local dev, the platform runs on the host (not in Docker), so it cannot resolve Docker container hostnames. The ephemeral port mapping lets the A2A proxy reach agents via localhost. In production (platform in Docker), the Docker-internal URL (`http://ws-{id}:8000`) would work directly.
|
||||
|
||||
**Workspace-to-workspace discovery:** When a workspace discovers another workspace (via `X-Workspace-ID` header on `GET /registry/discover/:id`), the platform returns the Docker-internal URL (`http://ws-{first12chars}:8000`) so containers can reach each other directly on `molecule-core-net`. The internal URL is cached in Redis at provision time and also synthesized as a fallback if the cache misses (only for online/degraded workspaces).
|
||||
**Workspace-to-workspace discovery:** When a workspace discovers another workspace (via `X-Workspace-ID` header on `GET /registry/discover/:id`), the platform returns the Docker-internal URL (`http://ws-{first12chars}:8000`) so containers can reach each other directly on `molecule-monorepo-net`. The internal URL is cached in Redis at provision time and also synthesized as a fallback if the cache misses (only for online/degraded workspaces).
|
||||
|
||||
For external HTTPS access (multi-host mode), Nginx on the host handles TLS termination and proxies to the container.
|
||||
|
||||
|
||||
@ -79,7 +79,7 @@ For SOC2 / ISO 27001 / customer security questionnaires:
|
||||
|
||||
## Pointers
|
||||
|
||||
- KMS envelope code: [`molecule-controlplane/internal/crypto/kms.go`](https://git.moleculesai.app/molecule-ai/molecule-controlplane/src/branch/main/internal/crypto/kms.go)
|
||||
- Static-key fallback: [`molecule-controlplane/internal/crypto/aes.go`](https://git.moleculesai.app/molecule-ai/molecule-controlplane/src/branch/main/internal/crypto/aes.go)
|
||||
- KMS envelope code: [`molecule-controlplane/internal/crypto/kms.go`](https://github.com/Molecule-AI/molecule-controlplane/blob/main/internal/crypto/kms.go)
|
||||
- Static-key fallback: [`molecule-controlplane/internal/crypto/aes.go`](https://github.com/Molecule-AI/molecule-controlplane/blob/main/internal/crypto/aes.go)
|
||||
- Tenant secrets handler: [`workspace-server/internal/crypto/aes.go`](../../workspace-server/internal/crypto/aes.go)
|
||||
- Tenant secrets schema: [database-schema.md](./database-schema.md#workspace_secrets)
|
||||
|
||||
@ -1,28 +0,0 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64">
|
||||
<style>
|
||||
.bg { fill: #0a1120; }
|
||||
.accent { fill: #7fe8d6; }
|
||||
.accent-stroke { stroke: #7fe8d6; }
|
||||
@media (prefers-color-scheme: light) {
|
||||
.bg { fill: #f5f7fa; }
|
||||
.accent { fill: #1a8a72; }
|
||||
.accent-stroke { stroke: #1a8a72; }
|
||||
}
|
||||
</style>
|
||||
<rect class="bg" width="64" height="64" rx="14"/>
|
||||
<g class="accent-stroke" stroke-width="2.4" stroke-linecap="round" fill="none">
|
||||
<line x1="32" y1="32" x2="12" y2="14"/>
|
||||
<line x1="32" y1="32" x2="52" y2="18"/>
|
||||
<line x1="32" y1="32" x2="10" y2="40"/>
|
||||
<line x1="32" y1="32" x2="54" y2="44"/>
|
||||
<line x1="32" y1="32" x2="32" y2="56"/>
|
||||
</g>
|
||||
<g class="accent">
|
||||
<circle cx="32" cy="32" r="6.5"/>
|
||||
<circle cx="12" cy="14" r="3.5"/>
|
||||
<circle cx="52" cy="18" r="3.5"/>
|
||||
<circle cx="10" cy="40" r="3.5"/>
|
||||
<circle cx="54" cy="44" r="3.5"/>
|
||||
<circle cx="32" cy="56" r="3.5"/>
|
||||
</g>
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 957 B |
@ -1,17 +0,0 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64" role="img" aria-label="Molecule AI">
|
||||
<g stroke="#7fe8d6" stroke-width="2.6" stroke-linecap="round" fill="none">
|
||||
<line x1="32" y1="32" x2="12" y2="14"/>
|
||||
<line x1="32" y1="32" x2="52" y2="18"/>
|
||||
<line x1="32" y1="32" x2="10" y2="40"/>
|
||||
<line x1="32" y1="32" x2="54" y2="44"/>
|
||||
<line x1="32" y1="32" x2="32" y2="56"/>
|
||||
</g>
|
||||
<g fill="#7fe8d6">
|
||||
<circle cx="32" cy="32" r="7"/>
|
||||
<circle cx="12" cy="14" r="3.6"/>
|
||||
<circle cx="52" cy="18" r="3.6"/>
|
||||
<circle cx="10" cy="40" r="3.6"/>
|
||||
<circle cx="54" cy="44" r="3.6"/>
|
||||
<circle cx="32" cy="56" r="3.6"/>
|
||||
</g>
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 662 B |
@ -10,7 +10,7 @@ tags: [platform, fly.io, deployment, infrastructure]
|
||||
|
||||
Your infrastructure choice just got decoupled from your agent platform choice. Molecule AI now ships three production-ready workspace backends — `docker`, `flyio`, and `controlplane` — and switching between them takes a single environment variable. Your agent code, model choices, and workspace topology stay exactly the same.
|
||||
|
||||
This post covers what shipped in [PR #501](https://git.moleculesai.app/molecule-ai/molecule-core/pull/501) (Fly Machines provisioner) and [PR #503](https://git.moleculesai.app/molecule-ai/molecule-core/pull/503) (control plane provisioner), and which backend fits your situation.
|
||||
This post covers what shipped in [PR #501](https://github.com/Molecule-AI/molecule-core/pull/501) (Fly Machines provisioner) and [PR #503](https://github.com/Molecule-AI/molecule-core/pull/503) (control plane provisioner), and which backend fits your situation.
|
||||
|
||||
## Before: One Deployment Model for Every Use Case
|
||||
|
||||
@ -107,4 +107,4 @@ No changes to agent code, tool definitions, or orchestration logic. Swap `CONTAI
|
||||
|
||||
---
|
||||
|
||||
*[PR #501](https://git.moleculesai.app/molecule-ai/molecule-core/pull/501) (Fly Machines provisioner) and [PR #503](https://git.moleculesai.app/molecule-ai/molecule-core/pull/503) (control plane provisioner) are both merged to `main`. Molecule AI is open source — contributions welcome.*
|
||||
*[PR #501](https://github.com/Molecule-AI/molecule-core/pull/501) (Fly Machines provisioner) and [PR #503](https://github.com/Molecule-AI/molecule-core/pull/503) (control plane provisioner) are both merged to `main`. Molecule AI is open source — contributions welcome.*
|
||||
|
||||
@ -299,8 +299,8 @@ Or use the Canvas UI: Workspace → Config → MCP Servers → Add browser MCP s
|
||||
|
||||
**Try it free** — Molecule AI is open source and self-hostable. Get a workspace running in under 5 minutes.
|
||||
|
||||
→ [Get started on GitHub →](https://git.moleculesai.app/molecule-ai/molecule-core)
|
||||
→ [Get started on GitHub →](https://github.com/Molecule-AI/molecule-core)
|
||||
|
||||
---
|
||||
|
||||
*Have a browser automation use case you want to see covered? File an issue with the `enhancement` label on the [molecule-core issue tracker](https://git.moleculesai.app/molecule-ai/molecule-core/issues).*
|
||||
*Have a browser automation use case you want to see covered? Open a discussion on [GitHub Discussions](https://github.com/Molecule-AI/molecule-core/discussions) — or file an issue with the `enhancement` label.*
|
||||
|
||||
@ -148,7 +148,7 @@ Then follow the [quick-start guide](/docs/guides/remote-workspaces.md).
|
||||
Or run the annotated example directly:
|
||||
|
||||
```bash
|
||||
git clone https://git.moleculesai.app/molecule-ai/molecule-sdk-python
|
||||
git clone https://github.com/Molecule-AI/molecule-sdk-python
|
||||
cd molecule-sdk-python/examples/remote-agent
|
||||
# Create workspace with runtime:external, grab the ID, then:
|
||||
WORKSPACE_ID=<your-id> PLATFORM_URL=https://acme.moleculesai.app python3 run.py
|
||||
@ -160,6 +160,6 @@ The agent appears on the canvas within seconds.
|
||||
|
||||
→ [Remote Workspaces Guide →](/docs/guides/remote-workspaces.md)
|
||||
→ [External Agent Registration Reference →](/docs/guides/external-agent-registration.md)
|
||||
→ [molecule-sdk-python →](https://git.moleculesai.app/molecule-ai/molecule-sdk-python)
|
||||
→ [molecule-sdk-python →](https://github.com/Molecule-AI/molecule-sdk-python)
|
||||
|
||||
*Phase 30 shipped in PRs #1075–#1083 and #1085–#1100 on `molecule-core`.*
|
||||
|
||||
@ -27,7 +27,7 @@ The biggest user-facing change: every Molecule AI org can now mint named, revoca
|
||||
|
||||
→ [User guide: Organization API Keys](/docs/guides/org-api-keys.md)
|
||||
→ [Architecture: Org API Keys](/docs/architecture/org-api-keys.md)
|
||||
→ PRs: [#1105](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1105), [#1107](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1107), [#1109](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1109), [#1110](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1110)
|
||||
→ PRs: [#1105](https://github.com/Molecule-AI/molecule-core/pull/1105), [#1107](https://github.com/Molecule-AI/molecule-core/pull/1107), [#1109](https://github.com/Molecule-AI/molecule-core/pull/1109), [#1110](https://github.com/Molecule-AI/molecule-core/pull/1110)
|
||||
|
||||
---
|
||||
|
||||
@ -48,7 +48,7 @@ AdminAuth now accepts a session-verification tier that runs **before** the beare
|
||||
**Self-hosted / local dev:** `CP_UPSTREAM_URL` is unset → this feature is disabled, behaviour is unchanged.
|
||||
|
||||
→ [Guide: Same-Origin Canvas Fetches & Session Auth](/docs/guides/same-origin-canvas-fetches.md)
|
||||
→ PRs: [#1099](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1099), [#1100](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1100)
|
||||
→ PRs: [#1099](https://github.com/Molecule-AI/molecule-core/pull/1099), [#1100](https://github.com/Molecule-AI/molecule-core/pull/1100)
|
||||
|
||||
---
|
||||
|
||||
@ -87,7 +87,7 @@ The proxy is **fail-closed**: only an explicit allowlist of paths (`/cp/auth/`,
|
||||
This is also the structural fix for the lateral-movement risk that session auth introduced: without the allowlist, a tenant-authed browser user could have proxied `/cp/admin/*` requests upstream and exploited the fact that those endpoints accept WorkOS session cookies. The allowlist makes that impossible by construction.
|
||||
|
||||
→ [Guide: Same-Origin Canvas Fetches & Session Auth](/docs/guides/same-origin-canvas-fetches.md)
|
||||
→ PR: [#1095](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1095)
|
||||
→ PR: [#1095](https://github.com/Molecule-AI/molecule-core/pull/1095)
|
||||
|
||||
---
|
||||
|
||||
@ -99,7 +99,7 @@ The waitlist itself is a Canvas-administered list with email hashing in audit lo
|
||||
|
||||
This is the operational surface that makes the above security work matter: the beta is invitation-only, credentials are scoped, and every admin action is auditable.
|
||||
|
||||
→ Control plane PRs [#145](https://git.moleculesai.app/molecule-ai/molecule-controlplane/pull/145), [#148](https://git.moleculesai.app/molecule-ai/molecule-controlplane/pull/148), [#150](https://git.moleculesai.app/molecule-ai/molecule-controlplane/pull/150)
|
||||
→ Control plane PRs [#145](https://github.com/Molecule-AI/molecule-controlplane/pull/145), [#148](https://github.com/Molecule-AI/molecule-controlplane/pull/148), [#150](https://github.com/Molecule-AI/molecule-controlplane/pull/150)
|
||||
|
||||
---
|
||||
|
||||
|
||||
@ -12,7 +12,7 @@ Your team is in Discord. Your AI agents are in Molecule AI. Until today, those t
|
||||
|
||||
That's now one webhook URL.
|
||||
|
||||
Molecule AI workspaces can now connect to Discord. Here's what shipped in [PR #656](https://git.moleculesai.app/molecule-ai/molecule-core/pull/656).
|
||||
Molecule AI workspaces can now connect to Discord. Here's what shipped in [PR #656](https://github.com/Molecule-AI/molecule-core/pull/656).
|
||||
|
||||
---
|
||||
|
||||
@ -70,7 +70,7 @@ For inbound slash commands, point your Discord app's **Interactions Endpoint URL
|
||||
|
||||
## Security: Webhook Tokens Don't Appear in Logs
|
||||
|
||||
Webhook URLs contain a token (`/webhooks/{id}/{token}`). If that token leaks into server logs, it's a rotation event. The Discord adapter is explicit about this: HTTP request errors are logged without the URL, and the adapter returns a generic error message. This was hardened in [PR #659](https://git.moleculesai.app/molecule-ai/molecule-core/pull/659).
|
||||
Webhook URLs contain a token (`/webhooks/{id}/{token}`). If that token leaks into server logs, it's a rotation event. The Discord adapter is explicit about this: HTTP request errors are logged without the URL, and the adapter returns a generic error message. This was hardened in [PR #659](https://github.com/Molecule-AI/molecule-core/pull/659).
|
||||
|
||||
---
|
||||
|
||||
@ -97,4 +97,4 @@ Documentation: [Social Channels guide](/docs/agent-runtime/social-channels#disco
|
||||
|
||||
---
|
||||
|
||||
*Discord adapter shipped in [PR #656](https://git.moleculesai.app/molecule-ai/molecule-core/pull/656). Security hardening in [PR #659](https://git.moleculesai.app/molecule-ai/molecule-core/pull/659). Molecule AI is open source — contributions welcome.*
|
||||
*Discord adapter shipped in [PR #656](https://github.com/Molecule-AI/molecule-core/pull/656). Security hardening in [PR #659](https://github.com/Molecule-AI/molecule-core/pull/659). Molecule AI is open source — contributions welcome.*
|
||||
|
||||
@ -133,4 +133,4 @@ With protocol-native A2A, you get:
|
||||
|
||||
Molecule AI's external agent registration is production-ready. Documentation is live at [External Agent Registration Guide](https://docs.molecule.ai/docs/guides/external-agent-registration). The npm package for the MCP server is available at [`@molecule-ai/mcp-server`](https://www.npmjs.com/package/@molecule-ai/mcp-server).
|
||||
|
||||
Read the full [A2A v1.0 protocol spec](https://git.moleculesai.app/molecule-ai/molecule-core/src/branch/main/docs/api-protocol/a2a-protocol.md) on GitHub.
|
||||
Read the full [A2A v1.0 protocol spec](https://github.com/Molecule-AI/molecule-core/blob/main/docs/api-protocol/a2a-protocol.md) on GitHub.
|
||||
@ -45,7 +45,7 @@ canonicalUrl: "https://docs.molecule.ai/blog/remote-workspaces"
|
||||
" proficiencyLevel": "Expert",
|
||||
"genre": ["technical documentation", "product announcement"],
|
||||
"sameAs": [
|
||||
"https://git.moleculesai.app/molecule-ai/molecule-core",
|
||||
"https://github.com/Molecule-AI/molecule-core",
|
||||
"https://molecule.ai"
|
||||
]
|
||||
}
|
||||
@ -270,7 +270,7 @@ Configure it in your project's `.mcp.json` and any AI agent (Claude Code, Cursor
|
||||
|
||||
→ [External Agent Registration Guide](/docs/guides/external-agent-registration) — full step-by-step with Python and Node.js reference implementations
|
||||
|
||||
→ [GitHub: molecule-core](https://git.moleculesai.app/molecule-ai/molecule-core) — source and issues
|
||||
→ [GitHub: molecule-core](https://github.com/Molecule-AI/molecule-core) — source and issues
|
||||
|
||||
→ [Phase 30 Launch Thread on X](https://x.com) — follow for updates
|
||||
|
||||
|
||||
@ -170,4 +170,4 @@ The `staging` branch is now on `a2a-sdk` 1.0.0. The `main` branch still carries
|
||||
|
||||
If you're running `a2a-sdk` 0.3.x and planning the 1.0.0 migration, this post is the reference. The four breaking changes are well-contained, the migration is a single PR, and the eight smoke scenarios above will tell you whether the upgrade is clean before you merge.
|
||||
|
||||
Questions? The [A2A protocol spec](https://github.com/google-a2a/a2a-specification) is the authoritative source. For Molecule AI's production A2A implementation, see [External Agent Registration](https://docs.molecule.ai/docs/guides/external-agent-registration) or open an issue in the [molecule-core](https://git.moleculesai.app/molecule-ai/molecule-core) repo.
|
||||
Questions? The [A2A protocol spec](https://github.com/google-a2a/a2a-specification) is the authoritative source. For Molecule AI's production A2A implementation, see [External Agent Registration](https://docs.molecule.ai/docs/guides/external-agent-registration) or open an issue in the [molecule-core](https://github.com/Molecule-AI/molecule-core) repo.
|
||||
|
||||
@ -1,119 +0,0 @@
|
||||
# Canvas Architecture Audit — VERIFIED
|
||||
|
||||
> **Status:** VERIFIED — Cross-referenced against molecule-core/canvas/src/ (2026-05-09)
|
||||
> **Author:** Core-FE (draft), Core-UIUX (verification)
|
||||
> **Updated:** 2026-05-09 with architecture structure + known issues
|
||||
|
||||
## Canvas Stack (Verified)
|
||||
|
||||
| Technology | Version | Purpose |
|
||||
|-----------|--------|---------|
|
||||
| React Flow | `@xyflow/react` v12 | Node/edge rendering |
|
||||
| Framework | Next.js 14 App Router | Routing, SSR |
|
||||
| Styling | Tailwind v4 | CSS with custom properties |
|
||||
| State | Zustand | Client state management |
|
||||
|
||||
## Directory Structure (Verified)
|
||||
|
||||
```
|
||||
canvas/src/
|
||||
├── components/
|
||||
│ ├── Canvas.tsx # Viewport management, ReactFlow wrapper
|
||||
│ ├── Toolbar.tsx # Add node/edge controls
|
||||
│ ├── ContextMenu.tsx # Right-click menu
|
||||
│ ├── SidePanel.tsx # Properties panel
|
||||
│ ├── WorkspaceNode.tsx # Node rendering
|
||||
│ ├── A2AEdge.tsx # Edge rendering
|
||||
│ └── [tests]/ # Accessibility + component tests
|
||||
├── stores/
|
||||
│ └── secrets-store.ts # ⚠️ getGrouped() performance issue
|
||||
├── hooks/
|
||||
│ ├── useSocketEvent.ts
|
||||
│ ├── useTemplateDeploy.tsx
|
||||
│ └── useWorkspaceName.ts
|
||||
└── lib/
|
||||
├── api.ts
|
||||
├── auth.ts
|
||||
├── canvas-actions.ts
|
||||
├── design-tokens.ts # STATUS_CONFIG, TIER_CONFIG
|
||||
├── theme.ts
|
||||
└── theme-provider.tsx # ThemeProvider, useTheme()
|
||||
|
||||
## Known Issues
|
||||
|
||||
### 🔴 HIGH: secrets-store.ts Performance
|
||||
**File:** `canvas/src/stores/secrets-store.ts`
|
||||
**Issue:** `getGrouped()` selector creates new objects every call (Object.fromEntries + arrays). Not memoized.
|
||||
**Impact:** Causes unnecessary re-renders on frequent selector access.
|
||||
**Fix needed:** Memoize the selector or use a proper Zustand selector pattern.
|
||||
|
||||
### 🟡 MEDIUM: Pre-commit Hook Verification
|
||||
**Issue:** Pre-commit hook checks 'use client' on hook-using components but unclear if it actually fails on violations.
|
||||
**Action:** Verify the hook is enforcing the rule correctly.
|
||||
|
||||
## Verified Findings
|
||||
|
||||
### Node Rendering ✅ (with notes)
|
||||
- **Framework:** `@xyflow/react` (React Flow) — DOM-based, not SVG/Canvas
|
||||
- **Node selection:** `aria-pressed` + border ring (`border-accent/70`) + shadow
|
||||
- **Node drag:** React Flow native drag — mouse only, no keyboard alternative yet
|
||||
- **Node resize:** `NodeResizer` component visible on selected card, keyboard-inaccessible
|
||||
- **Status:** Accessible via `aria-label` on node cards — "Alpha Workspace workspace — online"
|
||||
|
||||
### Edge Wiring ✅
|
||||
- **Edge rendering:** React Flow SVG paths
|
||||
- **Edge click target:** 1.5px stroke (CSS `stroke-width: 1.5 !important` in globals.css)
|
||||
- **Edge creation:** React Flow drag-from-handle
|
||||
- **Edge anchors:** Visible on hover (`hover:!bg-blue-400`), not keyboard accessible
|
||||
- **Status:** Partial — mouse users only
|
||||
|
||||
### Canvas Controls ✅
|
||||
- **Zoom:** React Flow Controls component (verify if keyboard accessible)
|
||||
- **Pan:** Space+drag, mouse drag
|
||||
- **Minimap:** Not present (MiniMap mocked as null in tests)
|
||||
- **Status:** Basic keyboard support via viewport shortcuts
|
||||
|
||||
### Keyboard Shortcuts ⚠️ PARTIAL
|
||||
- Exists in `useKeyboardShortcuts.ts` but no `aria-describedby` on trigger buttons
|
||||
- No dedicated keyboard shortcut help dialog
|
||||
- **Gap:** Users can't discover shortcuts visually
|
||||
|
||||
### Focus Management ✅ (strong)
|
||||
- Skip link → `#canvas-main` ✅
|
||||
- `aria-label` on ReactFlow container ✅
|
||||
- Focus trap in modals via Radix ✅
|
||||
- Focus ring: `focus-visible:ring-2 focus-visible:ring-blue-500 focus-visible:ring-offset-2 focus-visible:ring-offset-zinc-950`
|
||||
|
||||
### Accessibility Tree ⚠️ PARTIAL
|
||||
- Canvas is in accessibility tree (React Flow DOM nodes)
|
||||
- Node state changes not announced to screen readers (no `aria-live` region)
|
||||
- Context menus announced via `role="menu"` ✅
|
||||
|
||||
### Context Menus ✅ (strong)
|
||||
- `role="menu"`, `role="menuitem"`, `role="separator"` ✅
|
||||
- `aria-label` with workspace name ✅
|
||||
- ArrowUp/Down navigation with wrap-around ✅
|
||||
- Escape + Tab close menu ✅
|
||||
- Auto-focus first item on open ✅
|
||||
|
||||
### Drag and Drop ⚠️ PARTIAL
|
||||
- **Mouse drag:** React Flow native
|
||||
- **Drop target:** Visual indicator (`bg-emerald-950/40 border-emerald-400/60`) ✅
|
||||
- **Keyboard alternative:** None — nodes repositioned only via mouse drag
|
||||
- **Status:** Mouse-only. Keyboard users cannot rearrange nodes.
|
||||
|
||||
---
|
||||
|
||||
## Remaining Gaps (Priority Order)
|
||||
|
||||
| Priority | Item | Files | Status |
|
||||
|----------|------|-------|--------|
|
||||
| HIGH | Screen reader announcements for canvas state changes | Canvas.tsx | Not started |
|
||||
| MEDIUM | Keyboard shortcut help dialog | useKeyboardShortcuts.ts | Not started |
|
||||
| MEDIUM | Keyboard-accessible node drag | WorkspaceNode.tsx, useDragHandlers.ts | Not started |
|
||||
| LOW | Edge anchor keyboard accessibility | A2AEdge.tsx | Not started |
|
||||
| LOW | Node resize keyboard accessibility | WorkspaceNode.tsx (NodeResizer) | Not started |
|
||||
|
||||
---
|
||||
|
||||
*Verified 2026-05-09 by Core-UIUX against molecule-core/canvas/src/*
|
||||
@ -1,424 +0,0 @@
|
||||
# Canvas Design System v1 — VERIFIED
|
||||
|
||||
> **Status:** VERIFIED — Cross-referenced against molecule-core/canvas/src/ (2026-05-09)
|
||||
> **Authors:** Core-FE (draft), Core-UIUX (verification + updates)
|
||||
> **Source files verified:**
|
||||
> - `canvas/src/app/globals.css`
|
||||
> - `canvas/src/styles/theme-tokens.css`
|
||||
> - `canvas/src/lib/design-tokens.ts`
|
||||
> - `canvas/src/components/Tooltip.tsx`
|
||||
> - `canvas/src/components/ContextMenu.tsx`
|
||||
> - `canvas/src/components/Canvas.tsx`
|
||||
> - `canvas/src/components/__tests__/Canvas.a11y.test.tsx`
|
||||
> - `canvas/src/components/__tests__/ContextMenu.keyboard.test.tsx`
|
||||
> - `canvas/src/components/__tests__/MissingKeysModal.a11y.test.tsx`
|
||||
> - `canvas/src/components/__tests__/ConversationTraceModal.a11y.test.tsx`
|
||||
|
||||
---
|
||||
|
||||
## 1. Color Palette — Three-Mode Theme System
|
||||
|
||||
Canvas supports **three themes**: System (follows OS), Light, Dark. Controlled via `ThemeProvider` in `theme-provider.tsx` with preference persisted in `mol_theme` cookie.
|
||||
|
||||
**Key principle: Use semantic tokens, NOT raw zinc values for surfaces.**
|
||||
|
||||
### 1.1 Theme-Mutable Tokens (use these for surfaces)
|
||||
|
||||
Defined in `globals.css` via Tailwind v4 `@theme` block. Automatically flip between light/dark.
|
||||
|
||||
**Light theme (warm paper):**
|
||||
|
||||
| Token | Tailwind Class | Hex | Usage |
|
||||
|-------|--------------|-----|-------|
|
||||
| `--color-surface` | `bg-surface` | `#fafaf7` | Page background |
|
||||
| `--color-surface-elevated` | `bg-surface-elevated` | `#ffffff` | Elevated cards, modals |
|
||||
| `--color-surface-sunken` | `bg-surface-sunken` | `#f3f1ec` | Input fields, recessed areas |
|
||||
| `--color-surface-card` | `bg-surface-card` | `#efece4` | Node cards, chips |
|
||||
| `--color-line` | `border-line` | `#e6e2d8` | Dividers, borders |
|
||||
| `--color-line-soft` | `border-line-soft` | `#efece4` | Subtle dividers |
|
||||
| `--color-ink` | `text-ink` | `#15181c` | Primary text |
|
||||
| `--color-ink-mid` | `text-ink-mid` | `#5a5e66` | Secondary text |
|
||||
| `--color-ink-soft` | `text-ink-soft` | `#8b8e95` | Tertiary text, placeholders |
|
||||
| `--color-accent` | `text-accent` | `#3b5bdb` | Links, primary actions |
|
||||
| `--color-accent-strong` | `text-accent-strong` | `#1a2f99` | Emphasized accent |
|
||||
| `--color-warm` | `text-warm` | `#c0532b` | Warnings |
|
||||
| `--color-good` | `text-good` | `#2f7a4d` | Success states |
|
||||
| `--color-bad` | `text-bad` | `#b94e4a` | Error states |
|
||||
|
||||
**Dark theme:**
|
||||
|
||||
| Token | Hex | Usage |
|
||||
|-------|-----|-------|
|
||||
| `--color-surface` | `#0e1014` | Page background |
|
||||
| `--color-surface-elevated` | `#15181c` | Elevated cards |
|
||||
| `--color-surface-sunken` | `#0a0b0e` | Input fields |
|
||||
| `--color-surface-card` | `#1a1d23` | Node cards |
|
||||
| `--color-line` | `#2a2f3a` | Dividers |
|
||||
| `--color-ink` | `#f4f1e9` | Primary text |
|
||||
| `--color-ink-mid` | `#c8c2b4` | Secondary text |
|
||||
| `--color-ink-soft` | `#8d92a0` | Tertiary text |
|
||||
| `--color-accent` | `#6883e8` | Links (brighter for AA contrast) |
|
||||
| `--color-accent-strong` | `#8aa1ee` | Emphasized accent |
|
||||
| `--color-warm` | `#d96f48` | Warnings |
|
||||
| `--color-good` | `#4ca06e` | Success |
|
||||
| `--color-bad` | `#d27773` | Errors |
|
||||
|
||||
### 1.2 Always-Dark Tokens (terminal surfaces)
|
||||
|
||||
Terminals, console modal, log streams **stay dark** in all themes — readable green-on-black doesn't translate to light.
|
||||
|
||||
| Token | Tailwind Class | Hex | Usage |
|
||||
|-------|--------------|-----|-------|
|
||||
| `--color-bg` | `bg-bg` | `rgb(9 9 11)` / zinc-950 | Terminal background |
|
||||
| `--color-bg-elev` | `bg-bg-elev` | `rgb(24 24 27)` / zinc-900 | Elevated terminal surfaces |
|
||||
| `--color-bg-card` | `bg-bg-card` | `rgb(39 39 42)` / zinc-800 | Terminal cards |
|
||||
| `--color-line-strong` | `border-line-strong` | `rgb(63 63 70)` / zinc-700 | Strong borders |
|
||||
| `--color-ink-mute` | `text-ink-mute` | `rgb(161 161 170)` / zinc-400 | Muted text |
|
||||
| `--color-ink-dim` | `text-ink-dim` | `rgb(113 113 122)` / zinc-500 | Dim text |
|
||||
|
||||
### 1.3 Raw Zinc Usage Rules
|
||||
|
||||
**Use raw zinc for:**
|
||||
- Borders: `border-zinc-700`, `border-zinc-800`
|
||||
- Disabled states: `text-zinc-600`, `bg-zinc-800`
|
||||
- Code highlighting: `bg-zinc-900`, `text-zinc-300`
|
||||
- Terminal surfaces: `bg-zinc-950` (always-dark)
|
||||
|
||||
**NEVER use for surfaces:**
|
||||
- `bg-zinc-900` or `bg-zinc-950` as page/card backgrounds — use `bg-surface`
|
||||
- `text-zinc-50` or `text-zinc-100` as primary text — use `text-ink`
|
||||
- `bg-white`, `bg-gray-50/100` for surfaces — use semantic tokens
|
||||
|
||||
### 1.4 Accessibility Contrast
|
||||
|
||||
| Pair | Ratio | WCAG |
|
||||
|------|-------|------|
|
||||
| `text-ink` on `bg-surface` (light) | ~14.5:1 | AAA |
|
||||
| `text-ink` on `bg-surface` (dark) | ~15.8:1 | AAA |
|
||||
| `text-ink-mid` on `bg-surface` (light) | ~5.2:1 | AA |
|
||||
| `text-ink-mid` on `bg-surface` (dark) | ~5.9:1 | AA |
|
||||
| `text-accent` on `bg-surface` (light) | ~4.8:1 | AA |
|
||||
| `text-accent` on `bg-surface` (dark) | ~4.6:1 | AA |
|
||||
|
||||
---
|
||||
|
||||
## 2. Typography Scale
|
||||
|
||||
**Actual font stack** (from `globals.css`):
|
||||
```
|
||||
-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", sans-serif
|
||||
```
|
||||
No custom fonts loaded — uses OS-native system stack.
|
||||
|
||||
| Size Token | Tailwind | Usage |
|
||||
|------------|----------|-------|
|
||||
| `text-[10px]` | 10px | Micro badges, tier labels |
|
||||
| `text-[11px]` | 11px | Tooltip text |
|
||||
| `text-xs` / `text-[12px]` | 12px | Badges, timestamps |
|
||||
| `text-sm` / `text-[13px]` | 13–14px | Secondary labels, node titles |
|
||||
| `text-base` / `text-[16px]` | 16px | Body text |
|
||||
| `text-lg` | 18px | Section headers |
|
||||
| `text-xl` | 20px | Modal titles |
|
||||
|
||||
**Line height:** `leading-tight` (1.25) for headings, `leading-relaxed` (1.625) for body/tooltips.
|
||||
|
||||
---
|
||||
|
||||
## 3. Animation / Motion Tokens
|
||||
|
||||
**Defined in `canvas/src/styles/theme-tokens.css`** — use these, don't hardcode ms values.
|
||||
|
||||
| Token | Value | Usage |
|
||||
|-------|-------|-------|
|
||||
| `--mol-duration-fast` | 150ms | Hover states, button feedback |
|
||||
| `--mol-duration-base` | 300ms | Standard transitions |
|
||||
| `--mol-duration-spawn` | 350ms | Node spawn animation |
|
||||
| `--mol-duration-root-complete` | 700ms | Org-deploy root glow |
|
||||
| `--mol-duration-fit-view` | 800ms | Canvas fit-viewport |
|
||||
|
||||
| Token | Value | Usage |
|
||||
|-------|-------|-------|
|
||||
| `--mol-easing-standard` | `cubic-bezier(0.2, 0, 0, 1)` | Default ease |
|
||||
| `--mol-easing-bounce-out` | `cubic-bezier(0.2, 0.8, 0.2, 1.05)` | Node spawn bounce |
|
||||
| `--mol-easing-emphasize` | `cubic-bezier(0.3, 0, 0, 1)` | Modal/drawer enter |
|
||||
|
||||
**CSS usage:**
|
||||
```css
|
||||
/* Good — reference the token */
|
||||
transition: all var(--mol-duration-fast) ease;
|
||||
|
||||
/* Bad — hardcoded value */
|
||||
transition: all 150ms ease;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 4. Component Patterns (Verified)
|
||||
|
||||
### 4.1 Buttons
|
||||
|
||||
```tsx
|
||||
// Primary — accent background, ink text
|
||||
<button className="bg-accent hover:bg-accent/90 active:scale-95
|
||||
text-ink px-4 py-2 rounded-md text-sm font-medium
|
||||
focus-visible:ring-2 focus-visible:ring-blue-500
|
||||
focus-visible:ring-offset-2 focus-visible:ring-offset-zinc-900
|
||||
disabled:opacity-50 disabled:cursor-not-allowed">
|
||||
Primary
|
||||
</button>
|
||||
|
||||
// Secondary — surface-card background, border-line
|
||||
<button className="bg-surface-card hover:bg-surface-elevated border border-line
|
||||
text-ink px-4 py-2 rounded-md text-sm font-medium
|
||||
focus-visible:ring-2 focus-visible:ring-blue-500
|
||||
focus-visible:ring-offset-2 focus-visible:ring-offset-zinc-900">
|
||||
Secondary
|
||||
</button>
|
||||
|
||||
// Ghost — no background, hover surface
|
||||
<button className="hover:bg-surface-card text-ink-mid hover:text-ink
|
||||
px-4 py-2 rounded-md text-sm font-medium">
|
||||
Ghost
|
||||
</button>
|
||||
|
||||
// Danger — bad color, requires confirmation dialog
|
||||
<button className="bg-bad hover:bg-bad/90 text-white px-4 py-2
|
||||
rounded-md text-sm font-medium">
|
||||
Delete
|
||||
</button>
|
||||
```
|
||||
|
||||
**States:** default, hover, active (`scale-95`), focus (`ring-2 ring-blue-500 ring-offset-2 ring-offset-zinc-900`), disabled (`opacity-50 cursor-not-allowed`).
|
||||
|
||||
### 4.2 Inputs
|
||||
|
||||
```tsx
|
||||
// Text input — use semantic tokens for surfaces
|
||||
<input
|
||||
className="bg-surface-sunken border border-line text-ink
|
||||
placeholder:text-ink-soft px-3 py-2 rounded-md text-sm
|
||||
focus:outline-none focus:ring-2 focus:ring-blue-500
|
||||
focus:border-transparent
|
||||
disabled:opacity-50 disabled:cursor-not-allowed"
|
||||
placeholder="Enter workspace name"
|
||||
/>
|
||||
|
||||
// Error state
|
||||
<input
|
||||
className="border-bad focus:ring-bad"
|
||||
aria-invalid="true"
|
||||
aria-describedby="error-message"
|
||||
/>
|
||||
```
|
||||
|
||||
**Label:** `text-sm font-medium text-ink mb-1`
|
||||
**Error:** `text-xs text-bad mt-1`
|
||||
|
||||
### 4.3 Cards
|
||||
|
||||
```tsx
|
||||
// Workspace node card (from WorkspaceNode.tsx)
|
||||
<div className="bg-surface-sunken/90 border border-line/80
|
||||
rounded-xl p-3.5 py-2.5
|
||||
hover:border-zinc-500/60 shadow-lg shadow-black/30
|
||||
focus-visible:ring-2 focus-visible:ring-accent/70
|
||||
focus-visible:ring-offset-1 focus-visible:ring-offset-zinc-950">
|
||||
```
|
||||
|
||||
### 4.4 Modals (Radix Dialog)
|
||||
|
||||
```tsx
|
||||
// Backdrop
|
||||
<div className="fixed inset-0 bg-black/70 backdrop-blur-sm z-50"
|
||||
aria-hidden="true" />
|
||||
|
||||
// Dialog — use surface-card + border-line
|
||||
<div className="bg-surface-card border border-line rounded-xl
|
||||
shadow-2xl p-6 max-w-md w-full mx-4">
|
||||
{/* Modal content */}
|
||||
</div>
|
||||
```
|
||||
|
||||
Note: Uses `--color-surface-sunken` for sunken areas (node cards). Cards use `bg-surface-card`.
|
||||
|
||||
**Important:** Use `@radix-ui/react-dialog` — it provides WCAG 2.1 compliance automatically (focus trap, Escape key, aria-modal, aria-labelledby).
|
||||
|
||||
### 4.5 Tooltips
|
||||
|
||||
**Verified implementation** (`canvas/src/components/Tooltip.tsx`):
|
||||
|
||||
```tsx
|
||||
// Trigger wraps children
|
||||
<span aria-describedby="tooltip-id">
|
||||
{children}
|
||||
</span>
|
||||
|
||||
// Tooltip portal (shows on hover + focus, 400ms delay)
|
||||
<div id="tooltip-id"
|
||||
role="tooltip"
|
||||
className="fixed z-[9999] max-w-[400px] max-h-[300px] overflow-y-auto
|
||||
px-3 py-2 bg-surface-card border border-line
|
||||
rounded-lg shadow-2xl shadow-black/60 pointer-events-none">
|
||||
<div className="text-[11px] text-ink whitespace-pre-wrap break-words leading-relaxed">
|
||||
{text}
|
||||
</div>
|
||||
</div>
|
||||
```
|
||||
|
||||
**WCAG 1.4.13 compliance:** Escape key dismisses tooltip without moving pointer/focus.
|
||||
|
||||
### 4.6 Theme Switching
|
||||
|
||||
Use `useTheme()` hook from `theme-provider.tsx`:
|
||||
|
||||
```tsx
|
||||
import { useTheme } from "@/lib/theme-provider";
|
||||
|
||||
function ThemeToggle() {
|
||||
const { theme, resolvedTheme, setTheme } = useTheme();
|
||||
return (
|
||||
<select
|
||||
value={theme}
|
||||
onChange={(e) => setTheme(e.target.value as ThemePreference)}
|
||||
>
|
||||
<option value="system">System</option>
|
||||
<option value="light">Light</option>
|
||||
<option value="dark">Dark</option>
|
||||
</select>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
**Theme types:**
|
||||
```ts
|
||||
type ThemePreference = "system" | "light" | "dark";
|
||||
type ResolvedTheme = "light" | "dark";
|
||||
```
|
||||
|
||||
**Cookie:** `mol_theme` with `Domain=.moleculesai.app` — persists across surfaces.
|
||||
|
||||
---
|
||||
|
||||
## 5. Accessibility Rules (WCAG 2.1 AA) — VERIFIED
|
||||
|
||||
### 5.1 Focus Management ✅ VERIFIED
|
||||
- All interactive elements have `focus-visible:ring-2 focus-visible:ring-blue-500 focus-visible:ring-offset-2 focus-visible:ring-offset-zinc-950`
|
||||
- No `outline-none` without equivalent focus ring
|
||||
- Radix Dialog traps focus automatically
|
||||
|
||||
### 5.2 Semantic HTML ✅ VERIFIED
|
||||
- Buttons use `<button>` — verified in WorkspaceNode.tsx, ContextMenu.tsx
|
||||
- Form inputs have associated `<label>` patterns
|
||||
- Radix Dialog provides role="dialog" + aria-modal
|
||||
|
||||
### 5.3 ARIA ✅ VERIFIED
|
||||
- Icon-only buttons: `aria-label` with descriptive text (not "X")
|
||||
- Example: `aria-label="Extract ${name} from team"` in WorkspaceNode.tsx
|
||||
- Live regions: `aria-live="polite"` on Toast component
|
||||
- Modals: Radix provides `role="dialog"`, `aria-modal="true"`, `aria-labelledby`
|
||||
- Error messages: `aria-invalid="true"`, `aria-describedby` linking to error text
|
||||
- Tooltips: `role="tooltip"` + `aria-describedby` on trigger
|
||||
|
||||
### 5.4 Keyboard Navigation ✅ VERIFIED
|
||||
- ContextMenu: ArrowUp/Down wraps, Enter/Space selects, Escape closes, Tab closes
|
||||
- Modals: Escape closes (Radix), focus returns to trigger
|
||||
- `prefers-reduced-motion` ✅ (verified in globals.css)
|
||||
|
||||
### 5.5 Color Independence ✅
|
||||
- Status indicators use text labels + icons, not color alone
|
||||
- `STATUS_CONFIG` has text labels: "Online", "Offline", "Failed", etc.
|
||||
|
||||
---
|
||||
|
||||
## 6. React Flow Canvas Specifics
|
||||
|
||||
Canvas uses `@xyflow/react` (React Flow).
|
||||
|
||||
### Canvas Container ✅ VERIFIED
|
||||
```tsx
|
||||
// Canvas.tsx wraps ReactFlow with:
|
||||
<ReactFlow
|
||||
aria-label="Molecule AI workspace canvas"
|
||||
// ...
|
||||
/>
|
||||
```
|
||||
|
||||
### Node Accessibility ✅ VERIFIED
|
||||
- `role="button"` on workspace node cards
|
||||
- `tabIndex={0}` for keyboard focus
|
||||
- `aria-pressed` for selection state
|
||||
- `aria-label` with workspace name + status
|
||||
|
||||
### Skip Link ✅ VERIFIED
|
||||
```tsx
|
||||
<a href="#canvas-main">Skip to canvas</a>
|
||||
<main id="canvas-main" role="main">
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 7. Enforcement Checklist
|
||||
|
||||
### Color Token Rules
|
||||
- [x] No `bg-white` / `bg-zinc-50` for surfaces — use `bg-surface`
|
||||
- [x] No `text-zinc-50` / `text-zinc-100` for text — use `text-ink`
|
||||
- [x] No `bg-zinc-900` / `bg-zinc-950` for surfaces — use `bg-surface` or `bg-surface-card`
|
||||
- [x] Raw zinc OK for: borders, disabled states, code, terminal surfaces
|
||||
|
||||
### Accessibility Rules
|
||||
- [x] All buttons have focus rings (verified in tests)
|
||||
- [x] All modals use Radix Dialog (verified)
|
||||
- [x] All tooltips use `role="tooltip"` + `aria-describedby` (verified)
|
||||
- [x] No `outline-none` without focus ring (verified)
|
||||
- [x] All inputs have visible labels (verified pattern)
|
||||
- [x] Contrast ratios at 4.5:1 minimum (verified above)
|
||||
- [x] `prefers-reduced-motion` suppresses all animations (verified in globals.css)
|
||||
- [x] Context menu has keyboard navigation (verified in ContextMenu.keyboard.test.tsx)
|
||||
- [x] Theme switching works: System/Light/Dark modes verified
|
||||
|
||||
---
|
||||
|
||||
## 8. Canvas Architecture (Verified)
|
||||
|
||||
**Stack:**
|
||||
- `@xyflow/react` v12 (React Flow) — node/edge rendering
|
||||
- Next.js 14 App Router
|
||||
- Tailwind v4 with CSS custom properties
|
||||
- Zustand for state management
|
||||
|
||||
**Directory Structure:**
|
||||
```
|
||||
canvas/src/
|
||||
├── components/ # Canvas.tsx, Toolbar.tsx, ContextMenu.tsx, SidePanel.tsx, WorkspaceNode.tsx, A2AEdge.tsx
|
||||
├── stores/ # secrets-store.ts (only store)
|
||||
├── hooks/ # useSocketEvent.ts, useTemplateDeploy.tsx, useWorkspaceName.ts
|
||||
├── lib/ # api.ts, auth.ts, canvas-actions.ts, design-tokens.ts, theme.ts, theme-provider.tsx
|
||||
└── app/ # Next.js App Router
|
||||
```
|
||||
|
||||
## 9. Known Issues (Technical Debt)
|
||||
|
||||
### Performance Issues
|
||||
- **secrets-store.ts getGrouped() selector** — Creates new objects every call (Object.fromEntries + arrays) — not memoized. Causes performance issues with frequent re-renders. Needs selector optimization.
|
||||
|
||||
### Code Quality
|
||||
- Check for `any` types in canvas/ directory
|
||||
- Verify pre-commit hook actually fails on 'use client' violations (unverified)
|
||||
- Verify all Zustand selectors avoid object creation (see getGrouped issue above)
|
||||
- Check 'use client' directive on hook-using components
|
||||
|
||||
### Testing
|
||||
- Add axe-core integration for automated accessibility testing
|
||||
- Visual regression tests — no screenshot tests exist yet (KI-006)
|
||||
- Target >80% test coverage on changed files
|
||||
|
||||
## 10. Remaining Open Items
|
||||
|
||||
### Accessibility Gaps
|
||||
1. **Screen reader announcements** — Node/edge changes not announced. Need `aria-live="polite"` region.
|
||||
2. **Keyboard shortcut help dialog** — No dedicated dialog. Shortcuts exist in `useKeyboardShortcuts.ts` but no `aria-describedby` hints on buttons.
|
||||
3. **Edge anchor accessibility** — React Flow handles purely visual. Need ARIA annotations for screen readers.
|
||||
4. **Drag-and-drop keyboard alternative** — Mouse only. Need keyboard equivalent for node rearrangement.
|
||||
|
||||
### Performance
|
||||
5. **secrets-store.ts getGrouped()** — Not memoized, creates new objects every call.
|
||||
@ -73,7 +73,7 @@ These are applied after CORS middleware on every response.
|
||||
|
||||
## 14. No Exposed Database Ports
|
||||
|
||||
Postgres and Redis must not expose host ports. They communicate exclusively over the internal Docker network (`molecule-core-net`). Use `docker compose exec` for direct access during development.
|
||||
Postgres and Redis must not expose host ports. They communicate exclusively over the internal Docker network (`molecule-monorepo-net`). Use `docker compose exec` for direct access during development.
|
||||
|
||||
## Related Docs
|
||||
|
||||
|
||||
@ -3,8 +3,8 @@
|
||||
**Date:** 2026-04-23
|
||||
**Severity:** High — every new SaaS tenant blocked
|
||||
**Detection path:** E2E Staging SaaS run 24848425822 failed at "tenant provisioning"; investigation of CP Railway logs surfaced the auth mismatch.
|
||||
**Status:** Fix pushed on [molecule-controlplane#238](https://git.moleculesai.app/molecule-ai/molecule-controlplane/pull/238).
|
||||
**Related:** [issue #239](https://git.moleculesai.app/molecule-ai/molecule-controlplane/issues/239) (Cloudflare DNS record quota), [testing-strategy.md](../engineering/testing-strategy.md)
|
||||
**Status:** Fix pushed on [molecule-controlplane#238](https://github.com/Molecule-AI/molecule-controlplane/pull/238).
|
||||
**Related:** [issue #239](https://github.com/Molecule-AI/molecule-controlplane/issues/239) (Cloudflare DNS record quota), [testing-strategy.md](../engineering/testing-strategy.md)
|
||||
|
||||
## Summary
|
||||
|
||||
@ -35,7 +35,7 @@ The flow was:
|
||||
|
||||
### The commit that introduced the bug
|
||||
|
||||
[molecule-controlplane#235](https://git.moleculesai.app/molecule-ai/molecule-controlplane/pull/235) — "fix(provision): wait for tenant boot-event before falling back to canary". Merged 2026-04-22.
|
||||
[molecule-controlplane#235](https://github.com/Molecule-AI/molecule-controlplane/pull/235) — "fix(provision): wait for tenant boot-event before falling back to canary". Merged 2026-04-22.
|
||||
|
||||
Before #235, readiness was determined via a canary probe through Cloudflare's edge — which didn't need CP-side auth, so the INSERT ordering didn't matter. #235 made boot-events the primary readiness signal but didn't move the INSERT earlier. The race was latent before but became load-bearing after.
|
||||
|
||||
@ -90,7 +90,7 @@ bootReady, _ := provisioner.WaitForTenantReady(ctx, h.db, org.ID, 4*time.Minute)
|
||||
h.db.ExecContext(ctx, `UPDATE org_instances SET status = 'running' WHERE org_id = $1`, org.ID)
|
||||
```
|
||||
|
||||
See [molecule-controlplane#238](https://git.moleculesai.app/molecule-ai/molecule-controlplane/pull/238) for the full diff.
|
||||
See [molecule-controlplane#238](https://github.com/Molecule-AI/molecule-controlplane/pull/238) for the full diff.
|
||||
|
||||
## Lessons
|
||||
|
||||
@ -122,9 +122,9 @@ Early investigation blamed the hermes provider 401 bug (a separate, known issue
|
||||
|
||||
## Follow-ups
|
||||
|
||||
- [ ] Land [molecule-controlplane#238](https://git.moleculesai.app/molecule-ai/molecule-controlplane/pull/238)
|
||||
- [ ] Land [molecule-controlplane#238](https://github.com/Molecule-AI/molecule-controlplane/pull/238)
|
||||
- [ ] Redeploy staging-api, verify E2E goes green
|
||||
- [ ] Add CP integration test suite (see lesson #2)
|
||||
- [ ] Wire E2E failure → notification (see lesson #3)
|
||||
- [ ] Add invariant comment in `provisionTenant` (see lesson #4)
|
||||
- [ ] Cloudflare DNS quota cleanup — [molecule-controlplane#239](https://git.moleculesai.app/molecule-ai/molecule-controlplane/issues/239)
|
||||
- [ ] Cloudflare DNS quota cleanup — [molecule-controlplane#239](https://github.com/Molecule-AI/molecule-controlplane/issues/239)
|
||||
|
||||
@ -138,5 +138,5 @@ If you see any of these, don't try to "clean it up in place" — **cherry-pick o
|
||||
|
||||
## Related
|
||||
|
||||
- [Issue #1822](https://git.moleculesai.app/molecule-ai/molecule-core/issues/1822) — backend parity drift tracker (example of docs that have to stay current)
|
||||
- [Issue #1822](https://github.com/Molecule-AI/molecule-core/issues/1822) — backend parity drift tracker (example of docs that have to stay current)
|
||||
- [Postmortem: CP boot-event 401](./postmortem-2026-04-23-boot-event-401.md) — caught before shipping because a reviewer could read the diff
|
||||
|
||||
@ -103,9 +103,9 @@ A bad test:
|
||||
|
||||
## Related
|
||||
|
||||
- [Issue #1821](https://git.moleculesai.app/molecule-ai/molecule-core/issues/1821) — policy tracking issue
|
||||
- [Issue #1815](https://git.moleculesai.app/molecule-ai/molecule-core/issues/1815) — Canvas coverage instrumentation
|
||||
- [Issue #1818](https://git.moleculesai.app/molecule-ai/molecule-core/issues/1818) — Python pytest-cov
|
||||
- [Issue #1814](https://git.moleculesai.app/molecule-ai/molecule-core/issues/1814) — workspace_provision_test.go unblock
|
||||
- [Issue #1816](https://git.moleculesai.app/molecule-ai/molecule-core/issues/1816) — tokens.go coverage
|
||||
- [Issue #1819](https://git.moleculesai.app/molecule-ai/molecule-core/issues/1819) — wsauth_middleware coverage
|
||||
- [Issue #1821](https://github.com/Molecule-AI/molecule-core/issues/1821) — policy tracking issue
|
||||
- [Issue #1815](https://github.com/Molecule-AI/molecule-core/issues/1815) — Canvas coverage instrumentation
|
||||
- [Issue #1818](https://github.com/Molecule-AI/molecule-core/issues/1818) — Python pytest-cov
|
||||
- [Issue #1814](https://github.com/Molecule-AI/molecule-core/issues/1814) — workspace_provision_test.go unblock
|
||||
- [Issue #1816](https://github.com/Molecule-AI/molecule-core/issues/1816) — tokens.go coverage
|
||||
- [Issue #1819](https://github.com/Molecule-AI/molecule-core/issues/1819) — wsauth_middleware coverage
|
||||
|
||||
@ -153,7 +153,7 @@ The `id` field is your workspace ID — remember it.
|
||||
|---|---|
|
||||
| "Failed to send message — agent may be unreachable" | The tenant couldn't POST to your URL. Verify `curl https://<your-tunnel>/health` returns 200 from another machine. |
|
||||
| Response takes > 30s | Canvas times out around 30s. Keep initial implementations simple. For long-running work, return a placeholder and use [polling mode](#next-step-polling-mode-preview) (once available). |
|
||||
| Agent duplicated in chat | Known canvas bug where WebSocket + HTTP responses both render. Fixed in [PR #1517](https://git.moleculesai.app/molecule-ai/molecule-core/pull/1517). |
|
||||
| Agent duplicated in chat | Known canvas bug where WebSocket + HTTP responses both render. Fixed in [PR #1517](https://github.com/Molecule-AI/molecule-core/pull/1517). |
|
||||
| Agent replies but canvas shows "Agent unreachable" | Check the tenant can reach your URL. Cloudflare quick tunnels rotate — the URL in your canvas may point at a dead tunnel after restart. |
|
||||
| Getting 404 when POSTing to tenant | Add `X-Molecule-Org-Id` header. The tenant's security layer 404s unmatched origin requests by design. |
|
||||
|
||||
@ -215,7 +215,7 @@ Push mode (this guide) works today but requires an inbound-reachable URL — whi
|
||||
|
||||
Your agent makes only outbound HTTPS calls to the platform, pulling messages from an inbox queue and posting replies back. Works behind any NAT/firewall, tolerates offline laptops, no tunnel needed.
|
||||
|
||||
See the [design doc](https://git.moleculesai.app/molecule-ai/internal/src/branch/main/product/external-workspaces-polling.md) (internal) and the implementation tracking issue (search `polling+mode` on the [molecule-core issue tracker](https://git.moleculesai.app/molecule-ai/molecule-core/issues)).
|
||||
See the [design doc](https://github.com/Molecule-AI/internal/blob/main/product/external-workspaces-polling.md) (internal) and [implementation tracking issue](https://github.com/Molecule-AI/molecule-core/issues?q=polling+mode) once opened.
|
||||
|
||||
---
|
||||
|
||||
@ -255,7 +255,7 @@ If all four pass and canvas still shows your agent as unreachable, see the [remo
|
||||
## Feedback
|
||||
|
||||
This is a new path. Tell us what broke:
|
||||
- Open an issue: https://git.moleculesai.app/molecule-ai/molecule-core/issues/new?labels=external-workspace
|
||||
- Open an issue: https://github.com/Molecule-AI/molecule-core/issues/new?labels=external-workspace
|
||||
- Join #external-workspaces on our Slack
|
||||
- Submit a PR improving this doc if something tripped you up — the faster we can make the quickstart, the more developers we bring in
|
||||
|
||||
|
||||
@ -143,5 +143,5 @@ The agent appears on the canvas with a **purple REMOTE badge** within seconds. F
|
||||
## Next Steps
|
||||
|
||||
- **[External Agent Registration Guide →](/docs/guides/external-agent-registration)** — full endpoint reference, Python + Node.js examples, troubleshooting
|
||||
- **[molecule-sdk-python →](https://git.moleculesai.app/molecule-ai/molecule-sdk-python)** — SDK source, `RemoteAgentClient` API docs
|
||||
- **[SDK Examples →](https://git.moleculesai.app/molecule-ai/molecule-sdk-python/src/branch/main/examples/remote-agent)** — `run.py` demo script, annotated walkthrough
|
||||
- **[molecule-sdk-python →](https://github.com/Molecule-AI/molecule-sdk-python)** — SDK source, `RemoteAgentClient` API docs
|
||||
- **[SDK Examples →](https://github.com/Molecule-AI/molecule-sdk-python/tree/main/examples/remote-agent)** — `run.py` demo script, annotated walkthrough
|
||||
|
||||
@ -61,7 +61,7 @@ molecule skills install arxiv-research --from community
|
||||
|
||||
Community skills are reviewed by the Molecule AI team before being
|
||||
listed. Submit a skill for review by opening a PR against
|
||||
[`molecule-ai/skills`](https://git.moleculesai.app/molecule-ai/skills).
|
||||
[`molecule-ai/skills`](https://github.com/Molecule-AI/skills).
|
||||
|
||||
## Installing via config.yaml
|
||||
|
||||
@ -151,7 +151,7 @@ molecule skills bundle my-custom-skill --output ./org-templates/my-role/
|
||||
```
|
||||
|
||||
**Publishing to the community:** Open a PR against
|
||||
[`molecule-ai/skills`](https://git.moleculesai.app/molecule-ai/skills) with a
|
||||
[`molecule-ai/skills`](https://github.com/Molecule-AI/skills) with a
|
||||
complete skill package. Community skills are reviewed for security and
|
||||
correctness before listing.
|
||||
|
||||
|
||||
@ -58,11 +58,8 @@ green — proves wire shape end-to-end against a real `hermes gateway run`
|
||||
subprocess + stub OpenAI-compat LLM. Caught + fixed a real `KeyError`
|
||||
in upstream `hermes_cli/tools_config.py` (PLATFORMS dict lookup
|
||||
crashed on plugin platforms) — fix on the patched fork branch
|
||||
(`molecule-ai/hermes-agent` `feat/platform-adapter-plugins`, commit
|
||||
`18e4849e`, hosted on Gitea at
|
||||
`https://git.moleculesai.app/molecule-ai/hermes-agent` — moved from the
|
||||
suspended `github.com/HongmingWang-Rabbit/hermes-agent`, see
|
||||
`molecule-ai/internal#72`). Upstream PR #18775 OPEN; CONFLICTING with main.
|
||||
(`HongmingWang-Rabbit/hermes-agent` `feat/platform-adapter-plugins`,
|
||||
commit `18e4849e`). Upstream PR #18775 OPEN; CONFLICTING with main.
|
||||
Not on critical path for our platform — patched fork is what the
|
||||
workspace image installs.
|
||||
|
||||
@ -99,10 +96,10 @@ fork needed in production.
|
||||
`resolve_platform_id` for plugin-platform-safe deserialization, and
|
||||
`self.adapters[adapter.platform]` keying fix (caught by real-subprocess
|
||||
test before merge — see below).
|
||||
- **Plugin package**: [Molecule-AI/hermes-platform-molecule-a2a](https://git.moleculesai.app/molecule-ai/hermes-platform-molecule-a2a)
|
||||
- **Plugin package**: [Molecule-AI/hermes-platform-molecule-a2a](https://github.com/Molecule-AI/hermes-platform-molecule-a2a)
|
||||
v0.1.0 — public, MIT-licensed. 11 unit tests + 8 in-process E2E
|
||||
+ 4 real-subprocess E2E checkpoints all green.
|
||||
- **Workspace template patch**: [Molecule-AI/molecule-ai-workspace-template-hermes#32](https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-hermes/pull/32)
|
||||
- **Workspace template patch**: [Molecule-AI/molecule-ai-workspace-template-hermes#32](https://github.com/Molecule-AI/molecule-ai-workspace-template-hermes/pull/32)
|
||||
— Dockerfile installs the patched fork + plugin into the hermes
|
||||
installer's venv; start.sh seeds `platforms.molecule-a2a` config
|
||||
stanza. Pre-demo deliberately install-only; adapter.py rewrite to
|
||||
@ -157,9 +154,9 @@ intermediate shim earns its complexity.
|
||||
## Codex (OpenAI Codex CLI)
|
||||
|
||||
**Status:** Template SHIPPED. Repo live at
|
||||
[`Molecule-AI/molecule-ai-workspace-template-codex`](https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-codex)
|
||||
[`Molecule-AI/molecule-ai-workspace-template-codex`](https://github.com/Molecule-AI/molecule-ai-workspace-template-codex)
|
||||
(14 files, 1411 LOC, 12/12 tests). molecule-core registration in
|
||||
[PR #2512](https://git.moleculesai.app/molecule-ai/molecule-core/pull/2512).
|
||||
[PR #2512](https://github.com/Molecule-AI/molecule-core/pull/2512).
|
||||
E2E with real A2A traffic remains.
|
||||
|
||||
**Path:** Persistent `codex app-server` stdio JSON-RPC client
|
||||
|
||||
@ -101,7 +101,7 @@ incident-shaped.
|
||||
## [v1.0.0] — initial release (RFC #2728, PRs #2729-#2742)
|
||||
|
||||
Initial plugin contract + 11-PR rollout. See
|
||||
[issue #2728](https://git.moleculesai.app/molecule-ai/molecule-core/issues/2728)
|
||||
[issue #2728](https://github.com/Molecule-AI/molecule-core/issues/2728)
|
||||
for the full RFC.
|
||||
|
||||
Endpoints: `/v1/health`, `/v1/namespaces/{name}` (PUT/PATCH/DELETE),
|
||||
|
||||
@ -160,11 +160,11 @@ not expose.
|
||||
| `molecule-skill-update-docs` | `[claude_code]` | `[claude_code, hermes]` |
|
||||
|
||||
Companion PRs:
|
||||
- [molecule-ai-plugin-ecc#2](https://git.moleculesai.app/molecule-ai/molecule-ai-plugin-ecc/pull/2)
|
||||
- [molecule-ai-plugin-superpowers#2](https://git.moleculesai.app/molecule-ai/molecule-ai-plugin-superpowers/pull/2)
|
||||
- [molecule-ai-plugin-molecule-dev#2](https://git.moleculesai.app/molecule-ai/molecule-ai-plugin-molecule-dev/pull/2)
|
||||
- [molecule-ai-plugin-molecule-skill-cron-learnings#2](https://git.moleculesai.app/molecule-ai/molecule-ai-plugin-molecule-skill-cron-learnings/pull/2)
|
||||
- [molecule-ai-plugin-molecule-skill-update-docs#2](https://git.moleculesai.app/molecule-ai/molecule-ai-plugin-molecule-skill-update-docs/pull/2)
|
||||
- [molecule-ai-plugin-ecc#2](https://github.com/Molecule-AI/molecule-ai-plugin-ecc/pull/2)
|
||||
- [molecule-ai-plugin-superpowers#2](https://github.com/Molecule-AI/molecule-ai-plugin-superpowers/pull/2)
|
||||
- [molecule-ai-plugin-molecule-dev#2](https://github.com/Molecule-AI/molecule-ai-plugin-molecule-dev/pull/2)
|
||||
- [molecule-ai-plugin-molecule-skill-cron-learnings#2](https://github.com/Molecule-AI/molecule-ai-plugin-molecule-skill-cron-learnings/pull/2)
|
||||
- [molecule-ai-plugin-molecule-skill-update-docs#2](https://github.com/Molecule-AI/molecule-ai-plugin-molecule-skill-update-docs/pull/2)
|
||||
|
||||
Security note: Security Auditor was offline at time of change. Self-assessed
|
||||
as non-security-impacting — adding `hermes` to a string list in `plugin.yaml`
|
||||
|
||||
@ -17,7 +17,7 @@ This path is aligned to the current repository and current UI. It gets you from
|
||||
## The one-command path
|
||||
|
||||
```bash
|
||||
git clone https://git.moleculesai.app/molecule-ai/molecule-monorepo.git
|
||||
git clone https://github.com/Molecule-AI/molecule-monorepo.git
|
||||
cd molecule-monorepo
|
||||
./scripts/dev-start.sh
|
||||
```
|
||||
@ -42,7 +42,7 @@ If you'd rather run each component yourself — useful when you're iterating on
|
||||
### Step 1: Clone the repository
|
||||
|
||||
```bash
|
||||
git clone https://git.moleculesai.app/molecule-ai/molecule-monorepo.git
|
||||
git clone https://github.com/Molecule-AI/molecule-monorepo.git
|
||||
cd molecule-monorepo
|
||||
```
|
||||
|
||||
|
||||
@ -1,137 +0,0 @@
|
||||
# Runbook — Handlers Postgres Integration port-collision substrate
|
||||
|
||||
**Status:** Resolved 2026-05-08 (PR for class B Hongming-owned CICD red sweep).
|
||||
|
||||
## Symptom
|
||||
|
||||
`Handlers Postgres Integration` workflow fails on staging push and PRs.
|
||||
Step `Apply migrations to Postgres service` shows:
|
||||
|
||||
```
|
||||
psql: error: connection to server at "127.0.0.1", port 5432 failed: Connection refused
|
||||
```
|
||||
|
||||
Job-cleanup step further down logs:
|
||||
|
||||
```
|
||||
Cleaning up services for job Handlers Postgres Integration
|
||||
failed to remove container: Error response from daemon: No such container: <id>
|
||||
```
|
||||
|
||||
…confirming the postgres service container was already gone before
|
||||
cleanup ran.
|
||||
|
||||
## Root cause
|
||||
|
||||
Our Gitea act_runner (operator host `5.78.80.188`,
|
||||
`/opt/molecule/runners/config.yaml`) sets:
|
||||
|
||||
```yaml
|
||||
container:
|
||||
network: host
|
||||
```
|
||||
|
||||
…which act_runner applies to BOTH the job container AND every
|
||||
`services:` container in a workflow. Multiple workflow instances
|
||||
running concurrently across the 16 parallel runners each try to bind
|
||||
postgres on `0.0.0.0:5432`. The first wins; subsequent instances exit
|
||||
immediately with:
|
||||
|
||||
```
|
||||
LOG: could not bind IPv4 address "0.0.0.0": Address in use
|
||||
HINT: Is another postmaster already running on port 5432?
|
||||
FATAL: could not create any TCP/IP sockets
|
||||
```
|
||||
|
||||
act_runner sets `AutoRemove:true` on service containers, so Docker
|
||||
garbage-collects them as soon as they exit. By the time the migrations
|
||||
step runs `pg_isready` / `psql`, the container is gone and connection
|
||||
refused.
|
||||
|
||||
Reproduction (operator host):
|
||||
|
||||
```bash
|
||||
docker run --rm -d --name pg-A --network host \
|
||||
-e POSTGRES_PASSWORD=test postgres:15-alpine
|
||||
docker run -d --name pg-B --network host \
|
||||
-e POSTGRES_PASSWORD=test postgres:15-alpine
|
||||
docker logs pg-B # FATAL: could not create any TCP/IP sockets
|
||||
```
|
||||
|
||||
## Why per-job override doesn't work
|
||||
|
||||
The natural fix — per-job `container.network` override — is silently
|
||||
ignored by act_runner. The runner log emits:
|
||||
|
||||
```
|
||||
--network and --net in the options will be ignored.
|
||||
```
|
||||
|
||||
This is a documented act_runner constraint: container network is a
|
||||
runner-wide setting, not per-job. Source: gitea/act_runner config docs
|
||||
+ vegardit/docker-gitea-act-runner issue #7.
|
||||
|
||||
Flipping the global `container.network` to `bridge` would break every
|
||||
other workflow in the repo (cache server discovery,
|
||||
`molecule-core-net` peer access during integration tests, etc.) —
|
||||
unacceptable blast radius for a per-test bug.
|
||||
|
||||
## Fix shape
|
||||
|
||||
`handlers-postgres-integration.yml` no longer uses `services: postgres:`.
|
||||
It launches a sibling postgres container manually on the existing
|
||||
`molecule-core-net` bridge network with a per-run unique name:
|
||||
|
||||
```yaml
|
||||
env:
|
||||
PG_NAME: pg-handlers-${{ github.run_id }}-${{ github.run_attempt }}
|
||||
PG_NETWORK: molecule-core-net
|
||||
|
||||
steps:
|
||||
- name: Start sibling Postgres on bridge network
|
||||
run: |
|
||||
docker run -d --name "${PG_NAME}" --network "${PG_NETWORK}" \
|
||||
...
|
||||
postgres:15-alpine
|
||||
PG_HOST=$(docker inspect "${PG_NAME}" \
|
||||
--format "{{(index .NetworkSettings.Networks \"${PG_NETWORK}\").IPAddress}}")
|
||||
echo "PG_HOST=${PG_HOST}" >> "$GITHUB_ENV"
|
||||
|
||||
# … migrations + tests use ${PG_HOST}, not 127.0.0.1 …
|
||||
|
||||
- if: always() && …
|
||||
name: Stop sibling Postgres
|
||||
run: docker rm -f "${PG_NAME}" || true
|
||||
```
|
||||
|
||||
The host-net job container can reach a bridge-net container via the
|
||||
bridge IP directly (verified manually, 2026-05-08). Two parallel runs
|
||||
use different names + different bridge IPs — no collision.
|
||||
|
||||
## Future-proofing
|
||||
|
||||
Other workflows that hit the same shape (any `services:` with a
|
||||
fixed-port image) will exhibit the same failure mode under
|
||||
host-network runner config. Translate using this same pattern:
|
||||
|
||||
1. Drop the `services:` block.
|
||||
2. Use `${{ github.run_id }}-${{ github.run_attempt }}` for unique
|
||||
container name.
|
||||
3. Launch on `molecule-core-net` (already trusted bridge in
|
||||
`docker-compose.infra.yml`).
|
||||
4. Read back the bridge IP via `docker inspect` and export as a step env.
|
||||
5. `if: always()` cleanup step at the end.
|
||||
|
||||
If the count of such workflows grows, factor into a composite action
|
||||
(`./.github/actions/sibling-postgres`) so the substrate logic lives
|
||||
in one place.
|
||||
|
||||
## Related
|
||||
|
||||
- Issue #88 (closed by #92): localhost → 127.0.0.1 fix that unmasked
|
||||
this collision; the IPv6 fix is correct, port collision is the new
|
||||
layer.
|
||||
- Issue #94 created `molecule-core-net` + `alpine:latest` as
|
||||
prereqs.
|
||||
- Saved memory `feedback_act_runner_github_server_url` documents
|
||||
another act_runner-vs-GHA divergence (server URL).
|
||||
@ -198,7 +198,7 @@ Lighthouse audit against staging.yourapp.com:
|
||||
FCP: 2.4s | LCP: 5.2s | CLS: 0.18 | TBT: 620ms
|
||||
|
||||
Performance regression detected — opening GitHub issue.
|
||||
Issue: https://git.moleculesai.app/molecule-ai/molecule-core/issues/1527
|
||||
Issue: https://github.com/Molecule-AI/molecule-core/issues/1527
|
||||
Label: performance-regression | Assignees: @your-team
|
||||
```
|
||||
|
||||
|
||||
@ -85,8 +85,8 @@ Fly Machines start in milliseconds and run in 35+ regions. Provisioning agent wo
|
||||
|
||||
## Related
|
||||
|
||||
- PR #501: [feat(platform): Fly Machines provisioner](https://git.moleculesai.app/molecule-ai/molecule-core/pull/501)
|
||||
- PR #481: [feat(ci): deploy to Fly after image push](https://git.moleculesai.app/molecule-ai/molecule-core/pull/481)
|
||||
- PR #501: [feat(platform): Fly Machines provisioner](https://github.com/Molecule-AI/molecule-core/pull/501)
|
||||
- PR #481: [feat(ci): deploy to Fly after image push](https://github.com/Molecule-AI/molecule-core/pull/481)
|
||||
- [Fly Machines API docs](https://fly.io/docs/machines/api/)
|
||||
- [Platform API reference](../api-reference.md)
|
||||
- Issue [#525](https://git.moleculesai.app/molecule-ai/molecule-core/issues/525)
|
||||
- Issue [#525](https://github.com/Molecule-AI/molecule-core/issues/525)
|
||||
|
||||
@ -61,6 +61,6 @@ The real power surfaces when you mix runtimes on the same Molecule AI tenant. Yo
|
||||
|
||||
## Related
|
||||
|
||||
- PR #379: [feat(adapters): add gemini-cli runtime adapter](https://git.moleculesai.app/molecule-ai/molecule-core/pull/379)
|
||||
- PR #379: [feat(adapters): add gemini-cli runtime adapter](https://github.com/Molecule-AI/molecule-core/pull/379)
|
||||
- [Multi-provider Hermes docs](../architecture/hermes.md)
|
||||
- [Workspace runtimes reference](../reference/runtimes.md)
|
||||
|
||||
@ -68,7 +68,7 @@ ADK workspaces participate in the same A2A network as Claude Code, Gemini CLI, H
|
||||
|
||||
## Related
|
||||
|
||||
- PR #550: [feat(adapters): add google-adk runtime adapter](https://git.moleculesai.app/molecule-ai/molecule-core/pull/550)
|
||||
- PR #550: [feat(adapters): add google-adk runtime adapter](https://github.com/Molecule-AI/molecule-core/pull/550)
|
||||
- [Google ADK (adk-python)](https://github.com/google/adk-python)
|
||||
- [Gemini CLI runtime tutorial](./gemini-cli-runtime.md)
|
||||
- [Platform API reference](../api-reference.md)
|
||||
|
||||
@ -176,9 +176,9 @@ What is on the roadmap for Phase 2d (not yet shipped):
|
||||
|
||||
## Related
|
||||
|
||||
- PR #240: [Phase 2a — native Anthropic dispatch](https://git.moleculesai.app/molecule-ai/molecule-core/pull/240)
|
||||
- PR #255: [Phase 2b — native Gemini dispatch](https://git.moleculesai.app/molecule-ai/molecule-core/pull/255)
|
||||
- PR #267: [Phase 2c — multi-turn history on all paths](https://git.moleculesai.app/molecule-ai/molecule-core/pull/267)
|
||||
- PR #240: [Phase 2a — native Anthropic dispatch](https://github.com/Molecule-AI/molecule-core/pull/240)
|
||||
- PR #255: [Phase 2b — native Gemini dispatch](https://github.com/Molecule-AI/molecule-core/pull/255)
|
||||
- PR #267: [Phase 2c — multi-turn history on all paths](https://github.com/Molecule-AI/molecule-core/pull/267)
|
||||
- [Hermes adapter design](../adapters/hermes-adapter-design.md)
|
||||
- [Platform API reference](../api-reference.md)
|
||||
- Issue [#513](https://git.moleculesai.app/molecule-ai/molecule-core/issues/513)
|
||||
- Issue [#513](https://github.com/Molecule-AI/molecule-core/issues/513)
|
||||
|
||||
@ -90,6 +90,6 @@ Molecule AI canvas without code changes.
|
||||
|
||||
## Related
|
||||
|
||||
- PR #480: [feat(channels): Lark / Feishu channel adapter](https://git.moleculesai.app/molecule-ai/molecule-core/pull/480)
|
||||
- PR #480: [feat(channels): Lark / Feishu channel adapter](https://github.com/Molecule-AI/molecule-core/pull/480)
|
||||
- [Social channels architecture](../agent-runtime/social-channels.md)
|
||||
- [Channel adapter reference](../api-reference.md#channels)
|
||||
@ -98,14 +98,14 @@ Each of the 8 adapter template repos contains:
|
||||
|
||||
| Adapter | Repo |
|
||||
|---------|------|
|
||||
| claude-code | https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-claude-code |
|
||||
| langgraph | https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-langgraph |
|
||||
| crewai | https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-crewai |
|
||||
| autogen | https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-autogen |
|
||||
| deepagents | https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-deepagents |
|
||||
| hermes | https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-hermes |
|
||||
| gemini-cli | https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-gemini-cli |
|
||||
| openclaw | https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-openclaw |
|
||||
| claude-code | https://github.com/Molecule-AI/molecule-ai-workspace-template-claude-code |
|
||||
| langgraph | https://github.com/Molecule-AI/molecule-ai-workspace-template-langgraph |
|
||||
| crewai | https://github.com/Molecule-AI/molecule-ai-workspace-template-crewai |
|
||||
| autogen | https://github.com/Molecule-AI/molecule-ai-workspace-template-autogen |
|
||||
| deepagents | https://github.com/Molecule-AI/molecule-ai-workspace-template-deepagents |
|
||||
| hermes | https://github.com/Molecule-AI/molecule-ai-workspace-template-hermes |
|
||||
| gemini-cli | https://github.com/Molecule-AI/molecule-ai-workspace-template-gemini-cli |
|
||||
| openclaw | https://github.com/Molecule-AI/molecule-ai-workspace-template-openclaw |
|
||||
|
||||
## Adapter discovery (ADAPTER_MODULE)
|
||||
|
||||
@ -244,7 +244,7 @@ correctness before pushing a `runtime-v*` tag.
|
||||
## Writing a new adapter
|
||||
|
||||
Use the GitHub template repo
|
||||
[`molecule-ai/molecule-ai-workspace-template-starter`](https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-starter) (note: the starter repo did not survive the 2026-05-06 GitHub-org-suspension migration; recreation tracked at internal#41)
|
||||
[`Molecule-AI/molecule-ai-workspace-template-starter`](https://github.com/Molecule-AI/molecule-ai-workspace-template-starter)
|
||||
— it ships with the canonical Dockerfile + adapter.py skeleton + config.yaml
|
||||
schema + the `repository_dispatch: [runtime-published]` cascade receiver
|
||||
already wired up. No follow-up setup PR required.
|
||||
@ -256,7 +256,7 @@ gh repo create Molecule-AI/molecule-ai-workspace-template-<runtime> \
|
||||
--public \
|
||||
--description "Molecule AI workspace template: <runtime>"
|
||||
|
||||
git clone https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-<runtime>.git
|
||||
git clone https://github.com/Molecule-AI/molecule-ai-workspace-template-<runtime>
|
||||
cd molecule-ai-workspace-template-<runtime>
|
||||
```
|
||||
|
||||
@ -286,7 +286,7 @@ After `git push`:
|
||||
If the canonical shape changes (e.g. `config.yaml` schema gets a new field,
|
||||
the `BaseAdapter` interface adds a method, the reusable CI workflow
|
||||
signature changes), update the
|
||||
[starter](https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-template-starter) (recreation pending — see note above)
|
||||
[starter](https://github.com/Molecule-AI/molecule-ai-workspace-template-starter)
|
||||
**first**. Existing templates can either migrate at their own pace or be
|
||||
touched in a coordinated cleanup PR. Either way, future templates pick up
|
||||
the new shape from day one.
|
||||
|
||||
@ -5,7 +5,7 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
ROOT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
|
||||
echo "==> Ensuring shared docker network exists..."
|
||||
docker network create molecule-core-net 2>/dev/null || true
|
||||
docker network create molecule-monorepo-net 2>/dev/null || true
|
||||
|
||||
# Populate the template / plugin registry.
|
||||
# workspace-configs-templates/, org-templates/, and plugins/ are intentionally
|
||||
|
||||
@ -1,46 +1,47 @@
|
||||
{
|
||||
"_comment": "OSS surface registry — every repo listed here MUST be public on git.moleculesai.app. Layer-3 customer/private templates are NOT registered here; they are handled at provision-time via the per-tenant credential resolver (see internal#102 RFC). 'main' refs are pinned to tags before broad rollout.",
|
||||
"_comment": "Pin refs to release tags for reproducible builds. 'main' is OK while all repos are internal.",
|
||||
"version": 1,
|
||||
"plugins": [
|
||||
{"name": "browser-automation", "repo": "molecule-ai/molecule-ai-plugin-browser-automation", "ref": "main"},
|
||||
{"name": "ecc", "repo": "molecule-ai/molecule-ai-plugin-ecc", "ref": "main"},
|
||||
{"name": "gh-identity", "repo": "molecule-ai/molecule-ai-plugin-gh-identity", "ref": "main"},
|
||||
{"name": "molecule-audit", "repo": "molecule-ai/molecule-ai-plugin-molecule-audit", "ref": "main"},
|
||||
{"name": "molecule-audit-trail", "repo": "molecule-ai/molecule-ai-plugin-molecule-audit-trail", "ref": "main"},
|
||||
{"name": "molecule-careful-bash", "repo": "molecule-ai/molecule-ai-plugin-molecule-careful-bash", "ref": "main"},
|
||||
{"name": "molecule-compliance", "repo": "molecule-ai/molecule-ai-plugin-molecule-compliance", "ref": "main"},
|
||||
{"name": "molecule-dev", "repo": "molecule-ai/molecule-ai-plugin-molecule-dev", "ref": "main"},
|
||||
{"name": "molecule-freeze-scope", "repo": "molecule-ai/molecule-ai-plugin-molecule-freeze-scope", "ref": "main"},
|
||||
{"name": "molecule-hitl", "repo": "molecule-ai/molecule-ai-plugin-molecule-hitl", "ref": "main"},
|
||||
{"name": "molecule-prompt-watchdog", "repo": "molecule-ai/molecule-ai-plugin-molecule-prompt-watchdog", "ref": "main"},
|
||||
{"name": "molecule-security-scan", "repo": "molecule-ai/molecule-ai-plugin-molecule-security-scan", "ref": "main"},
|
||||
{"name": "molecule-session-context", "repo": "molecule-ai/molecule-ai-plugin-molecule-session-context", "ref": "main"},
|
||||
{"name": "molecule-skill-code-review", "repo": "molecule-ai/molecule-ai-plugin-molecule-skill-code-review", "ref": "main"},
|
||||
{"name": "molecule-skill-cron-learnings", "repo": "molecule-ai/molecule-ai-plugin-molecule-skill-cron-learnings", "ref": "main"},
|
||||
{"name": "molecule-skill-cross-vendor-review", "repo": "molecule-ai/molecule-ai-plugin-molecule-skill-cross-vendor-review", "ref": "main"},
|
||||
{"name": "molecule-skill-llm-judge", "repo": "molecule-ai/molecule-ai-plugin-molecule-skill-llm-judge", "ref": "main"},
|
||||
{"name": "molecule-skill-update-docs", "repo": "molecule-ai/molecule-ai-plugin-molecule-skill-update-docs", "ref": "main"},
|
||||
{"name": "molecule-workflow-retro", "repo": "molecule-ai/molecule-ai-plugin-molecule-workflow-retro", "ref": "main"},
|
||||
{"name": "molecule-workflow-triage", "repo": "molecule-ai/molecule-ai-plugin-molecule-workflow-triage", "ref": "main"},
|
||||
{"name": "superpowers", "repo": "molecule-ai/molecule-ai-plugin-superpowers", "ref": "main"}
|
||||
{"name": "browser-automation", "repo": "Molecule-AI/molecule-ai-plugin-browser-automation", "ref": "main"},
|
||||
{"name": "ecc", "repo": "Molecule-AI/molecule-ai-plugin-ecc", "ref": "main"},
|
||||
{"name": "gh-identity", "repo": "Molecule-AI/molecule-ai-plugin-gh-identity", "ref": "main"},
|
||||
{"name": "molecule-audit", "repo": "Molecule-AI/molecule-ai-plugin-molecule-audit", "ref": "main"},
|
||||
{"name": "molecule-audit-trail", "repo": "Molecule-AI/molecule-ai-plugin-molecule-audit-trail", "ref": "main"},
|
||||
{"name": "molecule-careful-bash", "repo": "Molecule-AI/molecule-ai-plugin-molecule-careful-bash", "ref": "main"},
|
||||
{"name": "molecule-compliance", "repo": "Molecule-AI/molecule-ai-plugin-molecule-compliance", "ref": "main"},
|
||||
{"name": "molecule-dev", "repo": "Molecule-AI/molecule-ai-plugin-molecule-dev", "ref": "main"},
|
||||
{"name": "molecule-freeze-scope", "repo": "Molecule-AI/molecule-ai-plugin-molecule-freeze-scope", "ref": "main"},
|
||||
{"name": "molecule-hitl", "repo": "Molecule-AI/molecule-ai-plugin-molecule-hitl", "ref": "main"},
|
||||
{"name": "molecule-prompt-watchdog", "repo": "Molecule-AI/molecule-ai-plugin-molecule-prompt-watchdog", "ref": "main"},
|
||||
{"name": "molecule-security-scan", "repo": "Molecule-AI/molecule-ai-plugin-molecule-security-scan", "ref": "main"},
|
||||
{"name": "molecule-session-context", "repo": "Molecule-AI/molecule-ai-plugin-molecule-session-context", "ref": "main"},
|
||||
{"name": "molecule-skill-code-review", "repo": "Molecule-AI/molecule-ai-plugin-molecule-skill-code-review", "ref": "main"},
|
||||
{"name": "molecule-skill-cron-learnings", "repo": "Molecule-AI/molecule-ai-plugin-molecule-skill-cron-learnings", "ref": "main"},
|
||||
{"name": "molecule-skill-cross-vendor-review", "repo": "Molecule-AI/molecule-ai-plugin-molecule-skill-cross-vendor-review", "ref": "main"},
|
||||
{"name": "molecule-skill-llm-judge", "repo": "Molecule-AI/molecule-ai-plugin-molecule-skill-llm-judge", "ref": "main"},
|
||||
{"name": "molecule-skill-update-docs", "repo": "Molecule-AI/molecule-ai-plugin-molecule-skill-update-docs", "ref": "main"},
|
||||
{"name": "molecule-workflow-retro", "repo": "Molecule-AI/molecule-ai-plugin-molecule-workflow-retro", "ref": "main"},
|
||||
{"name": "molecule-workflow-triage", "repo": "Molecule-AI/molecule-ai-plugin-molecule-workflow-triage", "ref": "main"},
|
||||
{"name": "superpowers", "repo": "Molecule-AI/molecule-ai-plugin-superpowers", "ref": "main"}
|
||||
],
|
||||
"workspace_templates": [
|
||||
{"name": "claude-code-default", "repo": "molecule-ai/molecule-ai-workspace-template-claude-code", "ref": "main"},
|
||||
{"name": "hermes", "repo": "molecule-ai/molecule-ai-workspace-template-hermes", "ref": "main"},
|
||||
{"name": "openclaw", "repo": "molecule-ai/molecule-ai-workspace-template-openclaw", "ref": "main"},
|
||||
{"name": "codex", "repo": "molecule-ai/molecule-ai-workspace-template-codex", "ref": "main"},
|
||||
{"name": "langgraph", "repo": "molecule-ai/molecule-ai-workspace-template-langgraph", "ref": "main"},
|
||||
{"name": "crewai", "repo": "molecule-ai/molecule-ai-workspace-template-crewai", "ref": "main"},
|
||||
{"name": "autogen", "repo": "molecule-ai/molecule-ai-workspace-template-autogen", "ref": "main"},
|
||||
{"name": "deepagents", "repo": "molecule-ai/molecule-ai-workspace-template-deepagents", "ref": "main"},
|
||||
{"name": "gemini-cli", "repo": "molecule-ai/molecule-ai-workspace-template-gemini-cli", "ref": "main"}
|
||||
{"name": "claude-code-default", "repo": "Molecule-AI/molecule-ai-workspace-template-claude-code", "ref": "main"},
|
||||
{"name": "hermes", "repo": "Molecule-AI/molecule-ai-workspace-template-hermes", "ref": "main"},
|
||||
{"name": "openclaw", "repo": "Molecule-AI/molecule-ai-workspace-template-openclaw", "ref": "main"},
|
||||
{"name": "codex", "repo": "Molecule-AI/molecule-ai-workspace-template-codex", "ref": "main"},
|
||||
{"name": "langgraph", "repo": "Molecule-AI/molecule-ai-workspace-template-langgraph", "ref": "main"},
|
||||
{"name": "crewai", "repo": "Molecule-AI/molecule-ai-workspace-template-crewai", "ref": "main"},
|
||||
{"name": "autogen", "repo": "Molecule-AI/molecule-ai-workspace-template-autogen", "ref": "main"},
|
||||
{"name": "deepagents", "repo": "Molecule-AI/molecule-ai-workspace-template-deepagents", "ref": "main"},
|
||||
{"name": "gemini-cli", "repo": "Molecule-AI/molecule-ai-workspace-template-gemini-cli", "ref": "main"}
|
||||
],
|
||||
"org_templates": [
|
||||
{"name": "molecule-dev", "repo": "molecule-ai/molecule-ai-org-template-molecule-dev", "ref": "main"},
|
||||
{"name": "free-beats-all", "repo": "molecule-ai/molecule-ai-org-template-free-beats-all", "ref": "main"},
|
||||
{"name": "medo-smoke", "repo": "molecule-ai/molecule-ai-org-template-medo-smoke", "ref": "main"},
|
||||
{"name": "molecule-worker-gemini", "repo": "molecule-ai/molecule-ai-org-template-molecule-worker-gemini", "ref": "main"},
|
||||
{"name": "ux-ab-lab", "repo": "molecule-ai/molecule-ai-org-template-ux-ab-lab", "ref": "main"},
|
||||
{"name": "mock-bigorg", "repo": "molecule-ai/molecule-ai-org-template-mock-bigorg", "ref": "main"}
|
||||
{"name": "molecule-dev", "repo": "Molecule-AI/molecule-ai-org-template-molecule-dev", "ref": "main"},
|
||||
{"name": "free-beats-all", "repo": "Molecule-AI/molecule-ai-org-template-free-beats-all", "ref": "main"},
|
||||
{"name": "medo-smoke", "repo": "Molecule-AI/molecule-ai-org-template-medo-smoke", "ref": "main"},
|
||||
{"name": "molecule-worker-gemini", "repo": "Molecule-AI/molecule-ai-org-template-molecule-worker-gemini", "ref": "main"},
|
||||
{"name": "reno-stars", "repo": "Molecule-AI/molecule-ai-org-template-reno-stars", "ref": "main"},
|
||||
{"name": "ux-ab-lab", "repo": "Molecule-AI/molecule-ai-org-template-ux-ab-lab", "ref": "main"},
|
||||
{"name": "mock-bigorg", "repo": "Molecule-AI/molecule-ai-org-template-mock-bigorg", "ref": "main"}
|
||||
]
|
||||
}
|
||||
|
||||
@ -11,7 +11,7 @@ There are three related scripts; pick the right one:
|
||||
|---|---|---|
|
||||
| `measure-coordinator-task-bounds.sh` | **Canonical** v1 harness for the RFC #2251 / Issue 4 reproduction. Provisions a PM coordinator + Researcher child via `claude-code-default` + `langgraph` templates, sends a synthesis-heavy A2A kickoff, observes elapsed time + activity trace. | OSS-shape platform — localhost or any `/workspaces`-shaped endpoint. Has tenant/admin-token guards for non-localhost runs. |
|
||||
| `measure-coordinator-task-bounds-runner.sh` | Generalised runner for the same measurement contract but with **arbitrary template + secret + model combinations** (Hermes/MiniMax, etc.). Useful for cross-runtime variants without modifying the canonical harness. | Same as above (local or SaaS via `MODE=saas`). |
|
||||
| `measure-coordinator-task-bounds.sh` (in [molecule-controlplane](https://git.moleculesai.app/molecule-ai/molecule-controlplane)) | **Production-shape** variant that bootstraps a real staging tenant via `POST /cp/admin/orgs`, then runs the same measurement against `<slug>.staging.moleculesai.app`. | Staging controlplane only — refuses to run against production. |
|
||||
| `measure-coordinator-task-bounds.sh` (in [molecule-controlplane](https://github.com/Molecule-AI/molecule-controlplane)) | **Production-shape** variant that bootstraps a real staging tenant via `POST /cp/admin/orgs`, then runs the same measurement against `<slug>.staging.moleculesai.app`. | Staging controlplane only — refuses to run against production. |
|
||||
|
||||
See `reference_harness_pair_pattern` (auto-memory) for when to use which
|
||||
and the cross-repo design rationale.
|
||||
|
||||
@ -278,7 +278,7 @@ include = ["molecule_runtime*"]
|
||||
README_TEMPLATE = """\
|
||||
# molecule-ai-workspace-runtime
|
||||
|
||||
Shared workspace runtime for [Molecule AI](https://git.moleculesai.app/molecule-ai/molecule-core)
|
||||
Shared workspace runtime for [Molecule AI](https://github.com/Molecule-AI/molecule-core)
|
||||
agent adapters. Installed by every workspace template image
|
||||
(`workspace-template-claude-code`, `-langgraph`, `-hermes`, etc.) to provide
|
||||
A2A delegation, heartbeat, memory, plugin loading, and skill management.
|
||||
@ -376,7 +376,7 @@ hold:
|
||||
non-plugin-sourced server, which Claude Code rejects with
|
||||
`channel_enable requires a marketplace plugin`. Until the
|
||||
official `moleculesai/claude-code-plugin` marketplace lands
|
||||
(tracking [#2936](https://git.moleculesai.app/molecule-ai/molecule-core/issues/2936)),
|
||||
(tracking [#2936](https://github.com/Molecule-AI/molecule-core/issues/2936)),
|
||||
operators who want push must scaffold their own local marketplace
|
||||
under
|
||||
`~/.claude/marketplaces/molecule-local/` containing a
|
||||
@ -389,14 +389,14 @@ hold:
|
||||
Symptom of any condition failing: messages arrive but only via the
|
||||
poll path (every ~1–60s), not real-time. There's currently no
|
||||
diagnostic surfaced — `molecule-mcp doctor` (tracking
|
||||
[#2937](https://git.moleculesai.app/molecule-ai/molecule-core/issues/2937)) is
|
||||
[#2937](https://github.com/Molecule-AI/molecule-core/issues/2937)) is
|
||||
planned.
|
||||
|
||||
If you don't need real-time push, the default poll path works
|
||||
universally with no extra setup; both modes converge on the same
|
||||
`inbox_pop` ack so messages never duplicate.
|
||||
|
||||
See [`docs/workspace-runtime-package.md`](https://git.moleculesai.app/molecule-ai/molecule-core/src/branch/main/docs/workspace-runtime-package.md)
|
||||
See [`docs/workspace-runtime-package.md`](https://github.com/Molecule-AI/molecule-core/blob/main/docs/workspace-runtime-package.md)
|
||||
for the publish flow and architecture.
|
||||
"""
|
||||
|
||||
|
||||
@ -8,24 +8,27 @@
|
||||
# Requires: git, jq (lighter than python3 — ~2MB vs ~50MB in Alpine)
|
||||
#
|
||||
# Auth (optional):
|
||||
# Post-2026-05-08 (#192): every repo in manifest.json is public on
|
||||
# git.moleculesai.app. Anonymous clone works for the entire registered
|
||||
# set. The OSS-surface contract is recorded in manifest.json's _comment
|
||||
# — Layer-3 customer/private templates (e.g. reno-stars) are NOT in the
|
||||
# manifest; they are handled at provision-time via the per-tenant
|
||||
# credential resolver (internal#102 RFC).
|
||||
# When MOLECULE_GITEA_TOKEN is set, embed it as the basic-auth password so
|
||||
# private Gitea repos clone successfully. When unset, clone anonymously
|
||||
# (works only for repos that are public on git.moleculesai.app).
|
||||
#
|
||||
# MOLECULE_GITEA_TOKEN is therefore optional today. Kept supported for
|
||||
# two reasons: (a) historical CI configs that still inject
|
||||
# AUTO_SYNC_TOKEN remain harmless, (b) reserved for the case where a
|
||||
# private internal-only template is later registered via a ci-readonly
|
||||
# team grant — review must explicitly sign off on that, since it
|
||||
# violates the public-OSS-surface contract.
|
||||
# This is the path the publish-workspace-server-image.yml workflow uses:
|
||||
# it injects AUTO_SYNC_TOKEN (devops-engineer persona PAT, repo:read on
|
||||
# the molecule-ai org) so the in-CI pre-clone step succeeds for ALL
|
||||
# manifest entries — including the 5 private workspace-template-* repos
|
||||
# (codex, crewai, deepagents, gemini-cli, langgraph) and all 7
|
||||
# org-template-* repos.
|
||||
#
|
||||
# The token (when set) never enters the Docker image: this script runs
|
||||
# in the trusted CI context BEFORE `docker buildx build`, populates
|
||||
# The token never enters the Docker image: this script runs in the
|
||||
# trusted CI context BEFORE `docker buildx build`, populates
|
||||
# .tenant-bundle-deps/, then `Dockerfile.tenant` COPYs from there with
|
||||
# the .git directories already stripped (see line ~67 below).
|
||||
#
|
||||
# For backward compatibility — and so a fresh clone works without
|
||||
# secrets when (eventually) the workspace-template-* repos flip public —
|
||||
# the unset path remains a plain anonymous HTTPS clone. That path will
|
||||
# FAIL with "could not read Username" on private repos today; CI MUST
|
||||
# set MOLECULE_GITEA_TOKEN.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
@ -65,19 +68,22 @@ clone_category() {
|
||||
continue
|
||||
fi
|
||||
|
||||
# Post-2026-05-06 GitHub-org-suspension: clone from Gitea instead.
|
||||
# manifest.json paths still read "Molecule-AI/..." (the historic
|
||||
# github.com slug); Gitea lowercases the org part to "molecule-ai/".
|
||||
# Lowercase the org segment on the fly so we don't need to rewrite
|
||||
# every manifest entry.
|
||||
repo_gitea="$(echo "$repo" | awk -F/ '{ printf "%s", tolower($1); for (i=2; i<=NF; i++) printf "/%s", $i; print "" }')"
|
||||
|
||||
# Build the clone URL. When MOLECULE_GITEA_TOKEN is set (CI path)
|
||||
# embed it as basic-auth so private repos succeed. The username
|
||||
# part ("oauth2") is conventional and ignored by Gitea — only the
|
||||
# token-as-password is verified.
|
||||
#
|
||||
# manifest.json was migrated to lowercase org slugs on
|
||||
# 2026-05-07 (post-suspension reconciliation), so we use $repo
|
||||
# verbatim — no on-the-fly tolower transform needed.
|
||||
if [ -n "${MOLECULE_GITEA_TOKEN:-}" ]; then
|
||||
clone_url="https://oauth2:${MOLECULE_GITEA_TOKEN}@git.moleculesai.app/${repo}.git"
|
||||
display_url="https://oauth2:***@git.moleculesai.app/${repo}.git"
|
||||
clone_url="https://oauth2:${MOLECULE_GITEA_TOKEN}@git.moleculesai.app/${repo_gitea}.git"
|
||||
display_url="https://oauth2:***@git.moleculesai.app/${repo_gitea}.git"
|
||||
else
|
||||
clone_url="https://git.moleculesai.app/${repo}.git"
|
||||
clone_url="https://git.moleculesai.app/${repo_gitea}.git"
|
||||
display_url="$clone_url"
|
||||
fi
|
||||
|
||||
|
||||
@ -10,11 +10,11 @@
|
||||
# → PyPI auto-bumps molecule-ai-workspace-runtime patch version
|
||||
# → repository_dispatch fans out to 8 workspace-template-* repos
|
||||
# → each template repo rebuilds and re-tags
|
||||
# 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/workspace-template-<runtime>:latest
|
||||
# ghcr.io/molecule-ai/workspace-template-<runtime>:latest
|
||||
#
|
||||
# PATH 2: any merge to a workspace-template-* repo's main branch
|
||||
# → that repo's publish-image.yml fires
|
||||
# → 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/workspace-template-<runtime>:latest
|
||||
# → ghcr.io/molecule-ai/workspace-template-<runtime>:latest
|
||||
# gets re-tagged
|
||||
#
|
||||
# provisioner.go:296 RuntimeImages[runtime] reads `:latest` at every
|
||||
|
||||
@ -24,7 +24,7 @@ echo "=== NUKE ==="
|
||||
docker compose -f "$ROOT/docker-compose.yml" down -v 2>/dev/null || true
|
||||
docker ps -a --format "{{.Names}}" | grep "^ws-" | xargs -r docker rm -f 2>/dev/null || true
|
||||
docker volume ls --format "{{.Name}}" | grep "^ws-" | xargs -r docker volume rm 2>/dev/null || true
|
||||
docker network rm molecule-core-net 2>/dev/null || true
|
||||
docker network rm molecule-monorepo-net 2>/dev/null || true
|
||||
echo " cleaned"
|
||||
|
||||
echo "=== POPULATE MANIFEST DIRS ==="
|
||||
|
||||
@ -51,7 +51,7 @@ log "pulling latest images for: ${RUNTIMES[*]}"
|
||||
PULLED=()
|
||||
FAILED=()
|
||||
for rt in "${RUNTIMES[@]}"; do
|
||||
IMG="153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/workspace-template-$rt:latest"
|
||||
IMG="ghcr.io/molecule-ai/workspace-template-$rt:latest"
|
||||
if docker pull "$IMG" >/dev/null 2>&1; then
|
||||
log " ✓ $rt"
|
||||
PULLED+=("$rt")
|
||||
|
||||
@ -1,10 +1,9 @@
|
||||
#!/bin/bash
|
||||
# rollback-latest.sh — moves the :latest tag on the platform image
|
||||
# (and the matching tenant image) on AWS ECR back to a prior
|
||||
# :staging-<sha> digest without rebuilding anything. Prod tenants
|
||||
# auto-pull :latest every 5 min, so this is the fast path when a
|
||||
# canary-verified image turns out to have a runtime regression that
|
||||
# canary didn't catch.
|
||||
# rollback-latest.sh — moves the :latest tag on ghcr.io/molecule-ai/platform
|
||||
# (and the matching tenant image) back to a prior :staging-<sha> digest
|
||||
# without rebuilding anything. Prod tenants auto-pull :latest every 5
|
||||
# min, so this is the fast path when a canary-verified image turns out
|
||||
# to have a runtime regression that canary didn't catch.
|
||||
#
|
||||
# Usage:
|
||||
# scripts/rollback-latest.sh <sha>
|
||||
@ -13,14 +12,12 @@
|
||||
# Prereqs:
|
||||
# - crane on $PATH (brew install crane OR download from
|
||||
# https://github.com/google/go-containerregistry/releases)
|
||||
# - aws CLI authenticated for region us-east-2 with ECR pull/push
|
||||
# access to the molecule-ai/platform + platform-tenant repositories.
|
||||
# `aws sts get-caller-identity` should succeed.
|
||||
# - GHCR token exported as GITHUB_TOKEN with write:packages scope
|
||||
#
|
||||
# What it does (per image — platform + tenant):
|
||||
# crane digest <ecr>:<sha> # verify the target sha exists
|
||||
# crane tag <ecr>:<sha> latest # retag remotely, single API call
|
||||
# crane digest <ecr>:latest # confirm the move
|
||||
# crane digest ghcr.io/…:<sha> # verify the target sha exists
|
||||
# crane tag ghcr.io/…:<sha> latest # retag remotely, single API call
|
||||
# crane digest ghcr.io/…:latest # confirm the move
|
||||
#
|
||||
# Exit codes: 0 = both retagged, 1 = tag missing / crane error, 2 = bad args.
|
||||
|
||||
@ -33,23 +30,21 @@ if [ "${1:-}" = "" ]; then
|
||||
fi
|
||||
|
||||
TARGET_SHA="$1"
|
||||
ECR_HOST=153263036946.dkr.ecr.us-east-2.amazonaws.com
|
||||
PLATFORM=$ECR_HOST/molecule-ai/platform
|
||||
TENANT=$ECR_HOST/molecule-ai/platform-tenant
|
||||
PLATFORM=ghcr.io/molecule-ai/platform
|
||||
TENANT=ghcr.io/molecule-ai/platform-tenant
|
||||
|
||||
if ! command -v crane >/dev/null; then
|
||||
echo "ERROR: crane not installed. brew install crane" >&2
|
||||
exit 1
|
||||
fi
|
||||
if ! command -v aws >/dev/null; then
|
||||
echo "ERROR: aws CLI not installed. brew install awscli" >&2
|
||||
if [ -z "${GITHUB_TOKEN:-}" ]; then
|
||||
echo "ERROR: GITHUB_TOKEN unset. export it with write:packages scope." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Log in once. ECR auth is via short-lived password from `aws ecr
|
||||
# get-login-password`. crane stores creds in a config file keyed by
|
||||
# registry; re-running is cheap.
|
||||
aws ecr get-login-password --region us-east-2 | crane auth login "$ECR_HOST" -u AWS --password-stdin >/dev/null
|
||||
# Log in once. crane stores creds in a config file keyed by registry;
|
||||
# re-running is cheap.
|
||||
printf '%s\n' "$GITHUB_TOKEN" | crane auth login ghcr.io -u "${GITHUB_ACTOR:-$(whoami)}" --password-stdin >/dev/null
|
||||
|
||||
roll() {
|
||||
local image="$1"
|
||||
|
||||
@ -105,5 +105,5 @@ Hard per-workflow timeouts (15–40 min) cap runaway cost. Three teardown layers
|
||||
|
||||
## Known gaps (tracked elsewhere)
|
||||
|
||||
- [#1369](https://git.moleculesai.app/molecule-ai/molecule-core/issues/1369): SaaS canvas Files / Terminal / Peers tabs — architecturally broken; whitelisted in the spec
|
||||
- [#1369](https://github.com/Molecule-AI/molecule-core/issues/1369): SaaS canvas Files / Terminal / Peers tabs — architecturally broken; whitelisted in the spec
|
||||
- LLM-driven delegation (autonomous `delegate_task` tool use) — probabilistic, not in v1; proxy mechanics covered
|
||||
|
||||
@ -1,7 +1,5 @@
|
||||
# Production-shape local harness
|
||||
|
||||
<!-- Retrigger Harness Replays after Class G #168 + clone-manifest fix (#42). -->
|
||||
|
||||
The harness brings up the SaaS tenant topology on localhost using the
|
||||
same `Dockerfile.tenant` image that ships to production. Tests target
|
||||
the cf-proxy on `http://localhost:8080` and pass the tenant identity
|
||||
|
||||
@ -1,14 +0,0 @@
|
||||
# cf-proxy harness image — nginx + the harness's tenant-routing config baked
|
||||
# in at build time.
|
||||
#
|
||||
# Why bake (not bind-mount): on Gitea Actions / act_runner, the runner is a
|
||||
# container talking to the OUTER docker daemon over the host socket; runc
|
||||
# resolves bind-mount source paths on the outer host filesystem, where the
|
||||
# repo at `/workspace/.../tests/harness/cf-proxy/nginx.conf` is invisible.
|
||||
# Compose `configs:` (with `file:`) falls back to bind mounts when swarm is
|
||||
# not active, so it hits the same gap. A build-time COPY uploads the file
|
||||
# as part of the docker build context — the daemon receives the tarball
|
||||
# directly and never bind-mounts. See issue #88 item 2.
|
||||
FROM nginx:1.27-alpine
|
||||
|
||||
COPY nginx.conf /etc/nginx/nginx.conf
|
||||
@ -167,26 +167,15 @@ services:
|
||||
# Production shape: same single CF tunnel front-doors every tenant
|
||||
# subdomain — the Host header carries the tenant identity, not the
|
||||
# routing destination. Local cf-proxy mirrors this exactly.
|
||||
#
|
||||
# nginx.conf delivery: built into a custom image via cf-proxy/Dockerfile
|
||||
# (a thin nginx:1.27-alpine + COPY). NOT a bind mount and NOT a
|
||||
# compose `configs:` block, both of which break under Gitea's
|
||||
# act_runner: the runner talks to the OUTER docker daemon over the
|
||||
# host socket, and runc resolves bind sources on the outer host
|
||||
# filesystem, where `/workspace/.../tests/harness/cf-proxy/nginx.conf`
|
||||
# is invisible. Compose `configs:` falls back to bind mounts without
|
||||
# swarm, so it hits the same gap. A build context, by contrast, is
|
||||
# uploaded to the daemon as a tarball at build time — no bind. See
|
||||
# issue #88 item 2.
|
||||
cf-proxy:
|
||||
build:
|
||||
context: ./cf-proxy
|
||||
dockerfile: Dockerfile
|
||||
image: nginx:1.27-alpine
|
||||
depends_on:
|
||||
tenant-alpha:
|
||||
condition: service_healthy
|
||||
tenant-beta:
|
||||
condition: service_healthy
|
||||
volumes:
|
||||
- ./cf-proxy/nginx.conf:/etc/nginx/nginx.conf:ro
|
||||
# Bind to 127.0.0.1 only — hardcoded ADMIN_TOKENs make 0.0.0.0
|
||||
# exposure unsafe even on a local network.
|
||||
ports:
|
||||
|
||||
@ -18,7 +18,7 @@
|
||||
#
|
||||
# Or inline via curl:
|
||||
#
|
||||
# bash <(curl -fsSL https://git.moleculesai.app/molecule-ai/molecule-core/raw/branch/main/tools/check-template-parity.sh) \
|
||||
# bash <(curl -fsSL https://raw.githubusercontent.com/Molecule-AI/molecule-core/main/tools/check-template-parity.sh) \
|
||||
# install.sh start.sh
|
||||
#
|
||||
# Exit codes:
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user