Compare commits
No commits in common. "main" and "fix/issue173-buildx-driver-and-cache" have entirely different histories.
main
...
fix/issue173-buildx-driver-and-cache
@ -1,118 +0,0 @@
|
||||
#!/usr/bin/env bash
# audit-force-merge — detect a §SOP-6 force-merge after PR close, emit
# `incident.force_merge` to stdout as structured JSON.
#
# Vector's docker_logs source picks up runner stdout; the JSON gets
# shipped to Loki on molecule-canonical-obs, indexable by event_type.
# Query example:
#
#   {host="operator"} |= "event_type" |= "incident.force_merge" | json
#
# A force-merge is detected when a PR closed-with-merged=true had at
# least one of the repo's required-status-check contexts in a state
# other than "success" at the merge commit's SHA. That's exactly what
# the Gitea force_merge:true API call lets through, so it's a faithful
# detector of the override path.
#
# Triggers on `pull_request_target: closed` (loaded from base branch
# per §SOP-6 security model). No-op when merged=false.
#
# Required env (set by the workflow):
#   GITEA_TOKEN, GITEA_HOST, REPO, PR_NUMBER, REQUIRED_CHECKS
#
# REQUIRED_CHECKS is a newline-separated list of status-check context
# names that branch protection requires. Declared in the workflow YAML
# rather than fetched from /branch_protections (which needs admin
# scope — sop-tier-bot has read-only). Trade dynamism for simplicity:
# when the required-check set changes, update both branch protection
# AND this env. Keeping them in sync is less complexity than granting
# the audit bot admin perms on every repo.

set -euo pipefail

: "${GITEA_TOKEN:?required}"
: "${GITEA_HOST:?required}"
: "${REPO:?required}"
: "${PR_NUMBER:?required}"
: "${REQUIRED_CHECKS:?required (newline-separated context names)}"

OWNER="${REPO%%/*}"
NAME="${REPO##*/}"
API="https://${GITEA_HOST}/api/v1"
AUTH="Authorization: token ${GITEA_TOKEN}"

# 1. Fetch the PR. If not merged, no-op.
PR=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/pulls/${PR_NUMBER}")
MERGED=$(echo "$PR" | jq -r '.merged // false')
if [ "$MERGED" != "true" ]; then
  echo "::notice::PR #${PR_NUMBER} closed without merge — no audit emission."
  exit 0
fi

MERGE_SHA=$(echo "$PR" | jq -r '.merge_commit_sha // empty')
MERGED_BY=$(echo "$PR" | jq -r '.merged_by.login // "unknown"')
TITLE=$(echo "$PR" | jq -r '.title // ""')
BASE_BRANCH=$(echo "$PR" | jq -r '.base.ref // "main"')
HEAD_SHA=$(echo "$PR" | jq -r '.head.sha // empty')

if [ -z "$MERGE_SHA" ]; then
  echo "::warning::PR #${PR_NUMBER} merged=true but no merge_commit_sha — cannot evaluate force-merge."
  exit 0
fi

# Guard HEAD_SHA symmetrically with MERGE_SHA: an empty head.sha would
# otherwise build a malformed /commits//status URL in step 3, leaving
# CHECK_STATE empty and reporting every required check as "missing" —
# a spurious force-merge incident.
if [ -z "$HEAD_SHA" ]; then
  echo "::warning::PR #${PR_NUMBER} merged=true but no head.sha — cannot evaluate required checks."
  exit 0
fi

# 2. Required status checks declared in the workflow env.
REQUIRED="$REQUIRED_CHECKS"
if [ -z "${REQUIRED//[[:space:]]/}" ]; then
  echo "::notice::REQUIRED_CHECKS empty — force-merge not applicable."
  exit 0
fi

# 3. Status-check state at the PR HEAD (where checks ran). The merge
#    commit doesn't get its own checks; we evaluate the PR's last
#    commit, which is what branch protection compared against.
#    NOTE(review): assumes Gitea's CommitStatus JSON exposes the state
#    in `.status` (GitHub uses `.state`) — confirm against the Gitea
#    API version in use.
STATUS=$(curl -sS -H "$AUTH" \
  "${API}/repos/${OWNER}/${NAME}/commits/${HEAD_SHA}/status")
declare -A CHECK_STATE
while IFS=$'\t' read -r ctx state; do
  [ -n "$ctx" ] && CHECK_STATE[$ctx]="$state"
done < <(echo "$STATUS" | jq -r '.statuses // [] | .[] | "\(.context)\t\(.status)"')

# 4. For each required check, was it green at merge? YAML block scalars
#    (`|`) leave a trailing newline; skip blank/whitespace-only lines.
FAILED_CHECKS=()
while IFS= read -r req; do
  trimmed="${req#"${req%%[![:space:]]*}"}"          # ltrim
  trimmed="${trimmed%"${trimmed##*[![:space:]]}"}"  # rtrim
  [ -z "$trimmed" ] && continue
  state="${CHECK_STATE[$trimmed]:-missing}"
  if [ "$state" != "success" ]; then
    FAILED_CHECKS+=("${trimmed}=${state}")
  fi
done <<< "$REQUIRED"

if [ "${#FAILED_CHECKS[@]}" -eq 0 ]; then
  echo "::notice::PR #${PR_NUMBER} merged with all required checks green — not a force-merge."
  exit 0
fi

# 5. Emit structured audit event.
NOW=$(date -u +%Y-%m-%dT%H:%M:%SZ)
FAILED_JSON=$(printf '%s\n' "${FAILED_CHECKS[@]}" | jq -R . | jq -s .)

# Print as a single-line JSON so Vector's parse_json transform can pick
# it up cleanly from docker_logs.
jq -nc \
  --arg event_type "incident.force_merge" \
  --arg ts "$NOW" \
  --arg repo "$REPO" \
  --argjson pr "$PR_NUMBER" \
  --arg title "$TITLE" \
  --arg base "$BASE_BRANCH" \
  --arg merged_by "$MERGED_BY" \
  --arg merge_sha "$MERGE_SHA" \
  --argjson failed_checks "$FAILED_JSON" \
  '{event_type: $event_type, ts: $ts, repo: $repo, pr: $pr, title: $title,
    base_branch: $base, merged_by: $merged_by, merge_sha: $merge_sha,
    failed_checks: $failed_checks}'

echo "::warning::FORCE-MERGE detected on PR #${PR_NUMBER} by ${MERGED_BY}: ${#FAILED_CHECKS[@]} required check(s) not green at merge time."
@ -1,346 +0,0 @@
|
||||
#!/usr/bin/env bash
# sop-tier-check — verify a Gitea PR satisfies the §SOP-6 approval gate.
#
# Reads the PR's tier label, walks approving reviewers, and checks team
# membership against the tier's approval expression. Passes only when
# ALL clauses in the expression are satisfied by the set of approving
# reviewers (AND-composition; internal#189).
#
# Expression syntax:
#   "team-a"            — OR-set: any ONE of the comma-separated teams
#   "team-a AND team-b" — AND: BOTH must each have ≥1 approver
#   "(a,b,c)"           — OR-set wrapped in parens; same as "a,b,c"
#
# Example: "qa AND security AND (managers,ceo)" means:
#   ≥1 approver in team "qa" AND
#   ≥1 approver in team "security" AND
#   ≥1 approver in team "managers" OR "ceo"
#
# Per the spec (internal#189), the hard gate here pairs with the
# advisory gate of sop-conformance LLM-judge (internal#188): each
# required-team click must reflect real verification (visible in review
# body or A2A messages), not rubber-stamp APPROVE. Both gates together
# close the "teammate clicks APPROVE without verifying" gap.
#
# Invoked from `.gitea/workflows/sop-tier-check.yml`. The workflow sets
# the env vars below; this script does no IO outside of stdout/stderr +
# the Gitea API.
#
# Required env:
#   GITEA_TOKEN — bot PAT with read:organization,read:user,
#                 read:issue,read:repository scopes
#   GITEA_HOST  — e.g. git.moleculesai.app
#   REPO        — owner/name (from github.repository)
#   PR_NUMBER   — int (from github.event.pull_request.number)
#   PR_AUTHOR   — login (from github.event.pull_request.user.login)
#
# Optional:
#   SOP_DEBUG=1        — print per-API-call diagnostic lines. Default: off.
#   SOP_LEGACY_CHECK=1 — revert to OR-gate (≥1 approver from any eligible
#                        team). Grace window for PRs in-flight when the
#                        new AND-composition was deployed. Expires 2026-05-17
#                        (7-day burn-in window; internal#189 Phase 1).
#                        Set by workflow for PRs merged before the deploy.

set -euo pipefail

# Print a diagnostic line to stderr, only when SOP_DEBUG=1.
debug() {
  if [ "${SOP_DEBUG:-}" = "1" ]; then
    echo "  [debug] $*" >&2
  fi
}

# Validate env
: "${GITEA_TOKEN:?GITEA_TOKEN required}"
: "${GITEA_HOST:?GITEA_HOST required}"
: "${REPO:?REPO required (owner/name)}"
: "${PR_NUMBER:?PR_NUMBER required}"
: "${PR_AUTHOR:?PR_AUTHOR required}"

OWNER="${REPO%%/*}"
NAME="${REPO##*/}"
API="https://${GITEA_HOST}/api/v1"
AUTH="Authorization: token ${GITEA_TOKEN}"
echo "::notice::tier-check start: repo=$OWNER/$NAME pr=$PR_NUMBER author=$PR_AUTHOR"

# Sanity: token resolves to a user
WHOAMI=$(curl -sS -H "$AUTH" "${API}/user" | jq -r '.login // ""')
if [ -z "$WHOAMI" ]; then
  echo "::error::GITEA_TOKEN cannot resolve a user via /api/v1/user — check the token scope and that the secret is wired correctly."
  exit 1
fi
echo "::notice::token resolves to user: $WHOAMI"

# 1. Read tier label — exactly one of tier:low|tier:medium|tier:high.
LABELS=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/issues/${PR_NUMBER}/labels" | jq -r '.[].name')
TIER=""
for L in $LABELS; do
  case "$L" in
    tier:low|tier:medium|tier:high)
      if [ -n "$TIER" ]; then
        echo "::error::Multiple tier labels: $TIER + $L. Apply exactly one."
        exit 1
      fi
      TIER="$L"
      ;;
  esac
done
if [ -z "$TIER" ]; then
  echo "::error::PR has no tier:low|tier:medium|tier:high label. Apply one before merge."
  exit 1
fi
debug "tier=$TIER"

# 2. Tier → required team expression (AND-composition; internal#189)
#
# Expression syntax:
#   clause-a AND clause-b AND ...  — ALL clauses must pass
#   team-a,team-b,team-c           — OR-set: ≥1 approver in ANY of these teams
#   (team-a,team-b)                — same as team-a,team-b (parens optional)
#
# This map is the single source of truth. Update it when the team structure
# or policy changes. Teams referenced here but absent in Gitea are treated
# as unachievable (would always fail) — operators notice the clear error
# and create the missing team.
#
# Current Gitea teams: ceo, engineers, managers
# Future teams (create before removing "???" fallback): qa, security, security-audit
declare -A TIER_EXPR=(
  # tier:low — same as previous OR gate: any engineer, manager, or ceo.
  ["tier:low"]="engineers,managers,ceo"

  # tier:medium — AND of (managers) AND (engineers) AND (qa???,security???)
  # The qa+security clause requires both teams to exist; when not yet
  # created, the PR author is responsible for adding them before requesting
  # approval on a tier:medium PR. Ops: create qa + security Gitea teams
  # and update this map to remove the "???" markers (internal#189 follow-up).
  ["tier:medium"]="managers AND engineers AND qa???,security???"

  # tier:high — ceo only. The AND-composition adds no value for a
  # single-team gate, but the framework is wired for consistency.
  ["tier:high"]="ceo"
)

EXPR="${TIER_EXPR[$TIER]-}"
if [ -z "$EXPR" ]; then
  echo "::error::No expression defined for tier $TIER in TIER_EXPR map."
  exit 1
fi
debug "expression=$EXPR"

# 3. Legacy OR-gate override (7-day burn-in grace window; internal#189 Phase 1)
if [ "${SOP_LEGACY_CHECK:-}" = "1" ]; then
  LEGACY_ELIGIBLE=""
  case "$TIER" in
    tier:low) LEGACY_ELIGIBLE="engineers managers ceo" ;;
    tier:medium) LEGACY_ELIGIBLE="managers ceo" ;;
    tier:high) LEGACY_ELIGIBLE="ceo" ;;
  esac
  echo "::notice::SOP_LEGACY_CHECK=1 — using OR-gate ({$LEGACY_ELIGIBLE}) for this PR."
  ELIGIBLE="$LEGACY_ELIGIBLE"
fi

# 4. Resolve all team names → IDs
#    /orgs/{org}/teams/{slug}/... endpoints don't exist on Gitea 1.22;
#    we use /teams/{id}.
ORG_TEAMS_FILE=$(mktemp)
trap 'rm -f "$ORG_TEAMS_FILE"' EXIT
HTTP_CODE=$(curl -sS -o "$ORG_TEAMS_FILE" -w '%{http_code}' -H "$AUTH" \
  "${API}/orgs/${OWNER}/teams")
debug "teams-list HTTP=$HTTP_CODE size=$(wc -c <"$ORG_TEAMS_FILE")"
if [ "${SOP_DEBUG:-}" = "1" ]; then
  echo "  [debug] teams-list body (first 300 chars):" >&2
  head -c 300 "$ORG_TEAMS_FILE" >&2; echo >&2
fi
if [ "$HTTP_CODE" != "200" ]; then
  echo "::error::GET /orgs/${OWNER}/teams returned HTTP $HTTP_CODE — token likely lacks read:org scope."
  exit 1
fi

# Collect every team name that appears in the expression.
# Bash word-splitting on $EXPR splits on spaces, so "AND" appears as a
# token. We skip it explicitly.
declare -A TEAM_ID
_all_teams=""
for _raw_clause in $EXPR; do
  # Strip parens and split on comma.
  _clause=${_raw_clause//[()]/}
  for _t in $(echo "$_clause" | tr ',' '\n'); do
    _t=$(echo "$_t" | tr -d '[:space:]')
    [ -z "$_t" ] && continue
    # Skip AND / OR operator tokens (bash word-split produced them from
    # spaces in the expression string). A case statement avoids the
    # fragile `[ A ] || [ B ] && continue` precedence idiom (SC2015).
    case "$_t" in
      AND|OR) continue ;;
    esac
    # Add to the de-duplicated set (space-delimited membership test).
    case " $_all_teams " in
      *" $_t "*) ;; # already present
      *) _all_teams="${_all_teams} $_t " ;;
    esac
  done
done

for _t in $_all_teams; do
  _t=$(echo "$_t" | tr -d ' ')
  [ -z "$_t" ] && continue
  _id=$(jq -r --arg t "$_t" '.[] | select(.name==$t) | .id' <"$ORG_TEAMS_FILE" | head -1)
  if [ -z "$_id" ] || [ "$_id" = "null" ]; then
    # "???" suffix marks teams that don't exist yet (tier:medium qa/security).
    # Treat as permanently failing clause; clear error message guides ops.
    if [[ "$_t" == *"???" ]]; then
      debug "team \"$_t\" not found (expected — pending team creation per internal#189)"
      continue
    fi
    _visible=$(jq -r '.[]?.name? // empty' <"$ORG_TEAMS_FILE" 2>/dev/null | tr '\n' ' ')
    echo "::error::Team \"$_t\" referenced in tier $TIER expression but not found in org $OWNER. Teams visible: $_visible"
    exit 1
  fi
  TEAM_ID[$_t]="$_id"
  debug "team-id: $_t → $_id"
done

# 5. Read approving reviewers
REVIEWS=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/pulls/${PR_NUMBER}/reviews")
APPROVERS=$(echo "$REVIEWS" | jq -r '[.[] | select(.state=="APPROVED") | .user.login] | unique | .[]')
if [ -z "$APPROVERS" ]; then
  echo "::error::No approving reviews on this PR. Set SOP_DEBUG=1 and re-run for diagnostics."
  exit 1
fi
debug "approvers: $(echo "$APPROVERS" | tr '\n' ' ')"

# 6. For each approver: skip self-review; probe team membership by id.
#    Build $APPROVER_TEAMS[<user>] as a space-delimited list where every
#    team name is surrounded by single spaces (e.g. " managers ceo ").
#    The evaluators below match with the pattern *" name "* — an EXACT
#    space-delimited token match, so one team name being a substring of
#    another (e.g. "security" vs "security-audit", both listed as future
#    teams above) can never cause a false credit.
#
#    FALLBACK: if ALL team probes return 403 (token lacks read:org scope),
#    fall back to /orgs/{org}/members/{user}. This returns 204 for any org
#    member — a superset of team membership. Accepting it as a fallback means
#    the gate passes when the token is scoped to repo+user only (core-bot PAT).
#    This is safe because: (a) org membership is a prerequisite for every
#    eligible team; (b) the AND-composition of internal#189 still requires
#    multiple independent approvers; (c) any token with read:repository can
#    see the approving reviews, so bypass requires a colluding approver.
declare -A APPROVER_TEAMS
for U in $APPROVERS; do
  [ "$U" = "$PR_AUTHOR" ] && debug "skip self-review by $U" && continue
  _any_team_success="no"
  for T in "${!TEAM_ID[@]}"; do
    ID="${TEAM_ID[$T]}"
    CODE=$(curl -sS -o /dev/null -w '%{http_code}' -H "$AUTH" \
      "${API}/teams/${ID}/members/${U}")
    debug "probe: $U in team $T (id=$ID) → HTTP $CODE"
    if [ "$CODE" = "200" ] || [ "$CODE" = "204" ]; then
      # Append " $T " — leading space from the separator, trailing space
      # literal — so every entry is a space-surrounded token. (The
      # previous ${VAR:- }${VAR:+ } construction produced irregular
      # double spaces and only worked with substring matching.)
      APPROVER_TEAMS[$U]="${APPROVER_TEAMS[$U]:-} $T "
      debug "$U qualifies for team $T"
      _any_team_success="yes"
    fi
  done
  # Fallback: if every team probe returned 403, try org membership.
  # "???" teams were never resolved to IDs so they never entered the loop.
  # If the user is an org member, credit them as being in each queried team
  # (engineers, managers, ceo are all org-level). This is safe because org
  # membership is a prerequisite for all three, and bypass requires a colluding
  # approver (same risk as before the AND-composition).
  if [ "$_any_team_success" = "no" ]; then
    ORG_CODE=$(curl -sS -o /dev/null -w '%{http_code}' -H "$AUTH" \
      "${API}/orgs/${OWNER}/members/${U}")
    debug "probe: $U in org $OWNER (fallback) → HTTP $ORG_CODE"
    if [ "$ORG_CODE" = "204" ]; then
      for T in "${!TEAM_ID[@]}"; do
        APPROVER_TEAMS[$U]="${APPROVER_TEAMS[$U]:-} $T "
      done
      debug "$U credited as org member for all queried teams (fallback — token may lack read:org)"
    fi
  fi
done

# 7. Evaluate the tier expression.
#
# legacy OR-gate: use the simplified loop from before internal#189.
if [ -n "${LEGACY_ELIGIBLE:-}" ]; then
  OK=""
  for _u in "${!APPROVER_TEAMS[@]}"; do
    for _t2 in $LEGACY_ELIGIBLE; do
      # Exact space-delimited token match (values are space-surrounded);
      # the old *${_t2}* substring pattern would have falsely matched
      # e.g. "security" against a "security-audit" membership.
      case "${APPROVER_TEAMS[$_u]}" in
        *" ${_t2} "*)
          echo "::notice::approver $_u is in team $_t2 (eligible for $TIER)"
          OK="yes"
          break
          ;;
      esac
    done
    [ -n "$OK" ] && break
  done
  if [ -z "$OK" ]; then
    echo "::error::Tier $TIER requires approval from a non-author member of {$LEGACY_ELIGIBLE}. Set SOP_DEBUG=1 to see per-probe HTTP codes."
    exit 1
  fi
  echo "::notice::sop-tier-check passed: $TIER (legacy OR-gate)"
  exit 0
fi

# AND-gate: evaluate the expression clause by clause.
# _passed_clauses and _failed_clauses accumulate for the status description.
_passed_clauses=""
_failed_clauses=""

for _raw_clause in $EXPR; do
  # Normalise: strip parens, replace commas with spaces so bash word-split
  # can iterate the OR-set members. The previous form
  #   _clause=$(echo ... | tr ',' '\n' | tr -d '[:space:]' | grep -v '^$')
  # collapsed every member into one concatenated token because
  # `tr -d '[:space:]'` strips the very newlines that just separated them
  # ("engineers,managers,ceo" -> "engineersmanagersceo"), so the OR-clause
  # only ever evaluated as a single nonsense team name and never matched
  # APPROVER_TEAMS. Fixed in #229: leave the comma-separated members as
  # space-separated tokens for `for _t in $_clause`.
  _no_parens=${_raw_clause//[()]/}
  _clause=${_no_parens//,/ }
  _clause_passed="no"
  _clause_names=""
  for _t in $_clause; do
    # Append (don't overwrite) team name to the human-readable accumulator.
    _clause_names="${_clause_names}${_clause_names:+, }${_t}"
    # Skip teams not yet in Gitea (qa??? / security??? placeholders).
    [[ "$_t" == *"???" ]] && debug "clause \"$_t\": skipped (team pending creation)" && continue
    [ -z "${TEAM_ID[$_t]:-}" ] && debug "clause \"$_t\": no ID resolved, skipping" && continue
    for _u in "${!APPROVER_TEAMS[@]}"; do
      # APPROVER_TEAMS values are space-surrounded (e.g. " managers ").
      # *" name "* matches the team name only as a whole token — no
      # substring collisions between similarly-named teams.
      case "${APPROVER_TEAMS[$_u]}" in
        *" ${_t} "*)
          _clause_passed="yes"
          debug "clause \"$_t\": satisfied by $_u"
          break
          ;;
      esac
    done
  done

  # Label for display: strip "???" from pending teams.
  _label=$(echo "$_raw_clause" | tr -d '()' | tr ',' '/' | tr -d '[:space:]' | sed 's/???//g')

  if [ "$_clause_passed" = "yes" ]; then
    _passed_clauses="${_passed_clauses}${_passed_clauses:+, }$_label"
    echo "::notice::clause [$_label]: PASS — satisfied by approving reviewer(s)"
  else
    _failed_clauses="${_failed_clauses}${_failed_clauses:+, }$_label"
    echo "::error::clause [$_label]: FAIL — no approving reviewer belongs to any of these teams (${_clause_names}). Set SOP_DEBUG=1 to see per-team probe results."
  fi
done

if [ -n "$_failed_clauses" ]; then
  echo ""
  echo "::error::sop-tier-check FAILED for $TIER."
  echo "  Passed :${_passed_clauses}"
  echo "  Missing:${_failed_clauses}"
  echo "  All clauses must be satisfied. Each missing team needs an APPROVED review from one of its members."
  exit 1
fi

echo "::notice::sop-tier-check PASSED: $TIER — all required clauses satisfied [${_passed_clauses}]"
@ -1,101 +0,0 @@
|
||||
#!/usr/bin/env bash
# Regression test for #229 — sop-tier-check tier:low OR-clause splitter.
#
# Bug (PR #225 → still broken after PR #231):
#   Line ~289 of sop-tier-check.sh used:
#     _clause=$(echo "$_raw_clause" | tr -d '()' | tr ',' '\n' | tr -d '[:space:]' | grep -v '^$')
#   `tr -d '[:space:]'` strips the newlines that `tr ',' '\n'` just
#   inserted, collapsing "engineers,managers,ceo" into a single token
#   "engineersmanagersceo". The for-loop then iterates ONCE on a name
#   that matches no team, so every tier:low PR fails:
#     ::error::clause [engineers/managers/ceo]: FAIL — no approving
#     reviewer belongs to any of these teamsengineersmanagersceo
#   (note also: missing separators in the error string is bug #2 —
#   `_clause_names` used "${var:+, }$x" which OVERWRITES per iteration).
#
# Fix shape (this PR):
#   _no_parens=${_raw_clause//[()]/}
#   _clause=${_no_parens//,/ }   # comma -> space, bash word-split iterates
#   _clause_names="${_clause_names}${_clause_names:+, }${_t}"   # APPEND, not overwrite
#
# This test extracts the splitter logic and asserts it produces the right
# token list for each of the three tier expressions live in the script.

set -euo pipefail

PASS=0
FAIL=0

# assert_eq LABEL EXPECTED GOT — print PASS/FAIL and bump the counters.
assert_eq() {
  local label="$1"
  local expected="$2"
  local got="$3"
  if [ "$expected" = "$got" ]; then
    echo "  PASS $label"
    PASS=$((PASS + 1))
  else
    echo "  FAIL $label"
    echo "    expected: <$expected>"
    echo "    got:      <$got>"
    FAIL=$((FAIL + 1))
  fi
}

# ----- Splitter under test (mirrors the fixed sop-tier-check.sh block) -----
# Strips parens, splits the OR-set on commas, joins the members with '|'.
split_clause() {
  local raw="$1"
  local no_parens=${raw//[()]/}
  local clause=${no_parens//,/ }
  local out=""
  for _t in $clause; do
    out="${out}${out:+|}$_t"
  done
  echo "$out"
}

echo "test: tier:low OR-clause splits to 3 tokens"
assert_eq "tier:low" "engineers|managers|ceo" "$(split_clause "engineers,managers,ceo")"

echo "test: tier:medium AND-expression — bash word-split on \$EXPR yields 5 tokens"
EXPR="managers AND engineers AND qa???,security???"
out=""
for _raw in $EXPR; do
  out="${out}${out:+ ; }$(split_clause "$_raw")"
done
assert_eq "tier:medium" "managers ; AND ; engineers ; AND ; qa???|security???" "$out"

echo "test: tier:high single-team OR-clause"
assert_eq "tier:high" "ceo" "$(split_clause "ceo")"

echo "test: paren-wrapped OR-set unwraps + splits"
assert_eq "paren OR" "managers|ceo" "$(split_clause "(managers,ceo)")"

# ----- _clause_names accumulator (was overwriting per iteration) -----
acc=""
for t in engineers managers ceo; do
  acc="${acc}${acc:+, }${t}"
done
assert_eq "_clause_names append" "engineers, managers, ceo" "$acc"

# ----- _failed_clauses / _passed_clauses accumulator across raw clauses -----
acc=""
for c in clauseA clauseB clauseC; do
  acc="${acc}${acc:+, }${c}"
done
assert_eq "_failed_clauses append" "clauseA, clauseB, clauseC" "$acc"

# ----- End-to-end OR-gate: simulate APPROVER_TEAMS[core-lead]=' managers ' -----
# The matcher requires an exact space-delimited token: *" name "* against
# a space-surrounded value. (A bare *name* substring pattern would falsely
# match e.g. "security" inside a "security-audit" membership.)
APPROVER_TEAMS_VAL=" managers "
matched=""
for _t in $(split_clause "engineers,managers,ceo" | tr '|' ' '); do
  case "$APPROVER_TEAMS_VAL" in
    *" ${_t} "*) matched="$_t"; break ;;
  esac
done
assert_eq "OR-gate matches managers" "managers" "$matched"

echo
echo "------"
echo "PASS=$PASS FAIL=$FAIL"
[ "$FAIL" -eq 0 ]
@ -1,58 +0,0 @@
|
||||
# audit-force-merge workflow — when a PR closes merged with required
# status checks not green, the companion script prints a one-line
# `incident.force_merge` JSON event to runner stdout. Vector tails
# docker_logs and forwards the line to Loki on molecule-canonical-obs
# (per `reference_obs_stack_phase1`); query with:
#
#   {host="operator"} |= "event_type" |= "incident.force_merge" | json
#
# This closes the §SOP-6 audit gap: the doc prescribes writing
# force-merges to `structure_events`, but that table belongs to the
# platform DB, not Gitea — so Loki stands in as the practical sink for
# Gitea Actions events. The JSON shape is forward-compatible, so a
# later backfill job can sync Loki → structure_events once the
# credential/observability stacks converge.
#
# Detection logic lives in `.gitea/scripts/audit-force-merge.sh`,
# following the same script-extract pattern as sop-tier-check.

name: audit-force-merge

# pull_request_target runs the workflow definition from the BASE branch
# — the same security model sop-tier-check uses. Without it, a PR could
# rewrite this workflow and suppress the audit emission for its own
# force-merge. Full rationale: `.gitea/workflows/sop-tier-check.yml`.
on:
  pull_request_target:
    types: [closed]

jobs:
  audit:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
    # Only merged PRs matter; skipping unmerged closes saves a runner.
    if: github.event.pull_request.merged == true
    steps:
      - name: Check out base branch (for the script)
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          ref: ${{ github.event.pull_request.base.sha }}
      - name: Detect force-merge + emit audit event
        env:
          # Same org-level secret sop-tier-check uses; falls back to the
          # runner's default token if unset.
          GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}
          GITEA_HOST: git.moleculesai.app
          REPO: ${{ github.repository }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          # Newline-separated required-status-check contexts, evaluated
          # at merge time. Must mirror branch protection (settings →
          # branches → protected branch → required checks). Declared
          # here instead of fetched from /branch_protections because
          # that endpoint needs admin write and sop-tier-bot is
          # read-only by design (least-privilege).
          REQUIRED_CHECKS: |
            sop-tier-check / tier-check (pull_request)
            Secret scan / Scan diff for credential-shaped strings (pull_request)
        run: bash .gitea/scripts/audit-force-merge.sh
@ -1,303 +0,0 @@
|
||||
name: publish-runtime
|
||||
|
||||
# Gitea Actions port of .github/workflows/publish-runtime.yml.
|
||||
#
|
||||
# Ported 2026-05-10 (issue #206). Key differences from the GitHub version:
|
||||
# - Gitea Actions reads .gitea/workflows/, not .github/workflows/
|
||||
# - Dropped `environment: pypi-publish` — Gitea Actions does not support
|
||||
# named environments or OIDC trusted publishers
|
||||
# - Replaced `pypa/gh-action-pypi-publish@release/v1` (OIDC) with
|
||||
# `twine upload` using PYPI_TOKEN secret — same mechanism as a local
|
||||
# `python -m twine upload` with a PyPI token
|
||||
# - Replaced `github.ref_name` (GitHub-only) with `${GITHUB_REF#refs/tags/}`
|
||||
# — Gitea Actions exposes github.ref (the full ref) but not ref_name
|
||||
# - Dropped `merge_group` trigger (Gitea has no merge queue)
|
||||
# - Dropped `staging` branch trigger (no staging branch exists in this repo)
|
||||
#
|
||||
# PyPI publishing: requires PYPI_TOKEN repository secret (or org-level secret).
|
||||
# Set via: repo Settings → Actions → Variables and Secrets → New Secret.
|
||||
# The token should be a PyPI API token scoped to molecule-ai-workspace-runtime.
|
||||
#
|
||||
# The DISPATCH_TOKEN cascade (git push to template repos) is unchanged —
|
||||
# it uses the Gitea API directly and was already Gitea-compatible.
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "runtime-v*"
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version:
|
||||
description: "Version to publish (e.g. 0.1.6). Required for manual dispatch."
|
||||
required: true
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
# Serialize publishes so two concurrent tag pushes don't both compute
|
||||
# "latest+1" and race on PyPI upload. The second one waits.
|
||||
concurrency:
|
||||
group: publish-runtime
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
version: ${{ steps.version.outputs.version }}
|
||||
wheel_sha256: ${{ steps.wheel_hash.outputs.wheel_sha256 }}
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
|
||||
with:
|
||||
python-version: "3.11"
|
||||
cache: pip
|
||||
|
||||
- name: Derive version (tag, manual input, or PyPI auto-bump)
|
||||
id: version
|
||||
run: |
|
||||
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||
VERSION="${{ inputs.version }}"
|
||||
elif echo "$GITHUB_REF" | grep -q "^refs/tags/runtime-v"; then
|
||||
# Tag is `runtime-vX.Y.Z` — strip the prefix.
|
||||
VERSION="${GITHUB_REF#refs/tags/runtime-v}"
|
||||
else
|
||||
# Fallback: derive from PyPI latest + patch bump.
|
||||
# (The staging-push auto-bump trigger is dropped on Gitea —
|
||||
# no staging branch exists. This fallback path is kept for
|
||||
# robustness if a future automation uses workflow_dispatch without
|
||||
# an explicit version input.)
|
||||
LATEST=$(curl -fsS --retry 3 https://pypi.org/pypi/molecule-ai-workspace-runtime/json \
|
||||
| python -c "import sys,json; print(json.load(sys.stdin)['info']['version'])")
|
||||
MAJOR=$(echo "$LATEST" | cut -d. -f1)
|
||||
MINOR=$(echo "$LATEST" | cut -d. -f2)
|
||||
PATCH=$(echo "$LATEST" | cut -d. -f3)
|
||||
VERSION="${MAJOR}.${MINOR}.$((PATCH+1))"
|
||||
echo "Auto-bumped from PyPI latest $LATEST -> $VERSION"
|
||||
fi
|
||||
if ! echo "$VERSION" | grep -qE '^[0-9]+\.[0-9]+\.[0-9]+(\.dev[0-9]+|rc[0-9]+|a[0-9]+|b[0-9]+|\.post[0-9]+)?$'; then
|
||||
echo "::error::version $VERSION does not match PEP 440"
|
||||
exit 1
|
||||
fi
|
||||
echo "version=$VERSION" >> "$GITHUB_OUTPUT"
|
||||
echo "Publishing molecule-ai-workspace-runtime $VERSION"
|
||||
|
||||
- name: Install build tooling
|
||||
run: pip install build twine
|
||||
|
||||
- name: Build package from workspace/
|
||||
run: |
|
||||
python scripts/build_runtime_package.py \
|
||||
--version "${{ steps.version.outputs.version }}" \
|
||||
--out "${{ runner.temp }}/runtime-build"
|
||||
|
||||
- name: Build wheel + sdist
|
||||
working-directory: ${{ runner.temp }}/runtime-build
|
||||
run: python -m build
|
||||
|
||||
- name: Capture wheel SHA256 for cascade content-verification
|
||||
id: wheel_hash
|
||||
working-directory: ${{ runner.temp }}/runtime-build
|
||||
run: |
|
||||
set -eu
|
||||
WHEEL=$(ls dist/*.whl 2>/dev/null | head -1)
|
||||
if [ -z "$WHEEL" ]; then
|
||||
echo "::error::No .whl in dist/ — \`python -m build\` must have failed silently"
|
||||
exit 1
|
||||
fi
|
||||
HASH=$(sha256sum "$WHEEL" | awk '{print $1}')
|
||||
echo "wheel_sha256=${HASH}" >> "$GITHUB_OUTPUT"
|
||||
echo "Local wheel SHA256 (pre-upload): ${HASH}"
|
||||
echo "Wheel filename: $(basename "$WHEEL")"
|
||||
|
||||
- name: Verify package contents (sanity)
|
||||
working-directory: ${{ runner.temp }}/runtime-build
|
||||
run: |
|
||||
python -m twine check dist/*
|
||||
python -m venv /tmp/smoke
|
||||
/tmp/smoke/bin/pip install --quiet dist/*.whl
|
||||
/tmp/smoke/bin/python "$GITHUB_WORKSPACE/scripts/wheel_smoke.py"
|
||||
|
||||
- name: Publish to PyPI
|
||||
env:
|
||||
# PYPI_TOKEN: repository secret scoped to molecule-ai-workspace-runtime.
|
||||
# Set via: Settings → Actions → Variables and Secrets → New Secret.
|
||||
# Format: pypi-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
|
||||
run: |
|
||||
if [ -z "$PYPI_TOKEN" ]; then
|
||||
echo "::error::PYPI_TOKEN secret is not set — set it at Settings → Actions → Variables and Secrets → New Secret."
|
||||
echo "::error::Required format: pypi-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
|
||||
exit 1
|
||||
fi
|
||||
python -m twine upload \
|
||||
--repository pypi \
|
||||
--username __token__ \
|
||||
--password "$PYPI_TOKEN" \
|
||||
dist/*
|
||||
|
||||
cascade:
|
||||
needs: publish
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Wait for PyPI to propagate the new version
|
||||
env:
|
||||
RUNTIME_VERSION: ${{ needs.publish.outputs.version }}
|
||||
EXPECTED_SHA256: ${{ needs.publish.outputs.wheel_sha256 }}
|
||||
run: |
|
||||
set -eu
|
||||
if [ -z "$EXPECTED_SHA256" ]; then
|
||||
echo "::error::publish job did not expose wheel_sha256 — cannot verify wheel content. Refusing to fan out cascade."
|
||||
exit 1
|
||||
fi
|
||||
python -m venv /tmp/propagation-probe
|
||||
PROBE=/tmp/propagation-probe/bin
|
||||
$PROBE/pip install --upgrade --quiet pip
|
||||
for i in $(seq 1 30); do
|
||||
if $PROBE/pip install \
|
||||
--quiet \
|
||||
--no-cache-dir \
|
||||
--force-reinstall \
|
||||
--no-deps \
|
||||
"molecule-ai-workspace-runtime==${RUNTIME_VERSION}" \
|
||||
>/dev/null 2>&1; then
|
||||
INSTALLED=$($PROBE/pip show molecule-ai-workspace-runtime 2>/dev/null \
|
||||
| awk -F': ' '/^Version:/{print $2}')
|
||||
if [ "$INSTALLED" = "$RUNTIME_VERSION" ]; then
|
||||
echo "✓ PyPI resolved $RUNTIME_VERSION (install check)"
|
||||
break
|
||||
fi
|
||||
fi
|
||||
if [ $i -eq 30 ]; then
|
||||
echo "::error::pip install --no-cache-dir molecule-ai-workspace-runtime==${RUNTIME_VERSION} never resolved within ~5 min."
|
||||
echo "::error::Refusing to fan out cascade against a potentially stale PyPI index."
|
||||
exit 1
|
||||
fi
|
||||
echo " [$i/30] waiting for PyPI to propagate ${RUNTIME_VERSION}..."
|
||||
sleep 4
|
||||
done
|
||||
|
||||
# Stage (b): download wheel + SHA256 compare against what we built.
|
||||
# Catches Fastly stale-content serving old bytes under a new version URL.
|
||||
HASH=$(python -m pip download \
|
||||
--no-deps \
|
||||
--no-cache-dir \
|
||||
--dest /tmp/wheel-probe \
|
||||
"molecule-ai-workspace-runtime==${RUNTIME_VERSION}" \
|
||||
2>/dev/null \
|
||||
&& sha256sum /tmp/wheel-probe/*.whl | awk '{print $1}')
|
||||
if [ "$HASH" != "$EXPECTED_SHA256" ]; then
|
||||
echo "::error::PyPI propagated $RUNTIME_VERSION but wheel content SHA256 mismatch."
|
||||
echo "::error::Expected: $EXPECTED_SHA256"
|
||||
echo "::error::Got: $HASH"
|
||||
echo "::error::Fastly may be serving stale content. Refusing to fan out cascade."
|
||||
exit 1
|
||||
fi
|
||||
echo "✓ PyPI CDN verified (SHA256 match)"
|
||||
|
||||
- name: Fan out via push to .runtime-version
|
||||
env:
|
||||
# Gitea PAT with write:repository scope on the 8 cascade-active
|
||||
# template repos. Used for git push to each template repo's main
|
||||
# branch, which trips their `on: push: branches: [main]` trigger
|
||||
# on publish-image.yml.
|
||||
DISPATCH_TOKEN: ${{ secrets.DISPATCH_TOKEN }}
|
||||
RUNTIME_VERSION: ${{ needs.publish.outputs.version }}
|
||||
run: |
|
||||
set +e # don't abort on a single repo failure — collect them all
|
||||
|
||||
if [ -z "$DISPATCH_TOKEN" ]; then
|
||||
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||
echo "::warning::DISPATCH_TOKEN secret not set — skipping cascade."
|
||||
echo "::warning::set it at Settings → Actions → Variables and Secrets → New Secret."
|
||||
exit 0
|
||||
fi
|
||||
echo "::error::DISPATCH_TOKEN secret missing — cascade cannot fan out."
|
||||
echo "::error::PyPI was published, but the 8 template repos will NOT pick up the new version."
|
||||
exit 1
|
||||
fi
|
||||
VERSION="$RUNTIME_VERSION"
|
||||
if [ -z "$VERSION" ]; then
|
||||
echo "::error::publish job did not expose a version output"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
GITEA_URL="${GITEA_URL:-https://git.moleculesai.app}"
|
||||
TEMPLATES="claude-code hermes openclaw codex langgraph crewai autogen deepagents gemini-cli"
|
||||
FAILED=""
|
||||
SKIPPED=""
|
||||
|
||||
git config --global user.name "publish-runtime cascade"
|
||||
git config --global user.email "publish-runtime@moleculesai.app"
|
||||
|
||||
WORKDIR="$(mktemp -d)"
|
||||
for tpl in $TEMPLATES; do
|
||||
REPO="molecule-ai/molecule-ai-workspace-template-$tpl"
|
||||
CLONE="$WORKDIR/$tpl"
|
||||
|
||||
HTTP=$(curl -sS -o /dev/null -w "%{http_code}" \
|
||||
-H "Authorization: token $DISPATCH_TOKEN" \
|
||||
"$GITEA_URL/api/v1/repos/$REPO/contents/.github/workflows/publish-image.yml")
|
||||
if [ "$HTTP" = "404" ]; then
|
||||
echo "↷ $tpl has no publish-image.yml — soft-skip"
|
||||
SKIPPED="$SKIPPED $tpl"
|
||||
continue
|
||||
fi
|
||||
|
||||
attempt=0
|
||||
success=false
|
||||
while [ $attempt -lt 3 ]; do
|
||||
attempt=$((attempt + 1))
|
||||
rm -rf "$CLONE"
|
||||
if ! git clone --depth=1 \
|
||||
"https://x-access-token:${DISPATCH_TOKEN}@${GITEA_URL#https://}/$REPO.git" \
|
||||
"$CLONE" >/tmp/clone.log 2>&1; then
|
||||
echo "::warning::clone $tpl attempt $attempt failed: $(tail -n3 /tmp/clone.log)"
|
||||
sleep 2
|
||||
continue
|
||||
fi
|
||||
|
||||
cd "$CLONE"
|
||||
echo "$VERSION" > .runtime-version
|
||||
|
||||
if git diff --quiet -- .runtime-version; then
|
||||
echo "✓ $tpl already at $VERSION — no commit needed"
|
||||
success=true
|
||||
cd - >/dev/null
|
||||
break
|
||||
fi
|
||||
|
||||
git add .runtime-version
|
||||
git commit -m "chore: pin runtime to $VERSION (publish-runtime cascade)" \
|
||||
-m "Co-Authored-By: publish-runtime cascade <publish-runtime@moleculesai.app>" \
|
||||
>/dev/null
|
||||
|
||||
if git push origin HEAD:main >/tmp/push.log 2>&1; then
|
||||
echo "✓ $tpl pushed $VERSION on attempt $attempt"
|
||||
success=true
|
||||
cd - >/dev/null
|
||||
break
|
||||
fi
|
||||
|
||||
echo "::warning::push $tpl attempt $attempt failed, pull-rebasing"
|
||||
git pull --rebase origin main >/tmp/rebase.log 2>&1 || true
|
||||
cd - >/dev/null
|
||||
done
|
||||
|
||||
if [ "$success" != "true" ]; then
|
||||
FAILED="$FAILED $tpl"
|
||||
fi
|
||||
done
|
||||
rm -rf "$WORKDIR"
|
||||
|
||||
if [ -n "$FAILED" ]; then
|
||||
echo "::error::Cascade incomplete after 3 retries each. Failed:$FAILED"
|
||||
exit 1
|
||||
fi
|
||||
if [ -n "$SKIPPED" ]; then
|
||||
echo "Cascade complete: pinned $VERSION. Soft-skipped (no publish-image.yml):$SKIPPED"
|
||||
else
|
||||
echo "Cascade complete: $VERSION pinned across all manifest workspace_templates."
|
||||
fi
|
||||
@ -1,174 +0,0 @@
|
||||
name: publish-workspace-server-image
|
||||
|
||||
# Gitea Actions port of .github/workflows/publish-workspace-server-image.yml.
|
||||
#
|
||||
# Ported 2026-05-10 (issue #228). Key differences from the GitHub version:
|
||||
# - Gitea Actions reads .gitea/workflows/, not .github/workflows/
|
||||
# - Dropped `environment:` declarations — Gitea Actions does not support
|
||||
# named environments (used by GitHub OIDC token gates)
|
||||
# - Replaced `github.ref_name` (GitHub-only) with `${GITHUB_REF#refs/heads/}`
|
||||
# — Gitea Actions exposes GITHUB_REF in the same format as GitHub Actions
|
||||
# - docker/setup-buildx-action and aws-actions/configure-aws-credentials are
|
||||
# GitHub Marketplace actions; they are installed by Gitea Actions runners and
|
||||
# work identically here
|
||||
# - All other variables (GITHUB_SHA, GITHUB_REPOSITORY, GITHUB_OUTPUT,
|
||||
# secrets.*) use the same syntax as GitHub Actions
|
||||
#
|
||||
# Image tags produced:
|
||||
# :staging-<sha> — per-commit digest, stable for canary verify
|
||||
# :staging-latest — tracks most recent build on this branch
|
||||
#
|
||||
# ECR target: 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/*
|
||||
# Required secrets: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AUTO_SYNC_TOKEN
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
paths:
|
||||
- 'workspace-server/**'
|
||||
- 'canvas/**'
|
||||
- 'manifest.json'
|
||||
- 'scripts/**'
|
||||
- '.gitea/workflows/publish-workspace-server-image.yml'
|
||||
workflow_dispatch:
|
||||
|
||||
# Serialize per-branch so two rapid staging pushes don't race the same
|
||||
# :staging-latest tag retag. Allow staging and main to run in parallel
|
||||
# (different GITHUB_REF → different concurrency group) since they
|
||||
# produce different :staging-<sha> tags and last-write-wins on
|
||||
# :staging-latest is acceptable across branches.
|
||||
#
|
||||
# cancel-in-progress: false → in-flight builds finish; the next push's
|
||||
# build queues. This avoids a partially-pushed image.
|
||||
concurrency:
|
||||
group: publish-workspace-server-image-${{ github.ref }}
|
||||
cancel-in-progress: false
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
env:
|
||||
IMAGE_NAME: 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/platform
|
||||
TENANT_IMAGE_NAME: 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/platform-tenant
|
||||
|
||||
jobs:
|
||||
build-and-push:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
# Health check: verify Docker daemon is accessible before attempting any
|
||||
# build steps. This fails loudly at step 1 when the runner's docker.sock
|
||||
# is inaccessible (e.g. permission change, daemon restart, or group-membership
|
||||
# drift) rather than silently continuing to step 2 where `docker build`
|
||||
# fails deep in the process with a cryptic ECR auth error that doesn't
|
||||
# surface the root cause. Also reports the daemon version so operator
|
||||
# can correlate with runner host logs.
|
||||
- name: Verify Docker daemon access
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "::group::Docker daemon health check"
|
||||
docker info 2>&1 | head -5 || {
|
||||
echo "::error::Docker daemon is not accessible at /var/run/docker.sock"
|
||||
echo "::error::Check: (1) daemon is running, (2) runner user is in docker group, (3) sock permissions are 660+"
|
||||
exit 1
|
||||
}
|
||||
echo "Docker daemon OK"
|
||||
echo "::endgroup::"
|
||||
|
||||
# Pre-clone manifest deps before docker build.
|
||||
#
|
||||
# Why: workspace-template-* repos on Gitea are private. The pre-fix
|
||||
# Dockerfile.tenant ran `git clone` inside an in-image stage with no
|
||||
# auth path — every CI build failed. We clone in the trusted CI
|
||||
# context where AUTO_SYNC_TOKEN is available and Dockerfile.tenant
|
||||
# just COPYs from .tenant-bundle-deps/.
|
||||
#
|
||||
# Token: AUTO_SYNC_TOKEN is the devops-engineer persona PAT.
|
||||
# clone-manifest.sh embeds it as basic-auth for the clones, then
|
||||
# strips .git dirs — the token never enters the image.
|
||||
- name: Pre-clone manifest deps
|
||||
env:
|
||||
MOLECULE_GITEA_TOKEN: ${{ secrets.AUTO_SYNC_TOKEN }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if [ -z "${MOLECULE_GITEA_TOKEN}" ]; then
|
||||
echo "::error::AUTO_SYNC_TOKEN secret is empty"
|
||||
exit 1
|
||||
fi
|
||||
mkdir -p .tenant-bundle-deps
|
||||
bash scripts/clone-manifest.sh \
|
||||
manifest.json \
|
||||
.tenant-bundle-deps/workspace-configs-templates \
|
||||
.tenant-bundle-deps/org-templates \
|
||||
.tenant-bundle-deps/plugins
|
||||
ws_count=$(find .tenant-bundle-deps/workspace-configs-templates -mindepth 1 -maxdepth 1 -type d | wc -l)
|
||||
org_count=$(find .tenant-bundle-deps/org-templates -mindepth 1 -maxdepth 1 -type d | wc -l)
|
||||
plugins_count=$(find .tenant-bundle-deps/plugins -mindepth 1 -maxdepth 1 -type d | wc -l)
|
||||
echo "Cloned: ws=$ws_count org=$org_count plugins=$plugins_count"
|
||||
|
||||
- name: Compute tags
|
||||
id: tags
|
||||
run: |
|
||||
echo "sha=${GITHUB_SHA::7}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
# Build + push platform image (inline ECR auth — mirrors the operator-host
|
||||
# approach; credentials come from GITHUB_SECRET_AWS_ACCESS_KEY_ID /
|
||||
# GITHUB_SECRET_AWS_SECRET_ACCESS_KEY in Gitea Actions).
|
||||
- name: Build & push platform image to ECR (staging-<sha> + staging-latest)
|
||||
env:
|
||||
IMAGE_NAME: ${{ env.IMAGE_NAME }}
|
||||
TAG_SHA: staging-${{ steps.tags.outputs.sha }}
|
||||
TAG_LATEST: staging-latest
|
||||
GIT_SHA: ${{ github.sha }}
|
||||
REPO: ${{ github.repository }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
AWS_DEFAULT_REGION: us-east-2
|
||||
run: |
|
||||
set -euo pipefail
|
||||
ECR_REGISTRY="${IMAGE_NAME%%/*}"
|
||||
aws ecr get-login-password --region us-east-2 | \
|
||||
docker login --username AWS --password-stdin "${ECR_REGISTRY}"
|
||||
docker build \
|
||||
--file ./workspace-server/Dockerfile \
|
||||
--build-arg GIT_SHA="${GIT_SHA}" \
|
||||
--label "org.opencontainers.image.source=https://github.com/${REPO}" \
|
||||
--label "org.opencontainers.image.revision=${GIT_SHA}" \
|
||||
--label "org.opencontainers.image.description=Molecule AI platform — pending canary verify" \
|
||||
--tag "${IMAGE_NAME}:${TAG_SHA}" \
|
||||
--tag "${IMAGE_NAME}:${TAG_LATEST}" \
|
||||
.
|
||||
docker push "${IMAGE_NAME}:${TAG_SHA}"
|
||||
docker push "${IMAGE_NAME}:${TAG_LATEST}"
|
||||
|
||||
# Build + push tenant image (Go platform + Next.js canvas in one image).
|
||||
- name: Build & push tenant image to ECR (staging-<sha> + staging-latest)
|
||||
env:
|
||||
TENANT_IMAGE_NAME: ${{ env.TENANT_IMAGE_NAME }}
|
||||
TAG_SHA: staging-${{ steps.tags.outputs.sha }}
|
||||
TAG_LATEST: staging-latest
|
||||
GIT_SHA: ${{ github.sha }}
|
||||
REPO: ${{ github.repository }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
AWS_DEFAULT_REGION: us-east-2
|
||||
run: |
|
||||
set -euo pipefail
|
||||
ECR_REGISTRY="${TENANT_IMAGE_NAME%%/*}"
|
||||
aws ecr get-login-password --region us-east-2 | \
|
||||
docker login --username AWS --password-stdin "${ECR_REGISTRY}"
|
||||
docker build \
|
||||
--file ./workspace-server/Dockerfile.tenant \
|
||||
--build-arg NEXT_PUBLIC_PLATFORM_URL= \
|
||||
--build-arg GIT_SHA="${GIT_SHA}" \
|
||||
--label "org.opencontainers.image.source=https://github.com/${REPO}" \
|
||||
--label "org.opencontainers.image.revision=${GIT_SHA}" \
|
||||
--label "org.opencontainers.image.description=Molecule AI tenant platform + canvas — pending canary verify" \
|
||||
--tag "${TENANT_IMAGE_NAME}:${TAG_SHA}" \
|
||||
--tag "${TENANT_IMAGE_NAME}:${TAG_LATEST}" \
|
||||
.
|
||||
docker push "${TENANT_IMAGE_NAME}:${TAG_SHA}"
|
||||
docker push "${TENANT_IMAGE_NAME}:${TAG_LATEST}"
|
||||
@ -1,191 +0,0 @@
|
||||
name: Secret scan
|
||||
|
||||
# Hard CI gate. Refuses any PR / push whose diff additions contain a
|
||||
# recognisable credential. Defense-in-depth for the #2090-class incident
|
||||
# (2026-04-24): GitHub's hosted Copilot Coding Agent leaked a ghs_*
|
||||
# installation token into tenant-proxy/package.json via `npm init`
|
||||
# slurping the URL from a token-embedded origin remote. We can't fix
|
||||
# upstream's clone hygiene, so we gate here.
|
||||
#
|
||||
# Same regex set as the runtime's bundled pre-commit hook
|
||||
# (molecule-ai-workspace-runtime: molecule_runtime/scripts/pre-commit-checks.sh).
|
||||
# Keep the two sides aligned when adding patterns.
|
||||
#
|
||||
# Ported from .github/workflows/secret-scan.yml so the gate actually
|
||||
# fires on Gitea Actions. Differences from the GitHub version:
|
||||
# - drops `merge_group` event (Gitea has no merge queue)
|
||||
# - drops `workflow_call` (no cross-repo reusable invocation on Gitea)
|
||||
# - SELF path updated to .gitea/workflows/secret-scan.yml
|
||||
# The job name + step name are identical to the GitHub workflow so the
|
||||
# status-check context (`Secret scan / Scan diff for credential-shaped
|
||||
# strings (pull_request)`) matches branch protection on molecule-core/main.
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
push:
|
||||
branches: [main, staging]
|
||||
|
||||
jobs:
|
||||
scan:
|
||||
name: Scan diff for credential-shaped strings
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 2 # need previous commit to diff against on push events
|
||||
|
||||
# For pull_request events the diff base may be many commits behind
|
||||
# HEAD and absent from the shallow clone. Fetch it explicitly.
|
||||
- name: Fetch PR base SHA (pull_request events only)
|
||||
if: github.event_name == 'pull_request'
|
||||
run: git fetch --depth=1 origin ${{ github.event.pull_request.base.sha }}
|
||||
|
||||
- name: Refuse if credential-shaped strings appear in diff additions
|
||||
env:
|
||||
# Plumb event-specific SHAs through env so the script doesn't
|
||||
# need conditional `${{ ... }}` interpolation per event type.
|
||||
# github.event.before/after only exist on push events;
|
||||
# pull_request has pull_request.base.sha / pull_request.head.sha.
|
||||
PR_BASE_SHA: ${{ github.event.pull_request.base.sha }}
|
||||
PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
|
||||
PUSH_BEFORE: ${{ github.event.before }}
|
||||
PUSH_AFTER: ${{ github.event.after }}
|
||||
run: |
|
||||
# Pattern set covers GitHub family (the actual #2090 vector),
|
||||
# Anthropic / OpenAI / Slack / AWS. Anchored on prefixes with low
|
||||
# false-positive rates against agent-generated content. Mirror of
|
||||
# molecule-ai-workspace-runtime/molecule_runtime/scripts/pre-commit-checks.sh
|
||||
# — keep aligned.
|
||||
SECRET_PATTERNS=(
|
||||
'ghp_[A-Za-z0-9]{36,}' # GitHub PAT (classic)
|
||||
'ghs_[A-Za-z0-9]{36,}' # GitHub App installation token
|
||||
'gho_[A-Za-z0-9]{36,}' # GitHub OAuth user-to-server
|
||||
'ghu_[A-Za-z0-9]{36,}' # GitHub OAuth user
|
||||
'ghr_[A-Za-z0-9]{36,}' # GitHub OAuth refresh
|
||||
'github_pat_[A-Za-z0-9_]{82,}' # GitHub fine-grained PAT
|
||||
'sk-ant-[A-Za-z0-9_-]{40,}' # Anthropic API key
|
||||
'sk-proj-[A-Za-z0-9_-]{40,}' # OpenAI project key
|
||||
'sk-svcacct-[A-Za-z0-9_-]{40,}' # OpenAI service-account key
|
||||
'sk-cp-[A-Za-z0-9_-]{60,}' # MiniMax API key (F1088 vector — caught only after the fact)
|
||||
'xox[baprs]-[A-Za-z0-9-]{20,}' # Slack tokens
|
||||
'AKIA[0-9A-Z]{16}' # AWS access key ID
|
||||
'ASIA[0-9A-Z]{16}' # AWS STS temp access key ID
|
||||
)
|
||||
|
||||
# Determine the diff base. Each event type stores its SHAs in
|
||||
# a different place — see the env block above.
|
||||
case "${{ github.event_name }}" in
|
||||
pull_request)
|
||||
BASE="$PR_BASE_SHA"
|
||||
HEAD="$PR_HEAD_SHA"
|
||||
;;
|
||||
*)
|
||||
BASE="$PUSH_BEFORE"
|
||||
HEAD="$PUSH_AFTER"
|
||||
;;
|
||||
esac
|
||||
|
||||
# On push events with shallow clones, BASE may be present in
|
||||
# the event payload but absent from the local object DB
|
||||
# (fetch-depth=2 doesn't always reach the previous commit
|
||||
# across true merges). Try fetching it on demand. If the
|
||||
# fetch fails — e.g. the SHA was force-overwritten — we fall
|
||||
# through to the empty-BASE branch below, which scans the
|
||||
# entire tree as if every file were new. Correct, just slow.
|
||||
if [ -n "$BASE" ] && ! echo "$BASE" | grep -qE '^0+$'; then
|
||||
if ! git cat-file -e "$BASE" 2>/dev/null; then
|
||||
git fetch --depth=1 origin "$BASE" 2>/dev/null || true
|
||||
fi
|
||||
fi
|
||||
|
||||
# Files added or modified in this change.
|
||||
if [ -z "$BASE" ] || echo "$BASE" | grep -qE '^0+$' || ! git cat-file -e "$BASE" 2>/dev/null; then
|
||||
# New branch / no previous SHA / BASE unreachable — check the
|
||||
# entire tree as added content. Slower, but correct on first
|
||||
# push.
|
||||
CHANGED=$(git ls-tree -r --name-only HEAD)
|
||||
DIFF_RANGE=""
|
||||
else
|
||||
CHANGED=$(git diff --name-only --diff-filter=AM "$BASE" "$HEAD")
|
||||
DIFF_RANGE="$BASE $HEAD"
|
||||
fi
|
||||
|
||||
if [ -z "$CHANGED" ]; then
|
||||
echo "No changed files to inspect."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Self-exclude: this workflow file legitimately contains the
|
||||
# pattern strings as regex literals. Without an exclude it would
|
||||
# block its own merge. Both the .github/ original and this
|
||||
# .gitea/ port are excluded so a sync between them stays clean.
|
||||
SELF_GITHUB=".github/workflows/secret-scan.yml"
|
||||
SELF_GITEA=".gitea/workflows/secret-scan.yml"
|
||||
|
||||
OFFENDING=""
|
||||
# `while IFS= read -r` (not `for f in $CHANGED`) so filenames
|
||||
# containing whitespace don't word-split silently — a path
|
||||
# with a space would otherwise produce two iterations on
|
||||
# tokens that aren't real filenames, breaking the
|
||||
# self-exclude + diff lookup.
|
||||
while IFS= read -r f; do
|
||||
[ -z "$f" ] && continue
|
||||
[ "$f" = "$SELF_GITHUB" ] && continue
|
||||
[ "$f" = "$SELF_GITEA" ] && continue
|
||||
if [ -n "$DIFF_RANGE" ]; then
|
||||
ADDED=$(git diff --no-color --unified=0 "$BASE" "$HEAD" -- "$f" 2>/dev/null | grep -E '^\+[^+]' || true)
|
||||
else
|
||||
# No diff range (new branch first push) — scan the full file
|
||||
# contents as if every line were new.
|
||||
ADDED=$(cat "$f" 2>/dev/null || true)
|
||||
fi
|
||||
[ -z "$ADDED" ] && continue
|
||||
for pattern in "${SECRET_PATTERNS[@]}"; do
|
||||
if echo "$ADDED" | grep -qE "$pattern"; then
|
||||
OFFENDING="${OFFENDING}${f} (matched: ${pattern})\n"
|
||||
break
|
||||
fi
|
||||
done
|
||||
done <<< "$CHANGED"
|
||||
|
||||
if [ -n "$OFFENDING" ]; then
|
||||
echo "::error::Credential-shaped strings detected in diff additions:"
|
||||
# `printf '%b' "$OFFENDING"` interprets backslash escapes
|
||||
# (the literal `\n` we appended above becomes a newline)
|
||||
# WITHOUT treating OFFENDING as a format string. Plain
|
||||
# `printf "$OFFENDING"` is a format-string sink: a filename
|
||||
# containing `%` would be interpreted as a conversion
|
||||
# specifier, corrupting the error message (or printing
|
||||
# `%(missing)` artifacts).
|
||||
printf '%b' "$OFFENDING"
|
||||
echo ""
|
||||
echo "The actual matched values are NOT echoed here, deliberately —"
|
||||
echo "round-tripping a leaked credential into CI logs widens the blast"
|
||||
echo "radius (logs are searchable + retained)."
|
||||
echo ""
|
||||
echo "Recovery:"
|
||||
echo " 1. Remove the secret from the file. Replace with an env var"
|
||||
echo " reference (e.g. \${{ secrets.GITHUB_TOKEN }} in workflows,"
|
||||
echo " process.env.X in code)."
|
||||
echo " 2. If the credential was already pushed (this PR's commit"
|
||||
echo " history reaches a public ref), treat it as compromised —"
|
||||
echo " ROTATE it immediately, do not just remove it. The token"
|
||||
echo " remains valid in git history forever and may be in any"
|
||||
echo " log/cache that consumed this branch."
|
||||
echo " 3. Force-push the cleaned commit (or stack a revert) and"
|
||||
echo " re-run CI."
|
||||
echo ""
|
||||
echo "If the match is a false positive (test fixture, docs example,"
|
||||
echo "or this workflow's own regex literals): use a clearly-fake"
|
||||
echo "placeholder like ghs_EXAMPLE_DO_NOT_USE that doesn't satisfy"
|
||||
echo "the length suffix, OR add the file path to the SELF exclude"
|
||||
echo "list in this workflow with a short reason."
|
||||
echo ""
|
||||
echo "Mirror of the regex set lives in the runtime's bundled"
|
||||
echo "pre-commit hook (molecule-ai-workspace-runtime:"
|
||||
echo "molecule_runtime/scripts/pre-commit-checks.sh) — keep aligned."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ No credential-shaped strings in this change."
|
||||
@ -1,100 +0,0 @@
|
||||
# sop-tier-check — canonical Gitea Actions workflow for §SOP-6 enforcement.
|
||||
#
|
||||
# Logic lives in `.gitea/scripts/sop-tier-check.sh` (extracted 2026-05-09
|
||||
# from the previous inline-bash version). The script is the single source
|
||||
# of truth; this workflow file just sets env + invokes it.
|
||||
#
|
||||
# Copy BOTH files (`.gitea/workflows/sop-tier-check.yml` +
|
||||
# `.gitea/scripts/sop-tier-check.sh`) into any repo that wants the
|
||||
# §SOP-6 PR gate enforced. Pair with branch protection on the protected
|
||||
# branch:
|
||||
# required_status_checks: ["sop-tier-check / tier-check (pull_request)"]
|
||||
# required_approving_reviews: 1
|
||||
# approving_review_teams: ["ceo", "managers", "engineers"]
|
||||
#
|
||||
# Tier → required-team expression (internal#189 AND-composition):
|
||||
# tier:low → engineers,managers,ceo (OR: any one suffices)
|
||||
# tier:medium → managers AND engineers AND qa???,security??? (AND: all required)
|
||||
# tier:high → ceo (OR: single team, wired for AND)
|
||||
#
|
||||
# "???" = teams not yet created in Gitea. When qa + security teams are
|
||||
# added, update TIER_EXPR["tier:medium"] in the script to remove the
|
||||
# markers. PRs already in-flight when qa/security are created continue
|
||||
# to work because their authors explicitly requested those reviews.
|
||||
#
|
||||
# Force-merge: Owners-team override remains available out-of-band via
|
||||
# the Gitea merge API; force-merge writes `incident.force_merge` to
|
||||
# `structure_events` per §Persistent structured logging gate (Phase 3).
|
||||
#
|
||||
# Environment variables:
|
||||
# SOP_DEBUG=1 — per-API-call diagnostic lines. Default: off.
|
||||
# SOP_LEGACY_CHECK=1 — revert to OR-gate for this run. Grace window
|
||||
# for PRs in-flight when AND-composition deployed.
|
||||
# Burn-in: remove after 2026-05-17 (7-day window).
|
||||
#
|
||||
# BURN-IN NOTE (internal#189 Phase 1): continue-on-error: true is set on
|
||||
# the tier-check job below. This prevents AND-composition from blocking
|
||||
# PRs during the 7-day burn-in. After 2026-05-17:
|
||||
# 1. Remove `continue-on-error: true` from this job block.
|
||||
# 2. Update this BURN-IN NOTE comment to mark the window closed.
|
||||
|
||||
name: sop-tier-check
|
||||
|
||||
# SECURITY: triggers MUST use `pull_request_target`, not `pull_request`.
|
||||
# `pull_request_target` loads the workflow definition from the BASE
|
||||
# branch (i.e. `main`), not the PR's HEAD. With `pull_request`, anyone
|
||||
# with write access to a feature branch could rewrite this file in
|
||||
# their PR to dump SOP_TIER_CHECK_TOKEN (org-read scope) to logs and
|
||||
# exfiltrate it. Verified 2026-05-09 against Gitea 1.22.6 —
|
||||
# `pull_request_target` (added in Gitea 1.21 via go-gitea/gitea#25229)
|
||||
# is the documented mitigation.
|
||||
#
|
||||
# This workflow does NOT call `actions/checkout` of PR HEAD code, so no
|
||||
# untrusted code is ever executed in the runner — we only HTTP-call the
|
||||
# Gitea API. If a future change adds a checkout step, it MUST pin to
|
||||
# `${{ github.event.pull_request.base.sha }}` (NOT `head.sha`) to keep
|
||||
# the trust boundary.
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened, edited, synchronize, reopened, labeled, unlabeled]
|
||||
pull_request_review:
|
||||
types: [submitted, dismissed, edited]
|
||||
|
||||
jobs:
|
||||
tier-check:
|
||||
runs-on: ubuntu-latest
|
||||
# BURN-IN: continue-on-error prevents AND-composition from blocking
|
||||
# PRs during the 7-day window. Remove after 2026-05-17 (internal#189).
|
||||
continue-on-error: true
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: read
|
||||
steps:
|
||||
- name: Check out base branch (for the script)
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
# Pin to base.sha — pull_request_target's protection only
|
||||
# works if we never check out PR HEAD. Same SHA the workflow
|
||||
# itself was loaded from.
|
||||
ref: ${{ github.event.pull_request.base.sha }}
|
||||
- name: Verify tier label + reviewer team membership
|
||||
env:
|
||||
# SOP_TIER_CHECK_TOKEN is the org-level secret for the
|
||||
# sop-tier-bot PAT (read:organization,read:user,read:issue,
|
||||
# read:repository). Stored at the org level
|
||||
# (/api/v1/orgs/molecule-ai/actions/secrets) so per-repo
|
||||
# configuration is unnecessary — every repo in the org
|
||||
# picks it up automatically.
|
||||
# Falls back to GITHUB_TOKEN with a clear error if missing.
|
||||
GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}
|
||||
GITEA_HOST: git.moleculesai.app
|
||||
REPO: ${{ github.repository }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
|
||||
# Set to '1' for diagnostic per-API-call output. Off by default
|
||||
# so production logs aren't noisy.
|
||||
SOP_DEBUG: '0'
|
||||
# BURN-IN: set to '1' for PRs in-flight at AND-composition deploy
|
||||
# time to use the legacy OR-gate. Remove after 2026-05-17.
|
||||
SOP_LEGACY_CHECK: '0'
|
||||
run: bash .gitea/scripts/sop-tier-check.sh
|
||||
2
.github/scripts/lint_secret_pattern_drift.py
vendored
2
.github/scripts/lint_secret_pattern_drift.py
vendored
@ -37,7 +37,7 @@ CANONICAL_FILE = Path(".github/workflows/secret-scan.yml")
|
||||
CONSUMERS: list[tuple[str, str]] = [
|
||||
(
|
||||
"molecule-ai-workspace-runtime/molecule_runtime/scripts/pre-commit-checks.sh",
|
||||
"https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-runtime/raw/branch/main/molecule_runtime/scripts/pre-commit-checks.sh",
|
||||
"https://raw.githubusercontent.com/Molecule-AI/molecule-ai-workspace-runtime/main/molecule_runtime/scripts/pre-commit-checks.sh",
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
429
.github/workflows/auto-promote-on-e2e.yml
vendored
Normal file
429
.github/workflows/auto-promote-on-e2e.yml
vendored
Normal file
@ -0,0 +1,429 @@
|
||||
name: Auto-promote :latest after main image build
|
||||
|
||||
# Retags `ghcr.io/molecule-ai/{platform,platform-tenant}:staging-<sha>`
|
||||
# → `:latest` after either the image build or E2E completes on a `main`
|
||||
# push, gated on E2E Staging SaaS not being red for that SHA.
|
||||
#
|
||||
# Why two triggers:
|
||||
#
|
||||
# `publish-workspace-server-image` and `e2e-staging-saas` are both
|
||||
# paths-filtered, but with DIFFERENT path sets:
|
||||
#
|
||||
# publish-workspace-server-image:
|
||||
# workspace-server/**, canvas/**, manifest.json
|
||||
#
|
||||
# e2e-staging-saas (full lifecycle):
|
||||
# workspace-server/internal/handlers/{registry,workspace_provision,
|
||||
# a2a_proxy}.go, workspace-server/internal/middleware/**,
|
||||
# workspace-server/internal/provisioner/**, tests/e2e/test_staging_full_saas.sh
|
||||
#
|
||||
# The E2E set is a strict SUBSET of the publish set. So:
|
||||
# - canvas/** changes → publish fires, E2E does not
|
||||
# - workspace-server/cmd/** changes → publish fires, E2E does not
|
||||
# - workspace-server/internal/sweep/** → publish fires, E2E does not
|
||||
#
|
||||
# The previous version triggered ONLY on E2E completion, which meant
|
||||
# non-E2E-path changes (canvas, cmd, sweep, etc.) rebuilt the image
|
||||
# but never advanced `:latest`. Result: as of 2026-04-28 this workflow
|
||||
# had run zero times since merge despite eight main pushes — `:latest`
|
||||
# was ~7 hours / 9 PRs behind main with no human realising. See
|
||||
# `molecule-core` Slack discussion 2026-04-28.
|
||||
#
|
||||
# Adding `publish-workspace-server-image` as a second trigger closes
|
||||
# the gap: any image rebuild on main eligibly advances `:latest`.
|
||||
#
|
||||
# Why E2E remains a kill-switch (not the trigger):
|
||||
#
|
||||
# When E2E DID run for this SHA and ended red, we abort — `:latest`
|
||||
# stays on the prior known-good digest. When E2E didn't run (paths
|
||||
# filtered out), we proceed: pre-merge gates already validated this
|
||||
# SHA on staging via auto-promote-staging requiring CI + E2E Canvas +
|
||||
# E2E API + CodeQL all green. Image content for non-E2E-paths
|
||||
# (canvas, cmd, sweep) is exercised by those staging gates.
|
||||
#
|
||||
# Why `main` only:
|
||||
#
|
||||
# `:latest` is what prod tenants pull. We only want SHAs that have
|
||||
# reached main (via auto-promote-staging) to advance `:latest`.
|
||||
# Triggering on staging would let a staging-only revert advance
|
||||
# `:latest` to a SHA that never reaches main, breaking the "production
|
||||
# runs what's on main" invariant.
|
||||
#
|
||||
# Idempotency:
|
||||
#
|
||||
# When a SHA touches paths that match BOTH publish and E2E, both
|
||||
# workflows fire and complete. Both trigger this workflow on
|
||||
# completion → two runs race. Both retag `:staging-<sha>` →
|
||||
# `:latest`. crane tag is idempotent (re-tagging the same digest is a
|
||||
# no-op), so the second run is harmless. concurrency group serializes
|
||||
# them anyway.
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows:
|
||||
- 'E2E Staging SaaS (full lifecycle)'
|
||||
- 'publish-workspace-server-image'
|
||||
types: [completed]
|
||||
branches: [main]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
sha:
|
||||
description: 'Short sha to promote (override; defaults to upstream workflow_run head_sha)'
|
||||
required: false
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
concurrency:
|
||||
# Serialize promotes per-SHA so the publish+E2E both-fired race lands
|
||||
# cleanly. Different SHAs can promote in parallel.
|
||||
group: auto-promote-latest-${{ github.event.workflow_run.head_sha || github.event.inputs.sha || github.sha }}
|
||||
cancel-in-progress: false
|
||||
|
||||
env:
|
||||
IMAGE_NAME: ghcr.io/molecule-ai/platform
|
||||
TENANT_IMAGE_NAME: ghcr.io/molecule-ai/platform-tenant
|
||||
|
||||
jobs:
|
||||
promote:
|
||||
# Proceed if upstream succeeded OR manual dispatch. Upstream-failure
|
||||
# paths are filtered here; the E2E-was-red kill-switch lives in the
|
||||
# gate-check step below (covers the case where upstream is publish
|
||||
# success but E2E for the same SHA failed).
|
||||
if: |
|
||||
github.event_name == 'workflow_dispatch' ||
|
||||
(github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'success')
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Compute short sha
|
||||
id: sha
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if [ -n "${{ github.event.inputs.sha }}" ]; then
|
||||
FULL="${{ github.event.inputs.sha }}"
|
||||
else
|
||||
FULL="${{ github.event.workflow_run.head_sha }}"
|
||||
fi
|
||||
echo "short=${FULL:0:7}" >> "$GITHUB_OUTPUT"
|
||||
echo "full=${FULL}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Gate — E2E Staging SaaS state for this SHA
|
||||
# When upstream IS E2E success, we know it's green (filtered by
|
||||
# the job-level `if` already). When upstream is publish, look up
|
||||
# E2E state for the same SHA. Four buckets:
|
||||
#
|
||||
# - completed/success: E2E confirmed safe → proceed
|
||||
# - completed/failure|cancelled|timed_out: E2E found a
|
||||
# regression → ABORT (exit 1), `:latest` stays put
|
||||
# - in_progress|queued|requested: E2E is RACING with publish
|
||||
# for a runtime-touching SHA. publish typically completes
|
||||
# ~5-10min before E2E (~10-15min). If we promote on the
|
||||
# publish signal here, a later E2E failure can't roll back
|
||||
# `:latest` — it'd already be wrongly advanced. So we DEFER:
|
||||
# skip subsequent steps (proceed=false) and let E2E's own
|
||||
# completion event re-fire this workflow, which then takes
|
||||
# the upstream-is-E2E path. exit 0 so the run shows as
|
||||
# success rather than a noisy fake-failure.
|
||||
# - none/none: E2E was paths-filtered out for this SHA (the
|
||||
# change touched canvas/cmd/sweep/etc. — paths covered by
|
||||
# publish but not by E2E). pre-merge gates on staging
|
||||
# already validated this SHA → proceed.
|
||||
#
|
||||
# Manual dispatch skips this check — operator override.
|
||||
id: gate
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
REPO: ${{ github.repository }}
|
||||
SHA: ${{ steps.sha.outputs.full }}
|
||||
UPSTREAM_NAME: ${{ github.event.workflow_run.name }}
|
||||
EVENT_NAME: ${{ github.event_name }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
|
||||
echo "proceed=true" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::Manual dispatch — skipping E2E gate (operator override)"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$UPSTREAM_NAME" = "E2E Staging SaaS (full lifecycle)" ]; then
|
||||
echo "proceed=true" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::Upstream is E2E itself (success per job-level if) — gate trivially satisfied"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Upstream is publish-workspace-server-image. Check E2E state.
|
||||
# The jq filter must defend against TWO empty cases that gh
|
||||
# CLI emits indistinguishably:
|
||||
# 1. gh exits non-zero (network blip, auth issue) → handled
|
||||
# by the `|| echo "none/none"` fallback below.
|
||||
# 2. gh exits zero but returns `[]` (no E2E run on this
|
||||
# main SHA — the common case for canvas-only / cmd-only
|
||||
# / sweep-only changes whose paths don't trigger E2E).
|
||||
# Without `(.[0] // {})`, jq sees `null` and emits
|
||||
# "null/none" — which the case statement below has no
|
||||
# branch for, so it falls into *) → exit 1.
|
||||
# Surfaced 2026-04-30 the first time the App-token chain
|
||||
# (#2389) actually fired auto-promote-on-e2e from a publish
|
||||
# upstream — every prior run was E2E-upstream which
|
||||
# short-circuits before this gate.
|
||||
RESULT=$(gh run list \
|
||||
--repo "$REPO" \
|
||||
--workflow e2e-staging-saas.yml \
|
||||
--branch main \
|
||||
--commit "$SHA" \
|
||||
--limit 1 \
|
||||
--json status,conclusion \
|
||||
--jq '(.[0] // {}) | "\(.status // "none")/\(.conclusion // "none")"' \
|
||||
2>/dev/null || echo "none/none")
|
||||
|
||||
echo "E2E Staging SaaS for ${SHA:0:7}: $RESULT"
|
||||
|
||||
case "$RESULT" in
|
||||
completed/success)
|
||||
echo "proceed=true" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::E2E green for this SHA — proceeding with promote"
|
||||
;;
|
||||
completed/failure|completed/timed_out)
|
||||
echo "proceed=false" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ❌ Auto-promote aborted — E2E Staging SaaS failed"
|
||||
echo
|
||||
echo "E2E Staging SaaS for \`${SHA:0:7}\`: \`$RESULT\`"
|
||||
echo "\`:latest\` stays on the prior known-good digest."
|
||||
echo
|
||||
echo "If the failure was a flake, manually dispatch this workflow with the same sha to override."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
exit 1
|
||||
;;
|
||||
completed/cancelled)
|
||||
# cancelled ≠ failure. Per-SHA concurrency cancels older E2E
|
||||
# runs when a newer push lands (memory:
|
||||
# feedback_concurrency_group_per_sha) — the newer SHA will
|
||||
# have its own E2E + promote chain. Treat the same as
|
||||
# in_progress: defer without aborting, let the next E2E run
|
||||
# promote when it lands.
|
||||
#
|
||||
# Caught 2026-05-05 02:03 on sha 31f9a5e — auto-promote
|
||||
# blocked the whole chain because this case fell through to
|
||||
# exit 1 instead of clean defer.
|
||||
echo "proceed=false" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ⏭ Auto-promote deferred — E2E Staging SaaS was cancelled"
|
||||
echo
|
||||
echo "E2E Staging SaaS for \`${SHA:0:7}\`: \`$RESULT\`"
|
||||
echo "Likely per-SHA concurrency (newer push superseded this E2E run)."
|
||||
echo "The newer SHA's E2E will fire its own promote when it lands."
|
||||
echo "If you need this specific SHA promoted, manually dispatch."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
;;
|
||||
in_progress/*|queued/*|requested/*|waiting/*|pending/*)
|
||||
echo "proceed=false" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ⏳ Auto-promote deferred — E2E Staging SaaS still running"
|
||||
echo
|
||||
echo "Publish completed before E2E for \`${SHA:0:7}\` (state: \`$RESULT\`)."
|
||||
echo "Skipping retag here — E2E's own completion event will re-fire this workflow."
|
||||
echo "If E2E ends green, that run promotes \`:latest\`. If red, it aborts."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
;;
|
||||
none/none)
|
||||
echo "proceed=true" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::E2E paths-filtered out for this SHA — pre-merge staging gates carry"
|
||||
;;
|
||||
*)
|
||||
echo "proceed=false" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ❓ Auto-promote aborted — unexpected E2E state"
|
||||
echo
|
||||
echo "E2E Staging SaaS for \`${SHA:0:7}\`: \`$RESULT\` (unhandled)"
|
||||
echo "Manual investigation needed; re-dispatch with the same sha once resolved."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
- if: steps.gate.outputs.proceed == 'true'
|
||||
uses: imjasonh/setup-crane@6da1ae018866400525525ce74ff892880c099987 # v0.5
|
||||
|
||||
- name: GHCR login
|
||||
if: steps.gate.outputs.proceed == 'true'
|
||||
run: |
|
||||
echo "${{ secrets.GITHUB_TOKEN }}" | \
|
||||
crane auth login ghcr.io -u "${{ github.actor }}" --password-stdin
|
||||
|
||||
- name: Verify :staging-<sha> exists for both images
|
||||
# Better to fail fast with a clear message than to half-tag
|
||||
# (platform retagged but platform-tenant missing → tenants pull
|
||||
# a stale image).
|
||||
if: steps.gate.outputs.proceed == 'true'
|
||||
run: |
|
||||
set -euo pipefail
|
||||
for img in "${IMAGE_NAME}" "${TENANT_IMAGE_NAME}"; do
|
||||
tag="${img}:staging-${{ steps.sha.outputs.short }}"
|
||||
if ! crane manifest "$tag" >/dev/null 2>&1; then
|
||||
echo "::error::Missing tag: $tag"
|
||||
echo "::error::publish-workspace-server-image must complete on this SHA before auto-promote can retag :latest."
|
||||
exit 1
|
||||
fi
|
||||
echo " ok: $tag exists"
|
||||
done
|
||||
|
||||
- name: Ancestry check — refuse to promote :latest backwards
|
||||
# #2244: workflow_run completions arrive in arbitrary order. If
|
||||
# SHA-A and SHA-B both reach main within ~10 min and SHA-B's E2E
|
||||
# completes before SHA-A's, this workflow can fire for SHA-A
|
||||
# AFTER it already promoted SHA-B → :latest goes backwards. The
|
||||
# orphan-reconciler "next run corrects it" doesn't apply: there's
|
||||
# no auto-corrective re-promote, :latest stays wrong until the
|
||||
# next main push lands.
|
||||
#
|
||||
# Detection: read current :latest's `org.opencontainers.image.revision`
|
||||
# label (set by publish-workspace-server-image.yml at build time)
|
||||
# and ask the GitHub compare API whether the candidate SHA is
|
||||
# ahead-of / identical-to / behind / diverged-from current.
|
||||
# Hard-fail on `behind` and `diverged` per the approved design —
|
||||
# silent-bypass is the class we're moving away from. Workflow
|
||||
# goes red, oncall sees it, operator decides how to recover
|
||||
# (manual dispatch with the right SHA, force-promote, etc.).
|
||||
#
|
||||
# Manual dispatch skips this check — operator override semantics
|
||||
# match the gate-check step above.
|
||||
#
|
||||
# Backward-compat: when current :latest carries no revision
|
||||
# label (legacy image pre-publish-with-label), skip-with-warning.
|
||||
# All :latest images on main are post-label as of 2026-04-29, so
|
||||
# this branch will be dead within 90 days; remove then.
|
||||
if: steps.gate.outputs.proceed == 'true' && github.event_name != 'workflow_dispatch'
|
||||
id: ancestry
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
REPO: ${{ github.repository }}
|
||||
TARGET_SHA: ${{ steps.sha.outputs.full }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
# Read the current :latest config and pull the revision label.
|
||||
# `crane config` returns the OCI image config blob (not the manifest);
|
||||
# labels live under `.config.Labels`. `// empty` makes jq return ""
|
||||
# rather than the literal "null" so the test below works.
|
||||
CURRENT_REVISION=$(crane config "${IMAGE_NAME}:latest" 2>/dev/null \
|
||||
| jq -r '.config.Labels["org.opencontainers.image.revision"] // empty' \
|
||||
|| true)
|
||||
|
||||
if [ -z "$CURRENT_REVISION" ]; then
|
||||
echo "decision=skip-no-label" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ⚠ Ancestry check skipped — current :latest has no revision label"
|
||||
echo
|
||||
echo "Likely a legacy image built before \`org.opencontainers.image.revision\` was set."
|
||||
echo "Falling through to retag. After all \`:latest\` images are post-label (TODO 90 days), this branch is dead and should be removed."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
echo "::warning::Current :latest carries no revision label — skipping ancestry check (legacy image)"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$CURRENT_REVISION" = "$TARGET_SHA" ]; then
|
||||
echo "decision=identical" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice:::latest already at ${TARGET_SHA:0:7} — retag will be a no-op"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Ask GitHub which side of the merge graph TARGET_SHA sits on
|
||||
# relative to CURRENT_REVISION. Returns one of: ahead | identical
|
||||
# | behind | diverged. Network or auth errors collapse to "error"
|
||||
# via the explicit fallback so the case below always matches.
|
||||
STATUS=$(gh api \
|
||||
"repos/${REPO}/compare/${CURRENT_REVISION}...${TARGET_SHA}" \
|
||||
--jq '.status' 2>/dev/null || echo "error")
|
||||
|
||||
echo "ancestry compare ${CURRENT_REVISION:0:7} → ${TARGET_SHA:0:7}: $STATUS"
|
||||
|
||||
case "$STATUS" in
|
||||
ahead)
|
||||
echo "decision=ahead" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::Target ${TARGET_SHA:0:7} is ahead of current :latest (${CURRENT_REVISION:0:7}) — proceeding with retag"
|
||||
;;
|
||||
identical)
|
||||
echo "decision=identical" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::Target identical to :latest — retag will be a no-op"
|
||||
;;
|
||||
behind)
|
||||
echo "decision=behind" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ❌ Auto-promote refused — target is BEHIND current :latest"
|
||||
echo
|
||||
echo "| Field | Value |"
|
||||
echo "|---|---|"
|
||||
echo "| Target SHA | \`$TARGET_SHA\` |"
|
||||
echo "| Current :latest revision | \`$CURRENT_REVISION\` |"
|
||||
echo "| GitHub compare status | \`behind\` |"
|
||||
echo
|
||||
echo "This guard catches the workflow_run-completion-order race (#2244):"
|
||||
echo "two rapid main pushes whose E2Es complete out-of-order can otherwise"
|
||||
echo "promote \`:latest\` backwards. \`:latest\` stays on \`${CURRENT_REVISION:0:7}\`."
|
||||
echo
|
||||
echo "**Recovery:** if this is a legitimate revert that should land on \`:latest\`,"
|
||||
echo "manually dispatch this workflow with the target sha as input — the manual-dispatch"
|
||||
echo "path skips the ancestry check (operator override)."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
exit 1
|
||||
;;
|
||||
diverged)
|
||||
echo "decision=diverged" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ❓ Auto-promote refused — history diverged"
|
||||
echo
|
||||
echo "| Field | Value |"
|
||||
echo "|---|---|"
|
||||
echo "| Target SHA | \`$TARGET_SHA\` |"
|
||||
echo "| Current :latest revision | \`$CURRENT_REVISION\` |"
|
||||
echo "| GitHub compare status | \`diverged\` |"
|
||||
echo
|
||||
echo "Likely cause: force-push rewrote main's history, leaving the previous"
|
||||
echo "\`:latest\` revision orphaned. Needs human review before \`:latest\` advances."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
exit 1
|
||||
;;
|
||||
error|*)
|
||||
echo "decision=error" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ❌ Auto-promote aborted — ancestry-check API error"
|
||||
echo
|
||||
echo "\`gh api repos/${REPO}/compare/${CURRENT_REVISION}...${TARGET_SHA}\` returned unexpected status: \`$STATUS\`"
|
||||
echo
|
||||
echo "Manual dispatch with the target sha bypasses this check."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
- name: Retag platform :staging-<sha> → :latest
|
||||
if: steps.gate.outputs.proceed == 'true'
|
||||
run: |
|
||||
crane tag "${IMAGE_NAME}:staging-${{ steps.sha.outputs.short }}" latest
|
||||
|
||||
- name: Retag tenant :staging-<sha> → :latest
|
||||
if: steps.gate.outputs.proceed == 'true'
|
||||
run: |
|
||||
crane tag "${TENANT_IMAGE_NAME}:staging-${{ steps.sha.outputs.short }}" latest
|
||||
|
||||
- name: Summary
|
||||
if: steps.gate.outputs.proceed == 'true'
|
||||
run: |
|
||||
{
|
||||
echo "## :latest promoted to ${{ steps.sha.outputs.short }}"
|
||||
echo
|
||||
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||
echo "- Trigger: manual dispatch"
|
||||
else
|
||||
echo "- Upstream: \`${{ github.event.workflow_run.name }}\` ([run](${{ github.event.workflow_run.html_url }}))"
|
||||
fi
|
||||
echo "- platform:staging-${{ steps.sha.outputs.short }} → :latest"
|
||||
echo "- platform-tenant:staging-${{ steps.sha.outputs.short }} → :latest"
|
||||
echo
|
||||
echo "Tenant fleet auto-pulls within 5 min via IMAGE_AUTO_REFRESH=true."
|
||||
echo "Force immediate fanout: dispatch redeploy-tenants-on-main.yml."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
434
.github/workflows/auto-promote-staging.yml
vendored
Normal file
434
.github/workflows/auto-promote-staging.yml
vendored
Normal file
@ -0,0 +1,434 @@
|
||||
name: Auto-promote staging → main
|
||||
|
||||
# Fires after any of the staging-branch quality gates complete. When ALL
|
||||
# required gates are green on the same staging SHA, opens (or re-uses)
|
||||
# a PR `staging → main` and enables auto-merge so the merge queue lands
|
||||
# it. Closes the gap that historically let features sit on staging for
|
||||
# weeks waiting for a bulk promotion PR (see molecule-core#1496 for the
|
||||
# 1172-commit example).
|
||||
#
|
||||
# 2026-04-28 rewrite (PR #142): the previous version did a direct
|
||||
# `git merge --ff-only origin staging && git push origin main`. That
|
||||
# breaks against main's branch-protection ruleset, which requires
|
||||
# status checks "set by the expected GitHub apps" — direct pushes
|
||||
# can't satisfy that condition (only PR merges through the queue can).
|
||||
# The workflow was failing every tick with:
|
||||
# remote: error: GH006: Protected branch update failed for refs/heads/main.
|
||||
# remote: - Required status checks ... were not set by the expected GitHub apps.
|
||||
# Fix: mirror the PR-based pattern from auto-sync-main-to-staging.yml
|
||||
# (the reverse-direction sync, fixed in #2234 for the same reason).
|
||||
# Both directions now use the same merge-queue path that humans use,
|
||||
# no special-case bypass.
|
||||
#
|
||||
# Safety model:
|
||||
# - Runs ONLY on workflow_run events for the staging branch.
|
||||
# - Requires EVERY named gate workflow to have the same head_sha and
|
||||
# all be `conclusion == success`. If any of them is red, skipped,
|
||||
# cancelled, or pending, we abort (stay on the current main).
|
||||
# - The PR base=main head=staging path lets GitHub itself enforce
|
||||
# branch protection. If main has diverged from staging or required
|
||||
# checks aren't satisfied, the merge queue declines the PR — no
|
||||
# need for a manual ff-only ancestry check here.
|
||||
# - Loop safety: the auto-sync-main-to-staging workflow fires when
|
||||
# main lands the auto-promote PR, but its merge into staging is by
|
||||
# GITHUB_TOKEN which doesn't trigger downstream workflow_run events
|
||||
# (GitHub Actions safety). So this workflow doesn't re-fire from
|
||||
# its own promote landing.
|
||||
#
|
||||
# Toggle via repo variable AUTO_PROMOTE_ENABLED (true/unset). When
|
||||
# unset, the workflow logs what it would have done but doesn't open
|
||||
# the PR — useful for dry-running the gate logic without surfacing
|
||||
# a noisy PR while staging CI is still flaky.
|
||||
#
|
||||
# **One-time repo setting (load-bearing):** this workflow opens the
|
||||
# staging→main PR via `gh pr create` using the default GITHUB_TOKEN.
|
||||
# Since GitHub's 2022 default change, that token cannot create or
|
||||
# approve PRs unless the repo opts in. The toggle is at:
|
||||
#
|
||||
# Settings → Actions → General → Workflow permissions
|
||||
# → ✅ Allow GitHub Actions to create and approve pull requests
|
||||
#
|
||||
# Without it, every workflow_run fails with:
|
||||
#
|
||||
# pull request create failed: GraphQL: GitHub Actions is not
|
||||
# permitted to create or approve pull requests (createPullRequest)
|
||||
#
|
||||
# Observed 2026-04-29 01:43 UTC blocking promotion of fcd87b9 (PRs
|
||||
# #2248 + #2249); manually bridged via PR #2252. Re-check this
|
||||
# setting if auto-promote starts failing with createPullRequest
|
||||
# errors after a repo or org admin change.
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows:
|
||||
- CI
|
||||
- E2E Staging Canvas (Playwright)
|
||||
- E2E API Smoke Test
|
||||
- CodeQL
|
||||
types: [completed]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
force:
|
||||
description: "Force promote even when AUTO_PROMOTE_ENABLED is unset (manual override)"
|
||||
required: false
|
||||
default: "false"
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
# actions: write is needed by the post-merge dispatch tail step
|
||||
# (#2358 / #2357) — `gh workflow run publish-workspace-server-image.yml`
|
||||
# POSTs to /actions/workflows/.../dispatches which requires this scope.
|
||||
# Without it the call 403s and the publish/canary/redeploy chain still
|
||||
# doesn't run on staging→main promotions, undoing #2358.
|
||||
actions: write
|
||||
|
||||
# Serialize auto-promote runs. Multiple staging gate completions can land
|
||||
# in quick succession (CI + E2E + CodeQL all finish within seconds of
|
||||
# each other on a green PR) — without this, two parallel runs both:
|
||||
# 1. Open / re-use the same promote PR.
|
||||
# 2. Both call `gh pr merge --auto` (idempotent — fine).
|
||||
# 3. Both poll for the same mergedAt and both `gh workflow run` publish
|
||||
# → 2× redundant publish builds racing for the same `:staging-latest`
|
||||
# retag, and 2× canary-verify chains.
|
||||
# cancel-in-progress: false because we don't want a brand-new run to kill
|
||||
# a polling-tail that's about to dispatch — the polling tail's 30 min cap
|
||||
# is the right backstop, not workflow-level cancel.
|
||||
concurrency:
|
||||
group: auto-promote-staging
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
check-all-gates-green:
|
||||
# Only consider staging pushes. PRs into staging don't promote.
|
||||
if: >
|
||||
(github.event_name == 'workflow_run' &&
|
||||
github.event.workflow_run.head_branch == 'staging' &&
|
||||
github.event.workflow_run.event == 'push')
|
||||
|| github.event_name == 'workflow_dispatch'
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
all_green: ${{ steps.gates.outputs.all_green }}
|
||||
head_sha: ${{ steps.gates.outputs.head_sha }}
|
||||
steps:
|
||||
# Skip empty-tree promotes (the perpetual auto-promote↔auto-sync cycle
|
||||
# observed 2026-05-03). Sequence: auto-promote merges via the staging
|
||||
# merge-queue's MERGE strategy, creating a merge commit on main that
|
||||
# staging doesn't have. auto-sync then merges main back into staging
|
||||
# via another merge commit (the queue's MERGE strategy applies on
|
||||
# the staging side too, even when the workflow's local FF would
|
||||
# have sufficed). Now staging has a new merge-commit SHA whose
|
||||
# tree == main's tree — but auto-promote sees "staging ahead of
|
||||
# main by 1" and opens YET another empty promote PR. Each round
|
||||
# costs ~30-40 min wallclock, ~2 manual approvals, and burns a
|
||||
# full CodeQL Go run (~15 min). Without this guard the cycle
|
||||
# repeats indefinitely.
|
||||
#
|
||||
# Long-term fix is to switch the merge_queue ruleset's
|
||||
# `merge_method` away from MERGE so FF-able PRs land cleanly,
|
||||
# but that's a broader change affecting every staging PR's
|
||||
# commit shape. This guard is the one-line surgical fix that
|
||||
# breaks the cycle without touching merge-queue config.
|
||||
#
|
||||
# Fail-open: if `git diff` errors for any reason, fall through
|
||||
# to the gate check (preserve existing behavior). Only skip
|
||||
# when the diff is DEFINITIVELY empty.
|
||||
- name: Checkout for tree-diff check
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: staging
|
||||
- name: Skip if staging tree == main tree (perpetual-cycle break)
|
||||
id: tree-diff
|
||||
env:
|
||||
HEAD_SHA: ${{ github.event.workflow_run.head_sha || github.sha }}
|
||||
run: |
|
||||
set -eu
|
||||
git fetch origin main --depth=50 || { echo "::warning::git fetch main failed — proceeding (fail-open)"; exit 0; }
|
||||
# Compare staging tip's tree against main's tree. `git diff
|
||||
# --quiet` exits 0 if no differences, 1 if there are.
|
||||
if git diff --quiet origin/main "$HEAD_SHA" -- 2>/dev/null; then
|
||||
{
|
||||
echo "## ⏭ Skipped — no code to promote"
|
||||
echo
|
||||
echo "staging tip (\`${HEAD_SHA:0:8}\`) and \`main\` have identical trees."
|
||||
echo "This is the auto-promote↔auto-sync merge-commit cycle: staging has a"
|
||||
echo "new SHA (a sync-back merge commit) but the underlying file tree is"
|
||||
echo "already on main, so there's no real code to ship."
|
||||
echo
|
||||
echo "Skipping to avoid opening an empty promote PR. Cycle terminates here."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
echo "::notice::auto-promote: staging tree == main tree — no code to promote, skipping"
|
||||
echo "skip=true" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "skip=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
- name: Check all required gates on this SHA
|
||||
if: steps.tree-diff.outputs.skip != 'true'
|
||||
id: gates
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
HEAD_SHA: ${{ github.event.workflow_run.head_sha || github.sha }}
|
||||
REPO: ${{ github.repository }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
# Required gate workflow files. Use file paths (relative to
|
||||
# .github/workflows/) rather than display names because:
|
||||
#
|
||||
# 1. `gh run list --workflow=<name>` is ambiguous when two
|
||||
# workflows have the same `name:` — observed 2026-04-28
|
||||
# with "CodeQL" matching both `codeql.yml` (explicit) and
|
||||
# GitHub's UI-configured Code-quality default setup
|
||||
# (internal "codeql"). gh CLI returns "could not resolve
|
||||
# to a unique workflow" → empty result → gate evaluated
|
||||
# as missing/none → auto-promote dead-locked despite all
|
||||
# checks actually passing.
|
||||
#
|
||||
# 2. File paths are the unique identifier for workflows;
|
||||
# `name:` is just a display string and can collide.
|
||||
#
|
||||
# When adding/removing a gate, update this list AND the
|
||||
# branch-protection required-checks list (which uses check-run
|
||||
# display names, not workflow names; the two are decoupled and
|
||||
# should be kept in sync manually).
|
||||
GATES=(
|
||||
"ci.yml"
|
||||
"e2e-staging-canvas.yml"
|
||||
"e2e-api.yml"
|
||||
"codeql.yml"
|
||||
)
|
||||
|
||||
echo "head_sha=${HEAD_SHA}" >> "$GITHUB_OUTPUT"
|
||||
echo "Checking gates on SHA ${HEAD_SHA}"
|
||||
|
||||
ALL_GREEN=true
|
||||
for gate in "${GATES[@]}"; do
|
||||
# Query the most recent run of this workflow on this SHA.
|
||||
# event=push to avoid picking up PR runs. branch=staging to
|
||||
# guard against someone dispatching the gate on a non-staging
|
||||
# branch at the same SHA.
|
||||
RESULT=$(gh run list \
|
||||
--repo "$REPO" \
|
||||
--workflow "$gate" \
|
||||
--branch staging \
|
||||
--event push \
|
||||
--commit "$HEAD_SHA" \
|
||||
--limit 1 \
|
||||
--json status,conclusion \
|
||||
--jq '.[0] | "\(.status)/\(.conclusion // "none")"' \
|
||||
2>/dev/null || echo "missing/none")
|
||||
|
||||
echo " $gate → $RESULT"
|
||||
|
||||
# Only completed/success counts. completed/failure or
|
||||
# in_progress/anything or no record at all = abort.
|
||||
if [ "$RESULT" != "completed/success" ]; then
|
||||
ALL_GREEN=false
|
||||
fi
|
||||
done
|
||||
|
||||
echo "all_green=${ALL_GREEN}" >> "$GITHUB_OUTPUT"
|
||||
if [ "$ALL_GREEN" != "true" ]; then
|
||||
echo "::notice::auto-promote: not all gates are green on ${HEAD_SHA} — staying on current main"
|
||||
fi
|
||||
|
||||
promote:
|
||||
needs: check-all-gates-green
|
||||
if: needs.check-all-gates-green.outputs.all_green == 'true'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check rollout gate
|
||||
env:
|
||||
AUTO_PROMOTE_ENABLED: ${{ vars.AUTO_PROMOTE_ENABLED }}
|
||||
FORCE_INPUT: ${{ github.event.inputs.force }}
|
||||
run: |
|
||||
set -eu
|
||||
# Repo variable AUTO_PROMOTE_ENABLED=true flips this on. While
|
||||
# it's unset, the workflow dry-runs (logs what it would have
|
||||
# done) but doesn't open the promote PR. Set the variable in
|
||||
# Settings → Secrets and variables → Actions → Variables.
|
||||
if [ "${AUTO_PROMOTE_ENABLED:-}" != "true" ] && [ "${FORCE_INPUT:-false}" != "true" ]; then
|
||||
{
|
||||
echo "## ⏸ Auto-promote disabled"
|
||||
echo
|
||||
echo "Repo variable \`AUTO_PROMOTE_ENABLED\` is not set to \`true\`."
|
||||
echo "All gates are green on staging; would have opened a promote PR to \`main\`."
|
||||
echo
|
||||
echo "To enable: Settings → Secrets and variables → Actions → Variables → \`AUTO_PROMOTE_ENABLED=true\`."
|
||||
echo "To test once manually: workflow_dispatch with \`force=true\`."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
echo "::notice::auto-promote disabled — dry run only"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Mint the App token BEFORE the promote-PR step so the auto-merge
|
||||
# call can use it. GITHUB_TOKEN-initiated merges suppress the
|
||||
# downstream `push` event on main, breaking the
|
||||
# publish-workspace-server-image → canary-verify → redeploy-tenants
|
||||
# chain (issue #2357). Using the App token here means the
|
||||
# merge-queue-landed merge IS able to fire the cascade naturally;
|
||||
# the polling tail below stays as defense-in-depth.
|
||||
- name: Mint App token for promote-PR + downstream dispatch
|
||||
if: ${{ vars.AUTO_PROMOTE_ENABLED == 'true' || github.event.inputs.force == 'true' }}
|
||||
id: app-token
|
||||
uses: actions/create-github-app-token@1b10c78c7865c340bc4f6099eb2f838309f1e8c3 # v3.1.1
|
||||
with:
|
||||
app-id: ${{ secrets.MOLECULE_AI_APP_ID }}
|
||||
private-key: ${{ secrets.MOLECULE_AI_APP_PRIVATE_KEY }}
|
||||
|
||||
- name: Open (or reuse) staging → main promote PR + enable auto-merge
|
||||
if: ${{ vars.AUTO_PROMOTE_ENABLED == 'true' || github.event.inputs.force == 'true' }}
|
||||
env:
|
||||
GH_TOKEN: ${{ steps.app-token.outputs.token }}
|
||||
REPO: ${{ github.repository }}
|
||||
TARGET_SHA: ${{ needs.check-all-gates-green.outputs.head_sha }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
# Look for an existing open promote PR (idempotent on re-run
|
||||
# of the workflow). The PR's head IS the staging branch — the
|
||||
# whole point is "advance main to staging's tip", so we don't
|
||||
# need a per-SHA branch like auto-sync-main-to-staging uses.
|
||||
PR_NUM=$(gh pr list --repo "$REPO" \
|
||||
--base main --head staging --state open \
|
||||
--json number --jq '.[0].number // ""')
|
||||
|
||||
if [ -z "$PR_NUM" ]; then
|
||||
TITLE="staging → main: auto-promote ${TARGET_SHA:0:7}"
|
||||
BODY_FILE=$(mktemp)
|
||||
cat > "$BODY_FILE" <<EOFBODY
|
||||
Automated promotion of \`staging\` (\`${TARGET_SHA:0:8}\`) to \`main\`. All required staging gates green at this SHA: CI, E2E Staging Canvas, E2E API Smoke, CodeQL.
|
||||
|
||||
This PR is auto-generated by \`.github/workflows/auto-promote-staging.yml\` whenever every required gate completes green on the same staging SHA. It exists because main's branch protection requires status checks "set by the expected GitHub apps" — direct \`git push\` from a workflow can't satisfy that, only PR merges through the queue can.
|
||||
|
||||
Merge queue lands this; no human action needed unless gates fail. Reverse-direction sync (the merge commit on main → staging) is handled by \`auto-sync-main-to-staging.yml\`.
|
||||
EOFBODY
|
||||
PR_URL=$(gh pr create --repo "$REPO" \
|
||||
--base main --head staging \
|
||||
--title "$TITLE" \
|
||||
--body-file "$BODY_FILE")
|
||||
PR_NUM=$(echo "$PR_URL" | grep -oE '[0-9]+$' | tail -1)
|
||||
rm -f "$BODY_FILE"
|
||||
echo "::notice::Opened PR #${PR_NUM}"
|
||||
else
|
||||
echo "::notice::Re-using existing promote PR #${PR_NUM}"
|
||||
fi
|
||||
|
||||
# Enable auto-merge — the merge queue picks it up once
|
||||
# required gates are green on the merge_group ref.
|
||||
if ! gh pr merge "$PR_NUM" --repo "$REPO" --auto --merge 2>&1; then
|
||||
echo "::warning::Failed to enable auto-merge on PR #${PR_NUM} — operator may need to merge manually."
|
||||
fi
|
||||
|
||||
{
|
||||
echo "## ✅ Auto-promote PR opened"
|
||||
echo
|
||||
echo "- Source: staging at \`${TARGET_SHA:0:8}\`"
|
||||
echo "- PR: #${PR_NUM}"
|
||||
echo
|
||||
echo "Merge queue lands the PR once required gates are green; no human action needed unless gates fail."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
|
||||
# Hand the PR number to the next step so we can dispatch the
|
||||
# tenant-redeploy chain after the merge queue lands the merge.
|
||||
echo "promote_pr_num=${PR_NUM}" >> "$GITHUB_OUTPUT"
|
||||
id: promote_pr
|
||||
|
||||
# The App token minted above (before the promote-PR step) is
|
||||
# also used by the polling tail below. Defense-in-depth: with
|
||||
# the merge-queue-landed merge now using the App token, the
|
||||
# main-branch push event SHOULD fire the publish/canary/redeploy
|
||||
# cascade naturally — but if for any reason it doesn't (e.g. an
|
||||
# unrelated event-suppression edge case), the explicit dispatches
|
||||
# below still wake the chain.
|
||||
- name: Wait for promote merge, then dispatch publish + redeploy (#2357)
|
||||
# Defense-in-depth dispatch. With the auto-merge call above
|
||||
# now using the App token (this commit), the merge-queue-landed
|
||||
# merge SHOULD fire publish-workspace-server-image naturally
|
||||
# via on:push:[main] — App-token-initiated pushes DO trigger
|
||||
# workflow_run cascades, unlike GITHUB_TOKEN-initiated ones
|
||||
# (the documented "no recursion" rule —
|
||||
# https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow).
|
||||
#
|
||||
# This explicit dispatch stays as belt-and-suspenders for any
|
||||
# edge case where the natural cascade misfires. If it never
|
||||
# observably fires after this token swap (i.e. the publish
|
||||
# workflow has already started by the time we get here), the
|
||||
# second dispatch is a harmless no-op (publish-workspace-server-image
|
||||
# has its own concurrency group that dedupes).
|
||||
#
|
||||
# See PR for #2357: pre-fix the merge action was via
|
||||
# GITHUB_TOKEN, suppressing the cascade and forcing this tail
|
||||
# to be the SOLE chain trigger. With the auto-merge token swap
|
||||
# the tail becomes redundant in the happy path; keep until
|
||||
# we've observed >=10 successful natural cascades, then drop.
|
||||
if: steps.promote_pr.outputs.promote_pr_num != ''
|
||||
env:
|
||||
GH_TOKEN: ${{ steps.app-token.outputs.token }}
|
||||
REPO: ${{ github.repository }}
|
||||
PR_NUM: ${{ steps.promote_pr.outputs.promote_pr_num }}
|
||||
run: |
|
||||
# Poll for merge — max 30 min (60 × 30s). The merge queue
|
||||
# typically lands within 5-10 min when gates are green. Break
|
||||
# early if the PR is closed without merging (operator action,
|
||||
# gates flipped red post-approval, branch-protection rejection)
|
||||
# so we don't tie up a runner for the full 30 min on a dead PR.
|
||||
MERGED=""
|
||||
STATE=""
|
||||
for _ in $(seq 1 60); do
|
||||
VIEW=$(gh pr view "$PR_NUM" --repo "$REPO" --json mergedAt,state)
|
||||
MERGED=$(echo "$VIEW" | jq -r '.mergedAt // ""')
|
||||
STATE=$(echo "$VIEW" | jq -r '.state // ""')
|
||||
if [ -n "$MERGED" ] && [ "$MERGED" != "null" ]; then
|
||||
echo "::notice::Promote PR #${PR_NUM} merged at ${MERGED}"
|
||||
break
|
||||
fi
|
||||
if [ "$STATE" = "CLOSED" ]; then
|
||||
echo "::warning::Promote PR #${PR_NUM} was closed without merging — skipping deploy dispatch."
|
||||
exit 0
|
||||
fi
|
||||
sleep 30
|
||||
done
|
||||
|
||||
if [ -z "$MERGED" ] || [ "$MERGED" = "null" ]; then
|
||||
echo "::warning::Promote PR #${PR_NUM} didn't merge within 30min — skipping deploy dispatch (manually run \`gh workflow run publish-workspace-server-image.yml --ref main\` once it lands)."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Dispatch publish on main using the App token. App-initiated
|
||||
# workflow_dispatch DOES propagate the workflow_run cascade,
|
||||
# unlike GITHUB_TOKEN-initiated dispatch.
|
||||
# publish completes → canary-verify chains via workflow_run →
|
||||
# redeploy-tenants-on-main chains via workflow_run + branches:[main].
|
||||
if gh workflow run publish-workspace-server-image.yml \
|
||||
--repo "$REPO" --ref main 2>&1; then
|
||||
echo "::notice::Dispatched publish-workspace-server-image on ref=main as molecule-ai App — canary-verify and redeploy-tenants-on-main will chain via workflow_run."
|
||||
{
|
||||
echo "## 🚀 Tenant redeploy chain dispatched"
|
||||
echo
|
||||
echo "- publish-workspace-server-image (workflow_dispatch on \`main\`, actor: \`molecule-ai[bot]\`)"
|
||||
echo "- canary-verify will chain on completion"
|
||||
echo "- redeploy-tenants-on-main will chain on canary green"
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
else
|
||||
echo "::error::Failed to dispatch publish-workspace-server-image. Run manually: gh workflow run publish-workspace-server-image.yml --ref main"
|
||||
fi
|
||||
|
||||
# ALSO dispatch auto-sync-main-to-staging.yml. Same root cause as
|
||||
# publish above (issue #2357): the merge-queue-initiated push to
|
||||
# main is by GITHUB_TOKEN → no `on: push` triggers fire downstream.
|
||||
# Without this dispatch, every staging→main promote leaves staging
|
||||
# one merge commit BEHIND main, which silently dead-locks the NEXT
|
||||
# promote PR as `mergeStateStatus: BEHIND` because main's
|
||||
# branch-protection has `strict: true`. Verified empirically on
|
||||
# 2026-05-02 against PR #2442 (Phase 2 promote): only the explicit
|
||||
# publish-workspace-server-image dispatch fired on the previous
|
||||
# promote SHA 76c604fb, while auto-sync silently no-op'd, leaving
|
||||
# staging behind for ~24h until manually bridged.
|
||||
if gh workflow run auto-sync-main-to-staging.yml \
|
||||
--repo "$REPO" --ref main 2>&1; then
|
||||
echo "::notice::Dispatched auto-sync-main-to-staging on ref=main as molecule-ai App — staging will absorb the new main merge commit via PR + merge queue."
|
||||
else
|
||||
echo "::error::Failed to dispatch auto-sync-main-to-staging. Run manually: gh workflow run auto-sync-main-to-staging.yml --ref main"
|
||||
fi
|
||||
83
.github/workflows/auto-promote-stale-alarm.yml
vendored
Normal file
83
.github/workflows/auto-promote-stale-alarm.yml
vendored
Normal file
@ -0,0 +1,83 @@
|
||||
name: auto-promote-stale-alarm
|
||||
|
||||
# Hourly cron + on-demand alarm for the silent-block failure mode that
|
||||
# motivated issue #2975:
|
||||
# - The auto-promote-staging.yml workflow opened a PR + armed
|
||||
# auto-merge, but main's branch protection requires a human review
|
||||
# (reviewDecision=REVIEW_REQUIRED). The PR sat BLOCKED with no
|
||||
# surface-up-the-stack for 12+ hours, holding 25 commits hostage
|
||||
# including the Memory v2 redesign and a reno-stars data-loss fix.
|
||||
#
|
||||
# This workflow runs `scripts/check-stale-promote-pr.sh` against the
|
||||
# repo's open auto-promote PRs (base=main head=staging). When a PR has
|
||||
# been BLOCKED on REVIEW_REQUIRED for >4h, it:
|
||||
# 1. Emits a workflow-level warning (visible in run summary + the
|
||||
# Actions UI feed).
|
||||
# 2. Posts a comment on the PR (idempotent — one alarm per PR).
|
||||
#
|
||||
# The detection logic lives in scripts/check-stale-promote-pr.sh so
|
||||
# it's unit-testable with stubbed `gh` (see test-check-stale-promote-pr.sh).
|
||||
# This file is the schedule + invocation surface only — SSOT for the
|
||||
# detector itself.
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Hourly. Cheap (one `gh pr list` + jq), and 1h granularity is
|
||||
# plenty for a 4h staleness threshold — operators see the alarm
|
||||
# within at most 1h of crossing the threshold.
|
||||
- cron: "27 * * * *" # at :27 to dodge the cron herd at :00
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
stale_hours:
|
||||
description: "Hours after which a BLOCKED+REVIEW_REQUIRED PR is stale (default 4)"
|
||||
required: false
|
||||
default: "4"
|
||||
post_comment:
|
||||
description: "Post a comment on stale PRs (default true)"
|
||||
required: false
|
||||
default: "true"
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write # post comments on stale PRs
|
||||
|
||||
# Serialize so the on-demand and scheduled runs don't double-comment
|
||||
# the same PR. cancel-in-progress=false because the script is idempotent
|
||||
# (existing comment marker prevents dupes), but a scheduled run firing
|
||||
# while a manual one runs would just re-list the same PR set.
|
||||
concurrency:
|
||||
group: auto-promote-stale-alarm
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
scan:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout (need scripts/ only)
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
sparse-checkout: |
|
||||
scripts/check-stale-promote-pr.sh
|
||||
sparse-checkout-cone-mode: false
|
||||
- name: Run stale-PR detector
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
STALE_HOURS: ${{ inputs.stale_hours || '4' }}
|
||||
POST_COMMENT: ${{ inputs.post_comment || 'true' }}
|
||||
run: |
|
||||
# The script's exit code reflects the count of stale PRs.
|
||||
# We don't want a stale finding to fail the workflow run —
|
||||
# the warning + comment are the signal, the green/red is
|
||||
# noise. So convert any non-zero exit to a workflow notice
|
||||
# and exit 0.
|
||||
set +e
|
||||
bash scripts/check-stale-promote-pr.sh
|
||||
rc=$?
|
||||
set -e
|
||||
if [ "$rc" -ne 0 ]; then
|
||||
echo "::notice::Stale PR detector found $rc PR(s) needing attention. See warnings above + comments on the PRs."
|
||||
fi
|
||||
# Always succeed — operator-facing surface is the warning,
|
||||
# not the workflow status.
|
||||
exit 0
|
||||
237
.github/workflows/auto-sync-main-to-staging.yml
vendored
Normal file
237
.github/workflows/auto-sync-main-to-staging.yml
vendored
Normal file
@ -0,0 +1,237 @@
|
||||
name: Auto-sync main → staging
|
||||
|
||||
# Reflects every push to `main` back onto `staging` so the
|
||||
# staging-as-superset-of-main invariant holds.
|
||||
#
|
||||
# Background:
|
||||
#
|
||||
# `auto-promote-staging.yml` advances main via `git merge --ff-only`
|
||||
# + `git push origin main` — that's a clean fast-forward, no merge
|
||||
# commit. But manual merges of `staging → main` PRs through the
|
||||
# GitHub UI / API create a merge commit on main that staging
|
||||
# doesn't have. The next `staging → main` PR then evaluates as
|
||||
# "BEHIND" because staging is missing that merge commit, requiring
|
||||
# a manual `gh pr update-branch` round-trip.
|
||||
#
|
||||
# This happened twice on 2026-04-28 (PRs #2202, #2205, both manual
|
||||
# bridges). Each time the bridge needed update-branch + a re-CI
|
||||
# round before merging. Operationally annoying and avoidable.
|
||||
#
|
||||
# Architecture:
|
||||
#
|
||||
# This repo's `staging` branch is protected by a `merge_queue`
|
||||
# ruleset (id 15500102) that blocks ALL direct pushes — no bypass
|
||||
# even for org admins or the GitHub Actions integration. Direct
|
||||
# `git push origin staging` returns GH013. So instead of pushing
|
||||
# directly, this workflow:
|
||||
#
|
||||
# 1. Checks if main is already in staging's ancestry → no-op.
|
||||
# 2. Creates an `auto-sync/main-<sha>` branch from staging.
|
||||
# 3. Tries `git merge --ff-only origin/main` → if staging hasn't
|
||||
# diverged this is a clean ff.
|
||||
# 4. Otherwise `git merge --no-ff origin/main` to absorb main's
|
||||
# tip while keeping staging's history.
|
||||
# 5. Pushes the auto-sync branch.
|
||||
# 6. Opens a PR (base=staging, head=auto-sync/main-<sha>) and
|
||||
# enables auto-merge so the merge queue lands it.
|
||||
#
|
||||
# This mirrors the path human PRs take through staging — same
|
||||
# rules, same gates, no special-case bypass.
|
||||
#
|
||||
# Loop safety:
|
||||
#
|
||||
# `GITHUB_TOKEN`-authored merges (including the merge queue's land
|
||||
# of the auto-sync PR) do NOT trigger downstream workflow runs
|
||||
# (GitHub Actions safety). So when the auto-sync PR lands on
|
||||
# staging, `auto-promote-staging.yml` is NOT triggered by that
|
||||
# push. The next developer push to staging triggers auto-promote
|
||||
# normally. No loop possible.
|
||||
#
|
||||
# Concurrency:
|
||||
#
|
||||
# Two pushes to main in quick succession (e.g., manual UI merge
|
||||
# immediately followed by auto-promote-staging's ff-merge) could
|
||||
# otherwise open two overlapping auto-sync PRs. The concurrency
|
||||
# group serializes runs; the second waits for the first to exit.
|
||||
# (The first run exits after opening + auto-merge-queueing the PR,
|
||||
# not after the merge actually completes — so multiple PRs can be
|
||||
# open simultaneously, but the merge queue handles them serially.)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
# workflow_dispatch lets:
|
||||
# 1. Operators manually backfill a missed sync (e.g. after a manual
|
||||
# UI merge that the runner missed).
|
||||
# 2. auto-promote-staging.yml's polling tail explicitly invoke us
|
||||
# after the promote PR lands. This is load-bearing: when the
|
||||
# merge queue lands a promote-PR merge, the resulting push to
|
||||
# `main` is "by GITHUB_TOKEN", and per GitHub's no-recursion
|
||||
# rule (https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow)
|
||||
# that push event does NOT fire any downstream workflows. The
|
||||
# `on: push` trigger above is silently dead for the very pattern
|
||||
# we exist to handle. Verified empirically 2026-05-02 against
|
||||
# SHA 76c604fb (PR #2437 staging→main): only ONE workflow fired
|
||||
# (publish-workspace-server-image, dispatched explicitly by
|
||||
# auto-promote's polling tail with an App token). Every other
|
||||
# `on: push: branches: [main]` workflow — including this one —
|
||||
# was suppressed. Until the underlying merge call moves to an
|
||||
# App token, an explicit dispatch is the only reliable path.
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
|
||||
concurrency:
|
||||
group: auto-sync-main-to-staging
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
sync-staging:
|
||||
# ubuntu-latest matches every other workflow in this repo. The
|
||||
# earlier `[self-hosted, macos, arm64]` was a copy-paste artefact
|
||||
# from the molecule-controlplane repo (which IS private and uses a
|
||||
# Mac runner) — molecule-core has no Mac runner registered, so the
|
||||
# job sat unassigned whenever the trigger fired. Verified 2026-05-02:
|
||||
# this is the ONLY workflow in molecule-core/.github/workflows/ with
|
||||
# a non-ubuntu runs-on.
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout staging
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: staging
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Configure git author
|
||||
run: |
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
- name: Check if staging already contains main
|
||||
id: check
|
||||
run: |
|
||||
set -euo pipefail
|
||||
git fetch origin main
|
||||
if git merge-base --is-ancestor origin/main HEAD; then
|
||||
echo "needs_sync=false" >> "$GITHUB_OUTPUT"
|
||||
{
|
||||
echo "## ✅ No-op"
|
||||
echo
|
||||
echo "staging already contains \`origin/main\` ($(git rev-parse --short=8 origin/main))."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
else
|
||||
echo "needs_sync=true" >> "$GITHUB_OUTPUT"
|
||||
MAIN_SHORT=$(git rev-parse --short=8 origin/main)
|
||||
echo "main_short=${MAIN_SHORT}" >> "$GITHUB_OUTPUT"
|
||||
echo "branch=auto-sync/main-${MAIN_SHORT}" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::staging is missing main's tip (${MAIN_SHORT}) — opening sync PR"
|
||||
fi
|
||||
|
||||
- name: Create auto-sync branch + merge main
|
||||
if: steps.check.outputs.needs_sync == 'true'
|
||||
id: prep
|
||||
run: |
|
||||
set -euo pipefail
|
||||
BRANCH="${{ steps.check.outputs.branch }}"
|
||||
|
||||
# If a previous auto-sync run already opened a branch for the
|
||||
# same main sha, prefer reusing it (idempotent behavior on
|
||||
# workflow restart). Force-update from latest staging anyway
|
||||
# so it absorbs any staging-side commits that landed since.
|
||||
git checkout -B "$BRANCH"
|
||||
|
||||
if git merge --ff-only origin/main; then
|
||||
echo "did_ff=true" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::Fast-forwarded ${BRANCH} to origin/main"
|
||||
else
|
||||
echo "did_ff=false" >> "$GITHUB_OUTPUT"
|
||||
if ! git merge --no-ff origin/main -m "chore: sync main → staging (auto)"; then
|
||||
# Hygiene: leave the work tree clean before failing.
|
||||
git merge --abort || true
|
||||
{
|
||||
echo "## ❌ Conflict"
|
||||
echo
|
||||
echo "Auto-merge \`main → staging\` failed with conflicts."
|
||||
echo "A human needs to resolve manually."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
- name: Push auto-sync branch
|
||||
if: steps.check.outputs.needs_sync == 'true'
|
||||
run: |
|
||||
set -euo pipefail
|
||||
# Force-with-lease so a concurrent auto-sync run can't
|
||||
# silently clobber an in-flight branch we just updated. If a
|
||||
# different writer touched the branch, we abort and the next
|
||||
# run picks up the latest state.
|
||||
git push --force-with-lease origin "${{ steps.check.outputs.branch }}"
|
||||
|
||||
- name: Open auto-sync PR + enable auto-merge
|
||||
if: steps.check.outputs.needs_sync == 'true'
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
BRANCH: ${{ steps.check.outputs.branch }}
|
||||
MAIN_SHORT: ${{ steps.check.outputs.main_short }}
|
||||
DID_FF: ${{ steps.prep.outputs.did_ff }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
# Find existing PR for this branch (idempotent on workflow
|
||||
# restart) before creating a new one.
|
||||
PR_NUM=$(gh pr list --head "$BRANCH" --base staging --state open --json number --jq '.[0].number // ""')
|
||||
|
||||
if [ -z "$PR_NUM" ]; then
|
||||
# Body lives in a temp file to keep the multi-line content
|
||||
# out of the YAML block scalar (un-indented newlines inside
|
||||
# an inline shell string break YAML parsing).
|
||||
BODY_FILE=$(mktemp)
|
||||
if [ "$DID_FF" = "true" ]; then
|
||||
TITLE="chore: sync main → staging (auto, ff to ${MAIN_SHORT})"
|
||||
cat > "$BODY_FILE" <<EOFBODY
|
||||
Automated fast-forward of \`staging\` to \`origin/main\` (\`${MAIN_SHORT}\`). Staging has no in-flight commits that diverge from main. Merge queue lands this; no human action needed.
|
||||
|
||||
This PR is auto-generated by \`.github/workflows/auto-sync-main-to-staging.yml\` on every push to \`main\`. It exists because this repo's \`staging\` branch has a \`merge_queue\` ruleset that blocks direct pushes — even from the GitHub Actions integration.
|
||||
EOFBODY
|
||||
else
|
||||
TITLE="chore: sync main → staging (auto, merge ${MAIN_SHORT})"
|
||||
cat > "$BODY_FILE" <<EOFBODY
|
||||
Automated merge of \`origin/main\` (\`${MAIN_SHORT}\`) into \`staging\`. Staging has commits main doesn't, so this is a non-ff merge that absorbs main's tip. Merge queue lands this.
|
||||
|
||||
This PR is auto-generated by \`.github/workflows/auto-sync-main-to-staging.yml\` on every push to \`main\`.
|
||||
EOFBODY
|
||||
fi
|
||||
|
||||
# gh pr create prints the URL on stdout; extract the PR number.
|
||||
PR_URL=$(gh pr create \
|
||||
--base staging \
|
||||
--head "$BRANCH" \
|
||||
--title "$TITLE" \
|
||||
--body-file "$BODY_FILE")
|
||||
PR_NUM=$(echo "$PR_URL" | grep -oE '[0-9]+$' | tail -1)
|
||||
rm -f "$BODY_FILE"
|
||||
echo "::notice::Opened PR #${PR_NUM}"
|
||||
else
|
||||
echo "::notice::Re-using existing PR #${PR_NUM} for ${BRANCH}"
|
||||
fi
|
||||
|
||||
# Enable auto-merge — the merge queue picks it up once
|
||||
# required gates are green. Use --merge for merge commits
|
||||
# (matches the rest of this repo's PR convention).
|
||||
if ! gh pr merge "$PR_NUM" --auto --merge 2>&1; then
|
||||
echo "::warning::Failed to enable auto-merge on PR #${PR_NUM} — operator may need to merge manually."
|
||||
fi
|
||||
|
||||
{
|
||||
echo "## ✅ Auto-sync PR opened"
|
||||
echo
|
||||
echo "- Branch: \`$BRANCH\`"
|
||||
echo "- PR: #$PR_NUM"
|
||||
echo "- Strategy: $([ "$DID_FF" = "true" ] && echo "ff" || echo "merge commit")"
|
||||
echo
|
||||
echo "Merge queue lands the PR once required gates are green; no human action needed unless gates fail."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
37
.github/workflows/auto-tag-runtime.yml
vendored
37
.github/workflows/auto-tag-runtime.yml
vendored
@ -57,42 +57,17 @@ jobs:
|
||||
id: bump
|
||||
if: steps.skip.outputs.skip != 'true'
|
||||
env:
|
||||
# Gitea-shape token (act_runner forwards GITHUB_TOKEN as a
|
||||
# short-lived per-run secret with read access to this repo).
|
||||
# We hit `/api/v1/repos/.../pulls?state=closed` directly
|
||||
# because `gh pr list` calls Gitea's GraphQL endpoint, which
|
||||
# returns HTTP 405 (issue #75 / post-#66 sweep).
|
||||
GITEA_TOKEN: ${{ github.token }}
|
||||
REPO: ${{ github.repository }}
|
||||
GITEA_API_URL: ${{ github.server_url }}/api/v1
|
||||
PUSH_SHA: ${{ github.sha }}
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
# Find the merged PR whose merge_commit_sha matches this push.
|
||||
# Gitea's `/repos/{owner}/{repo}/pulls?state=closed` returns
|
||||
# PRs sorted newest-first; we paginate up to 50 and jq-filter
|
||||
# on `merge_commit_sha == PUSH_SHA`. Bounded — auto-tag fires
|
||||
# per push to main, so the matching PR is always among the
|
||||
# most recent closures. 50 is comfortably more than the
|
||||
# ~10-20 staging→main promotes that close in any reasonable
|
||||
# window.
|
||||
set -euo pipefail
|
||||
PRS_JSON=$(curl --fail-with-body -sS \
|
||||
-H "Authorization: token ${GITEA_TOKEN}" \
|
||||
-H "Accept: application/json" \
|
||||
"${GITEA_API_URL}/repos/${REPO}/pulls?state=closed&sort=newest&limit=50" \
|
||||
2>/dev/null || echo "[]")
|
||||
PR=$(printf '%s' "$PRS_JSON" \
|
||||
| jq -c --arg sha "$PUSH_SHA" \
|
||||
'[.[] | select(.merged_at != null and .merge_commit_sha == $sha)] | .[0] // empty')
|
||||
# The merged PR for this push commit. `gh pr list --search` finds
|
||||
# closed PRs whose merge commit matches; we take the first.
|
||||
PR=$(gh pr list --state merged --search "${{ github.sha }}" --json number,labels --jq '.[0]' 2>/dev/null || echo "")
|
||||
if [ -z "$PR" ] || [ "$PR" = "null" ]; then
|
||||
echo "No merged PR found for ${PUSH_SHA} — defaulting to patch bump."
|
||||
echo "No merged PR found for ${{ github.sha }} — defaulting to patch bump."
|
||||
echo "kind=patch" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
# Gitea returns labels under `.labels[].name`, same shape as
|
||||
# GitHub's REST. The previous `gh pr list --json number,labels`
|
||||
# output was identical; jq filter unchanged.
|
||||
LABELS=$(printf '%s' "$PR" | jq -r '.labels[]?.name // empty')
|
||||
LABELS=$(echo "$PR" | jq -r '.labels[].name')
|
||||
if echo "$LABELS" | grep -qx 'release:major'; then
|
||||
echo "kind=major" >> "$GITHUB_OUTPUT"
|
||||
elif echo "$LABELS" | grep -qx 'release:minor'; then
|
||||
|
||||
30
.github/workflows/branch-protection-drift.yml
vendored
30
.github/workflows/branch-protection-drift.yml
vendored
@ -19,7 +19,6 @@ on:
|
||||
branches: [staging, main]
|
||||
paths:
|
||||
- 'tools/branch-protection/**'
|
||||
- '.github/workflows/**'
|
||||
- '.github/workflows/branch-protection-drift.yml'
|
||||
|
||||
permissions:
|
||||
@ -80,32 +79,3 @@ jobs:
|
||||
# Repo-admin scope, needed for /branches/:b/protection.
|
||||
GH_TOKEN: ${{ secrets.GH_TOKEN_FOR_ADMIN_API }}
|
||||
run: bash tools/branch-protection/drift_check.sh
|
||||
|
||||
# Self-test the parity script before running it on the real
|
||||
# workflows — pins the script's classification logic against
|
||||
# synthetic safe/unsafe/missing/unsafe-mix/matrix fixtures so a
|
||||
# regression in the script can't false-pass on the production
|
||||
# workflow audit. Cheap (~0.5s); always runs.
|
||||
- name: Self-test check-name parity script
|
||||
run: bash tools/branch-protection/test_check_name_parity.sh
|
||||
|
||||
# Check-name parity gate (#144 / saved memory
|
||||
# feedback_branch_protection_check_name_parity).
|
||||
#
|
||||
# drift_check.sh asserts the live branch protection matches what
|
||||
# apply.sh would set; check_name_parity.sh closes the orthogonal
|
||||
# gap: it asserts every required check name in apply.sh maps to a
|
||||
# workflow job whose "always emits this status" shape is intact.
|
||||
#
|
||||
# The two checks fail in different scenarios:
|
||||
#
|
||||
# - drift_check fails → live state was rewritten out-of-band
|
||||
# (UI click, manual PATCH).
|
||||
# - check_name_parity fails → an apply.sh required name has no
|
||||
# emitter, OR the emitting workflow has a top-level paths:
|
||||
# filter without per-step if-gates (the silent-block shape).
|
||||
#
|
||||
# Cheap (~1s); runs without the admin token because it only reads
|
||||
# apply.sh + .github/workflows/ from the checkout.
|
||||
- name: Run check-name parity gate
|
||||
run: bash tools/branch-protection/check_name_parity.sh
|
||||
|
||||
82
.github/workflows/canary-staging.yml
vendored
82
.github/workflows/canary-staging.yml
vendored
@ -20,19 +20,6 @@ on:
|
||||
# a few minutes under load — that's fine for a canary.
|
||||
- cron: '*/30 * * * *'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
keep_on_failure:
|
||||
description: >-
|
||||
Skip teardown when the canary fails (debugging only). The
|
||||
tenant org + EC2 + CF tunnel + DNS stay alive so an operator
|
||||
can SSM into the workspace EC2 and capture docker logs of the
|
||||
failing claude-code container. REMEMBER to manually delete
|
||||
via DELETE /cp/admin/tenants/<slug> when done so the org
|
||||
doesn't accumulate cost. Only honored on workflow_dispatch;
|
||||
cron runs always tear down (we don't want unattended cron
|
||||
to leak resources).
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
# Serialise with the full-SaaS workflow so they don't contend for the
|
||||
# same org-create quota on staging. Different group key from
|
||||
@ -93,14 +80,6 @@ jobs:
|
||||
# is "Token Plan only" but cheap-per-token and fast.
|
||||
E2E_MODEL_SLUG: MiniMax-M2.7-highspeed
|
||||
E2E_RUN_ID: "canary-${{ github.run_id }}"
|
||||
# Debug-only: when an operator dispatches with keep_on_failure=true,
|
||||
# the canary script's E2E_KEEP_ORG=1 path skips teardown so the
|
||||
# tenant org + EC2 stay alive for SSM-based log capture. Cron runs
|
||||
# never set this (the input only exists on workflow_dispatch) so
|
||||
# unattended cron always tears down. See molecule-core#129
|
||||
# failure mode #1 — capturing the actual exception requires
|
||||
# docker logs from the live container.
|
||||
E2E_KEEP_ORG: ${{ github.event.inputs.keep_on_failure == 'true' && '1' || '0' }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
@ -158,28 +137,27 @@ jobs:
|
||||
id: canary
|
||||
run: bash tests/e2e/test_staging_full_saas.sh
|
||||
|
||||
# Alerting: open a sticky issue on the FIRST failure; comment on
|
||||
# subsequent failures; auto-close on next green. Comment-on-existing
|
||||
# de-duplicates so a single open issue accumulates the streak —
|
||||
# ops sees one issue with N comments rather than N issues.
|
||||
# Alerting: open an issue only after THREE consecutive failures so
|
||||
# transient flakes (Cloudflare DNS hiccup, AWS API blip) don't spam
|
||||
# the issue list. If an issue is already open, we still comment on
|
||||
# every failure so ops sees the streak. Auto-close on next green.
|
||||
#
|
||||
# Why no consecutive-failures threshold (e.g., wait 3 runs before
|
||||
# filing): the prior threshold check used
|
||||
# `github.rest.actions.listWorkflowRuns()` which Gitea 1.22.6 does
|
||||
# not expose (returns 404). On Gitea Actions the threshold call
|
||||
# ALWAYS failed, breaking the entire alerting step and going days
|
||||
# silent on real regressions (38h+ chronic red on 2026-05-07/08
|
||||
# before this fix; tracked in molecule-core#129). Filing on first
|
||||
# failure is also better UX — we want to know about the first red,
|
||||
# not wait 90 min for it to "count." Real flakes get one issue +
|
||||
# a quick close-on-green; persistent reds accumulate comments.
|
||||
# Threshold rationale: canary fires every 30 min, so 3 failures =
|
||||
# ~90 min of consecutive red — well past any single-run flake but
|
||||
# still tight enough that a real outage gets surfaced before the
|
||||
# next deploy window.
|
||||
- name: Open issue on failure
|
||||
if: failure()
|
||||
uses: actions/github-script@3a2844b7e9c422d3c10d287c895573f7108da1b3 # v9.0.0
|
||||
env:
|
||||
# Inject the workflow path explicitly — context.workflow is
|
||||
# the *name*, not the file path the actions API needs.
|
||||
WORKFLOW_PATH: '.github/workflows/canary-staging.yml'
|
||||
CONSECUTIVE_THRESHOLD: '3'
|
||||
with:
|
||||
script: |
|
||||
const title = '🔴 Canary failing: staging SaaS smoke';
|
||||
const runURL = `${context.serverUrl}/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
|
||||
const runURL = `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`;
|
||||
|
||||
// Find an existing open canary issue (stable title match).
|
||||
// If one exists, this isn't a "first failure" — comment and exit.
|
||||
@ -199,12 +177,32 @@ jobs:
|
||||
return;
|
||||
}
|
||||
|
||||
// No open issue yet — file one on this first failure. The
|
||||
// comment-on-existing branch above means subsequent failures
|
||||
// accumulate as comments on this same issue, so we don't
|
||||
// spam new issues per run.
|
||||
// No open issue yet — check the last N-1 runs' conclusions.
|
||||
// We open the issue only if the last (THRESHOLD-1) runs ALSO
|
||||
// failed (so this is the 3rd consecutive red).
|
||||
const threshold = parseInt(process.env.CONSECUTIVE_THRESHOLD, 10);
|
||||
const { data: runs } = await github.rest.actions.listWorkflowRuns({
|
||||
owner: context.repo.owner, repo: context.repo.repo,
|
||||
workflow_id: process.env.WORKFLOW_PATH,
|
||||
status: 'completed',
|
||||
per_page: threshold,
|
||||
// Skip the current in-progress run; it isn't 'completed' yet.
|
||||
});
|
||||
// listWorkflowRuns returns recent first. We need (threshold-1)
|
||||
// prior failures (current run is the threshold-th).
|
||||
const priorFailures = (runs.workflow_runs || [])
|
||||
.slice(0, threshold - 1)
|
||||
.filter(r => r.id !== context.runId)
|
||||
.filter(r => r.conclusion === 'failure')
|
||||
.length;
|
||||
if (priorFailures < threshold - 1) {
|
||||
core.info(`Below threshold: ${priorFailures + 1}/${threshold} consecutive failures — not filing yet`);
|
||||
return;
|
||||
}
|
||||
|
||||
const body =
|
||||
`Canary run failed at ${new Date().toISOString()}.\n\n` +
|
||||
`Canary run failed at ${new Date().toISOString()}, ` +
|
||||
`${threshold} consecutive runs red.\n\n` +
|
||||
`Run: ${runURL}\n\n` +
|
||||
`This issue auto-closes on the next green canary run. ` +
|
||||
`Consecutive failures add a comment here rather than a new issue.`;
|
||||
@ -213,7 +211,7 @@ jobs:
|
||||
title, body,
|
||||
labels: ['canary-staging', 'bug'],
|
||||
});
|
||||
core.info('Opened canary failure issue (first red)');
|
||||
core.info(`Opened canary failure issue (${threshold} consecutive reds)`);
|
||||
|
||||
- name: Auto-close canary issue on success
|
||||
if: success()
|
||||
|
||||
157
.github/workflows/canary-verify.yml
vendored
157
.github/workflows/canary-verify.yml
vendored
@ -1,34 +1,19 @@
|
||||
name: canary-verify
|
||||
|
||||
# Runs the canary smoke suite against the staging canary tenant fleet
|
||||
# after a new :staging-<sha> image lands in ECR. On green, calls the
|
||||
# CP redeploy-fleet endpoint to promote :staging-<sha> → :latest so
|
||||
# the prod tenant fleet's 5-minute auto-updater picks up the verified
|
||||
# digest. On red, :latest stays on the prior known-good digest and
|
||||
# prod is untouched.
|
||||
#
|
||||
# Registry note (2026-05-10): This workflow previously used GHCR
|
||||
# (ghcr.io/molecule-ai/platform-tenant) — that registry was retired
|
||||
# during the 2026-05-06 Gitea suspension migration when publish-
|
||||
# workspace-server-image.yml switched to the operator's ECR org
|
||||
# (153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/
|
||||
# platform-tenant). The GHCR → ECR migration was never applied to
|
||||
# this file, so canary-verify was silently smoke-testing the stale
|
||||
# GHCR image while the actual staging/prod tenants ran the ECR image.
|
||||
# Result: smoke tests could not catch a broken ECR build. Fix:
|
||||
# - Wait step: reads SHA from running canary /health (tenant-
|
||||
# agnostic, works regardless of registry).
|
||||
# - Promote step: calls CP redeploy-fleet endpoint with target_tag=
|
||||
# staging-<sha>, same mechanism as redeploy-tenants-on-main.yml.
|
||||
# No longer attempts GHCR crane ops.
|
||||
# after a new :staging-<sha> image lands in GHCR. On green, promotes
|
||||
# :staging-<sha> → :latest so the prod tenant fleet's 5-minute
|
||||
# auto-updater picks up the verified digest. On red, :latest stays
|
||||
# on the prior known-good digest and prod is untouched.
|
||||
#
|
||||
# Dependencies:
|
||||
# - publish-workspace-server-image.yml publishes :staging-<sha>
|
||||
# to ECR on staging and main merges.
|
||||
# - Canary tenants are configured to pull :staging-<sha> from ECR
|
||||
# (TENANT_IMAGE env set to the ECR :staging-<sha> tag).
|
||||
# (NOT :latest) on main merge
|
||||
# - canary tenants are configured to pull :staging-<sha> as their
|
||||
# tenant image (set TENANT_IMAGE=ghcr.io/…:staging-<sha> on the
|
||||
# canary provisioner code path OR rotate via an admin endpoint)
|
||||
# - Repo secrets CANARY_TENANT_URLS / CANARY_ADMIN_TOKENS /
|
||||
# CANARY_CP_SHARED_SECRET are populated.
|
||||
# CANARY_CP_SHARED_SECRET are populated
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
@ -42,12 +27,8 @@ permissions:
|
||||
actions: read
|
||||
|
||||
env:
|
||||
# ECR registry (post-2026-05-06 SSOT for tenant images).
|
||||
# publish-workspace-server-image.yml pushes here.
|
||||
IMAGE_NAME: 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/platform
|
||||
TENANT_IMAGE_NAME: 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/platform-tenant
|
||||
# CP endpoint for redeploy-fleet (used in promote step below).
|
||||
CP_URL: ${{ vars.CP_URL || 'https://staging-api.moleculesai.app' }}
|
||||
IMAGE_NAME: ghcr.io/molecule-ai/platform
|
||||
TENANT_IMAGE_NAME: ghcr.io/molecule-ai/platform-tenant
|
||||
|
||||
jobs:
|
||||
canary-smoke:
|
||||
@ -71,12 +52,6 @@ jobs:
|
||||
# the new SHA (~2-3 min typical vs 6 min fixed). Falls back to
|
||||
# proceeding after 7 min even if not all canaries responded —
|
||||
# the smoke suite will catch any that didn't update.
|
||||
#
|
||||
# NOTE: The SHA is read from the running tenant's /health response,
|
||||
# NOT from a registry lookup. This is registry-agnostic and works
|
||||
# regardless of whether the tenant pulls from ECR, GHCR, or any
|
||||
# other registry — the canary is telling us what it's actually
|
||||
# running, which is the ground truth for smoke testing.
|
||||
env:
|
||||
CANARY_TENANT_URLS: ${{ secrets.CANARY_TENANT_URLS }}
|
||||
EXPECTED_SHA: ${{ steps.compute.outputs.sha }}
|
||||
@ -133,7 +108,7 @@ jobs:
|
||||
echo
|
||||
echo "One or more canary secrets are unset (\`CANARY_TENANT_URLS\`, \`CANARY_ADMIN_TOKENS\`, \`CANARY_CP_SHARED_SECRET\`)."
|
||||
echo "Phase 2 canary fleet has not been stood up yet —"
|
||||
echo "see [canary-tenants.md](https://git.moleculesai.app/molecule-ai/molecule-controlplane/blob/main/docs/canary-tenants.md)."
|
||||
echo "see [canary-tenants.md](https://github.com/molecule-ai/molecule-controlplane/blob/main/docs/canary-tenants.md)."
|
||||
echo
|
||||
echo "**Skipped — promote-to-latest will NOT auto-fire.** Dispatch \`promote-latest.yml\` manually when ready."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
@ -158,98 +133,42 @@ jobs:
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
|
||||
promote-to-latest:
|
||||
# On green, calls the CP redeploy-fleet endpoint with target_tag=
|
||||
# staging-<sha> to promote the verified ECR image. This is the same
|
||||
# mechanism as redeploy-tenants-on-main.yml — no GHCR crane ops.
|
||||
#
|
||||
# Pre-fix history: the old GHCR promote step used `crane tag` against
|
||||
# ghcr.io/molecule-ai/platform-tenant, but publish-workspace-server-
|
||||
# image.yml had already migrated to ECR on 2026-05-07 (commit
|
||||
# 10e510f5). The GHCR tags were never updated, so this step was
|
||||
# silently promoting a stale GHCR image while actual prod tenants
|
||||
# pulled from ECR. Canary smoke tests were GHCR-targeted and could
|
||||
# not catch a broken ECR build.
|
||||
# On green, retag :staging-<sha> → :latest for BOTH images.
|
||||
# crane is a lightweight registry client (no Docker daemon needed on
|
||||
# the runner) that can retag remotely with a single API call each.
|
||||
# Gated on smoke_ran=true — without a real canary fleet the smoke
|
||||
# step no-ops with success, and we don't want that to silently
|
||||
# auto-promote every main merge.
|
||||
needs: canary-smoke
|
||||
if: ${{ needs.canary-smoke.result == 'success' && needs.canary-smoke.outputs.smoke_ran == 'true' }}
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
SHA: ${{ needs.canary-smoke.outputs.sha }}
|
||||
CP_URL: ${{ vars.CP_URL || 'https://staging-api.moleculesai.app' }}
|
||||
# CP_ADMIN_API_TOKEN gates write access to the redeploy endpoint.
|
||||
# Stored at the repo level so all workflows pick it up automatically.
|
||||
CP_ADMIN_API_TOKEN: ${{ secrets.CP_ADMIN_API_TOKEN }}
|
||||
# canary_slug pin: deploy the verified :staging-<sha> to the canary
|
||||
# first (soak 120s), then fan out to the rest of the fleet.
|
||||
CANARY_SLUG: ${{ vars.CANARY_PROMOTE_SLUG || '' }}
|
||||
SOAK_SECONDS: ${{ vars.CANARY_PROMOTE_SOAK || '120' }}
|
||||
BATCH_SIZE: ${{ vars.CANARY_PROMOTE_BATCH || '3' }}
|
||||
steps:
|
||||
- name: Check CP credentials
|
||||
- uses: imjasonh/setup-crane@6da1ae018866400525525ce74ff892880c099987 # v0.5
|
||||
|
||||
- name: GHCR login
|
||||
run: |
|
||||
if [ -z "${CP_ADMIN_API_TOKEN:-}" ]; then
|
||||
echo "::error::CP_ADMIN_API_TOKEN secret is not set — promote step cannot call redeploy-fleet."
|
||||
echo "::error::Set it at: repo Settings → Actions → Variables and Secrets → New Secret."
|
||||
exit 1
|
||||
fi
|
||||
echo "${{ secrets.GITHUB_TOKEN }}" | \
|
||||
crane auth login ghcr.io -u "${{ github.actor }}" --password-stdin
|
||||
|
||||
- name: Promote verified ECR image to :latest
|
||||
- name: Retag platform :staging-<sha> → :latest
|
||||
run: |
|
||||
set -euo pipefail
|
||||
crane tag \
|
||||
"${IMAGE_NAME}:staging-${{ needs.canary-smoke.outputs.sha }}" \
|
||||
latest
|
||||
|
||||
TARGET_TAG="staging-${SHA}"
|
||||
BODY=$(jq -nc \
|
||||
--arg tag "$TARGET_TAG" \
|
||||
--argjson soak "${SOAK_SECONDS:-120}" \
|
||||
--argjson batch "${BATCH_SIZE:-3}" \
|
||||
--argjson dry false \
|
||||
'{
|
||||
target_tag: $tag,
|
||||
soak_seconds: $soak,
|
||||
batch_size: $batch,
|
||||
dry_run: $dry
|
||||
}')
|
||||
|
||||
if [ -n "${CANARY_SLUG:-}" ]; then
|
||||
BODY=$(jq '. * {canary_slug: $slug}' --arg slug "$CANARY_SLUG" <<<"$BODY")
|
||||
fi
|
||||
|
||||
echo "Calling: POST $CP_URL/cp/admin/tenants/redeploy-fleet"
|
||||
echo " target_tag: $TARGET_TAG"
|
||||
echo " body: $BODY"
|
||||
|
||||
HTTP_RESPONSE=$(mktemp)
|
||||
HTTP_CODE_FILE=$(mktemp)
|
||||
set +e
|
||||
curl -sS -o "$HTTP_RESPONSE" -w '%{http_code}' \
|
||||
-m 1200 \
|
||||
-H "Authorization: Bearer $CP_ADMIN_API_TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-X POST "$CP_URL/cp/admin/tenants/redeploy-fleet" \
|
||||
-d "$BODY" >"$HTTP_CODE_FILE"
|
||||
CURL_EXIT=$?
|
||||
set -e
|
||||
|
||||
HTTP_CODE=$(cat "$HTTP_CODE_FILE" 2>/dev/null || echo "000")
|
||||
[ -z "$HTTP_CODE" ] && HTTP_CODE="000"
|
||||
|
||||
echo "HTTP $HTTP_CODE (curl exit $CURL_EXIT)"
|
||||
cat "$HTTP_RESPONSE" | jq . || cat "$HTTP_RESPONSE"
|
||||
|
||||
if [ "$HTTP_CODE" -ge 400 ]; then
|
||||
echo "::error::CP redeploy-fleet returned HTTP $HTTP_CODE — refusing to proceed."
|
||||
exit 1
|
||||
fi
|
||||
- name: Retag tenant :staging-<sha> → :latest
|
||||
run: |
|
||||
crane tag \
|
||||
"${TENANT_IMAGE_NAME}:staging-${{ needs.canary-smoke.outputs.sha }}" \
|
||||
latest
|
||||
|
||||
- name: Summary
|
||||
run: |
|
||||
{
|
||||
echo "## Canary verified — :latest promoted via CP redeploy-fleet"
|
||||
echo ""
|
||||
echo "- **Target tag:** \`staging-${{ needs.canary-smoke.outputs.sha }}\`"
|
||||
echo "- **Registry:** ECR (\`${TENANT_IMAGE_NAME}\`)"
|
||||
echo "- **Canary slug:** \`${CANARY_SLUG:-<none>}\` (soak ${SOAK_SECONDS}s)"
|
||||
echo "- **Batch size:** ${BATCH_SIZE:-3}"
|
||||
echo ""
|
||||
echo "CP redeploy-fleet is rolling out the verified image across the prod fleet."
|
||||
echo "The fleet's 5-minute health-check loop will pick up the update automatically."
|
||||
echo "## Canary verified — :latest promoted"
|
||||
echo
|
||||
echo "- \`${IMAGE_NAME}:staging-${{ needs.canary-smoke.outputs.sha }}\` → \`${IMAGE_NAME}:latest\`"
|
||||
echo "- \`${TENANT_IMAGE_NAME}:staging-${{ needs.canary-smoke.outputs.sha }}\` → \`${TENANT_IMAGE_NAME}:latest\`"
|
||||
echo
|
||||
echo "Prod tenant fleet will pick up the new digest on its next 5-min auto-update cycle."
|
||||
} >> "$GITHUB_STEP_SUMMARY"
|
||||
|
||||
99
.github/workflows/check-merge-group-trigger.yml
vendored
99
.github/workflows/check-merge-group-trigger.yml
vendored
@ -14,13 +14,6 @@ name: Check merge_group trigger on required workflows
|
||||
# Reasoning for staging-only: main has its own CI gating model (PR review),
|
||||
# but staging is what the merge queue runs on, so it's the trigger that
|
||||
# matters.
|
||||
#
|
||||
# Gitea stub: Gitea has no merge queue feature and no `merge_group:`
|
||||
# event type. The linter would find no `merge_group:` triggers to verify
|
||||
# (they don't exist on Gitea), so the lint is vacuously satisfied.
|
||||
# Converting to a no-op stub keeps the workflow+job name stable for any
|
||||
# commit-status context consumers while eliminating the `gh api` call
|
||||
# that fails against Gitea's REST surface (#75 / PR-D).
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
@ -32,6 +25,9 @@ on:
|
||||
paths:
|
||||
- '.github/workflows/**.yml'
|
||||
- '.github/workflows/**.yaml'
|
||||
# Self-listen on merge_group so the linter passes its own queue run.
|
||||
merge_group:
|
||||
types: [checks_requested]
|
||||
|
||||
jobs:
|
||||
check:
|
||||
@ -40,9 +36,88 @@ jobs:
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Gitea no-op (merge queue not applicable)
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- name: Verify merge_group trigger on required-check workflows
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
REPO: ${{ github.repository }}
|
||||
shell: bash
|
||||
run: |
|
||||
echo "Gitea Actions — merge queue not supported; no-op."
|
||||
echo "On GitHub this workflow lints that required-check workflows declare"
|
||||
echo "merge_group: triggers to prevent queue deadlock. On Gitea that"
|
||||
echo "constraint is inapplicable — all workflows pass vacuously."
|
||||
set -euo pipefail
|
||||
|
||||
# Branch we care about — the one merge queue runs on.
|
||||
BRANCH=staging
|
||||
|
||||
# Pull the list of required status check contexts. If the branch
|
||||
# has no protection or no required checks, exit clean — nothing
|
||||
# to lint.
|
||||
REQUIRED=$(gh api "repos/${REPO}/branches/${BRANCH}/protection/required_status_checks" \
|
||||
--jq '.contexts[]' 2>/dev/null || true)
|
||||
if [ -z "$REQUIRED" ]; then
|
||||
echo "No required status checks on ${BRANCH} — nothing to verify."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Required checks on ${BRANCH}:"
|
||||
echo "${REQUIRED}" | sed 's/^/ - /'
|
||||
echo
|
||||
|
||||
# Build a map: workflow file -> set of job names declared in it.
|
||||
# We use yq if available, otherwise grep the `name:` lines under
|
||||
# `jobs:`. Stick with grep for portability — runner image always
|
||||
# has it; yq isn't in the default image as of 2026-04.
|
||||
declare -A workflow_jobs
|
||||
shopt -s nullglob
|
||||
for wf in .github/workflows/*.yml .github/workflows/*.yaml; do
|
||||
[ -f "$wf" ] || continue
|
||||
# Extract the workflow name (the `name:` at file root).
|
||||
wf_name=$(awk '/^name:[[:space:]]/ {sub(/^name:[[:space:]]+/,""); gsub(/^"|"$/,""); print; exit}' "$wf")
|
||||
# Extract job step names from the `jobs:` block. A job step is:
|
||||
# - id under `jobs:` (key with 2-space indent followed by colon)
|
||||
# - the `name:` field inside that job (4-space indent)
|
||||
# We collect both because required_status_checks contexts can
|
||||
# match either, depending on how the workflow was authored.
|
||||
jobs_block=$(awk '/^jobs:/{flag=1; next} flag' "$wf")
|
||||
job_names=$(echo "$jobs_block" | awk '/^[[:space:]]{4}name:[[:space:]]/ {sub(/^[[:space:]]+name:[[:space:]]+/,""); gsub(/^["'"'"']|["'"'"']$/,""); print}')
|
||||
workflow_jobs["$wf"]="${wf_name}"$'\n'"${job_names}"
|
||||
done
|
||||
|
||||
# For each required check, find the workflow that produces it.
|
||||
# Then verify that workflow lists merge_group as a trigger.
|
||||
FAILED=0
|
||||
while IFS= read -r check; do
|
||||
[ -z "$check" ] && continue
|
||||
owning_wf=""
|
||||
for wf in "${!workflow_jobs[@]}"; do
|
||||
if echo "${workflow_jobs[$wf]}" | grep -Fxq "$check"; then
|
||||
owning_wf="$wf"
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ -z "$owning_wf" ]; then
|
||||
echo "::warning::Required check '${check}' has no matching workflow in this repo. Skipping (may be from an external app)."
|
||||
continue
|
||||
fi
|
||||
|
||||
# Does the workflow's trigger list include merge_group?
|
||||
# Match either bare `merge_group:` line or merge_group with
|
||||
# subsequent indented config (types: [checks_requested]).
|
||||
if grep -qE '^[[:space:]]*merge_group:' "$owning_wf"; then
|
||||
echo "OK: '${check}' (in $owning_wf) — has merge_group trigger"
|
||||
else
|
||||
echo "::error file=${owning_wf}::Required check '${check}' is produced by ${owning_wf}, but the workflow does not declare a 'merge_group:' trigger. With merge queue enabled on ${BRANCH}, this will deadlock the queue (every PR sits AWAITING_CHECKS forever). Add this to the workflow's 'on:' block:"
|
||||
echo "::error file=${owning_wf}:: merge_group:"
|
||||
echo "::error file=${owning_wf}:: types: [checks_requested]"
|
||||
FAILED=1
|
||||
fi
|
||||
done <<< "$REQUIRED"
|
||||
|
||||
if [ "$FAILED" -ne 0 ]; then
|
||||
echo
|
||||
echo "::error::Block. See errors above. Reference: $(grep -l 'reference_merge_queue' /dev/null 2>/dev/null || echo 'memory: reference_merge_queue_enablement.md')."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "All required workflows on ${BRANCH} declare merge_group triggers."
|
||||
|
||||
25
.github/workflows/ci.yml
vendored
25
.github/workflows/ci.yml
vendored
@ -235,13 +235,7 @@ jobs:
|
||||
run: npx vitest run --coverage
|
||||
- name: Upload coverage summary as artifact
|
||||
if: needs.changes.outputs.canvas == 'true' && always()
|
||||
# Pinned to v3 for Gitea act_runner v0.6 compatibility — v4+ uses
|
||||
# the GHES 3.10+ artifact protocol that Gitea 1.22.x does NOT
|
||||
# implement, surfacing as `GHESNotSupportedError: @actions/artifact
|
||||
# v2.0.0+, upload-artifact@v4+ and download-artifact@v4+ are not
|
||||
# currently supported on GHES`. Drop this pin when Gitea ships
|
||||
# the v4 protocol (tracked: post-Gitea-1.23 followup).
|
||||
uses: actions/upload-artifact@c6a366c94c3e0affe28c06c8df20a878f24da3cf # v3.2.2
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: canvas-coverage-${{ github.run_id }}
|
||||
path: canvas/coverage/
|
||||
@ -304,9 +298,13 @@ jobs:
|
||||
needs: [changes, canvas-build]
|
||||
# Only fires on direct pushes to main (i.e. after staging→main promotion).
|
||||
if: needs.changes.outputs.canvas == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main'
|
||||
permissions:
|
||||
# Required to post commit comments via the GitHub API.
|
||||
contents: write
|
||||
steps:
|
||||
- name: Write deploy reminder to step summary
|
||||
- name: Post deploy reminder as commit comment
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
COMMIT_SHA: ${{ github.sha }}
|
||||
RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
run: |
|
||||
@ -333,13 +331,10 @@ jobs:
|
||||
printf '\n> Posted automatically by CI · commit `%s` · [build log](%s)\n' \
|
||||
"$COMMIT_SHA" "$RUN_URL" >> /tmp/deploy-reminder.md
|
||||
|
||||
# Gitea has no commit-comments API (no equivalent of
|
||||
# POST /repos/{owner}/{repo}/commits/{commit_sha}/comments).
|
||||
# Write to GITHUB_STEP_SUMMARY instead — both GitHub Actions and
|
||||
# Gitea Actions render this as the workflow run's summary page,
|
||||
# which is where operators look for post-deploy action items.
|
||||
# (#75 / PR-D)
|
||||
cat /tmp/deploy-reminder.md >> "$GITHUB_STEP_SUMMARY"
|
||||
gh api \
|
||||
--method POST \
|
||||
"repos/${{ github.repository }}/commits/${{ github.sha }}/comments" \
|
||||
--field "body=@/tmp/deploy-reminder.md"
|
||||
|
||||
# Python Lint & Test — required check, always runs. See platform-build
|
||||
# for the rationale.
|
||||
|
||||
184
.github/workflows/codeql.yml
vendored
184
.github/workflows/codeql.yml
vendored
@ -1,92 +1,36 @@
|
||||
name: CodeQL
|
||||
|
||||
# Stub workflow — CodeQL Action is structurally incompatible with Gitea
|
||||
# Actions (post-2026-05-06 SCM migration off GitHub).
|
||||
# Controls CodeQL scan triggers for this repo.
|
||||
#
|
||||
# Why this is a stub, not a real CodeQL run:
|
||||
# GitHub's "Code quality" default setup (the UI-configured one) is
|
||||
# hardcoded to only scan the default branch — on this repo that's
|
||||
# `staging`, so PRs promoting staging→main would otherwise never be
|
||||
# scanned. This workflow fills that gap by explicitly scanning both
|
||||
# branches on push and PR.
|
||||
#
|
||||
# 1. github/codeql-action/init@v4 hits api.github.com endpoints
|
||||
# (CodeQL CLI bundle download + query-pack registry + telemetry)
|
||||
# that Gitea 1.22.x does NOT proxy. The act_runner has
|
||||
# GITHUB_SERVER_URL=https://git.moleculesai.app correctly set
|
||||
# (per saved memory feedback_act_runner_github_server_url and
|
||||
# /config.yaml on the operator host), but the Gitea API surface
|
||||
# simply does not implement the codeql-action bundle endpoints.
|
||||
# Observed in run 1d/3101 (2026-05-07): "::error::404 page not
|
||||
# found" inside the Initialize CodeQL step, before any analysis.
|
||||
#
|
||||
# 2. PR #35 attempted to mark `continue-on-error: true` at the JOB
|
||||
# level (correct YAML structure). Gitea 1.22.6 does NOT propagate
|
||||
# job-level continue-on-error to the commit-status API — every
|
||||
# matrix leg still posts `failure` to the status surface, which
|
||||
# keeps OVERALL=failure on every push to main + staging and
|
||||
# blocks visual auto-promote signals (#156).
|
||||
#
|
||||
# 3. Hongming policy decision (2026-05-07, task #156): CodeQL is
|
||||
# ADVISORY, not blocking, on Gitea Actions. We do not block PR
|
||||
# merge or staging→main promotion on CodeQL findings until we
|
||||
# have a Gitea-compatible static-analysis pipeline.
|
||||
#
|
||||
# What this stub preserves:
|
||||
#
|
||||
# - Workflow name `CodeQL` (referenced by auto-promote-staging.yml
|
||||
# line 67 as a workflow_run gate — must stay stable).
|
||||
# - Job name template `Analyze (${{ matrix.language }})` and the
|
||||
# 3-leg matrix (go, javascript-typescript, python). Branch
|
||||
# protection / required-check parity (#144) keys on these
|
||||
# exact context names.
|
||||
# - merge_group + push + pull_request + schedule triggers, so the
|
||||
# merge-queue check name still resolves (per saved memory
|
||||
# feedback_branch_protection_check_name_parity).
|
||||
#
|
||||
# Re-enabling real analysis (future work):
|
||||
#
|
||||
# - Option A: self-hosted Semgrep / OpenGrep via a custom action
|
||||
# that doesn't hit api.github.com. Tracked behind #156 follow-up.
|
||||
# - Option B: Sonatype Nexus IQ or similar, called from a step
|
||||
# that uses the Gitea-issued token only.
|
||||
# - Option C: re-host this workflow on a small GitHub mirror used
|
||||
# ONLY for SAST (push-mirrored from Gitea). Acceptable trade-off
|
||||
# if/when payment is restored on a non-suspended GitHub org —
|
||||
# but per saved memory feedback_no_single_source_of_truth, we
|
||||
# should design for multi-vendor backup, not GitHub-only SAST.
|
||||
#
|
||||
# Until one of those lands, this stub keeps commit-status green so
|
||||
# the auto-promote chain isn't permanently red on a tool we cannot
|
||||
# actually run.
|
||||
#
|
||||
# Security policy: ADVISORY. We accept the residual risk of un-scanned
|
||||
# pushes during this window. Compensating controls in place:
|
||||
# - secret-scan.yml runs on every push (active, blocks on hits)
|
||||
# - block-internal-paths.yml blocks forbidden file paths
|
||||
# - lint-curl-status-capture.yml catches one specific class of bug
|
||||
# - branch-protection-drift.yml + the merge_group required-checks
|
||||
# parity keep the gate surface stable
|
||||
# These are not equivalent to CodeQL coverage. Status of the
|
||||
# replacement plan is tracked in #156.
|
||||
# Runs on ubuntu-latest (GHA-hosted — public repo, free). GHAS is NOT
|
||||
# enabled on this repo, so results are not uploaded to the Security
|
||||
# tab — the scan fails the PR check on findings, and the SARIF is
|
||||
# kept as a workflow artifact for triage.
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, staging]
|
||||
pull_request:
|
||||
branches: [main, staging]
|
||||
# Required so the matrix legs emit a real result on the queued
|
||||
# commit instead of a false-green when merge queue is enabled.
|
||||
# Per saved memory feedback_branch_protection_check_name_parity:
|
||||
# path-filtered / matrix workflows MUST emit the protected name
|
||||
# via a job that always runs.
|
||||
# GitHub merge queue fires `merge_group` for the queue's pre-merge CI run.
|
||||
# Required so CodeQL Analyze checks get a real result on the queued
|
||||
# commit instead of a false-green. Event only fires once merge queue is
|
||||
# enabled on the target branch — safe to add unconditionally.
|
||||
merge_group:
|
||||
types: [checks_requested]
|
||||
schedule:
|
||||
# Weekly heartbeat. Cheap on a stub (the no-op job is ~5s) but
|
||||
# keeps the workflow visible in Gitea's Actions UI so the next
|
||||
# operator notices it's a stub instead of a missing surface.
|
||||
# Weekly run picks up findings in code that hasn't been touched.
|
||||
- cron: '30 1 * * 0'
|
||||
|
||||
# Workflow-level concurrency: only one stub run per branch/PR at a
|
||||
# time. cancel-in-progress: false because a quick follow-up push
|
||||
# shouldn't kill an in-flight run — even though the stub is fast,
|
||||
# the contract should match a real CodeQL run for when we re-enable.
|
||||
# Workflow-level concurrency: only one CodeQL run per branch/PR at a time.
|
||||
# `cancel-in-progress: false` queues new runs so a quick follow-up push
|
||||
# doesn't nuke a 45-min analysis mid-flight.
|
||||
concurrency:
|
||||
group: codeql-${{ github.ref }}
|
||||
cancel-in-progress: false
|
||||
@ -94,17 +38,16 @@ concurrency:
|
||||
permissions:
|
||||
actions: read
|
||||
contents: read
|
||||
# No security-events: write — we don't call the upload API anyway,
|
||||
# GHAS isn't on Gitea.
|
||||
# No security-events: write — we don't call the upload API.
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
# Job NAME shape is load-bearing — auto-promote-staging.yml +
|
||||
# branch protection both key on `Analyze (${{ matrix.language }})`.
|
||||
# Do NOT rename without coordinating both surfaces.
|
||||
name: Analyze (${{ matrix.language }})
|
||||
# CodeQL set to advisory (non-blocking) on Gitea Actions — Hongming dec'''n 2026-05-07 (#156).
|
||||
# Findings still emit as SARIF artifacts; failing CodeQL run does not block PR merge.
|
||||
continue-on-error: true
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
timeout-minutes: 45
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
@ -112,25 +55,68 @@ jobs:
|
||||
language: [go, javascript-typescript, python]
|
||||
|
||||
steps:
|
||||
# Single-step stub: log the policy decision + emit success.
|
||||
# Exit 0 explicitly so the commit-status API records `success`
|
||||
# for each of the three matrix legs.
|
||||
- name: CodeQL stub (advisory, non-blocking on Gitea)
|
||||
- name: Checkout
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
# github-app-auth sibling-checkout removed 2026-05-07 (#157):
|
||||
# plugin was dropped + the Dockerfile no longer needs it.
|
||||
# jq is pre-installed on ubuntu-latest — no setup step needed.
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
# security-extended widens past the default to include the
|
||||
# full security-query set for a public SaaS surface.
|
||||
queries: security-extended
|
||||
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
id: analyze
|
||||
uses: github/codeql-action/analyze@95e58e9a2cdfd71adc6e0353d5c52f41a045d225 # v4.35.2
|
||||
with:
|
||||
category: "/language:${{ matrix.language }}"
|
||||
# upload: never — GHAS isn't enabled on this repo, so the
|
||||
# upload API 403s. Write SARIF locally instead.
|
||||
upload: never
|
||||
output: sarif-results/${{ matrix.language }}
|
||||
|
||||
- name: Parse SARIF + fail on findings
|
||||
# The analyze step writes <database>.sarif into the output
|
||||
# directory — database name is the short CodeQL lang id, not
|
||||
# the matrix value (e.g. "javascript-typescript" →
|
||||
# javascript.sarif), so glob rather than hardcode.
|
||||
# Filter to error/warning severity: security-extended emits
|
||||
# "note" rows for informational findings we don't want to fail
|
||||
# the build over.
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
cat <<EOF
|
||||
CodeQL is currently ADVISORY on Gitea Actions (post-2026-05-06).
|
||||
Language matrix leg: ${{ matrix.language }}
|
||||
Reason: github/codeql-action/init@v4 calls api.github.com
|
||||
bundle endpoints that Gitea 1.22.x does not implement.
|
||||
Observed: "::error::404 page not found" in the Init
|
||||
CodeQL step on every prior run.
|
||||
Policy: per Hongming decision 2026-05-07 (#156), CodeQL is
|
||||
non-blocking until a Gitea-compatible SAST pipeline
|
||||
lands. See workflow file header for replacement
|
||||
options + compensating controls.
|
||||
Status: emitting success so auto-promote isn't permanently
|
||||
red on a tool we cannot actually run today.
|
||||
EOF
|
||||
echo "::notice::CodeQL ${{ matrix.language }} — advisory stub, success."
|
||||
dir="sarif-results/${{ matrix.language }}"
|
||||
sarif=$(ls "$dir"/*.sarif 2>/dev/null | head -1 || true)
|
||||
if [ -z "$sarif" ] || [ ! -f "$sarif" ]; then
|
||||
echo "::error::No SARIF file found under $dir"
|
||||
ls -la "$dir" 2>/dev/null || true
|
||||
exit 1
|
||||
fi
|
||||
echo "Parsing $sarif"
|
||||
count=$(jq '[.runs[].results[] | select(.level == "error" or .level == "warning")] | length' "$sarif")
|
||||
echo "CodeQL findings (error+warning) for ${{ matrix.language }}: $count"
|
||||
if [ "$count" -gt 0 ]; then
|
||||
echo "::error::CodeQL found $count issues. Details below; full SARIF in the artifact."
|
||||
jq -r '.runs[].results[] | select(.level == "error" or .level == "warning") | " - [\(.level)] \(.ruleId // "?"): \(.message.text // "(no message)") @ \(.locations[0].physicalLocation.artifactLocation.uri // "?"):\(.locations[0].physicalLocation.region.startLine // "?")"' "$sarif"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Upload SARIF artifact
|
||||
# Keep SARIF around on success + failure so triagers can diff.
|
||||
# 14-day retention — longer than default 3, short enough not
|
||||
# to bloat quota.
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v3 # pinned to v3 for Gitea act_runner v0.6 compatibility (internal#46)
|
||||
with:
|
||||
name: codeql-sarif-${{ matrix.language }}
|
||||
path: sarif-results/${{ matrix.language }}/
|
||||
retention-days: 14
|
||||
|
||||
130
.github/workflows/e2e-api.yml
vendored
130
.github/workflows/e2e-api.yml
vendored
@ -12,59 +12,6 @@ name: E2E API Smoke Test
|
||||
# spending CI cycles. See the in-job comment on the `e2e-api` job for
|
||||
# why this is one job (not two-jobs-sharing-name) and the 2026-04-29
|
||||
# PR #2264 incident that drove the consolidation.
|
||||
#
|
||||
# Parallel-safety (Class B Hongming-owned CICD red sweep, 2026-05-08)
|
||||
# -------------------------------------------------------------------
|
||||
# Same substrate hazard as PR #98 (handlers-postgres-integration). Our
|
||||
# Gitea act_runner runs with `container.network: host` (operator host
|
||||
# `/opt/molecule/runners/config.yaml`), which means:
|
||||
#
|
||||
# * Two concurrent runs both try to bind their `-p 15432:5432` /
|
||||
# `-p 16379:6379` host ports — the second postgres/redis FATALs
|
||||
# with `Address in use` and `docker run` returns exit 125 with
|
||||
# `Conflict. The container name "/molecule-ci-postgres" is already
|
||||
# in use by container ...`. Verified in run a7/2727 on 2026-05-07.
|
||||
# * The fixed container names `molecule-ci-postgres` / `-redis` (the
|
||||
# pre-fix shape) collide on name AS WELL AS port. The cleanup-with-
|
||||
# `docker rm -f` at the start of the second job KILLS the first
|
||||
# job's still-running postgres/redis.
|
||||
#
|
||||
# Fix shape (mirrors PR #98's bridge-net pattern, adapted because
|
||||
# platform-server is a Go binary on the host, not a containerised
|
||||
# step):
|
||||
#
|
||||
# 1. Unique container names per run:
|
||||
# pg-e2e-api-${RUN_ID}-${RUN_ATTEMPT}
|
||||
# redis-e2e-api-${RUN_ID}-${RUN_ATTEMPT}
|
||||
# `${RUN_ID}-${RUN_ATTEMPT}` is unique even across reruns of the
|
||||
# same run_id.
|
||||
# 2. Ephemeral host port per run (`-p 0:5432`), then read the actual
|
||||
# bound port via `docker port` and export DATABASE_URL/REDIS_URL
|
||||
# pointing at it. No fixed host-port → no port collision.
|
||||
# 3. `127.0.0.1` (NOT `localhost`) in URLs — IPv6 first-resolve was
|
||||
# the original flake fixed in #92 and the script's still IPv6-
|
||||
# enabled.
|
||||
# 4. `if: always()` cleanup so containers don't leak when test steps
|
||||
# fail.
|
||||
#
|
||||
# Issue #94 items #2 + #3 (also fixed here):
|
||||
# * Pre-pull `alpine:latest` so the platform-server's provisioner
|
||||
# (`internal/handlers/container_files.go`) can stand up its
|
||||
# ephemeral token-write helper without a daemon.io round-trip.
|
||||
# * Create `molecule-core-net` bridge network if missing so the
|
||||
# provisioner's container.HostConfig {NetworkMode: ...} attach
|
||||
# succeeds.
|
||||
# Item #1 (timeouts) — evidence on recent runs (77/3191, ae/4270, 0e/
|
||||
# 2318) shows Postgres ready in 3s, Redis in 1s, Platform in 1s when
|
||||
# they DO come up. Timeouts are not the bottleneck; not bumped.
|
||||
#
|
||||
# Item explicitly NOT fixed here: failing test `Status back online`
|
||||
# fails because the platform's langgraph workspace template image
|
||||
# (ghcr.io/molecule-ai/workspace-template-langgraph:latest) returns
|
||||
# 403 Forbidden post-2026-05-06 GitHub org suspension. That is a
|
||||
# template-registry resolution issue (ADR-002 / local-build mode) and
|
||||
# belongs in a separate change that touches workspace-server, not
|
||||
# this workflow file.
|
||||
|
||||
on:
|
||||
push:
|
||||
@ -131,14 +78,11 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
env:
|
||||
# Unique per-run container names so concurrent runs on the host-
|
||||
# network act_runner don't collide on name OR port.
|
||||
# `${RUN_ID}-${RUN_ATTEMPT}` stays unique across reruns of the
|
||||
# same run_id. PORT is set later (after docker port lookup) since
|
||||
# we let Docker assign an ephemeral host port.
|
||||
PG_CONTAINER: pg-e2e-api-${{ github.run_id }}-${{ github.run_attempt }}
|
||||
REDIS_CONTAINER: redis-e2e-api-${{ github.run_id }}-${{ github.run_attempt }}
|
||||
DATABASE_URL: postgres://dev:dev@localhost:15432/molecule?sslmode=disable
|
||||
REDIS_URL: redis://localhost:16379
|
||||
PORT: "8080"
|
||||
PG_CONTAINER: molecule-ci-postgres
|
||||
REDIS_CONTAINER: molecule-ci-redis
|
||||
steps:
|
||||
- name: No-op pass (paths filter excluded this commit)
|
||||
if: needs.detect-changes.outputs.api != 'true'
|
||||
@ -153,53 +97,11 @@ jobs:
|
||||
go-version: 'stable'
|
||||
cache: true
|
||||
cache-dependency-path: workspace-server/go.sum
|
||||
- name: Pre-pull alpine + ensure provisioner network (Issue #94 items #2 + #3)
|
||||
if: needs.detect-changes.outputs.api == 'true'
|
||||
run: |
|
||||
# Provisioner uses alpine:latest for ephemeral token-write
|
||||
# containers (workspace-server/internal/handlers/container_files.go).
|
||||
# Pre-pull so the first provision in test_api.sh doesn't race
|
||||
# the daemon's pull cache. Idempotent — `docker pull` is a no-op
|
||||
# when the image is already present.
|
||||
docker pull alpine:latest >/dev/null
|
||||
# Provisioner attaches workspace containers to
|
||||
# molecule-core-net (workspace-server/internal/provisioner/
|
||||
# provisioner.go::DefaultNetwork). The bridge already exists on
|
||||
# the operator host's docker daemon — `network create` is
|
||||
# idempotent via `|| true`.
|
||||
docker network create molecule-core-net >/dev/null 2>&1 || true
|
||||
echo "alpine:latest pre-pulled; molecule-core-net ensured."
|
||||
- name: Start Postgres (docker)
|
||||
if: needs.detect-changes.outputs.api == 'true'
|
||||
run: |
|
||||
# Defensive cleanup — only matches THIS run's container name,
|
||||
# so it cannot kill a sibling run's postgres. (Pre-fix the
|
||||
# name was static and this rm hit other runs' containers.)
|
||||
docker rm -f "$PG_CONTAINER" 2>/dev/null || true
|
||||
# `-p 0:5432` requests an ephemeral host port; we read it back
|
||||
# below and export DATABASE_URL.
|
||||
docker run -d --name "$PG_CONTAINER" \
|
||||
-e POSTGRES_USER=dev -e POSTGRES_PASSWORD=dev -e POSTGRES_DB=molecule \
|
||||
-p 0:5432 postgres:16 >/dev/null
|
||||
# Resolve the host-side port assignment. `docker port` prints
|
||||
# `0.0.0.0:NNNN` (and on host-net runners may also print an
|
||||
# IPv6 line — take the first IPv4 line).
|
||||
PG_PORT=$(docker port "$PG_CONTAINER" 5432/tcp | awk -F: '/^0\.0\.0\.0:/ {print $2; exit}')
|
||||
if [ -z "$PG_PORT" ]; then
|
||||
# Fallback: any first line. Some Docker versions print only
|
||||
# one line.
|
||||
PG_PORT=$(docker port "$PG_CONTAINER" 5432/tcp | head -1 | awk -F: '{print $NF}')
|
||||
fi
|
||||
if [ -z "$PG_PORT" ]; then
|
||||
echo "::error::Could not resolve host port for $PG_CONTAINER"
|
||||
docker port "$PG_CONTAINER" 5432/tcp || true
|
||||
docker logs "$PG_CONTAINER" || true
|
||||
exit 1
|
||||
fi
|
||||
# 127.0.0.1 (NOT localhost) — IPv6 first-resolve flake (#92).
|
||||
echo "PG_PORT=${PG_PORT}" >> "$GITHUB_ENV"
|
||||
echo "DATABASE_URL=postgres://dev:dev@127.0.0.1:${PG_PORT}/molecule?sslmode=disable" >> "$GITHUB_ENV"
|
||||
echo "Postgres host port: ${PG_PORT}"
|
||||
docker run -d --name "$PG_CONTAINER" -e POSTGRES_USER=dev -e POSTGRES_PASSWORD=dev -e POSTGRES_DB=molecule -p 15432:5432 postgres:16
|
||||
for i in $(seq 1 30); do
|
||||
if docker exec "$PG_CONTAINER" pg_isready -U dev >/dev/null 2>&1; then
|
||||
echo "Postgres ready after ${i}s"
|
||||
@ -214,20 +116,7 @@ jobs:
|
||||
if: needs.detect-changes.outputs.api == 'true'
|
||||
run: |
|
||||
docker rm -f "$REDIS_CONTAINER" 2>/dev/null || true
|
||||
docker run -d --name "$REDIS_CONTAINER" -p 0:6379 redis:7 >/dev/null
|
||||
REDIS_PORT=$(docker port "$REDIS_CONTAINER" 6379/tcp | awk -F: '/^0\.0\.0\.0:/ {print $2; exit}')
|
||||
if [ -z "$REDIS_PORT" ]; then
|
||||
REDIS_PORT=$(docker port "$REDIS_CONTAINER" 6379/tcp | head -1 | awk -F: '{print $NF}')
|
||||
fi
|
||||
if [ -z "$REDIS_PORT" ]; then
|
||||
echo "::error::Could not resolve host port for $REDIS_CONTAINER"
|
||||
docker port "$REDIS_CONTAINER" 6379/tcp || true
|
||||
docker logs "$REDIS_CONTAINER" || true
|
||||
exit 1
|
||||
fi
|
||||
echo "REDIS_PORT=${REDIS_PORT}" >> "$GITHUB_ENV"
|
||||
echo "REDIS_URL=redis://127.0.0.1:${REDIS_PORT}" >> "$GITHUB_ENV"
|
||||
echo "Redis host port: ${REDIS_PORT}"
|
||||
docker run -d --name "$REDIS_CONTAINER" -p 16379:6379 redis:7
|
||||
for i in $(seq 1 15); do
|
||||
if docker exec "$REDIS_CONTAINER" redis-cli ping 2>/dev/null | grep -q PONG; then
|
||||
echo "Redis ready after ${i}s"
|
||||
@ -246,15 +135,13 @@ jobs:
|
||||
if: needs.detect-changes.outputs.api == 'true'
|
||||
working-directory: workspace-server
|
||||
run: |
|
||||
# DATABASE_URL + REDIS_URL exported by the start-postgres /
|
||||
# start-redis steps point at this run's per-run host ports.
|
||||
./platform-server > platform.log 2>&1 &
|
||||
echo $! > platform.pid
|
||||
- name: Wait for /health
|
||||
if: needs.detect-changes.outputs.api == 'true'
|
||||
run: |
|
||||
for i in $(seq 1 30); do
|
||||
if curl -sf http://127.0.0.1:8080/health > /dev/null; then
|
||||
if curl -sf http://localhost:8080/health > /dev/null; then
|
||||
echo "Platform up after ${i}s"
|
||||
exit 0
|
||||
fi
|
||||
@ -298,9 +185,6 @@ jobs:
|
||||
kill "$(cat workspace-server/platform.pid)" 2>/dev/null || true
|
||||
fi
|
||||
- name: Stop service containers
|
||||
# always() so containers don't leak when test steps fail. The
|
||||
# cleanup is best-effort: if the container is already gone
|
||||
# (e.g. concurrent rerun race), don't fail the job.
|
||||
if: always() && needs.detect-changes.outputs.api == 'true'
|
||||
run: |
|
||||
docker rm -f "$PG_CONTAINER" 2>/dev/null || true
|
||||
|
||||
13
.github/workflows/e2e-staging-canvas.yml
vendored
13
.github/workflows/e2e-staging-canvas.yml
vendored
@ -22,9 +22,9 @@ on:
|
||||
# spending CI cycles. See e2e-api.yml for the rationale on why this
|
||||
# is a single job rather than two-jobs-sharing-name.
|
||||
push:
|
||||
branches: [main]
|
||||
branches: [main, staging]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
branches: [main, staging]
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
# Weekly on Sunday 08:00 UTC — catches Chrome / Playwright / Next.js
|
||||
@ -139,11 +139,7 @@ jobs:
|
||||
|
||||
- name: Upload Playwright report on failure
|
||||
if: failure() && needs.detect-changes.outputs.canvas == 'true'
|
||||
# Pinned to v3 for Gitea act_runner v0.6 compatibility — v4+ uses
|
||||
# the GHES 3.10+ artifact protocol that Gitea 1.22.x does NOT
|
||||
# implement (see ci.yml upload step for the canonical error
|
||||
# cite). Drop this pin when Gitea ships the v4 protocol.
|
||||
uses: actions/upload-artifact@c6a366c94c3e0affe28c06c8df20a878f24da3cf # v3.2.2
|
||||
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
|
||||
with:
|
||||
name: playwright-report-staging
|
||||
path: canvas/playwright-report-staging/
|
||||
@ -151,8 +147,7 @@ jobs:
|
||||
|
||||
- name: Upload screenshots on failure
|
||||
if: failure() && needs.detect-changes.outputs.canvas == 'true'
|
||||
# Pinned to v3 for Gitea act_runner v0.6 compatibility (see above).
|
||||
uses: actions/upload-artifact@c6a366c94c3e0affe28c06c8df20a878f24da3cf # v3.2.2
|
||||
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
|
||||
with:
|
||||
name: playwright-screenshots
|
||||
path: canvas/test-results/
|
||||
|
||||
4
.github/workflows/e2e-staging-external.yml
vendored
4
.github/workflows/e2e-staging-external.yml
vendored
@ -32,7 +32,7 @@ name: E2E Staging External Runtime
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
branches: [staging, main]
|
||||
paths:
|
||||
- 'workspace-server/internal/handlers/workspace.go'
|
||||
- 'workspace-server/internal/handlers/registry.go'
|
||||
@ -44,7 +44,7 @@ on:
|
||||
- 'tests/e2e/test_staging_external_runtime.sh'
|
||||
- '.github/workflows/e2e-staging-external.yml'
|
||||
pull_request:
|
||||
branches: [main]
|
||||
branches: [staging, main]
|
||||
paths:
|
||||
- 'workspace-server/internal/handlers/workspace.go'
|
||||
- 'workspace-server/internal/handlers/registry.go'
|
||||
|
||||
13
.github/workflows/e2e-staging-saas.yml
vendored
13
.github/workflows/e2e-staging-saas.yml
vendored
@ -20,12 +20,13 @@ name: E2E Staging SaaS (full lifecycle)
|
||||
# via the same paths watcher that e2e-api.yml uses)
|
||||
|
||||
on:
|
||||
# Trunk-based (Phase 3 of internal#81): main is the only branch.
|
||||
# Previously this fired on staging push too because staging was a
|
||||
# superset of main and ran the gate ahead of auto-promote; with no
|
||||
# staging branch, main is where E2E gates the deploy.
|
||||
# Fire on staging push too — previously this only ran on main, which
|
||||
# meant the most thorough end-to-end test caught regressions AFTER
|
||||
# they shipped to staging (and then to the auto-promote PR). Running
|
||||
# on staging push catches them BEFORE the staging→main promotion
|
||||
# opens, so a green canary into auto-promote is more meaningful.
|
||||
push:
|
||||
branches: [main]
|
||||
branches: [staging, main]
|
||||
paths:
|
||||
- 'workspace-server/internal/handlers/registry.go'
|
||||
- 'workspace-server/internal/handlers/workspace_provision.go'
|
||||
@ -35,7 +36,7 @@ on:
|
||||
- 'tests/e2e/test_staging_full_saas.sh'
|
||||
- '.github/workflows/e2e-staging-saas.yml'
|
||||
pull_request:
|
||||
branches: [main]
|
||||
branches: [staging, main]
|
||||
paths:
|
||||
- 'workspace-server/internal/handlers/registry.go'
|
||||
- 'workspace-server/internal/handlers/workspace_provision.go'
|
||||
|
||||
139
.github/workflows/handlers-postgres-integration.yml
vendored
139
.github/workflows/handlers-postgres-integration.yml
vendored
@ -14,42 +14,12 @@ name: Handlers Postgres Integration
|
||||
# self-review caught it took 2 minutes to set up and would have caught
|
||||
# the bug at PR-time.
|
||||
#
|
||||
# Why this workflow does NOT use `services: postgres:` (Class B fix)
|
||||
# ------------------------------------------------------------------
|
||||
# Our act_runner config has `container.network: host` (operator host
|
||||
# /opt/molecule/runners/config.yaml), which act_runner applies to BOTH
|
||||
# the job container AND every service container. With host-net, two
|
||||
# concurrent runs of this workflow both try to bind 0.0.0.0:5432 — the
|
||||
# second postgres FATALs with `could not create any TCP/IP sockets:
|
||||
# Address in use`, and Docker auto-removes it (act_runner sets
|
||||
# AutoRemove:true on service containers). By the time the migrations
|
||||
# step runs `psql`, the postgres container is gone, hence
|
||||
# `Connection refused` then `failed to remove container: No such
|
||||
# container` at cleanup time.
|
||||
# This job spins a Postgres service container, applies the migration,
|
||||
# and runs `go test -tags=integration` against a live DB. Required
|
||||
# check on staging branch protection — backend handler PRs cannot
|
||||
# merge without a real-DB regression gate.
|
||||
#
|
||||
# Per-job `container.network` override is silently ignored by
|
||||
# act_runner — `--network and --net in the options will be ignored.`
|
||||
# appears in the runner log. Documented constraint.
|
||||
#
|
||||
# So we sidestep `services:` entirely. The job container still uses
|
||||
# host-net (inherited from runner config; required for cache server
|
||||
# discovery on the bridge IP 172.18.0.17:42631). We launch a sibling
|
||||
# postgres on the existing `molecule-core-net` bridge with a
|
||||
# UNIQUE name per run — `pg-handlers-${RUN_ID}-${RUN_ATTEMPT}` — and
|
||||
# read its bridge IP via `docker inspect`. A host-net job container
|
||||
# can reach a bridge-net container directly via the bridge IP (verified
|
||||
# manually on operator host 2026-05-08).
|
||||
#
|
||||
# Trade-offs vs. the original `services:` shape:
|
||||
# + No host-port collision; N parallel runs share the bridge cleanly
|
||||
# + `if: always()` cleanup runs even on test-step failure
|
||||
# - One more step in the workflow (+~3 lines)
|
||||
# - Requires `molecule-core-net` to exist on the operator host
|
||||
# (it does; declared in docker-compose.yml + docker-compose.infra.yml)
|
||||
#
|
||||
# Class B Hongming-owned CICD red sweep, 2026-05-08.
|
||||
#
|
||||
# Cost: ~30s job (postgres pull from cache + go build + 4 tests).
|
||||
# Cost: ~30s job (postgres pull from GH cache + go build + 4 tests).
|
||||
|
||||
on:
|
||||
push:
|
||||
@ -89,14 +59,20 @@ jobs:
|
||||
name: Handlers Postgres Integration
|
||||
needs: detect-changes
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
# Unique name per run so concurrent jobs don't collide on the
|
||||
# bridge network. ${RUN_ID}-${RUN_ATTEMPT} is unique even across
|
||||
# workflow_dispatch reruns of the same run_id.
|
||||
PG_NAME: pg-handlers-${{ github.run_id }}-${{ github.run_attempt }}
|
||||
# Bridge network already exists on the operator host (declared
|
||||
# in docker-compose.yml + docker-compose.infra.yml).
|
||||
PG_NETWORK: molecule-core-net
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:15-alpine
|
||||
env:
|
||||
POSTGRES_PASSWORD: test
|
||||
POSTGRES_DB: molecule
|
||||
ports:
|
||||
- 5432:5432
|
||||
# GHA spins this with --health-cmd built in for postgres images.
|
||||
options: >-
|
||||
--health-cmd pg_isready
|
||||
--health-interval 5s
|
||||
--health-timeout 5s
|
||||
--health-retries 10
|
||||
defaults:
|
||||
run:
|
||||
working-directory: workspace-server
|
||||
@ -113,57 +89,16 @@ jobs:
|
||||
with:
|
||||
go-version: 'stable'
|
||||
|
||||
- if: needs.detect-changes.outputs.handlers == 'true'
|
||||
name: Start sibling Postgres on bridge network
|
||||
working-directory: .
|
||||
run: |
|
||||
# Sanity: the bridge network must exist on the operator host.
|
||||
# Hard-fail loud if it doesn't — easier to spot than a silent
|
||||
# auto-create that diverges from the rest of the stack.
|
||||
if ! docker network inspect "${PG_NETWORK}" >/dev/null 2>&1; then
|
||||
echo "::error::Bridge network '${PG_NETWORK}' missing on operator host. Re-run docker-compose.infra.yml or check ops handbook."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# If a stale container with the same name exists (rerun on
|
||||
# the same run_id), wipe it first.
|
||||
docker rm -f "${PG_NAME}" >/dev/null 2>&1 || true
|
||||
|
||||
docker run -d \
|
||||
--name "${PG_NAME}" \
|
||||
--network "${PG_NETWORK}" \
|
||||
--health-cmd "pg_isready -U postgres" \
|
||||
--health-interval 5s \
|
||||
--health-timeout 5s \
|
||||
--health-retries 10 \
|
||||
-e POSTGRES_PASSWORD=test \
|
||||
-e POSTGRES_DB=molecule \
|
||||
postgres:15-alpine >/dev/null
|
||||
|
||||
# Read back the bridge IP. Always present immediately after
|
||||
# `docker run -d` for bridge networks.
|
||||
PG_HOST=$(docker inspect "${PG_NAME}" \
|
||||
--format "{{(index .NetworkSettings.Networks \"${PG_NETWORK}\").IPAddress}}")
|
||||
if [ -z "${PG_HOST}" ]; then
|
||||
echo "::error::Could not resolve PG_HOST for ${PG_NAME} on ${PG_NETWORK}"
|
||||
docker logs "${PG_NAME}" || true
|
||||
exit 1
|
||||
fi
|
||||
echo "PG_HOST=${PG_HOST}" >> "$GITHUB_ENV"
|
||||
echo "INTEGRATION_DB_URL=postgres://postgres:test@${PG_HOST}:5432/molecule?sslmode=disable" >> "$GITHUB_ENV"
|
||||
echo "Started ${PG_NAME} at ${PG_HOST}:5432"
|
||||
|
||||
- if: needs.detect-changes.outputs.handlers == 'true'
|
||||
name: Apply migrations to Postgres service
|
||||
env:
|
||||
PGPASSWORD: test
|
||||
run: |
|
||||
# Wait for postgres to actually accept connections. Docker's
|
||||
# health-cmd handles container-side readiness, but the wire
|
||||
# to the bridge IP is best-tested with pg_isready directly.
|
||||
# Wait for postgres to actually accept connections (the
|
||||
# GHA --health-cmd is best-effort but psql can still race).
|
||||
for i in {1..15}; do
|
||||
if pg_isready -h "${PG_HOST}" -p 5432 -U postgres -q; then break; fi
|
||||
echo "waiting for postgres at ${PG_HOST}:5432..."; sleep 2
|
||||
if pg_isready -h localhost -p 5432 -U postgres -q; then break; fi
|
||||
echo "waiting for postgres..."; sleep 2
|
||||
done
|
||||
|
||||
# Apply every .up.sql in lexicographic order with
|
||||
@ -196,7 +131,7 @@ jobs:
|
||||
# not fine once a cross-table atomicity test came in.
|
||||
set +e
|
||||
for migration in $(ls migrations/*.sql 2>/dev/null | grep -v '\.down\.sql$' | sort); do
|
||||
if psql -h "${PG_HOST}" -U postgres -d molecule -v ON_ERROR_STOP=1 \
|
||||
if psql -h localhost -U postgres -d molecule -v ON_ERROR_STOP=1 \
|
||||
-f "$migration" >/dev/null 2>&1; then
|
||||
echo "✓ $(basename "$migration")"
|
||||
else
|
||||
@ -210,7 +145,7 @@ jobs:
|
||||
# fail if any didn't land — that would be a real regression we
|
||||
# want loud.
|
||||
for tbl in delegations workspaces activity_logs pending_uploads; do
|
||||
if ! psql -h "${PG_HOST}" -U postgres -d molecule -tA \
|
||||
if ! psql -h localhost -U postgres -d molecule -tA \
|
||||
-c "SELECT 1 FROM information_schema.tables WHERE table_name = '$tbl'" \
|
||||
| grep -q 1; then
|
||||
echo "::error::$tbl table missing after migration replay — handler integration tests would be meaningless"
|
||||
@ -221,32 +156,16 @@ jobs:
|
||||
|
||||
- if: needs.detect-changes.outputs.handlers == 'true'
|
||||
name: Run integration tests
|
||||
env:
|
||||
INTEGRATION_DB_URL: postgres://postgres:test@localhost:5432/molecule?sslmode=disable
|
||||
run: |
|
||||
# INTEGRATION_DB_URL is exported by the start-postgres step;
|
||||
# points at the per-run bridge IP, not 127.0.0.1, so concurrent
|
||||
# workflow runs don't fight over a host-net 5432 port.
|
||||
go test -tags=integration -timeout 5m -v ./internal/handlers/ -run "^TestIntegration_"
|
||||
|
||||
- if: failure() && needs.detect-changes.outputs.handlers == 'true'
|
||||
- if: needs.detect-changes.outputs.handlers == 'true' && failure()
|
||||
name: Diagnostic dump on failure
|
||||
env:
|
||||
PGPASSWORD: test
|
||||
run: |
|
||||
echo "::group::postgres container status"
|
||||
docker ps -a --filter "name=${PG_NAME}" --format '{{.Status}} {{.Names}}' || true
|
||||
docker logs "${PG_NAME}" 2>&1 | tail -50 || true
|
||||
echo "::endgroup::"
|
||||
echo "::group::delegations table state"
|
||||
psql -h "${PG_HOST}" -U postgres -d molecule -c "SELECT * FROM delegations LIMIT 50;" || true
|
||||
psql -h localhost -U postgres -d molecule -c "SELECT * FROM delegations LIMIT 50;" || true
|
||||
echo "::endgroup::"
|
||||
|
||||
- if: always() && needs.detect-changes.outputs.handlers == 'true'
|
||||
name: Stop sibling Postgres
|
||||
working-directory: .
|
||||
run: |
|
||||
# always() so containers don't leak when migrations or tests
|
||||
# fail. The cleanup is best-effort: if the container is
|
||||
# already gone (e.g. concurrent rerun race), don't fail the job.
|
||||
docker rm -f "${PG_NAME}" >/dev/null 2>&1 || true
|
||||
echo "Cleaned up ${PG_NAME}"
|
||||
|
||||
|
||||
106
.github/workflows/harness-replays.yml
vendored
106
.github/workflows/harness-replays.yml
vendored
@ -56,40 +56,21 @@ jobs:
|
||||
run: ${{ steps.decide.outputs.run }}
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- uses: dorny/paths-filter@fbd0ab8f3e69293af611ebaee6363fc25e6d187d # v4.0.1
|
||||
id: filter
|
||||
with:
|
||||
filters: |
|
||||
run:
|
||||
- 'workspace-server/**'
|
||||
- 'canvas/**'
|
||||
- 'tests/harness/**'
|
||||
- '.github/workflows/harness-replays.yml'
|
||||
- id: decide
|
||||
run: |
|
||||
# workflow_dispatch: always run (manual trigger)
|
||||
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||
echo "run=true" >> "$GITHUB_OUTPUT"
|
||||
echo "debug=manual-trigger" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Determine the base commit to diff against.
|
||||
# For pull_request: use base.sha (the merge-base with main/staging).
|
||||
# For push: use github.event.before (the previous tip of the branch).
|
||||
# Fallback for new branches (all-zeros SHA): run everything.
|
||||
if [ "${{ github.event_name }}" = "pull_request" ] && \
|
||||
[ -n "${{ github.event.pull_request.base.sha }}" ]; then
|
||||
BASE="${{ github.event.pull_request.base.sha }}"
|
||||
elif [ -n "${{ github.event.before }}" ] && \
|
||||
! echo "${{ github.event.before }}" | grep -qE '^0+$'; then
|
||||
BASE="${{ github.event.before }}"
|
||||
else
|
||||
# New branch or github.event.before unavailable — run everything.
|
||||
echo "run=true" >> "$GITHUB_OUTPUT"
|
||||
echo "debug=new-branch-fallback" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# GitHub Actions and Gitea Actions both expose github.sha for HEAD.
|
||||
DIFF=$(git diff --name-only "$BASE" "${{ github.sha }}" 2>/dev/null)
|
||||
echo "debug=diff-base=$BASE diff-files=$DIFF" >> "$GITHUB_OUTPUT"
|
||||
|
||||
if echo "$DIFF" | grep -qE '^workspace-server/|^canvas/|^tests/harness/|^.github/workflows/harness-replays\.yml$'; then
|
||||
echo "run=true" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "run=false" >> "$GITHUB_OUTPUT"
|
||||
echo "run=${{ steps.filter.outputs.run }}" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
# ONE job that always runs. Real work is gated per-step on
|
||||
@ -110,80 +91,13 @@ jobs:
|
||||
run: |
|
||||
echo "No workspace-server / canvas / tests/harness / workflow changes — Harness Replays gate satisfied without running."
|
||||
echo "::notice::Harness Replays no-op pass (paths filter excluded this commit)."
|
||||
echo "::notice::Debug: ${{ needs.detect-changes.outputs.debug }}"
|
||||
|
||||
- if: needs.detect-changes.outputs.run == 'true'
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
# Log what files were detected so future failures include the diff.
|
||||
- name: Log detected changes
|
||||
if: needs.detect-changes.outputs.run == 'true'
|
||||
run: |
|
||||
echo "::notice::detect-changes debug: ${{ needs.detect-changes.outputs.debug }}"
|
||||
|
||||
# github-app-auth sibling-checkout removed 2026-05-07 (#157):
|
||||
# the plugin was dropped + Dockerfile.tenant no longer COPYs it.
|
||||
|
||||
# Pre-clone manifest deps before docker compose builds the tenant
|
||||
# image (Task #173 followup — same pattern as
|
||||
# publish-workspace-server-image.yml's "Pre-clone manifest deps"
|
||||
# step).
|
||||
#
|
||||
# Why pre-clone here too: tests/harness/compose.yml builds tenant-alpha
|
||||
# and tenant-beta from workspace-server/Dockerfile.tenant with
|
||||
# context=../.. (repo root). That Dockerfile expects
|
||||
# .tenant-bundle-deps/{workspace-configs-templates,org-templates,plugins}
|
||||
# to be present at build context root (post-#173 it COPYs from there
|
||||
# instead of running an in-image clone — the in-image clone failed
|
||||
# with "could not read Username for https://git.moleculesai.app"
|
||||
# because there's no auth path inside the build sandbox).
|
||||
#
|
||||
# Without this step harness-replays fails before any replay runs,
|
||||
# with `failed to calculate checksum of ref ...
|
||||
# "/.tenant-bundle-deps/plugins": not found`. Caught by run #892
|
||||
# (main, 2026-05-07T20:28:53Z) and run #964 (staging — same
|
||||
# symptom, different root cause: staging still has the in-image
|
||||
# clone path, hits the auth error directly).
|
||||
#
|
||||
# 2026-05-08 sub-finding (#192): the clone step ALSO fails when
|
||||
# any referenced workspace-template repo is private and the
|
||||
# AUTO_SYNC_TOKEN bearer (devops-engineer persona) lacks read
|
||||
# access. Root cause: 5 of 9 workspace-template repos
|
||||
# (openclaw, codex, crewai, deepagents, gemini-cli) had been
|
||||
# marked private with no team grant. Resolution: flipped them
|
||||
# to public per `feedback_oss_first_repo_visibility_default`
|
||||
# (the OSS surface should be public). Layer-3 (customer-private +
|
||||
# marketplace third-party repos) tracked separately in
|
||||
# internal#102.
|
||||
#
|
||||
# Token shape matches publish-workspace-server-image.yml: AUTO_SYNC_TOKEN
|
||||
# is the devops-engineer persona PAT, NOT the founder PAT (per
|
||||
# `feedback_per_agent_gitea_identity_default`). clone-manifest.sh
|
||||
# embeds it as basic-auth for the duration of the clones and strips
|
||||
# .git directories — the token never enters the resulting image.
|
||||
- name: Pre-clone manifest deps
|
||||
if: needs.detect-changes.outputs.run == 'true'
|
||||
env:
|
||||
MOLECULE_GITEA_TOKEN: ${{ secrets.AUTO_SYNC_TOKEN }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if [ -z "${MOLECULE_GITEA_TOKEN}" ]; then
|
||||
echo "::error::AUTO_SYNC_TOKEN secret is empty — register the devops-engineer persona PAT in repo Actions secrets"
|
||||
exit 1
|
||||
fi
|
||||
mkdir -p .tenant-bundle-deps
|
||||
bash scripts/clone-manifest.sh \
|
||||
manifest.json \
|
||||
.tenant-bundle-deps/workspace-configs-templates \
|
||||
.tenant-bundle-deps/org-templates \
|
||||
.tenant-bundle-deps/plugins
|
||||
# Sanity-check counts so a silent partial clone fails fast
|
||||
# instead of producing a half-empty image.
|
||||
ws_count=$(find .tenant-bundle-deps/workspace-configs-templates -mindepth 1 -maxdepth 1 -type d | wc -l)
|
||||
org_count=$(find .tenant-bundle-deps/org-templates -mindepth 1 -maxdepth 1 -type d | wc -l)
|
||||
plugins_count=$(find .tenant-bundle-deps/plugins -mindepth 1 -maxdepth 1 -type d | wc -l)
|
||||
echo "Cloned: ws=$ws_count org=$org_count plugins=$plugins_count"
|
||||
|
||||
- name: Install Python deps for replays
|
||||
# peer-discovery-404 (and future replays) eval Python against the
|
||||
# running tenant — importing workspace/a2a_client.py pulls in
|
||||
|
||||
59
.github/workflows/pr-guards.yml
vendored
59
.github/workflows/pr-guards.yml
vendored
@ -1,25 +1,14 @@
|
||||
name: pr-guards
|
||||
|
||||
# PR-time guards. Today the only guard is "disable auto-merge when a
|
||||
# new commit is pushed after auto-merge was enabled" — added 2026-04-27
|
||||
# after PR #2174 auto-merged with only its first commit because the
|
||||
# second commit was pushed after the merge queue had locked the PR's
|
||||
# SHA.
|
||||
# Thin caller that delegates to the molecule-ci reusable guard. Today
|
||||
# the guard is just "disable auto-merge when a new commit is pushed
|
||||
# after auto-merge was enabled" — added 2026-04-27 after PR #2174
|
||||
# auto-merged with only its first commit because the second commit
|
||||
# was pushed after the merge queue had locked the PR's SHA.
|
||||
#
|
||||
# Why this is inlined (not delegated to molecule-ci's reusable
|
||||
# workflow): the reusable workflow uses `gh pr merge --disable-auto`,
|
||||
# which calls GitHub's GraphQL API. Gitea has no GraphQL endpoint and
|
||||
# returns HTTP 405 on /api/graphql, so the job failed on every Gitea
|
||||
# PR push since the 2026-05-06 migration. Gitea also has no `--auto`
|
||||
# merge primitive that this job could be acting on, so the right
|
||||
# behaviour on Gitea is "no-op + green status" — not a 405.
|
||||
#
|
||||
# Inlining (vs. an `if:` on the `uses:` line) keeps the job ALWAYS
|
||||
# running, which matters for branch protection: required-check names
|
||||
# need a job that emits SUCCESS terminal state, not SKIPPED. See
|
||||
# `feedback_branch_protection_check_name_parity` and `feedback_pr_merge_safety_guards`.
|
||||
#
|
||||
# Issue #88 item 1.
|
||||
# When more PR-time guards land in molecule-ci, add them here as
|
||||
# additional jobs that share the same pull_request:synchronize
|
||||
# trigger.
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
@ -30,34 +19,4 @@ permissions:
|
||||
|
||||
jobs:
|
||||
disable-auto-merge-on-push:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# Detect Gitea Actions. act_runner sets GITEA_ACTIONS=true in the
|
||||
# step env on every job. Belt-and-suspenders: also check the repo
|
||||
# url's host, which is independent of any runner-side env config
|
||||
# (covers a future Gitea host where the env var is forgotten).
|
||||
- name: Detect runner host
|
||||
id: host
|
||||
run: |
|
||||
if [[ "${GITEA_ACTIONS:-}" == "true" ]] || [[ "${{ github.server_url }}" == *moleculesai.app* ]] || [[ "${{ github.event.repository.html_url }}" == *moleculesai.app* ]]; then
|
||||
echo "is_gitea=true" >> "$GITHUB_OUTPUT"
|
||||
echo "::notice::Gitea Actions detected — auto-merge gating is not applicable here (Gitea has no --auto merge primitive). Job will no-op."
|
||||
else
|
||||
echo "is_gitea=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- name: Disable auto-merge (GitHub only)
|
||||
if: steps.host.outputs.is_gitea != 'true'
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
PR: ${{ github.event.pull_request.number }}
|
||||
REPO: ${{ github.repository }}
|
||||
NEW_SHA: ${{ github.sha }}
|
||||
run: |
|
||||
set -eu
|
||||
gh pr merge "$PR" --disable-auto -R "$REPO" || true
|
||||
gh pr comment "$PR" -R "$REPO" --body "🔒 Auto-merge disabled — new commit (\`${NEW_SHA:0:7}\`) pushed after auto-merge was enabled. The merge queue locks SHAs at entry, so subsequent pushes can race. Verify the new commit and re-enable with \`gh pr merge --auto\`."
|
||||
|
||||
- name: Gitea no-op
|
||||
if: steps.host.outputs.is_gitea == 'true'
|
||||
run: echo "Gitea Actions — auto-merge gating not applicable; no-op (job intentionally green so branch protection's required-check name lands SUCCESS)."
|
||||
uses: molecule-ai/molecule-ci/.github/workflows/disable-auto-merge-on-push.yml@main
|
||||
|
||||
16
.github/workflows/publish-canvas-image.yml
vendored
16
.github/workflows/publish-canvas-image.yml
vendored
@ -54,22 +54,6 @@ jobs:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
|
||||
|
||||
# Health check: verify Docker daemon is accessible before attempting any
|
||||
# build steps. This fails loudly at step 1 when the runner's docker.sock
|
||||
# is inaccessible rather than silently continuing to the build step
|
||||
# where docker build fails deep in ECR auth with a cryptic error.
|
||||
- name: Verify Docker daemon access
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "::group::Docker daemon health check"
|
||||
docker info 2>&1 | head -5 || {
|
||||
echo "::error::Docker daemon is not accessible at /var/run/docker.sock"
|
||||
echo "::error::Check: (1) daemon running, (2) runner user in docker group, (3) sock perms 660+"
|
||||
exit 1
|
||||
}
|
||||
echo "Docker daemon OK"
|
||||
echo "::endgroup::"
|
||||
|
||||
- name: Compute tags
|
||||
id: tags
|
||||
shell: bash
|
||||
|
||||
189
.github/workflows/publish-runtime.yml
vendored
189
.github/workflows/publish-runtime.yml
vendored
@ -1,15 +1,5 @@
|
||||
name: publish-runtime
|
||||
|
||||
# DEPRECATED on Gitea Actions — this file is kept for reference only.
|
||||
# Gitea Actions reads .gitea/workflows/, not .github/workflows/.
|
||||
# The canonical version is now: .gitea/workflows/publish-runtime.yml
|
||||
# That port:
|
||||
# - Drops OIDC trusted publisher (Gitea has no environments/OIDC)
|
||||
# - Uses PYPI_TOKEN secret instead of gh-action-pypi-publish
|
||||
# - Uses ${GITHUB_REF#refs/tags/} instead of github.ref_name
|
||||
# - Drops staging branch trigger (staging branch does not exist)
|
||||
# - Drops merge_group trigger (Gitea has no merge queue)
|
||||
#
|
||||
# Publishes molecule-ai-workspace-runtime to PyPI from monorepo workspace/.
|
||||
# Monorepo workspace/ is the only source-of-truth for runtime code; this
|
||||
# workflow is the bridge from monorepo edits to the PyPI artifact that
|
||||
@ -180,7 +170,7 @@ jobs:
|
||||
# environment pypi-publish. The action mints a short-lived OIDC
|
||||
# token and exchanges it for a PyPI upload credential — no static
|
||||
# API token in this repo's secrets.
|
||||
uses: pypa/gh-action-pypi-publish@cef221092ed1bacb1cc03d23a2d87d1d172e277b # release/v1
|
||||
uses: pypa/gh-action-pypi-publish@release/v1
|
||||
with:
|
||||
packages-dir: ${{ runner.temp }}/runtime-build/dist/
|
||||
|
||||
@ -292,33 +282,42 @@ jobs:
|
||||
echo "::error::Refusing to fan out cascade against stale or corrupt PyPI surfaces."
|
||||
exit 1
|
||||
|
||||
- name: Fan out via push to .runtime-version
|
||||
- name: Fan out repository_dispatch
|
||||
env:
|
||||
# Gitea PAT with write:repository scope on the 8 cascade-active
|
||||
# template repos. Used here for `git push` (NOT for an API
|
||||
# dispatch — Gitea 1.22.6 has no repository_dispatch endpoint;
|
||||
# empirically verified across 6 candidate paths in molecule-
|
||||
# core#20 issuecomment-913). The push trips each template's
|
||||
# existing `on: push: branches: [main]` trigger on
|
||||
# publish-image.yml, which then reads the updated
|
||||
# .runtime-version via its resolve-version job.
|
||||
DISPATCH_TOKEN: ${{ secrets.DISPATCH_TOKEN }}
|
||||
# Fine-grained PAT with `actions:write` on the 8 template repos.
|
||||
# GITHUB_TOKEN can't fire dispatches across repos — needs an explicit
|
||||
# token. Stored as a repo secret; rotate per the standard schedule.
|
||||
DISPATCH_TOKEN: ${{ secrets.TEMPLATE_DISPATCH_TOKEN }}
|
||||
# Single source of truth: the publish job's output, which handles
|
||||
# tag/manual-input/auto-bump uniformly. The previous fallback
|
||||
# (`steps.version.outputs.version` from inside the cascade job)
|
||||
# was a dead reference — different job, no shared step scope.
|
||||
RUNTIME_VERSION: ${{ needs.publish.outputs.version }}
|
||||
run: |
|
||||
set +e # don't abort on a single repo failure — collect them all
|
||||
|
||||
# Soft-skip on workflow_dispatch when the token is missing
|
||||
# (operator ad-hoc test); hard-fail on push so unattended
|
||||
# publishes can't silently skip the cascade. Same shape as
|
||||
# the original v1, intentional split per the schedule-vs-
|
||||
# dispatch hardening 2026-04-28.
|
||||
# Schedule-vs-dispatch behaviour split (hardened 2026-04-28
|
||||
# after the sweep-cf-orphans soft-skip incident — same class
|
||||
# of bug):
|
||||
#
|
||||
# The earlier "skipping cascade. templates will pick up the
|
||||
# new version on their own next rebuild" message was wrong —
|
||||
# templates only build on this dispatch trigger; without it
|
||||
# they stay pinned to whatever runtime version they last saw.
|
||||
# A silent skip here means "PyPI is current, templates are
|
||||
# not" and the gap is invisible until someone notices a
|
||||
# template still on the old version weeks later.
|
||||
#
|
||||
# - push → exit 1 (red CI surfaces the gap)
|
||||
# - workflow_dispatch → exit 0 with a warning (operator
|
||||
# ran this ad-hoc; let them rerun
|
||||
# after fixing the secret)
|
||||
if [ -z "$DISPATCH_TOKEN" ]; then
|
||||
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||
echo "::warning::DISPATCH_TOKEN secret not set — skipping cascade."
|
||||
echo "::warning::TEMPLATE_DISPATCH_TOKEN secret not set — skipping cascade."
|
||||
echo "::warning::set it at Settings → Secrets and Variables → Actions, then rerun. Templates will stay on the prior runtime version until either this token is set or each template is rebuilt manually."
|
||||
exit 0
|
||||
fi
|
||||
echo "::error::DISPATCH_TOKEN secret missing — cascade cannot fan out."
|
||||
echo "::error::TEMPLATE_DISPATCH_TOKEN secret missing — cascade cannot fan out."
|
||||
echo "::error::PyPI was published, but the 8 template repos will NOT pick up the new version until this token is restored and a republish dispatches the cascade."
|
||||
echo "::error::set it at Settings → Secrets and Variables → Actions; then re-trigger publish-runtime via workflow_dispatch."
|
||||
exit 1
|
||||
@ -328,119 +327,37 @@ jobs:
|
||||
echo "::error::publish job did not expose a version output — cascade cannot fan out"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# All 9 workspace templates declared in manifest.json. The list
|
||||
# MUST stay aligned with manifest.json's workspace_templates —
|
||||
# cascade-list-drift-gate.yml enforces this in CI per the
|
||||
# codex-stuck-on-stale-runtime invariant from PR #2556.
|
||||
# Long-term goal: derive this list from manifest.json so it
|
||||
# can't drift even on a manifest edit (RFC #388 Phase-1).
|
||||
#
|
||||
# Per-template publish-image.yml presence is checked at
|
||||
# cascade-time below: codex doesn't ship one today, so the
|
||||
# cascade soft-skips it with an informational message rather
|
||||
# than dropping it from this list (which would re-introduce
|
||||
# the drift the gate exists to catch).
|
||||
GITEA_URL="${GITEA_URL:-https://git.moleculesai.app}"
|
||||
# All 9 active workspace template repos. The PR #2536 pruning
|
||||
# ("deprecated, no shipping images") was empirically wrong:
|
||||
# continuous-synth-e2e.yml defaults to langgraph as its primary
|
||||
# canary (line 44), and every excluded template had successful
|
||||
# publish-image runs as of 2026-05-03 — none were dormant.
|
||||
# Symptom of the prune: today's a2a-sdk strict-mode fix
|
||||
# (#2566 / commit e1628c4) cascaded to 4 templates but never
|
||||
# reached langgraph, so the synth-E2E correctly canary'd a fix
|
||||
# that had landed but not deployed. Re-added the 5 templates.
|
||||
# Long-term: derive this list from manifest.json so cascade
|
||||
# scope can't drift from E2E scope — tracked in RFC #388 as a
|
||||
# Phase-1 invariant.
|
||||
TEMPLATES="claude-code hermes openclaw codex langgraph crewai autogen deepagents gemini-cli"
|
||||
FAILED=""
|
||||
SKIPPED=""
|
||||
|
||||
# Configure git identity once. The persona owning DISPATCH_TOKEN
|
||||
# is the same identity that authored this commit on each
|
||||
# template; using a generic "publish-runtime cascade" co-author
|
||||
# trailer in the message keeps the audit trail honest about the
|
||||
# workflow-driven origin.
|
||||
git config --global user.name "publish-runtime cascade"
|
||||
git config --global user.email "publish-runtime@moleculesai.app"
|
||||
|
||||
WORKDIR="$(mktemp -d)"
|
||||
for tpl in $TEMPLATES; do
|
||||
REPO="molecule-ai/molecule-ai-workspace-template-$tpl"
|
||||
CLONE="$WORKDIR/$tpl"
|
||||
|
||||
# Pre-check: skip templates without a publish-image.yml.
|
||||
# The cascade's job is to trip the template's on-push
|
||||
# rebuild — if there's no rebuild workflow, pushing a
|
||||
# .runtime-version commit is just noise on the target
|
||||
# repo. Use the Gitea contents API (no clone required for
|
||||
# the probe). 200 = present; 404 = absent.
|
||||
HTTP=$(curl -sS -o /dev/null -w "%{http_code}" \
|
||||
-H "Authorization: token $DISPATCH_TOKEN" \
|
||||
"$GITEA_URL/api/v1/repos/$REPO/contents/.github/workflows/publish-image.yml")
|
||||
if [ "$HTTP" = "404" ]; then
|
||||
echo "↷ $tpl has no publish-image.yml — soft-skip (informational; manifest still tracks it)"
|
||||
SKIPPED="$SKIPPED $tpl"
|
||||
continue
|
||||
fi
|
||||
if [ "$HTTP" != "200" ]; then
|
||||
echo "::warning::$tpl publish-image.yml probe returned HTTP $HTTP — proceeding anyway, push will surface the real failure if any"
|
||||
fi
|
||||
|
||||
# Use a per-template attempt loop so a transient race (e.g.
|
||||
# human pushing to the same template at the same instant)
|
||||
# doesn't lose the cascade. Bounded retries (3) — beyond
|
||||
# that we surface the failure and let the operator retry.
|
||||
attempt=0
|
||||
success=false
|
||||
while [ $attempt -lt 3 ]; do
|
||||
attempt=$((attempt + 1))
|
||||
rm -rf "$CLONE"
|
||||
if ! git clone --depth=1 \
|
||||
"https://x-access-token:${DISPATCH_TOKEN}@${GITEA_URL#https://}/$REPO.git" \
|
||||
"$CLONE" >/tmp/clone.log 2>&1; then
|
||||
echo "::warning::clone $tpl attempt $attempt failed: $(tail -n3 /tmp/clone.log)"
|
||||
sleep 2
|
||||
continue
|
||||
fi
|
||||
|
||||
cd "$CLONE"
|
||||
echo "$VERSION" > .runtime-version
|
||||
|
||||
# Idempotency guard: if the file already matches, this
|
||||
# publish is a re-run for a version already cascaded.
|
||||
# Don't push a no-op commit (would spuriously re-trip the
|
||||
# template's on-push and rebuild for nothing).
|
||||
if git diff --quiet -- .runtime-version; then
|
||||
echo "✓ $tpl already at $VERSION — no commit needed (idempotent)"
|
||||
success=true
|
||||
cd - >/dev/null
|
||||
break
|
||||
fi
|
||||
|
||||
git add .runtime-version
|
||||
git commit -m "chore: pin runtime to $VERSION (publish-runtime cascade)" \
|
||||
-m "Co-Authored-By: publish-runtime cascade <publish-runtime@moleculesai.app>" \
|
||||
>/dev/null
|
||||
|
||||
if git push origin HEAD:main >/tmp/push.log 2>&1; then
|
||||
echo "✓ $tpl pushed $VERSION on attempt $attempt"
|
||||
success=true
|
||||
cd - >/dev/null
|
||||
break
|
||||
fi
|
||||
|
||||
# Likely a non-fast-forward — pull-rebase and retry.
|
||||
# Don't force-push: that would silently overwrite a racing
|
||||
# human/cascade commit.
|
||||
echo "::warning::push $tpl attempt $attempt failed, pull-rebasing: $(tail -n3 /tmp/push.log)"
|
||||
git pull --rebase origin main >/tmp/rebase.log 2>&1 || true
|
||||
cd - >/dev/null
|
||||
done
|
||||
|
||||
if [ "$success" != "true" ]; then
|
||||
STATUS=$(curl -sS -o /tmp/dispatch.out -w "%{http_code}" \
|
||||
-X POST "https://api.github.com/repos/$REPO/dispatches" \
|
||||
-H "Authorization: Bearer $DISPATCH_TOKEN" \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||
-d "{\"event_type\":\"runtime-published\",\"client_payload\":{\"runtime_version\":\"$VERSION\"}}")
|
||||
if [ "$STATUS" = "204" ]; then
|
||||
echo "✓ dispatched $tpl ($VERSION)"
|
||||
else
|
||||
echo "::warning::✗ failed to dispatch $tpl: HTTP $STATUS — $(cat /tmp/dispatch.out)"
|
||||
FAILED="$FAILED $tpl"
|
||||
fi
|
||||
done
|
||||
rm -rf "$WORKDIR"
|
||||
|
||||
if [ -n "$FAILED" ]; then
|
||||
echo "::error::Cascade incomplete after 3 retries each. Failed templates:$FAILED"
|
||||
echo "::error::PyPI publish succeeded; failed templates lag the new version. Re-run this workflow_dispatch with the same version to retry only the laggers (idempotent — already-cascaded templates skip)."
|
||||
exit 1
|
||||
fi
|
||||
if [ -n "$SKIPPED" ]; then
|
||||
echo "Cascade complete: pinned $VERSION on cascade-active templates. Soft-skipped (no publish-image.yml):$SKIPPED"
|
||||
else
|
||||
echo "Cascade complete: $VERSION pinned across all manifest workspace_templates."
|
||||
echo "::warning::Cascade incomplete. Failed templates:$FAILED"
|
||||
# Don't fail the whole job — PyPI publish already succeeded;
|
||||
# operators can retry the failed templates manually.
|
||||
fi
|
||||
|
||||
215
.github/workflows/publish-workspace-server-image.yml
vendored
215
.github/workflows/publish-workspace-server-image.yml
vendored
@ -32,7 +32,7 @@ name: publish-workspace-server-image
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
branches: [staging, main]
|
||||
paths:
|
||||
- 'workspace-server/**'
|
||||
- 'canvas/**'
|
||||
@ -75,54 +75,45 @@ jobs:
|
||||
# plugin was dropped + workspace-server/Dockerfile no longer
|
||||
# COPYs it.
|
||||
|
||||
# ECR auth + buildx setup are now inline in each build step
|
||||
# below (Task #173, 2026-05-07).
|
||||
#
|
||||
# Why moved inline: aws-actions/configure-aws-credentials@v4 +
|
||||
# aws-actions/amazon-ecr-login@v2 + docker/setup-buildx-action
|
||||
# all left auth state in places that the actual `docker push`
|
||||
# couldn't see on Gitea Actions:
|
||||
# - The actions wrote to a step-scoped DOCKER_CONFIG path
|
||||
# that didn't survive into subsequent shell steps.
|
||||
# - Buildx couldn't bridge the runner container ↔
|
||||
# operator-host docker daemon auth gap (401 on the
|
||||
# docker-container driver, "no basic auth credentials"
|
||||
# with the action-driven login).
|
||||
#
|
||||
# Doing AWS+ECR auth inline (`aws ecr get-login-password |
|
||||
# docker login`) in the same shell step as `docker build` +
|
||||
# `docker push` is the operator-host manual approach, mapped
|
||||
# 1:1 into CI. Auth state is guaranteed to live in the env that
|
||||
# `docker push` actually runs from.
|
||||
#
|
||||
# Post-suspension target is the operator's ECR org
|
||||
# (153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/*),
|
||||
# which already hosts platform-tenant + workspace-template-* +
|
||||
# runner-base images. AWS creds come from the
|
||||
# AWS_ACCESS_KEY_ID/SECRET secrets bound to the molecule-cp
|
||||
# IAM user. Closes #161.
|
||||
- name: Configure AWS credentials for ECR
|
||||
# GHCR was the pre-suspension target; the molecule-ai org on
|
||||
# GitHub got swept 2026-05-06 and ghcr.io/molecule-ai/* is no
|
||||
# longer reachable. Post-suspension target is the operator's
|
||||
# ECR org (153263036946.dkr.ecr.us-east-2.amazonaws.com/
|
||||
# molecule-ai/*), which already hosts platform-tenant +
|
||||
# workspace-template-* + runner-base images. AWS creds come
|
||||
# from the AWS_ACCESS_KEY_ID/SECRET secrets bound to the
|
||||
# molecule-cp IAM user. Closes #161.
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: us-east-2
|
||||
|
||||
- name: Log in to ECR
|
||||
id: ecr-login
|
||||
uses: aws-actions/amazon-ecr-login@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
# driver: docker — use the host docker daemon directly. The
|
||||
# default `docker-container` driver spawns a buildkit container
|
||||
# that doesn't share the host's ECR auth (set up by
|
||||
# amazon-ecr-login above) and silently 401s on push to ECR. With
|
||||
# driver: docker, buildx delegates to the host daemon which
|
||||
# already has the ECR creds. Caught on Gitea Actions run #893
|
||||
# post-Task-#173 (2026-05-07): the pre-clone fix worked and the
|
||||
# image built end-to-end, but `failed to push: 401 Unauthorized`
|
||||
# because the build container couldn't see the host's
|
||||
# ~/.docker/config.json.
|
||||
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
|
||||
with:
|
||||
driver: docker
|
||||
|
||||
- name: Compute tags
|
||||
id: tags
|
||||
run: |
|
||||
echo "sha=${GITHUB_SHA::7}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
# Health check: verify Docker daemon is accessible before attempting any
|
||||
# build steps. This fails loudly at step 1 when the runner's docker.sock
|
||||
# is inaccessible rather than silently continuing to the build step
|
||||
# where docker build fails deep in ECR auth with a cryptic error.
|
||||
- name: Verify Docker daemon access
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "::group::Docker daemon health check"
|
||||
docker info 2>&1 | head -5 || {
|
||||
echo "::error::Docker daemon is not accessible at /var/run/docker.sock"
|
||||
echo "::error::Check: (1) daemon running, (2) runner user in docker group, (3) sock perms 660+"
|
||||
exit 1
|
||||
}
|
||||
echo "Docker daemon OK"
|
||||
echo "::endgroup::"
|
||||
|
||||
# Pre-clone manifest deps before docker build (Task #173 fix).
|
||||
#
|
||||
# Why pre-clone: post-2026-05-06, every workspace-template-* repo on
|
||||
@ -197,82 +188,66 @@ jobs:
|
||||
# were running pre-RFC code. Adding the staging trigger above closes
|
||||
# that gap. Earlier 2026-04-24 incident: a static :staging-<sha> pin
|
||||
# drifted 10 days behind staging — same class of bug, different
|
||||
# mechanism. ECR repo molecule-ai/platform created 2026-05-07.
|
||||
# Build + push platform image with plain `docker` (no buildx).
|
||||
# GIT_SHA bakes into the Go binary via -ldflags so /buildinfo
|
||||
# returns it at runtime — see Dockerfile + buildinfo/buildinfo.go.
|
||||
# The OCI revision label below carries the same value for registry
|
||||
# tooling; the duplication is intentional.
|
||||
- name: Build & push platform image to ECR (staging-<sha> + staging-latest)
|
||||
env:
|
||||
IMAGE_NAME: ${{ env.IMAGE_NAME }}
|
||||
TAG_SHA: staging-${{ steps.tags.outputs.sha }}
|
||||
TAG_LATEST: staging-latest
|
||||
GIT_SHA: ${{ github.sha }}
|
||||
REPO: ${{ github.repository }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
AWS_DEFAULT_REGION: us-east-2
|
||||
run: |
|
||||
set -euo pipefail
|
||||
# ECR auth in-step so config.json is populated in the same
|
||||
# shell env that runs `docker push`. ECR get-login-password
|
||||
# tokens last 12h, plenty for a single-step build+push.
|
||||
ECR_REGISTRY="${IMAGE_NAME%%/*}"
|
||||
aws ecr get-login-password --region us-east-2 | \
|
||||
docker login --username AWS --password-stdin "${ECR_REGISTRY}"
|
||||
docker build \
|
||||
--file ./workspace-server/Dockerfile \
|
||||
--build-arg GIT_SHA="${GIT_SHA}" \
|
||||
--label "org.opencontainers.image.source=https://github.com/${REPO}" \
|
||||
--label "org.opencontainers.image.revision=${GIT_SHA}" \
|
||||
--label "org.opencontainers.image.description=Molecule AI platform (Go API server) — pending canary verify" \
|
||||
--tag "${IMAGE_NAME}:${TAG_SHA}" \
|
||||
--tag "${IMAGE_NAME}:${TAG_LATEST}" \
|
||||
.
|
||||
docker push "${IMAGE_NAME}:${TAG_SHA}"
|
||||
docker push "${IMAGE_NAME}:${TAG_LATEST}"
|
||||
# mechanism.
|
||||
- name: Build & push platform image to GHCR (staging-<sha> + staging-latest)
|
||||
uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0
|
||||
with:
|
||||
context: .
|
||||
file: ./workspace-server/Dockerfile
|
||||
platforms: linux/amd64
|
||||
push: true
|
||||
tags: |
|
||||
${{ env.IMAGE_NAME }}:staging-${{ steps.tags.outputs.sha }}
|
||||
${{ env.IMAGE_NAME }}:staging-latest
|
||||
# cache-from/cache-to: type=gha removed for Gitea Actions —
|
||||
# the GHA artifact cache backend is GitHub-specific; on Gitea
|
||||
# the cache endpoint is unreachable and times out
|
||||
# ("artifactcache/cache?keys=index-buildkit-... i/o timeout").
|
||||
# Driver `docker` (set above) doesn't support the gha cache
|
||||
# protocol either. Inline cache via type=registry could be
|
||||
# added back later if rebuild time becomes painful, but
|
||||
# 37-repo clone + Go/Node builds take <10min cold — fine for
|
||||
# now, and a noisy failure is worse than a slow success.
|
||||
# GIT_SHA bakes into the Go binary via -ldflags so /buildinfo
|
||||
# returns it at runtime — see Dockerfile + buildinfo/buildinfo.go.
|
||||
# This is the same value as the OCI revision label below; passing
|
||||
# it twice is intentional, the OCI label is for registry tooling
|
||||
# while /buildinfo is for the redeploy verification step.
|
||||
build-args: |
|
||||
GIT_SHA=${{ github.sha }}
|
||||
labels: |
|
||||
org.opencontainers.image.source=https://github.com/${{ github.repository }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
org.opencontainers.image.description=Molecule AI platform (Go API server) — pending canary verify
|
||||
|
||||
# Canvas uses same-origin fetches. The tenant Go platform
|
||||
# reverse-proxies /cp/* to the SaaS CP via its CP_UPSTREAM_URL
|
||||
# env; the tenant's /canvas/viewport, /approvals/pending,
|
||||
# /org/templates etc. live on the tenant platform itself.
|
||||
# Both legs share one origin (the tenant subdomain) so
|
||||
# PLATFORM_URL="" forces canvas to fetch paths as relative,
|
||||
# which land same-origin.
|
||||
#
|
||||
# Self-hosted / private-label deployments override this at
|
||||
# build time with a specific backend (e.g. local dev:
|
||||
# NEXT_PUBLIC_PLATFORM_URL=http://localhost:8080).
|
||||
- name: Build & push tenant image to ECR (staging-<sha> + staging-latest)
|
||||
env:
|
||||
TENANT_IMAGE_NAME: ${{ env.TENANT_IMAGE_NAME }}
|
||||
TAG_SHA: staging-${{ steps.tags.outputs.sha }}
|
||||
TAG_LATEST: staging-latest
|
||||
GIT_SHA: ${{ github.sha }}
|
||||
REPO: ${{ github.repository }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
AWS_DEFAULT_REGION: us-east-2
|
||||
run: |
|
||||
set -euo pipefail
|
||||
# Re-login: the platform-image step's docker login wrote to
|
||||
# the same config.json, so this is technically redundant — but
|
||||
# making each push step self-contained keeps the workflow
|
||||
# robust to step reordering / future extraction.
|
||||
ECR_REGISTRY="${TENANT_IMAGE_NAME%%/*}"
|
||||
aws ecr get-login-password --region us-east-2 | \
|
||||
docker login --username AWS --password-stdin "${ECR_REGISTRY}"
|
||||
docker build \
|
||||
--file ./workspace-server/Dockerfile.tenant \
|
||||
--build-arg NEXT_PUBLIC_PLATFORM_URL= \
|
||||
--build-arg GIT_SHA="${GIT_SHA}" \
|
||||
--label "org.opencontainers.image.source=https://github.com/${REPO}" \
|
||||
--label "org.opencontainers.image.revision=${GIT_SHA}" \
|
||||
--label "org.opencontainers.image.description=Molecule AI tenant platform + canvas — pending canary verify" \
|
||||
--tag "${TENANT_IMAGE_NAME}:${TAG_SHA}" \
|
||||
--tag "${TENANT_IMAGE_NAME}:${TAG_LATEST}" \
|
||||
.
|
||||
docker push "${TENANT_IMAGE_NAME}:${TAG_SHA}"
|
||||
docker push "${TENANT_IMAGE_NAME}:${TAG_LATEST}"
|
||||
- name: Build & push tenant image to GHCR (staging-<sha> + staging-latest)
|
||||
uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0
|
||||
with:
|
||||
context: .
|
||||
file: ./workspace-server/Dockerfile.tenant
|
||||
platforms: linux/amd64
|
||||
push: true
|
||||
tags: |
|
||||
${{ env.TENANT_IMAGE_NAME }}:staging-${{ steps.tags.outputs.sha }}
|
||||
${{ env.TENANT_IMAGE_NAME }}:staging-latest
|
||||
# cache-from/cache-to: type=gha removed — see platform image
|
||||
# build step above for rationale. Same Gitea-Actions limitation.
|
||||
# Canvas uses same-origin fetches. The tenant Go platform
|
||||
# reverse-proxies /cp/* to the SaaS CP via its CP_UPSTREAM_URL
|
||||
# env; the tenant's /canvas/viewport, /approvals/pending,
|
||||
# /org/templates etc. live on the tenant platform itself.
|
||||
# Both legs share one origin (the tenant subdomain) so
|
||||
# PLATFORM_URL="" forces canvas to fetch paths as relative,
|
||||
# which land same-origin.
|
||||
#
|
||||
# Self-hosted / private-label deployments override this at
|
||||
# build time with a specific backend (e.g. local dev:
|
||||
# NEXT_PUBLIC_PLATFORM_URL=http://localhost:8080).
|
||||
build-args: |
|
||||
NEXT_PUBLIC_PLATFORM_URL=
|
||||
GIT_SHA=${{ github.sha }}
|
||||
labels: |
|
||||
org.opencontainers.image.source=https://github.com/${{ github.repository }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
org.opencontainers.image.description=Molecule AI tenant platform + canvas — pending canary verify
|
||||
|
||||
|
||||
34
.github/workflows/redeploy-tenants-on-main.yml
vendored
34
.github/workflows/redeploy-tenants-on-main.yml
vendored
@ -3,9 +3,9 @@ name: redeploy-tenants-on-main
|
||||
# Auto-refresh prod tenant EC2s after every main merge.
|
||||
#
|
||||
# Why this workflow exists: publish-workspace-server-image builds and
|
||||
# pushes a new platform-tenant :<sha> to ECR on every merge to main,
|
||||
# but running tenants pulled their image once at boot and never re-pull.
|
||||
# Users see stale code indefinitely.
|
||||
# pushes a new platform-tenant:latest + :<sha> to GHCR on every merge
|
||||
# to main, but running tenants pulled their image once at boot and
|
||||
# never re-pull. Users see stale code indefinitely.
|
||||
#
|
||||
# This workflow closes the gap by calling the control-plane admin
|
||||
# endpoint that performs a canary-first, batched, health-gated rolling
|
||||
@ -13,18 +13,12 @@ name: redeploy-tenants-on-main
|
||||
# molecule-controlplane as POST /cp/admin/tenants/redeploy-fleet
|
||||
# (feat/tenant-auto-redeploy, landing alongside this workflow).
|
||||
#
|
||||
# Registry: ECR (153263036946.dkr.ecr.us-east-2.amazonaws.com/
|
||||
# molecule-ai/platform-tenant). GHCR was retired 2026-05-07 during the
|
||||
# Gitea suspension migration. The canary-verify.yml promote step now
|
||||
# uses the same redeploy-fleet endpoint (fixes the silent-GHCR gap).
|
||||
#
|
||||
# Runtime ordering:
|
||||
# 1. publish-workspace-server-image completes → new :staging-<sha> in ECR.
|
||||
# 2. This workflow fires via workflow_run, calls redeploy-fleet with
|
||||
# target_tag=staging-<sha>. No CDN propagation wait needed —
|
||||
# ECR image manifest is consistent immediately after push.
|
||||
# 3. Calls redeploy-fleet with canary_slug (if set) and a soak
|
||||
# period. Canary proves the image boots; batches follow.
|
||||
# 1. publish-workspace-server-image completes → new :latest in GHCR.
|
||||
# 2. This workflow fires via workflow_run, waits 30s for GHCR's
|
||||
# CDN to propagate the new tag to the region the tenants pull from.
|
||||
# 3. Calls redeploy-fleet with canary_slug=hongming and a 60s
|
||||
# soak. Canary proves the image boots; batches follow.
|
||||
# 4. Any failure aborts the rollout and leaves older tenants on the
|
||||
# prior image — safer default than half-and-half state.
|
||||
#
|
||||
@ -114,11 +108,13 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 25
|
||||
steps:
|
||||
- name: Note on ECR propagation
|
||||
# ECR image manifests are consistent immediately after push — no
|
||||
# CDN cache to wait for. The old GHCR-based workflow had a 30s
|
||||
# sleep to avoid race conditions; ECR makes that unnecessary.
|
||||
run: echo "ECR image available immediately after push — proceeding."
|
||||
- name: Wait for GHCR tag propagation
|
||||
# GHCR's edge cache takes ~15-30s to consistently serve the new
|
||||
# manifest after the registry accepts the push. Without this
|
||||
# sleep, the first tenant's docker pull sometimes races and
|
||||
# fetches the previous digest; sleeping is the cheapest way to
|
||||
# reduce that without polling GHCR for the new digest.
|
||||
run: sleep 30
|
||||
|
||||
- name: Compute target tag
|
||||
id: tag
|
||||
|
||||
@ -36,7 +36,7 @@ on:
|
||||
workflow_run:
|
||||
workflows: ['publish-workspace-server-image']
|
||||
types: [completed]
|
||||
branches: [main]
|
||||
branches: [staging]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
target_tag:
|
||||
|
||||
105
.github/workflows/retarget-main-to-staging.yml
vendored
Normal file
105
.github/workflows/retarget-main-to-staging.yml
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
name: Retarget main PRs to staging
|
||||
|
||||
# Mechanical enforcement of SHARED_RULES rule 8 ("Staging-first workflow, no
|
||||
# exceptions"). When a bot opens a PR against main, retarget it to staging
|
||||
# automatically and leave an explanatory comment. Human CEO-authored PRs (the
|
||||
# staging→main promotion PR, etc.) are left alone — they're the authorised
|
||||
# exception to the rule.
|
||||
#
|
||||
# Why an Action instead of only a prompt rule: prompt rules depend on every
|
||||
# role's system-prompt.md staying in sync. Today 5 of 8 engineer roles
|
||||
# (core-be, core-fe, app-fe, app-qa, devops-engineer) don't have the
|
||||
# staging-first section — the bot keeps opening PRs to main. An Action
|
||||
# enforces the invariant regardless of prompt drift.
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened, reopened]
|
||||
branches: [main]
|
||||
|
||||
permissions:
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
retarget:
|
||||
name: Retarget to staging
|
||||
runs-on: ubuntu-latest
|
||||
# Only fire for bot-authored PRs. Human CEO PRs (staging→main promotion)
|
||||
# are intentional and pass through.
|
||||
#
|
||||
# Head-ref guard: never retarget a PR whose head IS `staging` — those
|
||||
# are the auto-promote staging→main PRs (opened by molecule-ai[bot]
|
||||
# since #2586 switched to an App token, which now passes the bot
|
||||
# filter below). Retargeting head=staging onto base=staging fails
|
||||
# with HTTP 422 "no new commits between base 'staging' and head
|
||||
# 'staging'", which used to surface as a noisy red workflow run on
|
||||
# every auto-promote (caught 2026-05-03 on PR #2588).
|
||||
if: >-
|
||||
github.event.pull_request.head.ref != 'staging'
|
||||
&& (
|
||||
github.event.pull_request.user.type == 'Bot'
|
||||
|| endsWith(github.event.pull_request.user.login, '[bot]')
|
||||
|| github.event.pull_request.user.login == 'app/molecule-ai'
|
||||
|| github.event.pull_request.user.login == 'molecule-ai[bot]'
|
||||
)
|
||||
steps:
|
||||
- name: Retarget PR base to staging
|
||||
id: retarget
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
|
||||
# Issue #1884: when the bot opens a PR against main and there's
|
||||
# already another PR on the same head branch targeting staging,
|
||||
# GitHub's PATCH /pulls returns 422 with
|
||||
# "A pull request already exists for base branch 'staging' …".
|
||||
# The retarget can't proceed — but the right response is to
|
||||
# close the now-redundant main-PR, not to fail the workflow
|
||||
# noisily. Detect that specific 422 and close instead.
|
||||
run: |
|
||||
set +e
|
||||
echo "Retargeting PR #${PR_NUMBER} (author: ${PR_AUTHOR}) from main → staging"
|
||||
PATCH_OUTPUT=$(gh api -X PATCH \
|
||||
"repos/${{ github.repository }}/pulls/${PR_NUMBER}" \
|
||||
-f base=staging \
|
||||
--jq '.base.ref' 2>&1)
|
||||
PATCH_EXIT=$?
|
||||
set -e
|
||||
if [ "$PATCH_EXIT" -eq 0 ]; then
|
||||
echo "::notice::Retargeted PR #${PR_NUMBER} → staging"
|
||||
echo "outcome=retargeted" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
# Specifically match the 422 duplicate-base/head error so
|
||||
# any OTHER PATCH failure (auth, deleted PR, etc.) still
|
||||
# surfaces as a real workflow failure.
|
||||
if echo "$PATCH_OUTPUT" | grep -q "pull request already exists for base branch 'staging'"; then
|
||||
echo "::notice::PR #${PR_NUMBER}: duplicate target-staging PR exists on same head — closing this main-PR as redundant."
|
||||
gh pr close "$PR_NUMBER" \
|
||||
--repo "${{ github.repository }}" \
|
||||
--comment "[retarget-bot] Closing — another PR on the same head branch already targets \`staging\`. This PR is redundant. See issue #1884 for the rationale."
|
||||
echo "outcome=closed-as-duplicate" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
echo "::error::Retarget PATCH failed and was NOT a duplicate-base error:"
|
||||
echo "$PATCH_OUTPUT" >&2
|
||||
exit 1
|
||||
|
||||
- name: Post explainer comment
|
||||
if: steps.retarget.outputs.outcome == 'retargeted'
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
run: |
|
||||
gh pr comment "$PR_NUMBER" \
|
||||
--repo "${{ github.repository }}" \
|
||||
--body "$(cat <<'BODY'
|
||||
[retarget-bot] This PR was opened against `main` and has been retargeted to `staging` automatically.
|
||||
|
||||
**Why:** per [SHARED_RULES rule 8](https://github.com/molecule-ai/molecule-ai-org-template-molecule-dev/blob/main/SHARED_RULES.md), all feature work targets `staging` first; the CEO promotes `staging → main` separately.
|
||||
|
||||
**What changed:** just the base branch — no code change. CI will re-run against `staging`. If you get merge conflicts, rebase on `staging`.
|
||||
|
||||
**If this PR is the CEO's staging→main promotion:** the Action skipped you (only bot-authored PRs are retargeted). If you see this comment on your CEO PR, that's a bug — please tag @HongmingWang-Rabbit.
|
||||
BODY
|
||||
)"
|
||||
2
.github/workflows/secret-pattern-drift.yml
vendored
2
.github/workflows/secret-pattern-drift.yml
vendored
@ -48,7 +48,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
|
||||
with:
|
||||
|
||||
@ -22,7 +22,7 @@ development workflow, conventions, and how to get your changes merged.
|
||||
|
||||
```bash
|
||||
# Clone the repo
|
||||
git clone https://git.moleculesai.app/molecule-ai/molecule-core.git
|
||||
git clone https://github.com/Molecule-AI/molecule-core.git
|
||||
cd molecule-core
|
||||
|
||||
# Install git hooks
|
||||
@ -57,7 +57,7 @@ See `CLAUDE.md` for a full list of environment variables and their purposes.
|
||||
|
||||
This repo is scoped to **code** (canvas, workspace, workspace-server, related
|
||||
infra). Public content (blog posts, marketing copy, OG images, SEO briefs,
|
||||
DevRel demos) lives in [`Molecule-AI/docs`](https://git.moleculesai.app/molecule-ai/docs).
|
||||
DevRel demos) lives in [`Molecule-AI/docs`](https://github.com/Molecule-AI/docs).
|
||||
The `Block forbidden paths` CI gate fails any PR that writes to `marketing/`
|
||||
or other removed paths — open against `Molecule-AI/docs` instead.
|
||||
|
||||
@ -110,7 +110,7 @@ causing a render loop when any node position changed.
|
||||
|
||||
1. **Repo-wide:** "Automatically delete head branches" is on. Once a PR merges, the branch is deleted server-side. Any subsequent `git push` to that branch fails with `remote rejected — no such branch`.
|
||||
|
||||
2. **CI:** the `pr-guards` workflow (calling [molecule-ci `disable-auto-merge-on-push`](https://git.moleculesai.app/molecule-ai/molecule-ci/src/branch/main/.github/workflows/disable-auto-merge-on-push.yml)) fires on every push to an open PR. If auto-merge was already enabled, it's disabled and a comment is posted. You must explicitly re-enable after verifying the new commit.
|
||||
2. **CI:** the `pr-guards` workflow (calling [molecule-ci `disable-auto-merge-on-push`](https://github.com/Molecule-AI/molecule-ci/blob/main/.github/workflows/disable-auto-merge-on-push.yml)) fires on every push to an open PR. If auto-merge was already enabled, it's disabled and a comment is posted. You must explicitly re-enable after verifying the new commit.
|
||||
|
||||
**Workflow rules that follow from the guards:**
|
||||
- Push **all** commits before running `gh pr merge --auto`.
|
||||
@ -180,9 +180,9 @@ and run CI manually.
|
||||
Code in this repo lands in molecule-core. Some related runtime artifacts
|
||||
live in their own repos:
|
||||
|
||||
- [`Molecule-AI/molecule-ai-workspace-runtime`](https://git.moleculesai.app/molecule-ai/molecule-ai-workspace-runtime) — Python adapter SDK (`molecule_runtime`) that runs inside containerized Molecule workspaces. Bridges Claude Code SDK / hermes / langgraph / etc. → A2A queue.
|
||||
- [`Molecule-AI/molecule-sdk-python`](https://git.moleculesai.app/molecule-ai/molecule-sdk-python) — `A2AServer` + `RemoteAgentClient` for external agents that register over the public `/registry/register` flow.
|
||||
- [`Molecule-AI/molecule-mcp-claude-channel`](https://git.moleculesai.app/molecule-ai/molecule-mcp-claude-channel) — Claude Code channel plugin. Bridges A2A traffic into a running Claude Code session via MCP `notifications/claude/channel`. Polling-based (no tunnel required); install with `claude --channels plugin:molecule@Molecule-AI/molecule-mcp-claude-channel`.
|
||||
- [`Molecule-AI/molecule-ai-workspace-runtime`](https://github.com/Molecule-AI/molecule-ai-workspace-runtime) — Python adapter SDK (`molecule_runtime`) that runs inside containerized Molecule workspaces. Bridges Claude Code SDK / hermes / langgraph / etc. → A2A queue.
|
||||
- [`Molecule-AI/molecule-sdk-python`](https://github.com/Molecule-AI/molecule-sdk-python) — `A2AServer` + `RemoteAgentClient` for external agents that register over the public `/registry/register` flow.
|
||||
- [`Molecule-AI/molecule-mcp-claude-channel`](https://github.com/Molecule-AI/molecule-mcp-claude-channel) — Claude Code channel plugin. Bridges A2A traffic into a running Claude Code session via MCP `notifications/claude/channel`. Polling-based (no tunnel required); install with `claude --channels plugin:molecule@Molecule-AI/molecule-mcp-claude-channel`.
|
||||
|
||||
When extending the **A2A surface** in molecule-core (`workspace-server/internal/handlers/a2a_proxy.go` etc.), consider whether the change has a downstream impact on the runtime SDK or the channel plugin — they're versioned independently but share the wire shape.
|
||||
|
||||
|
||||
28
Makefile
28
Makefile
@ -1,28 +0,0 @@
|
||||
# Top-level Makefile — convenience wrappers around docker compose.
|
||||
#
|
||||
# Most molecule-core dev work happens via these shortcuts. CI doesn't
|
||||
# use this Makefile; CI calls docker compose / go test directly so the
|
||||
# Makefile can evolve without breaking the build.
|
||||
|
||||
.PHONY: help dev up down logs build test
|
||||
|
||||
help: ## Show this help.
|
||||
@grep -E '^[a-zA-Z_-]+:.*?## ' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-12s\033[0m %s\n", $$1, $$2}'
|
||||
|
||||
dev: ## Start the full stack with air hot-reload for the platform service.
|
||||
docker compose -f docker-compose.yml -f docker-compose.dev.yml up
|
||||
|
||||
up: ## Start the full stack in production-shape mode (no air, normal Dockerfile).
|
||||
docker compose up
|
||||
|
||||
down: ## Stop the stack and remove containers (volumes preserved).
|
||||
docker compose down
|
||||
|
||||
logs: ## Tail logs from all services (Ctrl-C to detach).
|
||||
docker compose logs -f
|
||||
|
||||
build: ## Force a fresh build of the platform image (no cache).
|
||||
docker compose build --no-cache platform
|
||||
|
||||
test: ## Run Go unit tests in workspace-server/.
|
||||
cd workspace-server && go test -race ./...
|
||||
77
README.md
77
README.md
@ -1,7 +1,7 @@
|
||||
<div align="center">
|
||||
|
||||
<p>
|
||||
<img src="./docs/assets/branding/molecule-icon.svg" alt="Molecule AI" width="160" />
|
||||
<img src="./docs/assets/branding/molecule-icon.png" alt="Molecule AI Icon Logo" width="160" />
|
||||
</p>
|
||||
|
||||
<p>
|
||||
@ -39,8 +39,8 @@
|
||||
<a href="./docs/agent-runtime/workspace-runtime.md"><strong>Workspace Runtime</strong></a>
|
||||
</p>
|
||||
|
||||
[](https://railway.app/new/template?template=https://git.moleculesai.app/molecule-ai/molecule-core)
|
||||
[](https://render.com/deploy?repo=https://git.moleculesai.app/molecule-ai/molecule-core)
|
||||
[](https://railway.app/new/template?template=https://github.com/Molecule-AI/molecule-monorepo)
|
||||
[](https://render.com/deploy?repo=https://github.com/Molecule-AI/molecule-monorepo)
|
||||
|
||||
</div>
|
||||
|
||||
@ -53,8 +53,8 @@ Molecule AI is the most powerful way to govern an AI agent organization in produ
|
||||
It combines the parts that are usually scattered across demos, internal glue code, and framework-specific tooling into one product:
|
||||
|
||||
- one org-native control plane for teams, roles, hierarchy, and lifecycle
|
||||
- one runtime layer that lets **eight** agent runtimes — LangGraph, DeepAgents, Claude Code, CrewAI, AutoGen, **Hermes**, **Gemini CLI**, and OpenClaw — run side by side behind one workspace contract
|
||||
- one memory model that keeps recall, sharing, and skill evolution aligned with organizational boundaries (Memory v2 backed by pgvector for semantic recall)
|
||||
- one runtime layer that lets LangGraph, DeepAgents, Claude Code, CrewAI, AutoGen, and OpenClaw run side by side
|
||||
- one memory model that keeps recall, sharing, and skill evolution aligned with organizational boundaries
|
||||
- one operational surface for observing, pausing, restarting, inspecting, and improving live workspaces
|
||||
|
||||
Most teams can build a workflow, a strong single agent, a coding agent, or a custom multi-agent graph.
|
||||
@ -75,7 +75,7 @@ You do not wire collaboration paths by hand. Hierarchy defines the default commu
|
||||
|
||||
### 3. Runtime choice stops being a dead-end decision
|
||||
|
||||
LangGraph, DeepAgents, Claude Code, CrewAI, AutoGen, Hermes, Gemini CLI, and OpenClaw can all plug into the same workspace abstraction. Teams can standardize governance without forcing every group onto one runtime.
|
||||
LangGraph, DeepAgents, Claude Code, CrewAI, AutoGen, and OpenClaw can all plug into the same workspace abstraction. Teams can standardize governance without forcing every group onto one runtime.
|
||||
|
||||
### 4. Memory is treated like infrastructure
|
||||
|
||||
@ -117,8 +117,6 @@ Molecule AI is not trying to replace the frameworks below. It is the system that
|
||||
| **Claude Code** | Shipping on `main` | Real coding workflows, CLI-native continuity | Secure workspace abstraction, A2A delegation, org boundaries, shared control plane |
|
||||
| **CrewAI** | Shipping on `main` | Role-based crews | Persistent workspace identity, policy consistency, shared canvas and registry |
|
||||
| **AutoGen** | Shipping on `main` | Assistant/tool orchestration | Standardized deployment, hierarchy-aware collaboration, shared ops plane |
|
||||
| **Hermes 4** | Shipping on `main` | Hybrid reasoning, native tools, json_schema (NousResearch/hermes-agent) | Option B upstream hook, A2A bridge to OpenAI-compat API, multi-provider provider derivation |
|
||||
| **Gemini CLI** | Shipping on `main` | Google Gemini CLI continuity | Workspace lifecycle, A2A, hierarchy-aware collaboration, shared ops plane |
|
||||
| **OpenClaw** | Shipping on `main` | CLI-native runtime with its own session model | Workspace lifecycle, templates, activity logs, topology-aware collaboration |
|
||||
| **NemoClaw** | WIP on `feat/nemoclaw-t4-docker` | NVIDIA-oriented runtime path | Planned to join the same abstraction once merged; not yet part of `main` |
|
||||
|
||||
@ -184,10 +182,9 @@ The result is not just “an agent that learns.” It is **an organization that
|
||||
|
||||
## What Ships In `main`
|
||||
|
||||
### Canvas (v4)
|
||||
### Canvas
|
||||
|
||||
- Next.js 15 + React Flow + Zustand
|
||||
- **warm-paper theme system** — light / dark / follow-system, SSR cookie + nonce'd boot script + ThemeProvider; terminal + code surfaces stay dark unconditionally
|
||||
- drag-to-nest team building
|
||||
- empty-state deployment + onboarding wizard
|
||||
- template palette
|
||||
@ -196,9 +193,8 @@ The result is not just “an agent that learns.” It is **an organization that
|
||||
|
||||
### Platform
|
||||
|
||||
- Go 1.25 / Gin control plane (80+ HTTP endpoints + Gorilla WebSocket fanout)
|
||||
- workspace CRUD and provisioning (pluggable Provisioner — Docker locally, EC2 + SSM in production)
|
||||
- **A2A response path is a typed discriminated union (RFC #2967)** — frozen dataclasses + total parser; 100% unit + adversarial fuzz coverage
|
||||
- Go/Gin control plane
|
||||
- workspace CRUD and provisioning
|
||||
- registry and heartbeats
|
||||
- browser-safe A2A proxy
|
||||
- team expansion/collapse
|
||||
@ -208,10 +204,10 @@ The result is not just “an agent that learns.” It is **an organization that
|
||||
|
||||
### Runtime
|
||||
|
||||
- unified `workspace/` image; thin AMI in production (us-east-2)
|
||||
- adapter-driven execution across **8 runtimes** (Claude Code, Hermes, Gemini CLI, LangGraph, DeepAgents, CrewAI, AutoGen, OpenClaw)
|
||||
- unified `workspace/` image
|
||||
- adapter-driven execution
|
||||
- Agent Card registration
|
||||
- awareness-backed memory integration; **Memory v2 backed by pgvector** for semantic recall
|
||||
- awareness-backed memory integration
|
||||
- plugin-mounted shared rules/skills
|
||||
- hot-reloadable local skills
|
||||
- coordinator-only delegation path
|
||||
@ -225,21 +221,6 @@ The result is not just “an agent that learns.” It is **an organization that
|
||||
- runtime tiers
|
||||
- direct workspace inspection through terminal and files
|
||||
|
||||
### SaaS (via [`molecule-controlplane`](https://git.moleculesai.app/molecule-ai/molecule-controlplane))
|
||||
|
||||
- multi-tenant on AWS EC2 + Neon (per-tenant Postgres branch) + Cloudflare Tunnels (per-tenant, no public ports)
|
||||
- WorkOS AuthKit + Stripe Checkout + Customer Portal
|
||||
- AWS KMS envelope encryption (DB / Redis connection strings); AWS Secrets Manager for tenant bootstrap
|
||||
- `tenant_resources` audit table + 30-min boot-event-aware reconciler — every CF / AWS lifecycle event recorded, claim vs live state diffed
|
||||
|
||||
### Bring your own Claude Code session (via [`molecule-mcp-claude-channel`](https://git.moleculesai.app/molecule-ai/molecule-mcp-claude-channel))
|
||||
|
||||
- Claude Code plugin that bridges Molecule A2A traffic into a local Claude Code session via MCP
|
||||
- subscribe to one or more workspaces; peer messages surface as conversation turns; replies route back through Molecule's A2A
|
||||
- no tunnel, no public endpoint — the plugin self-registers each watched workspace as `delivery_mode=poll` and long-polls `/activity?since_id=…`
|
||||
- multi-tenant friendly: one plugin install can watch workspaces across multiple Molecule tenants (`MOLECULE_PLATFORM_URLS` per-workspace)
|
||||
- install via the standard marketplace flow: `/plugin marketplace add Molecule-AI/molecule-mcp-claude-channel` → `/plugin install molecule-channel@molecule-mcp-claude-channel`
|
||||
|
||||
## Built For Teams That Need More Than A Demo
|
||||
|
||||
Molecule AI is especially strong when you need to run:
|
||||
@ -252,30 +233,24 @@ Molecule AI is especially strong when you need to run:
|
||||
## Architecture
|
||||
|
||||
```text
|
||||
Canvas (Next.js 15, warm-paper :3000) <--HTTP / WS--> Platform (Go 1.25 :8080) <---> Postgres + Redis
|
||||
| |
|
||||
| +--> Provisioner: Docker (local) / EC2 + SSM (prod)
|
||||
| +--> bundles · templates · secrets · KMS
|
||||
Canvas (Next.js :3000) <--HTTP / WS--> Platform (Go :8080) <---> Postgres + Redis
|
||||
| |
|
||||
| +--> Docker provisioner / bundles / templates / secrets
|
||||
|
|
||||
+------------------------- shows ------------------------> workspaces, teams, tasks, traces, events
|
||||
+-------------------- shows --------------------> workspaces, teams, tasks, traces, events
|
||||
|
||||
Workspace Runtime (Python ≥3.11, image with adapters)
|
||||
- 8 adapters: LangGraph / DeepAgents / Claude Code / CrewAI / AutoGen / Hermes / Gemini CLI / OpenClaw
|
||||
- Agent Card + A2A server (typed-SSOT response path, RFC #2967)
|
||||
- heartbeat + activity + awareness-backed memory (Memory v2 — pgvector semantic recall)
|
||||
Workspace Runtime (Python image with adapters)
|
||||
- LangGraph / DeepAgents / Claude Code / CrewAI / AutoGen / OpenClaw
|
||||
- Agent Card + A2A server
|
||||
- heartbeat + activity + awareness-backed memory
|
||||
- skills + plugins + hot reload
|
||||
|
||||
SaaS Control Plane (molecule-controlplane, private)
|
||||
- per-tenant EC2 + Neon (Postgres branch) + Cloudflare Tunnel
|
||||
- WorkOS · Stripe · KMS · AWS Secrets Manager
|
||||
- tenant_resources audit + 30-min reconciler
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
git clone https://git.moleculesai.app/molecule-ai/molecule-core.git
|
||||
cd molecule-core
|
||||
git clone https://github.com/Molecule-AI/molecule-monorepo.git
|
||||
cd molecule-monorepo
|
||||
|
||||
cp .env.example .env
|
||||
# Defaults boot the stack locally out of the box. See .env.example for
|
||||
@ -284,7 +259,7 @@ cp .env.example .env
|
||||
./infra/scripts/setup.sh
|
||||
# Boots Postgres (:5432), Redis (:6379), Langfuse (:3001),
|
||||
# and Temporal (:7233 gRPC, :8233 UI) on the shared
|
||||
# `molecule-core-net` Docker network. Temporal runs with
|
||||
# `molecule-monorepo-net` Docker network. Temporal runs with
|
||||
# no auth on localhost — dev-only; production must gate it.
|
||||
#
|
||||
# Also populates the template/plugin registry by cloning every repo
|
||||
@ -328,11 +303,7 @@ Then open `http://localhost:3000`:
|
||||
|
||||
## Current Scope
|
||||
|
||||
The current `main` branch ships the core platform, Canvas v4 (warm-paper themed), Memory v2 (pgvector semantic recall), the typed-SSOT A2A response path (RFC #2967), **eight production adapters** (Claude Code, Hermes, Gemini CLI, LangGraph, DeepAgents, CrewAI, AutoGen, OpenClaw), skill lifecycle, and operational surfaces.
|
||||
|
||||
The companion private repo [`molecule-controlplane`](https://git.moleculesai.app/molecule-ai/molecule-controlplane) provides the SaaS surface — multi-tenant orchestration on EC2 + Neon + Cloudflare Tunnels, KMS envelope encryption, WorkOS auth, Stripe billing, and a `tenant_resources` audit table with a 30-min reconciler.
|
||||
|
||||
Adjacent runtime work such as **NemoClaw** remains branch-level until merged, and this README keeps that distinction explicit on purpose.
|
||||
The current `main` branch already includes the core platform, canvas, memory model, six production adapters, skill lifecycle, and operational surfaces. Adjacent runtime work such as **NemoClaw** remains branch-level until merged, and this README keeps that distinction explicit on purpose.
|
||||
|
||||
## License
|
||||
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
<div align="center">
|
||||
|
||||
<p>
|
||||
<img src="./docs/assets/branding/molecule-icon.svg" alt="Molecule AI" width="160" />
|
||||
<img src="./docs/assets/branding/molecule-icon.png" alt="Molecule AI 图案 Logo" width="160" />
|
||||
</p>
|
||||
|
||||
<p>
|
||||
@ -38,8 +38,8 @@
|
||||
<a href="./docs/agent-runtime/workspace-runtime.md"><strong>Workspace Runtime</strong></a>
|
||||
</p>
|
||||
|
||||
[](https://railway.app/new/template?template=https://git.moleculesai.app/molecule-ai/molecule-core)
|
||||
[](https://render.com/deploy?repo=https://git.moleculesai.app/molecule-ai/molecule-core)
|
||||
[](https://railway.app/new/template?template=https://github.com/Molecule-AI/molecule-core)
|
||||
[](https://render.com/deploy?repo=https://github.com/Molecule-AI/molecule-core)
|
||||
|
||||
</div>
|
||||
|
||||
@ -52,8 +52,8 @@ Molecule AI 是目前最强的 AI Agent 组织治理方案之一,用来把 age
|
||||
它把过去分散在 demo、内部胶水代码和各类 framework 私有工具里的关键能力,收敛成一个产品:
|
||||
|
||||
- 一套组织原生 control plane,管理团队、角色、层级和生命周期
|
||||
- 一套 runtime abstraction,让 **8 个** agent runtime —— LangGraph、DeepAgents、Claude Code、CrewAI、AutoGen、**Hermes**、**Gemini CLI**、OpenClaw —— 共用一套 workspace 契约
|
||||
- 一套与组织边界对齐的 memory 模型,把 recall、sharing 和 skill evolution 放进同一体系(Memory v2 由 pgvector 支撑语义召回)
|
||||
- 一套 runtime abstraction,让 LangGraph、DeepAgents、Claude Code、CrewAI、AutoGen、OpenClaw 并存运行
|
||||
- 一套与组织边界对齐的 memory 模型,把 recall、sharing 和 skill evolution 放进同一体系
|
||||
- 一套面向线上 workspace 的运维面,统一完成观测、暂停、重启、检查和持续改进
|
||||
|
||||
今天很多团队能做好 workflow、单 agent、coding agent,或者自定义 multi-agent graph 中的一种。
|
||||
@ -74,7 +74,7 @@ Molecule AI 填的就是这个空白。
|
||||
|
||||
### 3. Runtime 选择不再是死路
|
||||
|
||||
LangGraph、DeepAgents、Claude Code、CrewAI、AutoGen、Hermes、Gemini CLI、OpenClaw 都可以挂到同一个 workspace abstraction 下。团队可以统一治理方式,而不必统一到底层 runtime。
|
||||
LangGraph、DeepAgents、Claude Code、CrewAI、AutoGen、OpenClaw 都可以挂到同一个 workspace abstraction 下。团队可以统一治理方式,而不必统一到底层 runtime。
|
||||
|
||||
### 4. Memory 被当成基础设施来做
|
||||
|
||||
@ -116,8 +116,6 @@ Molecule AI 并不是要替代下面这些 framework,而是把它们纳入更
|
||||
| **Claude Code** | `main` 已支持 | 真实编码工作流、CLI-native continuity | 安全 workspace 抽象、A2A delegation、组织边界、共享 control plane |
|
||||
| **CrewAI** | `main` 已支持 | 角色型 crew 模式清晰 | 持久 workspace 身份、统一策略、共享 Canvas 和 registry |
|
||||
| **AutoGen** | `main` 已支持 | assistant/tool orchestration | 统一部署、层级协作、共享运维平面 |
|
||||
| **Hermes 4** | `main` 已支持 | 混合推理、原生工具调用、json_schema 输出(NousResearch/hermes-agent) | Option B 上游 hook、A2A 桥接 OpenAI 兼容 API、多 provider 自动派生 |
|
||||
| **Gemini CLI** | `main` 已支持 | Google Gemini CLI 持续会话 | workspace 生命周期、A2A、层级感知协作、共享运维平面 |
|
||||
| **OpenClaw** | `main` 已支持 | CLI-native runtime,自有 session 模型 | workspace 生命周期、templates、activity logs、拓扑感知协作 |
|
||||
| **NemoClaw** | `feat/nemoclaw-t4-docker` 分支 WIP | NVIDIA 方向 runtime 路线 | 计划并入同一抽象层,但当前还不是 `main` 已合并能力 |
|
||||
|
||||
@ -183,10 +181,9 @@ Molecule AI 并不是要替代下面这些 framework,而是把它们纳入更
|
||||
|
||||
## `main` 分支已经具备什么
|
||||
|
||||
### Canvas(v4)
|
||||
### Canvas
|
||||
|
||||
- Next.js 15 + React Flow + Zustand
|
||||
- **warm-paper 主题系统** —— light / dark / 跟随系统;SSR cookie + nonce'd boot 脚本 + ThemeProvider;终端与代码面板始终保持深色
|
||||
- drag-to-nest 团队构建
|
||||
- empty state + onboarding wizard
|
||||
- template palette
|
||||
@ -195,9 +192,8 @@ Molecule AI 并不是要替代下面这些 framework,而是把它们纳入更
|
||||
|
||||
### Platform
|
||||
|
||||
- Go 1.25 / Gin control plane(80+ HTTP 端点 + Gorilla WebSocket fanout)
|
||||
- workspace CRUD 和 provisioning(可插拔 Provisioner —— 本地 Docker、生产 EC2 + SSM)
|
||||
- **A2A 响应路径已收敛为类型化的判别联合(RFC #2967)** —— 冻结 dataclass + 全量 parser;100% 单元测试 + 对抗性 fuzz 覆盖
|
||||
- Go/Gin control plane
|
||||
- workspace CRUD 和 provisioning
|
||||
- registry 与 heartbeat
|
||||
- 浏览器安全的 A2A proxy
|
||||
- team expansion/collapse
|
||||
@ -207,10 +203,10 @@ Molecule AI 并不是要替代下面这些 framework,而是把它们纳入更
|
||||
|
||||
### Runtime
|
||||
|
||||
- 统一 `workspace/` 镜像;生产环境采用 thin AMI(us-east-2)
|
||||
- adapter 驱动执行,覆盖 **8 个 runtime**(Claude Code、Hermes、Gemini CLI、LangGraph、DeepAgents、CrewAI、AutoGen、OpenClaw)
|
||||
- 统一 `workspace/` 镜像
|
||||
- adapter 驱动执行
|
||||
- Agent Card 注册
|
||||
- awareness-backed memory;**Memory v2 由 pgvector 支撑**语义召回
|
||||
- awareness-backed memory
|
||||
- plugin 挂载共享 rules/skills
|
||||
- 本地 skills 热加载
|
||||
- coordinator-only delegation 路径
|
||||
@ -224,21 +220,6 @@ Molecule AI 并不是要替代下面这些 framework,而是把它们纳入更
|
||||
- runtime tiers
|
||||
- 终端与文件层面的 workspace 直接排障
|
||||
|
||||
### SaaS(由 [`molecule-controlplane`](https://git.moleculesai.app/molecule-ai/molecule-controlplane) 提供)
|
||||
|
||||
- 多租户运行在 AWS EC2 + Neon(每租户一个 Postgres branch)+ Cloudflare Tunnels(每租户一条隧道,对外不开任何端口)
|
||||
- WorkOS AuthKit + Stripe Checkout + Customer Portal
|
||||
- AWS KMS 信封加密(DB / Redis 连接串);AWS Secrets Manager 负责租户 bootstrap
|
||||
- `tenant_resources` 审计表 + 30 分钟 boot-event-aware reconciler —— 每个 CF / AWS lifecycle 事件都有记录,每 30 分钟比对 claim 与实际状态
|
||||
|
||||
### 在 Claude Code 里直接接入(由 [`molecule-mcp-claude-channel`](https://git.moleculesai.app/molecule-ai/molecule-mcp-claude-channel) 提供)
|
||||
|
||||
- 把 Molecule A2A 流量桥接到本地 Claude Code 会话的 MCP 插件
|
||||
- 订阅一个或多个 workspace;peer 的消息会以 user-turn 出现,回复会经 Molecule A2A 路由出去
|
||||
- 无需公网隧道、无需公开端点 —— 插件启动时自动把每个 watched workspace 注册成 `delivery_mode=poll`,长轮询 `/activity?since_id=…`
|
||||
- 多租户友好:单次安装即可同时 watch 跨多个 Molecule 租户的 workspace(`MOLECULE_PLATFORM_URLS` 按 workspace 配置)
|
||||
- 通过标准 marketplace 流程安装:`/plugin marketplace add Molecule-AI/molecule-mcp-claude-channel` → `/plugin install molecule-channel@molecule-mcp-claude-channel`
|
||||
|
||||
## 适合什么团队
|
||||
|
||||
Molecule AI 特别适合下面这些场景:
|
||||
@ -251,29 +232,23 @@ Molecule AI 特别适合下面这些场景:
|
||||
## 架构总览
|
||||
|
||||
```text
|
||||
Canvas (Next.js 15, warm-paper :3000) <--HTTP / WS--> Platform (Go 1.25 :8080) <---> Postgres + Redis
|
||||
| |
|
||||
| +--> Provisioner: Docker (本地) / EC2 + SSM (生产)
|
||||
| +--> bundles · templates · secrets · KMS
|
||||
Canvas (Next.js :3000) <--HTTP / WS--> Platform (Go :8080) <---> Postgres + Redis
|
||||
| |
|
||||
| +--> Docker provisioner / bundles / templates / secrets
|
||||
|
|
||||
+------------------------- 展示 ------------------------> workspaces, teams, tasks, traces, events
|
||||
+-------------------- 展示 --------------------> workspaces, teams, tasks, traces, events
|
||||
|
||||
Workspace Runtime (Python ≥3.11,含 adapter 集合的镜像)
|
||||
- 8 个 adapter: LangGraph / DeepAgents / Claude Code / CrewAI / AutoGen / Hermes / Gemini CLI / OpenClaw
|
||||
- Agent Card + A2A server(typed-SSOT 响应路径,RFC #2967)
|
||||
- heartbeat + activity + awareness-backed memory(Memory v2 —— pgvector 语义召回)
|
||||
Workspace Runtime (Python image with adapters)
|
||||
- LangGraph / DeepAgents / Claude Code / CrewAI / AutoGen / OpenClaw
|
||||
- Agent Card + A2A server
|
||||
- heartbeat + activity + awareness-backed memory
|
||||
- skills + plugins + hot reload
|
||||
|
||||
SaaS Control Plane (molecule-controlplane,私有)
|
||||
- 每租户 EC2 + Neon (Postgres branch) + Cloudflare Tunnel
|
||||
- WorkOS · Stripe · KMS · AWS Secrets Manager
|
||||
- tenant_resources 审计 + 30 分钟 reconciler
|
||||
```
|
||||
|
||||
## 快速开始
|
||||
|
||||
```bash
|
||||
git clone https://git.moleculesai.app/molecule-ai/molecule-core.git
|
||||
git clone https://github.com/Molecule-AI/molecule-core.git
|
||||
cd molecule-core
|
||||
|
||||
cp .env.example .env
|
||||
@ -283,7 +258,7 @@ cp .env.example .env
|
||||
./infra/scripts/setup.sh
|
||||
# 启动 Postgres (:5432)、Redis (:6379)、Langfuse (:3001)
|
||||
# 以及 Temporal (:7233 gRPC, :8233 UI),全部挂在共享的
|
||||
# `molecule-core-net` Docker 网络上。Temporal 默认无鉴权,
|
||||
# `molecule-monorepo-net` Docker 网络上。Temporal 默认无鉴权,
|
||||
# 仅用于本地开发;生产环境必须加 mTLS / API Key。
|
||||
#
|
||||
# 同时会根据 manifest.json 拉取所有模板/插件仓库到
|
||||
@ -321,11 +296,7 @@ npm run dev
|
||||
|
||||
## 当前范围说明
|
||||
|
||||
当前 `main` 已经包含核心平台、Canvas v4(warm-paper 主题)、Memory v2(pgvector 语义召回)、typed-SSOT A2A 响应路径(RFC #2967)、**8 个正式 adapter**(Claude Code、Hermes、Gemini CLI、LangGraph、DeepAgents、CrewAI、AutoGen、OpenClaw)、skill lifecycle,以及主要运维面。
|
||||
|
||||
配套的私有仓库 [`molecule-controlplane`](https://git.moleculesai.app/molecule-ai/molecule-controlplane) 提供 SaaS 层 —— 多租户编排(EC2 + Neon + Cloudflare Tunnels)、KMS 信封加密、WorkOS 鉴权、Stripe 计费,以及 `tenant_resources` 审计表加 30 分钟 reconciler。
|
||||
|
||||
像 **NemoClaw** 这样的相邻 runtime 路线仍然属于分支级工作,只有合并后才会进入正式支持列表,这里会明确区分。
|
||||
当前 `main` 已经包含核心平台、Canvas、memory model、6 个正式 adapter、skill lifecycle 和主要运维面。像 **NemoClaw** 这样的相邻 runtime 路线仍然属于分支级工作,只有合并后才会进入正式支持列表,这里会明确区分。
|
||||
|
||||
## License
|
||||
|
||||
|
||||
@ -1,10 +0,0 @@
|
||||
# Excluded from `docker build` context. Without this, the COPY . . step in
|
||||
# canvas/Dockerfile clobbers the freshly-installed node_modules with the
|
||||
# host's (potentially broken / wrong-arch) copy — the @tailwindcss/oxide
|
||||
# native binary disagreed and broke `next build`.
|
||||
node_modules
|
||||
.next
|
||||
.git
|
||||
*.log
|
||||
.env*
|
||||
!.env.example
|
||||
@ -1,11 +1,7 @@
|
||||
FROM node:22-alpine@sha256:cb15fca92530d7ac113467696cf1001208dac49c3c64355fd1348c11a88ddf8f AS builder
|
||||
FROM node:22-alpine AS builder
|
||||
WORKDIR /app
|
||||
COPY package.json package-lock.json* ./
|
||||
# `npm ci` (not `install`) for lockfile-exact reproducibility.
|
||||
# `--include=optional` ensures the platform-specific @tailwindcss/oxide
|
||||
# native binary lands — without it, postcss fails with "Cannot read
|
||||
# properties of undefined (reading 'All')" at build time.
|
||||
RUN npm ci --include=optional
|
||||
RUN npm install
|
||||
COPY . .
|
||||
ARG NEXT_PUBLIC_PLATFORM_URL=http://localhost:8080
|
||||
ARG NEXT_PUBLIC_WS_URL=ws://localhost:8080/ws
|
||||
@ -15,7 +11,7 @@ ENV NEXT_PUBLIC_WS_URL=$NEXT_PUBLIC_WS_URL
|
||||
ENV NEXT_PUBLIC_ADMIN_TOKEN=$NEXT_PUBLIC_ADMIN_TOKEN
|
||||
RUN npm run build
|
||||
|
||||
FROM node:22-alpine@sha256:cb15fca92530d7ac113467696cf1001208dac49c3c64355fd1348c11a88ddf8f
|
||||
FROM node:22-alpine
|
||||
WORKDIR /app
|
||||
COPY --from=builder /app/.next/standalone ./
|
||||
COPY --from=builder /app/.next/static ./.next/static
|
||||
|
||||
@ -17,24 +17,6 @@ import { dirname, join } from "node:path";
|
||||
// update one heuristic. Production is unaffected: `output: "standalone"`
|
||||
// bakes resolved env into the build, and the marker file isn't shipped.
|
||||
loadMonorepoEnv();
|
||||
// Boot-time matched-pair guard for ADMIN_TOKEN / NEXT_PUBLIC_ADMIN_TOKEN.
|
||||
// When ADMIN_TOKEN is set on the workspace-server (server-side bearer
|
||||
// gate, wsauth_middleware.go ~L245), the canvas MUST send the matching
|
||||
// NEXT_PUBLIC_ADMIN_TOKEN as `Authorization: Bearer ...` on every API
|
||||
// call. If only one is set, every workspace API call 401s silently —
|
||||
// the canvas hydrates with empty data and the user sees a broken page
|
||||
// with no console hint about the auth-config mismatch.
|
||||
//
|
||||
// Pre-fix the matched-pair contract was descriptive only (a comment in
|
||||
// .env): future devs/agents could re-misconfigure with one of the two
|
||||
// unset and silently 401. Closes the post-PR-#174 self-review gap.
|
||||
//
|
||||
// Warn-only (not exit) — production canvas Docker images bake these
|
||||
// vars into the build at image-build time, and a missed pair there
|
||||
// would still emit the warning at runtime via the standalone server's
|
||||
// startup. Killing the process on misconfiguration would turn a
|
||||
// recoverable auth issue into a hard crashloop.
|
||||
checkAdminTokenPair();
|
||||
|
||||
const nextConfig: NextConfig = {
|
||||
output: "standalone",
|
||||
@ -75,43 +57,6 @@ function loadMonorepoEnv() {
|
||||
);
|
||||
}
|
||||
|
||||
// Boot-time matched-pair guard. Runs after .env has been loaded so the
|
||||
// check sees the post-load state. The two env vars must be set or
|
||||
// unset together; one-without-the-other is the silent-401 footgun.
|
||||
//
|
||||
// Treats empty string ("") as unset. An explicitly-empty `KEY=` in
|
||||
// .env counts as set-to-empty in `process.env`, but for auth purposes
|
||||
// an empty bearer token is equivalent to no token — so both
|
||||
// `ADMIN_TOKEN=` and an unset ADMIN_TOKEN are equivalent relative to
|
||||
// the matched-pair invariant.
|
||||
//
|
||||
// Returns void; side effect is the console.error warning. Kept as a
|
||||
// separate function (exported) so a future test can reset env, call
|
||||
// this, and assert on captured stderr.
|
||||
export function checkAdminTokenPair(): void {
|
||||
const serverSet = !!process.env.ADMIN_TOKEN;
|
||||
const clientSet = !!process.env.NEXT_PUBLIC_ADMIN_TOKEN;
|
||||
if (serverSet === clientSet) return;
|
||||
// Distinct messages so the operator can tell which half is missing
|
||||
// — the fix is symmetric (set the other one) but the diagnostic
|
||||
// mentions which side is currently set so they don't have to grep.
|
||||
if (serverSet && !clientSet) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
"[next.config] ADMIN_TOKEN is set but NEXT_PUBLIC_ADMIN_TOKEN is not — " +
|
||||
"canvas will 401 against workspace-server because the bearer header " +
|
||||
"is never attached. Set both to the same value, or unset both.",
|
||||
);
|
||||
} else {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
"[next.config] NEXT_PUBLIC_ADMIN_TOKEN is set but ADMIN_TOKEN is not — " +
|
||||
"workspace-server will reject the bearer because no AdminAuth gate " +
|
||||
"is configured. Set both to the same value, or unset both.",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
function findMonorepoRoot(start: string): string | null {
|
||||
let dir = start;
|
||||
for (let i = 0; i < 6; i++) {
|
||||
|
||||
@ -354,7 +354,7 @@ function OrgCTA({ org }: { org: Org }) {
|
||||
);
|
||||
}
|
||||
// provisioning / unknown — non-interactive
|
||||
return <span className="text-sm text-ink-mid">{org.status}…</span>;
|
||||
return <span className="text-sm text-ink-soft">{org.status}…</span>;
|
||||
}
|
||||
|
||||
function EmptyState({ banner }: { banner?: React.ReactNode }) {
|
||||
@ -420,7 +420,7 @@ function CreateOrgForm({ onCreated }: { onCreated: (slug: string) => void }) {
|
||||
aria-describedby="org-slug-hint"
|
||||
className="mt-1 w-full rounded border border-line bg-surface-card px-3 py-2 text-sm text-ink"
|
||||
/>
|
||||
<p id="org-slug-hint" className="mt-1 text-xs text-ink-mid">
|
||||
<p id="org-slug-hint" className="mt-1 text-xs text-ink-soft">
|
||||
Lowercase letters, numbers, and hyphens only. Cannot be changed later.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
@ -56,7 +56,7 @@ export default function Home() {
|
||||
<div className="fixed inset-0 flex items-center justify-center bg-surface">
|
||||
<div role="status" aria-live="polite" className="flex flex-col items-center gap-3">
|
||||
<Spinner size="lg" />
|
||||
<span className="text-xs text-ink-mid">Loading canvas...</span>
|
||||
<span className="text-xs text-ink-soft">Loading canvas...</span>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
@ -119,11 +119,11 @@ function PlatformDownDiagnostic() {
|
||||
Most common cause on a dev host: one of those services stopped.
|
||||
</p>
|
||||
<div className="bg-surface-sunken/80 border border-line/50 rounded-lg px-4 py-3 max-w-lg w-full">
|
||||
<div className="text-[10px] uppercase tracking-wider text-ink-mid mb-2">Try first</div>
|
||||
<div className="text-[10px] uppercase tracking-wider text-ink-soft mb-2">Try first</div>
|
||||
<pre className="text-[12px] text-ink-mid font-mono whitespace-pre-wrap leading-relaxed">{`brew services start postgresql@14
|
||||
brew services start redis`}</pre>
|
||||
</div>
|
||||
<p className="text-[11px] text-ink-mid max-w-lg text-center">
|
||||
<p className="text-[11px] text-ink-soft max-w-lg text-center">
|
||||
If both are running, check <code className="font-mono">/tmp/molecule-server.log</code> for
|
||||
the underlying error. If you're on hosted SaaS, this is a platform incident — try again in a moment.
|
||||
</p>
|
||||
|
||||
@ -41,7 +41,7 @@ export default function PricingPage() {
|
||||
<p className="mt-2 text-ink-mid">
|
||||
We publish the{" "}
|
||||
<a
|
||||
href="https://git.moleculesai.app/molecule-ai/molecule-monorepo"
|
||||
href="https://github.com/Molecule-AI/molecule-monorepo"
|
||||
className="text-accent underline hover:text-accent"
|
||||
>
|
||||
full source on GitHub
|
||||
@ -55,13 +55,13 @@ export default function PricingPage() {
|
||||
</a>
|
||||
.
|
||||
</p>
|
||||
<p className="mt-6 text-sm text-ink-mid">
|
||||
<p className="mt-6 text-sm text-ink-soft">
|
||||
Prices shown in USD. Flat-rate per org — no per-seat fees on any paid tier.
|
||||
Enterprise / self-hosted licensing available — contact us.
|
||||
</p>
|
||||
</section>
|
||||
|
||||
<footer className="mx-auto mt-20 max-w-5xl border-t border-line px-6 py-6 text-center text-sm text-ink-mid">
|
||||
<footer className="mx-auto mt-20 max-w-5xl border-t border-line px-6 py-6 text-center text-sm text-ink-soft">
|
||||
<p>
|
||||
© {new Date().getFullYear()} Molecule AI, Inc. ·{" "}
|
||||
<a href="/legal/terms" className="hover:text-ink-mid">
|
||||
|
||||
@ -1,10 +1,9 @@
|
||||
'use client';
|
||||
|
||||
import { useEffect, useMemo, useCallback, useRef } from "react";
|
||||
import { useEffect, useMemo, useCallback } from "react";
|
||||
import { type Edge, MarkerType } from "@xyflow/react";
|
||||
import { api } from "@/lib/api";
|
||||
import { useCanvasStore } from "@/store/canvas";
|
||||
import { useSocketEvent } from "@/hooks/useSocketEvent";
|
||||
import type { ActivityEntry } from "@/types/activity";
|
||||
|
||||
// ── Constants ─────────────────────────────────────────────────────────────────
|
||||
@ -12,6 +11,9 @@ import type { ActivityEntry } from "@/types/activity";
|
||||
/** 60-minute look-back window for delegation activity */
|
||||
export const A2A_WINDOW_MS = 60 * 60 * 1000;
|
||||
|
||||
/** Polling interval — refresh edges every 60 seconds */
|
||||
export const A2A_POLL_MS = 60 * 1_000;
|
||||
|
||||
/** Threshold for "hot" edges: < 5 minutes → animated + violet stroke */
|
||||
export const A2A_HOT_MS = 5 * 60 * 1_000;
|
||||
|
||||
@ -129,20 +131,6 @@ export function buildA2AEdges(
|
||||
* `a2aEdges`. Canvas.tsx merges these with topology edges and passes the
|
||||
* combined list to ReactFlow.
|
||||
*
|
||||
* Update shape (issue #61 Stage 2, replaces the 60s polling loop):
|
||||
* - On mount (when showA2AEdges): one HTTP fan-out per visible workspace
|
||||
* (delegation rows, 60-min window). Bootstraps the local row buffer.
|
||||
* - Steady state: subscribes to ACTIVITY_LOGGED via useSocketEvent.
|
||||
* Each delegation event from a visible workspace is appended to the
|
||||
* buffer; edges are re-derived via the existing buildA2AEdges helper.
|
||||
* - showA2AEdges toggle off: clears edges + buffer.
|
||||
* - Visible-ID-set change: re-bootstraps so a freshly-shown workspace
|
||||
* backfills its 60-min history (existing visibleIdsKey selector
|
||||
* behaviour preserved — that's the 2026-05-04 render-loop fix).
|
||||
*
|
||||
* No interval poll. The singleton ReconnectingSocket already owns
|
||||
* reconnect / backoff / health-check; useSocketEvent inherits those.
|
||||
*
|
||||
* Mount this inside CanvasInner (no ReactFlow hook dependency).
|
||||
*/
|
||||
export function A2ATopologyOverlay() {
|
||||
@ -169,9 +157,7 @@ export function A2ATopologyOverlay() {
|
||||
// the symptom of this re-render storm.
|
||||
//
|
||||
// The fix is purely the dependency-stability change here; the fetch
|
||||
// logic is unchanged. Post-#61 the polling-driven fetch is gone, but
|
||||
// the visibleIdsKey gate is still required so a peer-discovery write
|
||||
// doesn't trigger a wasteful re-bootstrap.
|
||||
// logic is unchanged.
|
||||
const visibleIdsKey = useCanvasStore((s) =>
|
||||
s.nodes
|
||||
.filter((n) => !n.hidden)
|
||||
@ -185,42 +171,16 @@ export function A2ATopologyOverlay() {
|
||||
[visibleIdsKey]
|
||||
);
|
||||
|
||||
// Local rolling buffer of delegation rows. Pruned by A2A_WINDOW_MS on
|
||||
// each rebuild so a long-lived session doesn't accumulate unbounded
|
||||
// history. The buffer's high-water mark is approximately:
|
||||
// visibleIds.length × bootstrap-fetch-limit (500) + WS arrivals
|
||||
// Real-world ceiling: ~3000 entries at the 60-min boundary, all of
|
||||
// which buildA2AEdges aggregates into at most N² edges.
|
||||
const bufferRef = useRef<ActivityEntry[]>([]);
|
||||
// visibleIdsRef gives the WS handler the latest visible-ID set without
|
||||
// re-subscribing on every render. The bus listener is registered
|
||||
// exactly once per mount; subscriber-side filtering reads from this ref.
|
||||
const visibleIdsRef = useRef(visibleIds);
|
||||
visibleIdsRef.current = visibleIds;
|
||||
|
||||
// Re-derive overlay edges from the current buffer + push to store.
|
||||
// Prunes by A2A_WINDOW_MS first so memory stays bounded across long
|
||||
// sessions and the aggregation cost stays O(window-size).
|
||||
const recomputeAndPush = useCallback(() => {
|
||||
const cutoff = Date.now() - A2A_WINDOW_MS;
|
||||
bufferRef.current = bufferRef.current.filter(
|
||||
(r) => new Date(r.created_at).getTime() > cutoff
|
||||
);
|
||||
setA2AEdges(buildA2AEdges(bufferRef.current));
|
||||
}, [setA2AEdges]);
|
||||
|
||||
// Bootstrap fan-out — one HTTP per visible workspace. Replaces the
|
||||
// 60s polling loop entirely. Race-aware: any WS arrivals that landed
|
||||
// in the buffer DURING the fetch (between the await and resume) are
|
||||
// preserved by id-dedup-with-fetched-first ordering.
|
||||
const bootstrap = useCallback(async () => {
|
||||
// Fetch delegation activity for all visible workspaces and rebuild overlay edges.
|
||||
const fetchAndUpdate = useCallback(async () => {
|
||||
if (visibleIds.length === 0) {
|
||||
bufferRef.current = [];
|
||||
setA2AEdges([]);
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const fetchedRows = (
|
||||
// Fan-out — one request per visible workspace.
|
||||
// Per-request failures are swallowed so one broken workspace doesn't blank the overlay.
|
||||
const allRows = (
|
||||
await Promise.all(
|
||||
visibleIds.map((id) =>
|
||||
api
|
||||
@ -232,76 +192,24 @@ export function A2ATopologyOverlay() {
|
||||
)
|
||||
).flat();
|
||||
|
||||
// Merge: fetched rows first, then any in-flight WS arrivals that
|
||||
// accumulated during the await. Dedup by id so rows that appear
|
||||
// in both paths are not double-counted in the aggregation.
|
||||
const merged = [...fetchedRows, ...bufferRef.current];
|
||||
const seen = new Set<string>();
|
||||
bufferRef.current = merged.filter((r) => {
|
||||
if (seen.has(r.id)) return false;
|
||||
seen.add(r.id);
|
||||
return true;
|
||||
});
|
||||
recomputeAndPush();
|
||||
setA2AEdges(buildA2AEdges(allRows));
|
||||
} catch {
|
||||
// Overlay failure is non-critical — canvas remains functional
|
||||
}
|
||||
}, [visibleIds, setA2AEdges, recomputeAndPush]);
|
||||
}, [visibleIds, setA2AEdges]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!showA2AEdges) {
|
||||
// Clear edges + buffer immediately when toggled off
|
||||
bufferRef.current = [];
|
||||
// Clear edges immediately when toggled off
|
||||
setA2AEdges([]);
|
||||
return;
|
||||
}
|
||||
void bootstrap();
|
||||
}, [showA2AEdges, bootstrap, setA2AEdges]);
|
||||
|
||||
// Live-update path. Filters server-side ACTIVITY_LOGGED events down
|
||||
// to delegation initiations from visible workspaces and appends each
|
||||
// into the rolling buffer, re-deriving edges via buildA2AEdges.
|
||||
//
|
||||
// Only `method === "delegate"` rows count — the same filter
|
||||
// buildA2AEdges applies — so delegate_result rows arriving over the
|
||||
// wire don't double-count.
|
||||
useSocketEvent((msg) => {
|
||||
if (!showA2AEdges) return;
|
||||
if (msg.event !== "ACTIVITY_LOGGED") return;
|
||||
|
||||
const p = (msg.payload || {}) as Record<string, unknown>;
|
||||
if (p.activity_type !== "delegation") return;
|
||||
if (p.method !== "delegate") return;
|
||||
|
||||
const wsId = msg.workspace_id;
|
||||
if (!visibleIdsRef.current.includes(wsId)) return;
|
||||
|
||||
// Synthesise an ActivityEntry from the WS payload so buildA2AEdges
|
||||
// (which the bootstrap path also feeds) handles it identically.
|
||||
const entry: ActivityEntry = {
|
||||
id:
|
||||
(p.id as string) ||
|
||||
`ws-push-${msg.timestamp || Date.now()}-${wsId}`,
|
||||
workspace_id: wsId,
|
||||
activity_type: "delegation",
|
||||
source_id: (p.source_id as string | null) ?? null,
|
||||
target_id: (p.target_id as string | null) ?? null,
|
||||
method: "delegate",
|
||||
summary: (p.summary as string | null) ?? null,
|
||||
request_body: null,
|
||||
response_body: null,
|
||||
duration_ms: (p.duration_ms as number | null) ?? null,
|
||||
status: (p.status as string) || "ok",
|
||||
error_detail: null,
|
||||
created_at:
|
||||
(p.created_at as string) ||
|
||||
msg.timestamp ||
|
||||
new Date().toISOString(),
|
||||
};
|
||||
|
||||
bufferRef.current = [...bufferRef.current, entry];
|
||||
recomputeAndPush();
|
||||
});
|
||||
// Initial fetch, then poll every 60 s
|
||||
void fetchAndUpdate();
|
||||
const timer = setInterval(() => void fetchAndUpdate(), A2A_POLL_MS);
|
||||
return () => clearInterval(timer);
|
||||
}, [showA2AEdges, fetchAndUpdate, setA2AEdges]);
|
||||
|
||||
// Pure side-effect — renders nothing
|
||||
return null;
|
||||
|
||||
@ -127,7 +127,7 @@ export function AuditTrailPanel({ workspaceId }: Props) {
|
||||
if (loading) {
|
||||
return (
|
||||
<div className="flex items-center justify-center h-32">
|
||||
<span className="text-xs text-ink-mid">Loading audit trail…</span>
|
||||
<span className="text-xs text-ink-soft">Loading audit trail…</span>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@ -145,7 +145,7 @@ export function AuditTrailPanel({ workspaceId }: Props) {
|
||||
className={`px-2 py-1 text-[10px] rounded-md font-medium transition-all shrink-0 ${
|
||||
filter === f.id
|
||||
? "bg-surface-card text-ink ring-1 ring-zinc-600"
|
||||
: "text-ink-mid hover:text-ink-mid hover:bg-surface-card/60"
|
||||
: "text-ink-soft hover:text-ink-mid hover:bg-surface-card/60"
|
||||
}`}
|
||||
>
|
||||
{f.label}
|
||||
@ -174,9 +174,9 @@ export function AuditTrailPanel({ workspaceId }: Props) {
|
||||
{entries.length === 0 ? (
|
||||
/* Empty state */
|
||||
<div className="flex flex-col items-center justify-center py-16 gap-3 text-center">
|
||||
<span className="text-4xl text-ink-mid" aria-hidden="true">⊟</span>
|
||||
<span className="text-4xl text-ink-soft" aria-hidden="true">⊟</span>
|
||||
<p className="text-sm font-medium text-ink-mid">No audit events yet</p>
|
||||
<p className="text-[11px] text-ink-mid max-w-[200px] leading-relaxed">
|
||||
<p className="text-[11px] text-ink-soft max-w-[200px] leading-relaxed">
|
||||
Delegation, decision, gate, and human-in-the-loop events will appear here.
|
||||
</p>
|
||||
</div>
|
||||
@ -203,7 +203,7 @@ export function AuditTrailPanel({ workspaceId }: Props) {
|
||||
)}
|
||||
|
||||
{/* Entry count footer */}
|
||||
<p className="mt-3 text-center text-[9px] text-ink-mid">
|
||||
<p className="mt-3 text-center text-[9px] text-ink-soft">
|
||||
{entries.length} event{entries.length !== 1 ? "s" : ""} loaded
|
||||
{cursor ? " · more available" : " · all loaded"}
|
||||
</p>
|
||||
@ -265,7 +265,7 @@ export function AuditEntryRow({ entry, now }: AuditEntryRowProps) {
|
||||
)}
|
||||
|
||||
{/* Relative timestamp */}
|
||||
<span className="shrink-0 text-[9px] text-ink-mid">
|
||||
<span className="shrink-0 text-[9px] text-ink-soft">
|
||||
{formatAuditRelativeTime(entry.created_at, now)}
|
||||
</span>
|
||||
</div>
|
||||
|
||||
@ -125,7 +125,7 @@ export function BundleDropZone() {
|
||||
<div className="bg-surface-sunken/95 border border-accent/50 rounded-2xl px-8 py-6 shadow-2xl text-center">
|
||||
<div className="text-3xl mb-2" aria-hidden="true">📦</div>
|
||||
<div className="text-sm font-semibold text-ink">Drop Bundle to Import</div>
|
||||
<div className="text-xs text-ink-mid mt-1">.bundle.json files only</div>
|
||||
<div className="text-xs text-ink-soft mt-1">.bundle.json files only</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
"use client";
|
||||
|
||||
import { useCallback, useEffect, useMemo, useRef } from "react";
|
||||
import { useCallback, useMemo } from "react";
|
||||
import {
|
||||
ReactFlow,
|
||||
ReactFlowProvider,
|
||||
@ -187,23 +187,6 @@ function CanvasInner() {
|
||||
// Pan-to-node / zoom-to-team CustomEvent listeners + viewport save.
|
||||
const { onMoveEnd } = useCanvasViewport();
|
||||
|
||||
// Screen-reader announcements — read liveAnnouncement from the store and
|
||||
// immediately clear it so the same announcement doesn't re-fire on
|
||||
// re-render. Using a ref avoids a setState loop while keeping the
|
||||
// effect reactive to new announcement strings.
|
||||
const liveAnnouncement = useCanvasStore((s) => s.liveAnnouncement);
|
||||
const clearAnnouncement = useCanvasStore((s) => s.setLiveAnnouncement);
|
||||
const prevAnnouncement = useRef("");
|
||||
useEffect(() => {
|
||||
if (liveAnnouncement && liveAnnouncement !== prevAnnouncement.current) {
|
||||
prevAnnouncement.current = liveAnnouncement;
|
||||
// Small delay so the DOM update lands before clearing, giving
|
||||
// screen readers time to pick up the new text.
|
||||
const timer = setTimeout(() => clearAnnouncement(""), 500);
|
||||
return () => clearTimeout(timer);
|
||||
}
|
||||
}, [liveAnnouncement, clearAnnouncement]);
|
||||
|
||||
// Delete-confirmation lives in the store so the dialog survives ContextMenu
|
||||
// unmounting — the prior local-in-ContextMenu state raced with the menu's
|
||||
// outside-click handler.
|
||||
@ -343,21 +326,11 @@ function CanvasInner() {
|
||||
<DropTargetBadge />
|
||||
</ReactFlow>
|
||||
|
||||
{/* Screen-reader live region — announces workspace count on initial load and
|
||||
live status updates from WebSocket events (online, offline, provisioning, etc.).
|
||||
The liveAnnouncement text is cleared after the screen reader has had time
|
||||
to read it so the same message doesn't re-announce on re-render. */}
|
||||
<div
|
||||
role="status"
|
||||
aria-live="polite"
|
||||
aria-atomic="true"
|
||||
className="sr-only"
|
||||
>
|
||||
{liveAnnouncement || (
|
||||
nodes.filter((n) => !n.parentId).length === 0
|
||||
? "No workspaces on canvas"
|
||||
: `${nodes.filter((n) => !n.parentId).length} workspace${nodes.filter((n) => !n.parentId).length !== 1 ? "s" : ""} on canvas`
|
||||
)}
|
||||
{/* Screen-reader live region: announces workspace count on canvas load or change */}
|
||||
<div role="status" aria-live="polite" className="sr-only">
|
||||
{nodes.filter((n) => !n.parentId).length === 0
|
||||
? "No workspaces on canvas"
|
||||
: `${nodes.filter((n) => !n.parentId).length} workspace${nodes.filter((n) => !n.parentId).length !== 1 ? "s" : ""} on canvas`}
|
||||
</div>
|
||||
|
||||
{nodes.length === 0 && <EmptyState />}
|
||||
|
||||
@ -3,7 +3,6 @@
|
||||
import { useState, useEffect, useCallback, useRef } from "react";
|
||||
import { useCanvasStore } from "@/store/canvas";
|
||||
import { api } from "@/lib/api";
|
||||
import { useSocketEvent } from "@/hooks/useSocketEvent";
|
||||
import { COMM_TYPE_LABELS } from "@/lib/design-tokens";
|
||||
|
||||
interface Communication {
|
||||
@ -19,71 +18,32 @@ interface Communication {
|
||||
durationMs: number | null;
|
||||
}
|
||||
|
||||
/** Workspace-server `ACTIVITY_LOGGED` payload shape. Pulled out so the
|
||||
* WS handler below has a typed view of the same fields the HTTP
|
||||
* bootstrap consumes — drift between the two paths is a class of bug
|
||||
* AgentCommsPanel hit historically. */
|
||||
interface ActivityLoggedPayload {
|
||||
id?: string;
|
||||
activity_type?: string;
|
||||
source_id?: string | null;
|
||||
target_id?: string | null;
|
||||
workspace_id?: string;
|
||||
summary?: string | null;
|
||||
status?: string;
|
||||
duration_ms?: number | null;
|
||||
created_at?: string;
|
||||
}
|
||||
|
||||
/** Fan-out cap for the bootstrap HTTP fetch on mount / on visibility
|
||||
* re-open. Kept at 3 (carried over from the 2026-05-04 fix) so a
|
||||
* freshly-mounted overlay on a 15-workspace tenant only spends 3
|
||||
* round-trips bootstrapping. Live updates after that arrive via the
|
||||
* WS subscription below — no polling, no fan-out to maintain. */
|
||||
const BOOTSTRAP_FAN_OUT_CAP = 3;
|
||||
|
||||
/** Cap on the rendered list. Bootstrap + every WS push prepends, the
|
||||
* list is sliced to this size after each update. Mirrors the prior
|
||||
* polling-loop behaviour. */
|
||||
const COMMS_RENDER_CAP = 20;
|
||||
|
||||
/**
|
||||
* Overlay showing recent A2A communications between workspaces.
|
||||
*
|
||||
* Update shape (issue #61 Stage 1, replaces the 30s polling loop):
|
||||
* - On mount (when visible): one HTTP bootstrap per online workspace,
|
||||
* capped at BOOTSTRAP_FAN_OUT_CAP. Yields the initial recent-comms
|
||||
* window without waiting for live events.
|
||||
* - Steady state: subscribes to ACTIVITY_LOGGED via useSocketEvent.
|
||||
* Each event with a matching activity_type from a visible online
|
||||
* workspace gets synthesised into a Communication and prepended.
|
||||
* - Visibility re-open: re-bootstraps so the user sees the freshest
|
||||
* window even if WS was idle while collapsed.
|
||||
*
|
||||
* No interval poll. The singleton ReconnectingSocket in `store/socket.ts`
|
||||
* already owns reconnect/backoff/health-check, and `useSocketEvent`
|
||||
* inherits those guarantees. If WS is genuinely unhealthy, the overlay
|
||||
* shows the bootstrap snapshot until the next visibility re-open or
|
||||
* the next WS reconnect (which fires its own rehydrate burst).
|
||||
* Renders as a floating log panel that auto-updates.
|
||||
*/
|
||||
export function CommunicationOverlay() {
|
||||
const [comms, setComms] = useState<Communication[]>([]);
|
||||
const [visible, setVisible] = useState(true);
|
||||
const selectedNodeId = useCanvasStore((s) => s.selectedNodeId);
|
||||
const nodes = useCanvasStore((s) => s.nodes);
|
||||
// nodesRef gives the WS handler current node-name resolution without
|
||||
// re-subscribing on every node-list change. The bus listener is
|
||||
// registered exactly once per mount; subscriber-side filtering reads
|
||||
// the latest value via this ref.
|
||||
const nodesRef = useRef(nodes);
|
||||
nodesRef.current = nodes;
|
||||
|
||||
const bootstrapComms = useCallback(async () => {
|
||||
const fetchComms = useCallback(async () => {
|
||||
try {
|
||||
// Fan-out cap: each polled workspace = 1 round-trip. The platform
|
||||
// rate limits at 600 req/min/IP; combined with heartbeats + other
|
||||
// canvas polling, every workspace polled here costs ~6 req/min
|
||||
// (1 every 30s × 1 per workspace). Capping at 3 keeps this
|
||||
// overlay's footprint at 18 req/min worst case — well under
|
||||
// budget even with 8+ workspaces visible. Caught 2026-05-04 when
|
||||
// a user with 8+ workspaces (Design Director + 6 sub-agents +
|
||||
// 3 standalones) saw sustained 429s in canvas console.
|
||||
const onlineNodes = nodesRef.current.filter((n) => n.data.status === "online");
|
||||
const allComms: Communication[] = [];
|
||||
|
||||
for (const node of onlineNodes.slice(0, BOOTSTRAP_FAN_OUT_CAP)) {
|
||||
for (const node of onlineNodes.slice(0, 3)) {
|
||||
try {
|
||||
const activities = await api.get<Array<{
|
||||
id: string;
|
||||
@ -99,8 +59,8 @@ export function CommunicationOverlay() {
|
||||
|
||||
for (const a of activities) {
|
||||
if (a.activity_type === "a2a_send" || a.activity_type === "a2a_receive") {
|
||||
const sourceNode = nodesRef.current.find((n) => n.id === (a.source_id || a.workspace_id));
|
||||
const targetNode = nodesRef.current.find((n) => n.id === (a.target_id || ""));
|
||||
const sourceNode = nodes.find((n) => n.id === (a.source_id || a.workspace_id));
|
||||
const targetNode = nodes.find((n) => n.id === (a.target_id || ""));
|
||||
allComms.push({
|
||||
id: a.id,
|
||||
sourceId: a.source_id || a.workspace_id,
|
||||
@ -116,12 +76,11 @@ export function CommunicationOverlay() {
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Per-workspace failures must not blank the panel — the same
|
||||
// robustness the polling version had.
|
||||
// Skip workspaces that fail
|
||||
}
|
||||
}
|
||||
|
||||
// Newest-first with id-dedup, capped at COMMS_RENDER_CAP.
|
||||
// Sort by timestamp, newest first, dedupe
|
||||
const seen = new Set<string>();
|
||||
const sorted = allComms
|
||||
.sort((a, b) => b.timestamp.localeCompare(a.timestamp))
|
||||
@ -130,78 +89,29 @@ export function CommunicationOverlay() {
|
||||
seen.add(c.id);
|
||||
return true;
|
||||
})
|
||||
.slice(0, COMMS_RENDER_CAP);
|
||||
.slice(0, 20);
|
||||
|
||||
setComms(sorted);
|
||||
} catch {
|
||||
// Bootstrap failure is non-blocking — the WS subscription below
|
||||
// will populate the panel as live events arrive.
|
||||
// Silently handle API errors
|
||||
}
|
||||
}, []);
|
||||
|
||||
// Bootstrap once on mount + every time the user re-opens after a
|
||||
// collapse. Closed-panel state intentionally drops live updates so
|
||||
// the panel doesn't churn invisible state — the next open reloads.
|
||||
useEffect(() => {
|
||||
// Gate polling on visibility — when the user collapses the overlay
|
||||
// the data isn't being read, so the per-workspace fan-out becomes
|
||||
// pure rate-limit overhead. Pre-fix this overlay polled regardless
|
||||
// of whether the panel was shown, costing ~36 req/min from a
|
||||
// hidden surface.
|
||||
if (!visible) return;
|
||||
bootstrapComms();
|
||||
}, [bootstrapComms, visible]);
|
||||
|
||||
// Live-update path. Filters server-side ACTIVITY_LOGGED events down
|
||||
// to the comm-overlay-relevant subset and prepends each into the
|
||||
// rendered list with the same dedup the bootstrap path uses.
|
||||
//
|
||||
// Scope guard: ignore events for workspaces not in the visible online
|
||||
// set, so a user collapsing one workspace doesn't see its comms
|
||||
// continue to scroll in. Same shape the bootstrap path applies.
|
||||
useSocketEvent((msg) => {
|
||||
if (!visible) return;
|
||||
if (msg.event !== "ACTIVITY_LOGGED") return;
|
||||
|
||||
const p = (msg.payload || {}) as ActivityLoggedPayload;
|
||||
const type = p.activity_type;
|
||||
if (type !== "a2a_send" && type !== "a2a_receive" && type !== "task_update") return;
|
||||
|
||||
const wsId = msg.workspace_id;
|
||||
const onlineSet = new Set(
|
||||
nodesRef.current.filter((n) => n.data.status === "online").map((n) => n.id),
|
||||
);
|
||||
if (!onlineSet.has(wsId)) return;
|
||||
|
||||
const sourceId = p.source_id || wsId;
|
||||
const targetId = p.target_id || "";
|
||||
const sourceNode = nodesRef.current.find((n) => n.id === sourceId);
|
||||
const targetNode = nodesRef.current.find((n) => n.id === targetId);
|
||||
|
||||
const incoming: Communication = {
|
||||
id: p.id || `${msg.timestamp || Date.now()}:${sourceId}:${targetId}`,
|
||||
sourceId,
|
||||
targetId,
|
||||
sourceName: sourceNode?.data.name || "Unknown",
|
||||
targetName: targetNode?.data.name || "Unknown",
|
||||
type: type as Communication["type"],
|
||||
summary: p.summary || "",
|
||||
status: p.status || "ok",
|
||||
timestamp: p.created_at || msg.timestamp || new Date().toISOString(),
|
||||
durationMs: p.duration_ms ?? null,
|
||||
};
|
||||
|
||||
setComms((prev) => {
|
||||
// Prepend, dedup by id, re-cap. Functional setState is necessary
|
||||
// because two ACTIVITY_LOGGED events arriving in the same React
|
||||
// batch would otherwise read a stale `comms` from the closure.
|
||||
const seen = new Set<string>();
|
||||
const merged = [incoming, ...prev]
|
||||
.sort((a, b) => b.timestamp.localeCompare(a.timestamp))
|
||||
.filter((c) => {
|
||||
if (seen.has(c.id)) return false;
|
||||
seen.add(c.id);
|
||||
return true;
|
||||
})
|
||||
.slice(0, COMMS_RENDER_CAP);
|
||||
return merged;
|
||||
});
|
||||
});
|
||||
fetchComms();
|
||||
// 30s cadence (was 10s). At 3-workspace fan-out that's 6 req/min
|
||||
// worst case from this overlay. Combined with heartbeats (~30/min)
|
||||
// and other canvas polling, leaves ample headroom under the 600/
|
||||
// min/IP server-side rate limit even at 8+ workspace tenants.
|
||||
const interval = setInterval(fetchComms, 30000);
|
||||
return () => clearInterval(interval);
|
||||
}, [fetchComms, visible]);
|
||||
|
||||
if (!visible || comms.length === 0) {
|
||||
return (
|
||||
@ -226,7 +136,7 @@ export function CommunicationOverlay() {
|
||||
type="button"
|
||||
onClick={() => setVisible(false)}
|
||||
aria-label="Close communications panel"
|
||||
className="text-ink-mid hover:text-ink-mid text-xs"
|
||||
className="text-ink-soft hover:text-ink-mid text-xs"
|
||||
>
|
||||
<span aria-hidden="true">✕</span>
|
||||
</button>
|
||||
@ -268,7 +178,7 @@ export function CommunicationOverlay() {
|
||||
</div>
|
||||
</div>
|
||||
{c.summary && (
|
||||
<div className="text-ink-mid truncate mt-0.5 pl-4">{c.summary}</div>
|
||||
<div className="text-ink-soft truncate mt-0.5 pl-4">{c.summary}</div>
|
||||
)}
|
||||
{c.durationMs && (
|
||||
<div className="text-ink-mid pl-4">{c.durationMs}ms</div>
|
||||
|
||||
@ -103,7 +103,7 @@ export function ConsoleModal({ workspaceId, workspaceName, open, onClose }: Prop
|
||||
EC2 console output
|
||||
</h3>
|
||||
{workspaceName && (
|
||||
<div className="text-[11px] text-ink-mid mt-0.5 truncate max-w-[600px]">
|
||||
<div className="text-[11px] text-ink-soft mt-0.5 truncate max-w-[600px]">
|
||||
{workspaceName}
|
||||
</div>
|
||||
)}
|
||||
@ -124,7 +124,7 @@ export function ConsoleModal({ workspaceId, workspaceName, open, onClose }: Prop
|
||||
|
||||
<div className="flex-1 overflow-auto bg-black/80 p-4">
|
||||
{loading && (
|
||||
<div className="text-[12px] text-ink-mid" data-testid="console-loading">
|
||||
<div className="text-[12px] text-ink-soft" data-testid="console-loading">
|
||||
Loading console output…
|
||||
</div>
|
||||
)}
|
||||
|
||||
@ -311,7 +311,7 @@ export function ContextMenu() {
|
||||
aria-hidden="true"
|
||||
className={`w-1.5 h-1.5 rounded-full ${statusDotClass(contextMenu.nodeData.status)}`}
|
||||
/>
|
||||
<span className="text-[10px] text-ink-mid">{contextMenu.nodeData.status}</span>
|
||||
<span className="text-[10px] text-ink-soft">{contextMenu.nodeData.status}</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
@ -13,8 +13,7 @@ interface Props {
|
||||
onClose: () => void;
|
||||
}
|
||||
|
||||
/** Exported for unit testing — see ConversationTraceModal.test.ts */
|
||||
export function extractMessageText(body: Record<string, unknown> | null): string {
|
||||
function extractMessageText(body: Record<string, unknown> | null): string {
|
||||
if (!body) return "";
|
||||
try {
|
||||
// Simple task format from MCP server: {task: "..."}
|
||||
@ -107,7 +106,7 @@ export function ConversationTraceModal({ open, workspaceId: _workspaceId, onClos
|
||||
<Dialog.Title className="text-sm font-semibold text-ink">
|
||||
Conversation Trace
|
||||
</Dialog.Title>
|
||||
<p className="text-[10px] text-ink-mid mt-0.5">
|
||||
<p className="text-[10px] text-ink-soft mt-0.5">
|
||||
{entries.length} events across all workspaces
|
||||
</p>
|
||||
</div>
|
||||
@ -115,7 +114,7 @@ export function ConversationTraceModal({ open, workspaceId: _workspaceId, onClos
|
||||
<button
|
||||
type="button"
|
||||
aria-label="Close conversation trace"
|
||||
className="text-ink-mid hover:text-ink-mid text-lg px-2"
|
||||
className="text-ink-soft hover:text-ink-mid text-lg px-2"
|
||||
>
|
||||
✕
|
||||
</button>
|
||||
@ -125,13 +124,13 @@ export function ConversationTraceModal({ open, workspaceId: _workspaceId, onClos
|
||||
{/* Timeline */}
|
||||
<div className="flex-1 overflow-y-auto px-5 py-4">
|
||||
{loading && (
|
||||
<div className="text-xs text-ink-mid text-center py-8">
|
||||
<div className="text-xs text-ink-soft text-center py-8">
|
||||
Loading trace from all workspaces...
|
||||
</div>
|
||||
)}
|
||||
|
||||
{!loading && entries.length === 0 && (
|
||||
<div className="text-xs text-ink-mid text-center py-8">
|
||||
<div className="text-xs text-ink-soft text-center py-8">
|
||||
No activity found
|
||||
</div>
|
||||
)}
|
||||
@ -251,7 +250,7 @@ export function ConversationTraceModal({ open, workspaceId: _workspaceId, onClos
|
||||
{/* Message content — show request and/or response */}
|
||||
{requestText && (
|
||||
<div className="mt-1.5 bg-surface/60 border border-line/50 rounded-lg px-3 py-2 max-h-32 overflow-y-auto">
|
||||
<div className="text-[8px] text-ink-mid uppercase mb-1">
|
||||
<div className="text-[8px] text-ink-soft uppercase mb-1">
|
||||
{isSend ? "Task" : "Request"}
|
||||
</div>
|
||||
<div className="text-[10px] text-ink-mid whitespace-pre-wrap break-words leading-relaxed">
|
||||
|
||||
@ -338,7 +338,7 @@ export function CreateWorkspaceButton() {
|
||||
<Dialog.Title className="text-base font-semibold text-ink mb-1">
|
||||
Create Workspace
|
||||
</Dialog.Title>
|
||||
<p className="text-xs text-ink-mid mb-5">
|
||||
<p className="text-xs text-ink-soft mb-5">
|
||||
Add a new workspace node to the canvas
|
||||
</p>
|
||||
|
||||
@ -376,7 +376,7 @@ export function CreateWorkspaceButton() {
|
||||
/>
|
||||
<div className="text-xs">
|
||||
<div className="text-ink font-medium">External agent (bring your own compute)</div>
|
||||
<div className="text-ink-mid mt-0.5">
|
||||
<div className="text-ink-soft mt-0.5">
|
||||
Skip the container. We'll return a workspace_id + auth token + ready-to-paste snippet so an agent running on your laptop / server / CI can register via A2A.
|
||||
</div>
|
||||
</div>
|
||||
@ -456,7 +456,7 @@ export function CreateWorkspaceButton() {
|
||||
<p className="text-[11px] font-semibold text-violet-400 uppercase tracking-wide">
|
||||
Hermes Provider
|
||||
</p>
|
||||
<p className="text-[11px] text-ink-mid -mt-1">
|
||||
<p className="text-[11px] text-ink-soft -mt-1">
|
||||
Choose the AI provider and paste your API key. The key is
|
||||
stored as an encrypted workspace secret.
|
||||
</p>
|
||||
@ -534,7 +534,7 @@ export function CreateWorkspaceButton() {
|
||||
(m) => <option key={m} value={m} />,
|
||||
)}
|
||||
</datalist>
|
||||
<p className="text-[10px] text-ink-mid mt-1">
|
||||
<p className="text-[10px] text-ink-soft mt-1">
|
||||
Slug determines which provider hermes routes to at install time.
|
||||
</p>
|
||||
</div>
|
||||
@ -626,7 +626,7 @@ function InputField({
|
||||
className={`w-full bg-surface-card/60 border border-line/50 rounded-lg px-3 py-2 text-sm text-ink placeholder-ink-soft focus:outline-none focus:border-accent/60 focus:ring-1 focus:ring-accent/20 transition-colors ${mono ? "font-mono text-xs" : ""}`}
|
||||
/>
|
||||
{helper && (
|
||||
<p className="mt-1 text-xs text-ink-mid">{helper}</p>
|
||||
<p className="mt-1 text-xs text-ink-soft">{helper}</p>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
|
||||
@ -129,11 +129,11 @@ export function EmptyState() {
|
||||
T{t.tier}
|
||||
</span>
|
||||
</div>
|
||||
<p className="text-[11px] text-ink-mid line-clamp-2 leading-relaxed">
|
||||
<p className="text-[11px] text-ink-soft line-clamp-2 leading-relaxed">
|
||||
{t.description || "No description"}
|
||||
</p>
|
||||
{t.skill_count > 0 && (
|
||||
<p className="text-[9px] text-ink-mid mt-1.5">
|
||||
<p className="text-[9px] text-ink-soft mt-1.5">
|
||||
{t.skill_count} skill{t.skill_count !== 1 ? "s" : ""}
|
||||
{t.model ? ` · ${t.model}` : ""}
|
||||
</p>
|
||||
@ -174,10 +174,10 @@ export function EmptyState() {
|
||||
<div className="mt-5 pt-4 border-t border-line/50">
|
||||
<div className="flex items-center justify-center gap-6 text-[10px] text-ink-mid">
|
||||
<span>Drag to nest workspaces into teams</span>
|
||||
<span className="text-ink-mid">|</span>
|
||||
<span className="text-ink-soft">|</span>
|
||||
<span>Right-click for actions</span>
|
||||
<span className="text-ink-mid">|</span>
|
||||
<span>Press <kbd className="px-1 py-0.5 bg-surface-card rounded text-ink-mid font-mono">⌘K</kbd> to search</span>
|
||||
<span className="text-ink-soft">|</span>
|
||||
<span>Press <kbd className="px-1 py-0.5 bg-surface-card rounded text-ink-soft font-mono">⌘K</kbd> to search</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@ -201,7 +201,7 @@ export function ExternalConnectModal({ info, onClose }: Props) {
|
||||
className={`px-3 py-2 text-sm border-b-2 -mb-px transition-colors ${
|
||||
tab === t
|
||||
? "border-accent text-ink"
|
||||
: "border-transparent text-ink-mid hover:text-ink-mid"
|
||||
: "border-transparent text-ink-soft hover:text-ink-mid"
|
||||
}`}
|
||||
>
|
||||
{t === "claude"
|
||||
@ -335,7 +335,7 @@ function SnippetBlock({
|
||||
return (
|
||||
<div>
|
||||
<div className="flex items-center justify-between pb-1">
|
||||
<span className="text-xs text-ink-mid">{label}</span>
|
||||
<span className="text-xs text-ink-soft">{label}</span>
|
||||
<button
|
||||
type="button"
|
||||
onClick={onCopy}
|
||||
@ -366,7 +366,7 @@ function Field({
|
||||
}) {
|
||||
return (
|
||||
<div className="flex items-center gap-2">
|
||||
<span className="text-xs text-ink-mid w-36 shrink-0">{label}</span>
|
||||
<span className="text-xs text-ink-soft w-36 shrink-0">{label}</span>
|
||||
<code
|
||||
className={`flex-1 text-xs bg-surface border border-line rounded px-2 py-1 text-ink break-all ${mono ? "font-mono" : ""}`}
|
||||
>
|
||||
|
||||
@ -1,235 +0,0 @@
|
||||
"use client";
|
||||
|
||||
import { useEffect, useRef, useState } from "react";
|
||||
import { createPortal } from "react-dom";
|
||||
|
||||
interface ShortcutGroup {
|
||||
title: string;
|
||||
shortcuts: Array<{ keys: string[]; description: string }>;
|
||||
}
|
||||
|
||||
const SHORTCUT_GROUPS: ShortcutGroup[] = [
|
||||
{
|
||||
title: "Canvas",
|
||||
shortcuts: [
|
||||
{
|
||||
keys: ["Esc"],
|
||||
description: "Close context menu, clear selection, or deselect",
|
||||
},
|
||||
{
|
||||
keys: ["↑↓←→"],
|
||||
description: "Nudge selected node 10px; hold Shift for 50px",
|
||||
},
|
||||
{
|
||||
keys: ["Cmd", "↑↓←→"],
|
||||
description: "Resize selected node (↑↓ height, ←→ width); hold Shift for fine control (2px)",
|
||||
},
|
||||
{
|
||||
keys: ["Enter"],
|
||||
description: "Descend into selected node's first child",
|
||||
},
|
||||
{
|
||||
keys: ["Shift", "Enter"],
|
||||
description: "Ascend to selected node's parent",
|
||||
},
|
||||
{
|
||||
keys: ["Cmd", "]"],
|
||||
description: "Bring selected node forward in z-order",
|
||||
},
|
||||
{
|
||||
keys: ["Cmd", "["],
|
||||
description: "Send selected node backward in z-order",
|
||||
},
|
||||
{
|
||||
keys: ["Z"],
|
||||
description: "Zoom to fit the selected team and its sub-workspaces",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
title: "Navigation",
|
||||
shortcuts: [
|
||||
{
|
||||
keys: ["⌘K"],
|
||||
description: "Open workspace search",
|
||||
},
|
||||
{
|
||||
keys: ["Palette"],
|
||||
description: "Open the template palette to deploy a new workspace",
|
||||
},
|
||||
{
|
||||
keys: ["Dbl-click"],
|
||||
description: "Zoom canvas to fit a team node and all its sub-workspaces",
|
||||
},
|
||||
{
|
||||
keys: ["Right-click"],
|
||||
description: "Open the workspace context menu",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
title: "Agent",
|
||||
shortcuts: [
|
||||
{
|
||||
keys: ["Chat"],
|
||||
description: "Send a message or resume a running task",
|
||||
},
|
||||
{
|
||||
keys: ["Config"],
|
||||
description: "Edit skills, model, secrets, and runtime settings",
|
||||
},
|
||||
{
|
||||
keys: ["Audit"],
|
||||
description: "View the activity ledger for the selected workspace",
|
||||
},
|
||||
],
|
||||
},
|
||||
];
|
||||
|
||||
interface Props {
|
||||
open: boolean;
|
||||
onClose: () => void;
|
||||
}
|
||||
|
||||
export function KeyboardShortcutsDialog({ open, onClose }: Props) {
|
||||
const dialogRef = useRef<HTMLDivElement>(null);
|
||||
const [mounted, setMounted] = useState(false);
|
||||
|
||||
useEffect(() => {
|
||||
setMounted(true);
|
||||
}, []);
|
||||
|
||||
// Move focus into the dialog when it opens (WCAG 2.1 SC 2.4.3)
|
||||
useEffect(() => {
|
||||
if (!open || !mounted) return;
|
||||
const raf = requestAnimationFrame(() => {
|
||||
dialogRef.current?.querySelector<HTMLElement>("button")?.focus();
|
||||
});
|
||||
return () => cancelAnimationFrame(raf);
|
||||
}, [open, mounted]);
|
||||
|
||||
// Keyboard: Escape closes, Tab is trapped
|
||||
useEffect(() => {
|
||||
if (!open) return;
|
||||
const handler = (e: KeyboardEvent) => {
|
||||
if (e.key === "Escape") {
|
||||
onClose();
|
||||
return;
|
||||
}
|
||||
if (e.key === "Tab" && dialogRef.current) {
|
||||
const focusable = Array.from(
|
||||
dialogRef.current.querySelectorAll<HTMLElement>(
|
||||
'button, [href], input, select, textarea, [tabindex]:not([tabindex="-1"])'
|
||||
)
|
||||
).filter((el) => !el.hasAttribute("disabled"));
|
||||
if (focusable.length === 0) {
|
||||
e.preventDefault();
|
||||
return;
|
||||
}
|
||||
const first = focusable[0];
|
||||
const last = focusable[focusable.length - 1];
|
||||
if (e.shiftKey) {
|
||||
if (document.activeElement === first) {
|
||||
e.preventDefault();
|
||||
last.focus();
|
||||
}
|
||||
} else {
|
||||
if (document.activeElement === last) {
|
||||
e.preventDefault();
|
||||
first.focus();
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
window.addEventListener("keydown", handler);
|
||||
return () => window.removeEventListener("keydown", handler);
|
||||
}, [open, onClose]);
|
||||
|
||||
if (!open || !mounted) return null;
|
||||
|
||||
return createPortal(
|
||||
<div className="fixed inset-0 z-[9999] flex items-center justify-center">
|
||||
{/* Backdrop */}
|
||||
<div
|
||||
className="absolute inset-0 bg-black/60 backdrop-blur-sm"
|
||||
onClick={onClose}
|
||||
/>
|
||||
|
||||
{/* Dialog */}
|
||||
<div
|
||||
ref={dialogRef}
|
||||
role="dialog"
|
||||
aria-modal="true"
|
||||
aria-labelledby="keyboard-shortcuts-title"
|
||||
className="relative bg-surface border border-line rounded-xl shadow-2xl shadow-black/60 max-w-[480px] w-full mx-4 overflow-hidden max-h-[80vh] flex flex-col"
|
||||
>
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between px-5 py-4 border-b border-line shrink-0">
|
||||
<h2
|
||||
id="keyboard-shortcuts-title"
|
||||
className="text-sm font-semibold text-ink"
|
||||
>
|
||||
Keyboard Shortcuts
|
||||
</h2>
|
||||
<button
|
||||
type="button"
|
||||
onClick={onClose}
|
||||
aria-label="Close keyboard shortcuts"
|
||||
className="w-7 h-7 flex items-center justify-center rounded-lg text-ink-mid hover:text-ink hover:bg-surface-sunken transition-colors focus:outline-none focus-visible:ring-2 focus-visible:ring-accent/40"
|
||||
>
|
||||
×
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Content */}
|
||||
<div className="overflow-y-auto p-5 space-y-5">
|
||||
{SHORTCUT_GROUPS.map((group) => (
|
||||
<div key={group.title}>
|
||||
<h3 className="text-[10px] font-semibold uppercase tracking-[0.2em] text-ink-mid mb-2.5">
|
||||
{group.title}
|
||||
</h3>
|
||||
<div className="space-y-2">
|
||||
{group.shortcuts.map((shortcut, i) => (
|
||||
<div
|
||||
key={i}
|
||||
className="flex items-center justify-between gap-4"
|
||||
>
|
||||
<span className="text-[13px] text-ink-mid">
|
||||
{shortcut.description}
|
||||
</span>
|
||||
<kbd className="flex items-center gap-0.5 shrink-0">
|
||||
{shortcut.keys.map((k, j) => (
|
||||
<span key={j} className="flex items-center gap-0.5">
|
||||
{j > 0 && (
|
||||
<span className="text-[9px] text-ink-mid mx-0.5">
|
||||
+
|
||||
</span>
|
||||
)}
|
||||
<span className="inline-flex items-center rounded-md border border-line/70 bg-surface-sunken/70 px-2 py-0.5 text-[11px] font-medium text-ink tabular-nums font-mono">
|
||||
{k}
|
||||
</span>
|
||||
</span>
|
||||
))}
|
||||
</kbd>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
|
||||
{/* Footer */}
|
||||
<div className="px-5 py-3 border-t border-line bg-surface-sunken/30 shrink-0">
|
||||
<p className="text-[10px] text-ink-mid text-center">
|
||||
Press{" "}
|
||||
<kbd className="inline-flex items-center rounded border border-line/70 bg-surface-sunken/70 px-1.5 py-0.5 text-[10px] font-medium text-ink font-mono">
|
||||
Esc
|
||||
</kbd>{" "}
|
||||
to close
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>,
|
||||
document.body
|
||||
);
|
||||
}
|
||||
@ -97,7 +97,7 @@ export function Legend() {
|
||||
// 24×24 touch target (was ~10×16, well under WCAG 2.5.5 min).
|
||||
// Negative margin keeps the visual position the same as before
|
||||
// — only the hit area + focus ring are larger.
|
||||
className="-mt-1.5 -mr-1.5 w-6 h-6 inline-flex items-center justify-center rounded text-[14px] leading-none text-ink-mid hover:text-ink hover:bg-surface-card/40 focus:outline-none focus-visible:ring-2 focus-visible:ring-accent/60 transition-colors"
|
||||
className="-mt-1.5 -mr-1.5 w-6 h-6 inline-flex items-center justify-center rounded text-[14px] leading-none text-ink-soft hover:text-ink hover:bg-surface-card/40 focus:outline-none focus-visible:ring-2 focus-visible:ring-accent/60 transition-colors"
|
||||
>
|
||||
×
|
||||
</button>
|
||||
@ -105,7 +105,7 @@ export function Legend() {
|
||||
|
||||
{/* Status */}
|
||||
<div className="mb-2">
|
||||
<div className="text-[11px] text-ink-mid font-medium mb-1">Status</div>
|
||||
<div className="text-[11px] text-ink-soft font-medium mb-1">Status</div>
|
||||
<div className="flex flex-wrap gap-x-3 gap-y-1">
|
||||
{LEGEND_STATUSES.map((s) => (
|
||||
<StatusItem key={s} color={STATUS_CONFIG[s].dot} label={STATUS_CONFIG[s].label} />
|
||||
@ -115,7 +115,7 @@ export function Legend() {
|
||||
|
||||
{/* Tiers */}
|
||||
<div className="mb-2">
|
||||
<div className="text-[11px] text-ink-mid font-medium mb-1">Tier</div>
|
||||
<div className="text-[11px] text-ink-soft font-medium mb-1">Tier</div>
|
||||
<div className="flex flex-wrap gap-x-3 gap-y-1">
|
||||
{LEGEND_TIERS.map(({ tier, label }) => (
|
||||
<TierItem key={tier} tier={tier} label={label} color={TIER_CONFIG[tier].border} />
|
||||
@ -125,7 +125,7 @@ export function Legend() {
|
||||
|
||||
{/* Communication */}
|
||||
<div>
|
||||
<div className="text-[11px] text-ink-mid font-medium mb-1">Communication</div>
|
||||
<div className="text-[11px] text-ink-soft font-medium mb-1">Communication</div>
|
||||
<div className="flex flex-wrap gap-x-3 gap-y-1">
|
||||
<CommItem icon="↗" color="text-cyan-400" label="A2A Out" />
|
||||
<CommItem icon="↙" color="text-accent" label="A2A In" />
|
||||
|
||||
@ -288,7 +288,7 @@ export function MemoryInspectorPanel({ workspaceId }: Props) {
|
||||
if (loading && entries.length === 0 && !error && !pluginUnavailable) {
|
||||
return (
|
||||
<div className="flex items-center justify-center h-32">
|
||||
<span className="text-xs text-ink-mid">Loading memories…</span>
|
||||
<span className="text-xs text-ink-soft">Loading memories…</span>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@ -311,7 +311,7 @@ export function MemoryInspectorPanel({ workspaceId }: Props) {
|
||||
{/* Namespace dropdown */}
|
||||
<div className="px-4 pt-3 pb-2 border-b border-line/40 shrink-0 space-y-2">
|
||||
<div className="flex items-center gap-2">
|
||||
<label htmlFor="namespace-dropdown" className="text-[10px] text-ink-mid shrink-0">
|
||||
<label htmlFor="namespace-dropdown" className="text-[10px] text-ink-soft shrink-0">
|
||||
Namespace:
|
||||
</label>
|
||||
<select
|
||||
@ -337,7 +337,7 @@ export function MemoryInspectorPanel({ workspaceId }: Props) {
|
||||
height="12"
|
||||
viewBox="0 0 16 16"
|
||||
fill="none"
|
||||
className="absolute left-2.5 text-ink-mid pointer-events-none shrink-0"
|
||||
className="absolute left-2.5 text-ink-soft pointer-events-none shrink-0"
|
||||
aria-hidden="true"
|
||||
>
|
||||
<circle cx="7" cy="7" r="4.5" stroke="currentColor" strokeWidth="1.5" />
|
||||
@ -360,7 +360,7 @@ export function MemoryInspectorPanel({ workspaceId }: Props) {
|
||||
setDebouncedQuery('');
|
||||
}}
|
||||
aria-label="Clear search"
|
||||
className="absolute right-2 text-ink-mid hover:text-ink transition-colors text-sm leading-none"
|
||||
className="absolute right-2 text-ink-soft hover:text-ink transition-colors text-sm leading-none"
|
||||
>
|
||||
×
|
||||
</button>
|
||||
@ -370,7 +370,7 @@ export function MemoryInspectorPanel({ workspaceId }: Props) {
|
||||
|
||||
{/* Toolbar */}
|
||||
<div className="px-4 py-2.5 border-b border-line/40 flex items-center justify-between shrink-0">
|
||||
<span className="text-[11px] text-ink-mid">
|
||||
<span className="text-[11px] text-ink-soft">
|
||||
{debouncedQuery
|
||||
? `${entries.length} result${entries.length !== 1 ? 's' : ''}`
|
||||
: entries.length === 1
|
||||
@ -446,11 +446,11 @@ function EmptyState({
|
||||
// mirror it so the operator sees both signals.
|
||||
return (
|
||||
<div className="flex flex-col items-center justify-center py-16 gap-3 text-center">
|
||||
<span className="text-4xl text-ink-mid" aria-hidden="true">
|
||||
<span className="text-4xl text-ink-soft" aria-hidden="true">
|
||||
◇
|
||||
</span>
|
||||
<p className="text-sm font-medium text-ink-mid">Memory plugin disabled</p>
|
||||
<p className="text-[11px] text-ink-mid max-w-[220px] leading-relaxed">
|
||||
<p className="text-[11px] text-ink-soft max-w-[220px] leading-relaxed">
|
||||
See banner above for the operator-side fix.
|
||||
</p>
|
||||
</div>
|
||||
@ -459,11 +459,11 @@ function EmptyState({
|
||||
if (query) {
|
||||
return (
|
||||
<div className="flex flex-col items-center justify-center py-16 gap-3 text-center">
|
||||
<span className="text-4xl text-ink-mid" aria-hidden="true">
|
||||
<span className="text-4xl text-ink-soft" aria-hidden="true">
|
||||
◇
|
||||
</span>
|
||||
<p className="text-sm font-medium text-ink-mid">No memories match your search</p>
|
||||
<p className="text-[11px] text-ink-mid max-w-[200px] leading-relaxed">
|
||||
<p className="text-[11px] text-ink-soft max-w-[200px] leading-relaxed">
|
||||
Try a different query or clear the search.
|
||||
</p>
|
||||
</div>
|
||||
@ -471,11 +471,11 @@ function EmptyState({
|
||||
}
|
||||
return (
|
||||
<div className="flex flex-col items-center justify-center py-16 gap-3 text-center">
|
||||
<span className="text-4xl text-ink-mid" aria-hidden="true">
|
||||
<span className="text-4xl text-ink-soft" aria-hidden="true">
|
||||
◇
|
||||
</span>
|
||||
<p className="text-sm font-medium text-ink-mid">No memories yet</p>
|
||||
<p className="text-[11px] text-ink-mid max-w-[220px] leading-relaxed">
|
||||
<p className="text-[11px] text-ink-soft max-w-[220px] leading-relaxed">
|
||||
Agents commit memories via MCP tools (commit_memory, commit_summary). They
|
||||
appear here once written.
|
||||
</p>
|
||||
@ -558,7 +558,7 @@ function MemoryEntryRow({ entry, onDelete }: MemoryEntryRowProps) {
|
||||
|
||||
{/* Namespace tag */}
|
||||
<span
|
||||
className="text-[9px] shrink-0 font-mono text-ink-mid truncate max-w-[100px]"
|
||||
className="text-[9px] shrink-0 font-mono text-ink-soft truncate max-w-[100px]"
|
||||
title={entry.namespace}
|
||||
>
|
||||
{entry.namespace}
|
||||
@ -598,10 +598,10 @@ function MemoryEntryRow({ entry, onDelete }: MemoryEntryRowProps) {
|
||||
)}
|
||||
|
||||
|
||||
<span className="text-[9px] text-ink-mid shrink-0">
|
||||
<span className="text-[9px] text-ink-soft shrink-0">
|
||||
{formatRelativeTime(entry.created_at)}
|
||||
</span>
|
||||
<span className="text-[9px] text-ink-mid shrink-0" aria-hidden="true">
|
||||
<span className="text-[9px] text-ink-soft shrink-0" aria-hidden="true">
|
||||
{expanded ? '▼' : '▶'}
|
||||
</span>
|
||||
</button>
|
||||
@ -618,7 +618,7 @@ function MemoryEntryRow({ entry, onDelete }: MemoryEntryRowProps) {
|
||||
{entry.content}
|
||||
</pre>
|
||||
<div className="flex items-center justify-between gap-2">
|
||||
<span className="text-[9px] text-ink-mid">
|
||||
<span className="text-[9px] text-ink-soft">
|
||||
Created: {new Date(entry.created_at).toLocaleString()}
|
||||
{entry.expires_at && ` · Expires: ${new Date(entry.expires_at).toLocaleString()}`}
|
||||
</span>
|
||||
|
||||
@ -421,7 +421,7 @@ function ProviderPickerModal({
|
||||
<div className="text-[11px] text-ink-mid font-medium">
|
||||
{getKeyLabel(entry.key)}
|
||||
</div>
|
||||
<div className="text-[9px] font-mono text-ink-mid">{entry.key}</div>
|
||||
<div className="text-[9px] font-mono text-ink-soft">{entry.key}</div>
|
||||
</div>
|
||||
{entry.saved && (
|
||||
<span className="text-[9px] text-good bg-emerald-900/30 px-1.5 py-0.5 rounded flex items-center gap-1">
|
||||
@ -675,7 +675,7 @@ function AllKeysModal({
|
||||
<div className="text-[11px] text-ink-mid font-medium">
|
||||
{getKeyLabel(entry.key)}
|
||||
</div>
|
||||
<div className="text-[9px] font-mono text-ink-mid">{entry.key}</div>
|
||||
<div className="text-[9px] font-mono text-ink-soft">{entry.key}</div>
|
||||
</div>
|
||||
{entry.saved && (
|
||||
<span className="text-[9px] text-good bg-emerald-900/30 px-1.5 py-0.5 rounded flex items-center gap-1">
|
||||
|
||||
@ -247,7 +247,7 @@ export function OrgImportPreflightModal({
|
||||
<h2 id="org-preflight-title" className="text-sm font-semibold text-ink">
|
||||
Deploy {orgName}
|
||||
</h2>
|
||||
<p className="mt-0.5 text-[11px] text-ink-mid">
|
||||
<p className="mt-0.5 text-[11px] text-ink-soft">
|
||||
{workspaceCount} workspace{workspaceCount === 1 ? "" : "s"}.
|
||||
Review the credentials needed before import.
|
||||
</p>
|
||||
@ -400,7 +400,7 @@ function StrictEnvRow({
|
||||
<li className="flex items-center gap-2 rounded bg-surface-sunken/70 border border-line px-2 py-1.5">
|
||||
<code
|
||||
className={`text-[11px] font-mono flex-1 ${
|
||||
configured ? "text-ink-mid line-through" : "text-ink"
|
||||
configured ? "text-ink-soft line-through" : "text-ink"
|
||||
}`}
|
||||
>
|
||||
{envKey}
|
||||
@ -492,7 +492,7 @@ function AnyOfEnvGroup({
|
||||
>
|
||||
<code
|
||||
className={`text-[11px] font-mono flex-1 ${
|
||||
isConfigured ? "text-ink-mid line-through" : "text-ink"
|
||||
isConfigured ? "text-ink-soft line-through" : "text-ink"
|
||||
}`}
|
||||
>
|
||||
{m}
|
||||
|
||||
@ -356,7 +356,7 @@ export function ProviderModelSelector({
|
||||
<div>
|
||||
<label
|
||||
htmlFor={providerSelectId}
|
||||
className="text-[10px] uppercase tracking-wide text-ink-mid font-semibold mb-1.5 block"
|
||||
className="text-[10px] uppercase tracking-wide text-ink-soft font-semibold mb-1.5 block"
|
||||
>
|
||||
Provider <span aria-hidden="true" className="text-bad">*</span>
|
||||
<span className="sr-only"> (required)</span>
|
||||
@ -382,13 +382,13 @@ export function ProviderModelSelector({
|
||||
{selected?.tooltip && (
|
||||
<p
|
||||
id={`${providerSelectId}-help`}
|
||||
className="text-[9px] text-ink-mid mt-1 leading-relaxed"
|
||||
className="text-[9px] text-ink-soft mt-1 leading-relaxed"
|
||||
>
|
||||
{selected.tooltip}
|
||||
</p>
|
||||
)}
|
||||
{selected && selected.envVars.length > 0 && (
|
||||
<p className="text-[9px] text-ink-mid mt-0.5 font-mono">
|
||||
<p className="text-[9px] text-ink-soft mt-0.5 font-mono">
|
||||
requires: {selected.envVars.join(", ")}
|
||||
</p>
|
||||
)}
|
||||
@ -397,7 +397,7 @@ export function ProviderModelSelector({
|
||||
<div>
|
||||
<label
|
||||
htmlFor={modelSelectId}
|
||||
className="text-[10px] uppercase tracking-wide text-ink-mid font-semibold mb-1.5 block"
|
||||
className="text-[10px] uppercase tracking-wide text-ink-soft font-semibold mb-1.5 block"
|
||||
>
|
||||
Model <span aria-hidden="true" className="text-bad">*</span>
|
||||
<span className="sr-only"> (required)</span>
|
||||
@ -422,7 +422,7 @@ export function ProviderModelSelector({
|
||||
data-testid="model-input"
|
||||
className="w-full bg-surface-sunken border border-line rounded px-2 py-1.5 text-[11px] text-ink font-mono focus:outline-none focus:border-accent focus:ring-1 focus:ring-accent/20 transition-colors disabled:opacity-50"
|
||||
/>
|
||||
<p className="text-[9px] text-ink-mid mt-1 leading-relaxed">
|
||||
<p className="text-[9px] text-ink-soft mt-1 leading-relaxed">
|
||||
{selected?.wildcard
|
||||
? wildcardHelpText(selected)
|
||||
: "Free-text model id. Make sure the provider can resolve it."}
|
||||
|
||||
@ -157,7 +157,7 @@ export function PurchaseSuccessModal() {
|
||||
</div>
|
||||
|
||||
<div className="flex items-center justify-between gap-3 px-6 py-3 border-t border-line bg-surface/50">
|
||||
<span className="font-mono text-[10.5px] uppercase tracking-[0.12em] text-ink-mid">
|
||||
<span className="font-mono text-[10.5px] uppercase tracking-[0.12em] text-ink-soft">
|
||||
auto-dismiss · {AUTO_DISMISS_MS / 1000}s
|
||||
</span>
|
||||
<button
|
||||
|
||||
@ -104,7 +104,7 @@ export function SearchDialog() {
|
||||
>
|
||||
{/* Search input */}
|
||||
<div className="flex items-center gap-3 px-4 py-3 border-b border-line/40">
|
||||
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" className="shrink-0 text-ink-mid" aria-hidden="true">
|
||||
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" className="shrink-0 text-ink-soft" aria-hidden="true">
|
||||
<circle cx="7" cy="7" r="5.5" stroke="currentColor" strokeWidth="1.5" />
|
||||
<path d="M11 11l3.5 3.5" stroke="currentColor" strokeWidth="1.5" strokeLinecap="round" />
|
||||
</svg>
|
||||
@ -156,7 +156,7 @@ export function SearchDialog() {
|
||||
<div className="min-w-0 flex-1">
|
||||
<div className="text-sm text-ink truncate">{node.data.name}</div>
|
||||
{node.data.role && (
|
||||
<div className="text-[10px] text-ink-mid truncate">{node.data.role}</div>
|
||||
<div className="text-[10px] text-ink-soft truncate">{node.data.role}</div>
|
||||
)}
|
||||
</div>
|
||||
<span
|
||||
|
||||
@ -165,12 +165,12 @@ export function SidePanel() {
|
||||
</h2>
|
||||
<div className="flex items-center gap-2 mt-0.5">
|
||||
{node.data.role && (
|
||||
<span className="text-[10px] text-ink-mid truncate">
|
||||
<span className="text-[10px] text-ink-soft truncate">
|
||||
{node.data.role}
|
||||
</span>
|
||||
)}
|
||||
<span className={`text-[9px] px-1.5 py-0.5 rounded-md font-mono ${
|
||||
isOnline ? "text-good bg-emerald-950/30" : "text-ink-mid bg-surface-card/50"
|
||||
isOnline ? "text-good bg-emerald-950/30" : "text-ink-soft bg-surface-card/50"
|
||||
}`}>
|
||||
T{node.data.tier}
|
||||
</span>
|
||||
@ -181,7 +181,7 @@ export function SidePanel() {
|
||||
type="button"
|
||||
onClick={() => selectNode(null)}
|
||||
aria-label="Close workspace panel"
|
||||
className="w-7 h-7 flex items-center justify-center rounded-lg text-ink-mid hover:text-ink hover:bg-surface-card/60 transition-colors"
|
||||
className="w-7 h-7 flex items-center justify-center rounded-lg text-ink-soft hover:text-ink hover:bg-surface-card/60 transition-colors"
|
||||
>
|
||||
<svg width="12" height="12" viewBox="0 0 12 12" fill="none" aria-hidden="true">
|
||||
<path d="M1 1l10 10M11 1L1 11" stroke="currentColor" strokeWidth="1.5" strokeLinecap="round" />
|
||||
@ -296,7 +296,7 @@ export function SidePanel() {
|
||||
|
||||
{/* Footer — workspace ID */}
|
||||
<div className="px-5 py-2 border-t border-line/40 bg-surface-sunken/20">
|
||||
<span className="text-[9px] font-mono text-ink-mid select-all">
|
||||
<span className="text-[9px] font-mono text-ink-soft select-all">
|
||||
{selectedNodeId}
|
||||
</span>
|
||||
</div>
|
||||
|
||||
@ -236,7 +236,7 @@ export function OrgTemplatesSection() {
|
||||
onClick={() => setExpanded((v) => !v)}
|
||||
aria-expanded={expanded}
|
||||
aria-controls="org-templates-body"
|
||||
className="flex items-center gap-1.5 text-[10px] uppercase tracking-wide text-ink-mid hover:text-ink-mid font-semibold transition-colors"
|
||||
className="flex items-center gap-1.5 text-[10px] uppercase tracking-wide text-ink-soft hover:text-ink-mid font-semibold transition-colors"
|
||||
>
|
||||
<span
|
||||
aria-hidden="true"
|
||||
@ -246,7 +246,7 @@ export function OrgTemplatesSection() {
|
||||
</span>
|
||||
Org Templates
|
||||
{orgs.length > 0 && (
|
||||
<span className="text-ink-mid normal-case tracking-normal">
|
||||
<span className="text-ink-soft normal-case tracking-normal">
|
||||
({orgs.length})
|
||||
</span>
|
||||
)}
|
||||
@ -255,7 +255,7 @@ export function OrgTemplatesSection() {
|
||||
type="button"
|
||||
onClick={loadOrgs}
|
||||
aria-label="Refresh org templates"
|
||||
className="text-[10px] text-ink-mid hover:text-ink-mid"
|
||||
className="text-[10px] text-ink-soft hover:text-ink-mid"
|
||||
>
|
||||
↻
|
||||
</button>
|
||||
@ -264,14 +264,14 @@ export function OrgTemplatesSection() {
|
||||
{expanded && (
|
||||
<div id="org-templates-body" className="space-y-2">
|
||||
{loading && (
|
||||
<div role="status" aria-live="polite" className="flex items-center gap-1.5 text-[10px] text-ink-mid">
|
||||
<div role="status" aria-live="polite" className="flex items-center gap-1.5 text-[10px] text-ink-soft">
|
||||
<Spinner size="sm" />
|
||||
Loading…
|
||||
</div>
|
||||
)}
|
||||
|
||||
{!loading && orgs.length === 0 && (
|
||||
<div className="text-[10px] text-ink-mid">
|
||||
<div className="text-[10px] text-ink-soft">
|
||||
No org templates in <code>org-templates/</code>
|
||||
</div>
|
||||
)}
|
||||
@ -298,7 +298,7 @@ export function OrgTemplatesSection() {
|
||||
</span>
|
||||
</div>
|
||||
{o.description && (
|
||||
<p className="text-[10px] text-ink-mid mb-2.5 line-clamp-2 leading-relaxed">
|
||||
<p className="text-[10px] text-ink-soft mb-2.5 line-clamp-2 leading-relaxed">
|
||||
{o.description}
|
||||
</p>
|
||||
)}
|
||||
@ -499,7 +499,7 @@ export function TemplatePalette() {
|
||||
<div className="fixed top-0 left-0 h-full w-[280px] bg-surface-sunken/95 backdrop-blur-md border-r border-line/60 z-30 flex flex-col shadow-2xl shadow-black/40">
|
||||
<div className="px-4 pt-14 pb-3 border-b border-line/60">
|
||||
<h2 className="text-sm font-semibold text-ink">Templates</h2>
|
||||
<p className="text-[10px] text-ink-mid mt-0.5">Click to deploy a workspace</p>
|
||||
<p className="text-[10px] text-ink-soft mt-0.5">Click to deploy a workspace</p>
|
||||
</div>
|
||||
|
||||
<div className="flex-1 overflow-y-auto p-3 space-y-2">
|
||||
@ -509,14 +509,14 @@ export function TemplatePalette() {
|
||||
<OrgTemplatesSection />
|
||||
|
||||
{loading && (
|
||||
<div role="status" aria-live="polite" className="flex items-center justify-center gap-2 text-xs text-ink-mid text-center py-8">
|
||||
<div role="status" aria-live="polite" className="flex items-center justify-center gap-2 text-xs text-ink-soft text-center py-8">
|
||||
<Spinner />
|
||||
Loading…
|
||||
</div>
|
||||
)}
|
||||
|
||||
{!loading && templates.length === 0 && (
|
||||
<div role="status" aria-live="polite" className="text-xs text-ink-mid text-center py-8">
|
||||
<div role="status" aria-live="polite" className="text-xs text-ink-soft text-center py-8">
|
||||
No templates found in<br />workspace-configs-templates/
|
||||
</div>
|
||||
)}
|
||||
@ -549,7 +549,7 @@ export function TemplatePalette() {
|
||||
</div>
|
||||
|
||||
{t.description && (
|
||||
<p className="text-[10px] text-ink-mid mb-2 line-clamp-2 leading-relaxed">
|
||||
<p className="text-[10px] text-ink-soft mb-2 line-clamp-2 leading-relaxed">
|
||||
{t.description}
|
||||
</p>
|
||||
)}
|
||||
@ -562,7 +562,7 @@ export function TemplatePalette() {
|
||||
</span>
|
||||
))}
|
||||
{t.skills.length > 3 && (
|
||||
<span className="text-[8px] text-ink-mid">+{t.skills.length - 3}</span>
|
||||
<span className="text-[8px] text-ink-soft">+{t.skills.length - 3}</span>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
@ -580,7 +580,7 @@ export function TemplatePalette() {
|
||||
<button
|
||||
type="button"
|
||||
onClick={loadTemplates}
|
||||
className="text-[10px] text-ink-mid hover:text-ink-mid transition-colors block"
|
||||
className="text-[10px] text-ink-soft hover:text-ink-mid transition-colors block"
|
||||
>
|
||||
Refresh templates
|
||||
</button>
|
||||
|
||||
@ -124,7 +124,7 @@ export function TermsGate({ children }: { children: React.ReactNode }) {
|
||||
</a>
|
||||
. Click agree to continue.
|
||||
</p>
|
||||
<p className="mt-3 text-xs text-ink-mid">
|
||||
<p className="mt-3 text-xs text-ink-soft">
|
||||
By agreeing you acknowledge that workspace data is stored in AWS us-east-2 (Ohio, United States).
|
||||
</p>
|
||||
</div>
|
||||
|
||||
@ -57,7 +57,7 @@ export function ThemeToggle({ className = "" }: { className?: string }) {
|
||||
"flex h-6 w-6 items-center justify-center rounded transition-colors " +
|
||||
(active
|
||||
? "bg-surface-elevated text-ink shadow-sm"
|
||||
: "text-ink-mid hover:text-ink-mid")
|
||||
: "text-ink-soft hover:text-ink-mid")
|
||||
}
|
||||
>
|
||||
<svg
|
||||
|
||||
@ -9,7 +9,6 @@ import { ConfirmDialog } from "@/components/ConfirmDialog";
|
||||
import { showToast } from "@/components/Toaster";
|
||||
import { ThemeToggle } from "@/components/ThemeToggle";
|
||||
import { statusDotClass } from "@/lib/design-tokens";
|
||||
import { KeyboardShortcutsDialog } from "@/components/KeyboardShortcutsDialog";
|
||||
|
||||
export function Toolbar() {
|
||||
const nodes = useCanvasStore((s) => s.nodes);
|
||||
@ -34,7 +33,6 @@ export function Toolbar() {
|
||||
const [restartingAll, setRestartingAll] = useState(false);
|
||||
const [restartConfirmOpen, setRestartConfirmOpen] = useState(false);
|
||||
const [helpOpen, setHelpOpen] = useState(false);
|
||||
const [shortcutsOpen, setShortcutsOpen] = useState(false);
|
||||
const helpRef = useRef<HTMLDivElement>(null);
|
||||
|
||||
// Suppress toast on the very first connect at page load; only fire on reconnects.
|
||||
@ -129,29 +127,6 @@ export function Toolbar() {
|
||||
};
|
||||
}, []);
|
||||
|
||||
// Global ? shortcut opens the shortcuts dialog (mirrors the help button).
|
||||
// Skip when the user is typing in an input so ? in a text field doesn't
|
||||
// steal focus. Also skip when a modal/dialog is already open.
|
||||
useEffect(() => {
|
||||
const handler = (e: KeyboardEvent) => {
|
||||
if (e.key !== "?") return;
|
||||
const tag = (e.target as HTMLElement).tagName;
|
||||
const inInput =
|
||||
tag === "INPUT" ||
|
||||
tag === "TEXTAREA" ||
|
||||
tag === "SELECT" ||
|
||||
(e.target as HTMLElement).isContentEditable;
|
||||
if (inInput) return;
|
||||
// Don't fire when a modal/dialog is already mounted (canvas modals,
|
||||
// side panel, etc. use z-50 or above).
|
||||
if (document.querySelector('[role="dialog"][aria-modal="true"]')) return;
|
||||
e.preventDefault();
|
||||
setShortcutsOpen(true);
|
||||
};
|
||||
window.addEventListener("keydown", handler);
|
||||
return () => window.removeEventListener("keydown", handler);
|
||||
}, []);
|
||||
|
||||
return (
|
||||
<div
|
||||
className="fixed top-3 left-1/2 -translate-x-1/2 z-20 flex items-center gap-3 bg-surface-sunken/80 backdrop-blur-md border border-line/60 rounded-xl px-4 py-2 shadow-xl shadow-black/20 transition-[margin-left] duration-200"
|
||||
@ -317,7 +292,7 @@ export function Toolbar() {
|
||||
onClick={() => setHelpOpen((open) => !open)}
|
||||
className="flex items-center justify-center w-7 h-7 bg-surface-card hover:bg-surface-card/70 border border-line rounded-lg transition-colors text-ink-mid hover:text-ink focus:outline-none focus-visible:ring-2 focus-visible:ring-accent/40"
|
||||
aria-expanded={helpOpen}
|
||||
aria-label="Open shortcuts and tips"
|
||||
aria-label="Open quick help"
|
||||
title="Help — shortcuts & quick start"
|
||||
>
|
||||
<svg width="14" height="14" viewBox="0 0 16 16" fill="none" aria-hidden="true">
|
||||
@ -327,44 +302,25 @@ export function Toolbar() {
|
||||
</button>
|
||||
|
||||
{helpOpen && (
|
||||
<div
|
||||
role="dialog"
|
||||
aria-label="Shortcuts and tips"
|
||||
aria-modal="false"
|
||||
className="absolute right-0 top-full mt-2 w-80 rounded-xl border border-line/60 bg-surface/95 p-3 shadow-2xl shadow-black/50 backdrop-blur-md z-50"
|
||||
>
|
||||
<div className="mb-3 flex items-center justify-between">
|
||||
<span className="text-[10px] font-semibold uppercase tracking-[0.24em] text-ink-mid">Shortcuts & tips</span>
|
||||
<div className="absolute right-0 top-full mt-2 w-72 rounded-xl border border-line/60 bg-surface/95 p-3 shadow-2xl shadow-black/50 backdrop-blur-md">
|
||||
<div className="mb-2 flex items-center justify-between">
|
||||
<span className="text-[10px] font-semibold uppercase tracking-[0.24em] text-ink-mid">Quick start</span>
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => setHelpOpen(false)}
|
||||
aria-label="Close help dialog"
|
||||
className="text-[10px] text-ink-mid hover:text-ink transition-colors focus:outline-none focus-visible:underline"
|
||||
>
|
||||
Close
|
||||
</button>
|
||||
</div>
|
||||
<div className="space-y-1.5">
|
||||
<div className="space-y-2">
|
||||
<HelpRow shortcut="⌘K" text="Search workspaces and jump straight into Details or Chat." />
|
||||
<HelpRow shortcut="Esc" text="Clear selection, close menus, dismiss dialogs." />
|
||||
<HelpRow shortcut="Enter" text="Zoom into selected team and select its first child node." />
|
||||
<HelpRow shortcut="Shift+Enter" text="Select the parent of the selected node." />
|
||||
<HelpRow shortcut="⌘]" text="Bring selected node forward in the z-order." />
|
||||
<HelpRow shortcut="⌘[" text="Send selected node backward in the z-order." />
|
||||
<HelpRow shortcut="Z" text="Zoom canvas to fit a team node and all its sub-workspaces." />
|
||||
<HelpRow shortcut="Palette" text="Open the template palette to deploy a new workspace." />
|
||||
<HelpRow shortcut="Right-click" text="Use node actions for duplicate, export, restart, or delete." />
|
||||
<HelpRow shortcut="Dbl-click" text="On a team node: expand and zoom to show all sub-workspaces." />
|
||||
<HelpRow shortcut="Shift+click" text="Multi-select: add or remove a node from the batch selection." />
|
||||
<HelpRow shortcut="Chat" text="If a task is still running, the chat tab resumes that session automatically." />
|
||||
<HelpRow shortcut="Config" text="Use the Config tab for skills, model, secrets, and runtime settings." />
|
||||
<HelpRow shortcut="Dbl-click / Z" text="Zoom canvas to fit a team node and all its sub-workspaces." />
|
||||
</div>
|
||||
{/* Link to the full keyboard shortcuts dialog */}
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => { setHelpOpen(false); setShortcutsOpen(true); }}
|
||||
className="mt-3 w-full text-center text-[10px] text-ink-mid hover:text-accent transition-colors focus:outline-none focus-visible:underline"
|
||||
>
|
||||
See all shortcuts →
|
||||
</button>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
@ -384,11 +340,6 @@ export function Toolbar() {
|
||||
onConfirm={restartAll}
|
||||
onCancel={() => setRestartConfirmOpen(false)}
|
||||
/>
|
||||
|
||||
<KeyboardShortcutsDialog
|
||||
open={shortcutsOpen}
|
||||
onClose={() => setShortcutsOpen(false)}
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
"use client";
|
||||
|
||||
import { useCallback, useMemo, type KeyboardEvent } from "react";
|
||||
import { useCallback, useMemo } from "react";
|
||||
import { Handle, NodeResizer, Position, type NodeProps, type Node } from "@xyflow/react";
|
||||
import { useCanvasStore, type WorkspaceNodeData } from "@/store/canvas";
|
||||
import { getConfigurationError, getConfigurationStatus } from "@/store/canvas-topology";
|
||||
@ -191,23 +191,7 @@ export function WorkspaceNode({ id, data }: NodeProps<Node<WorkspaceNodeData>>)
|
||||
<Handle
|
||||
type="target"
|
||||
position={Position.Top}
|
||||
tabIndex={0}
|
||||
role="button"
|
||||
aria-label={`Extract ${data.name} from its parent (Enter or Space)`}
|
||||
onKeyDown={(e: KeyboardEvent<HTMLDivElement>) => {
|
||||
if (e.key === "Enter" || e.key === " ") {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
// Keyboard accessibility for edge anchors: pressing Enter/Space on
|
||||
// the top handle extracts this node from its current parent,
|
||||
// moving it to the root level. Mirrors the Figma/Excalidraw
|
||||
// pattern of using the connector dot as a keyboard affordance.
|
||||
if (data.parentId) {
|
||||
void nestNode(id, null);
|
||||
}
|
||||
}
|
||||
}}
|
||||
className="!w-2.5 !h-1 !rounded-full !bg-surface-card/80 !border-0 !-top-0.5 hover:!bg-blue-400 hover:!h-1.5 focus-visible:!bg-blue-400 focus-visible:!h-1.5 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-blue-400/60 focus-visible:ring-offset-1 focus-visible:ring-offset-zinc-950 transition-all"
|
||||
className="!w-2.5 !h-1 !rounded-full !bg-surface-card/80 !border-0 !-top-0.5 hover:!bg-blue-400 hover:!h-1.5 transition-all"
|
||||
/>
|
||||
|
||||
<div className="relative px-3.5 py-2.5">
|
||||
@ -374,23 +358,7 @@ export function WorkspaceNode({ id, data }: NodeProps<Node<WorkspaceNodeData>>)
|
||||
<Handle
|
||||
type="source"
|
||||
position={Position.Bottom}
|
||||
tabIndex={0}
|
||||
role="button"
|
||||
aria-label={`Nest selected workspace inside ${data.name} (Enter or Space)`}
|
||||
onKeyDown={(e: KeyboardEvent<HTMLDivElement>) => {
|
||||
if (e.key === "Enter" || e.key === " ") {
|
||||
e.preventDefault();
|
||||
e.stopPropagation();
|
||||
// Keyboard accessibility for edge anchors: pressing Enter/Space on
|
||||
// the bottom handle nests the currently-selected node as a child
|
||||
// of this node. Requires another node to be selected first.
|
||||
const selected = selectedNodeId;
|
||||
if (selected && selected !== id) {
|
||||
void nestNode(selected, id);
|
||||
}
|
||||
}
|
||||
}}
|
||||
className="!w-2.5 !h-1 !rounded-full !bg-surface-card/80 !border-0 !-bottom-0.5 hover:!bg-blue-400 hover:!h-1.5 focus-visible:!bg-blue-400 focus-visible:!h-1.5 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-blue-400/60 focus-visible:ring-offset-1 focus-visible:ring-offset-zinc-950 transition-all"
|
||||
className="!w-2.5 !h-1 !rounded-full !bg-surface-card/80 !border-0 !-bottom-0.5 hover:!bg-blue-400 hover:!h-1.5 transition-all"
|
||||
/>
|
||||
</div>
|
||||
</>
|
||||
|
||||
@ -55,7 +55,7 @@ export function WorkspaceUsage({ workspaceId }: WorkspaceUsageProps) {
|
||||
</h4>
|
||||
{!loading && metrics && (
|
||||
<span
|
||||
className="text-[10px] text-ink-mid font-mono"
|
||||
className="text-[10px] text-ink-soft font-mono"
|
||||
data-testid="usage-period"
|
||||
>
|
||||
{formatPeriod(metrics.period_start, metrics.period_end)}
|
||||
@ -131,7 +131,7 @@ function StatRow({
|
||||
}) {
|
||||
return (
|
||||
<div className="flex justify-between items-center" data-testid={testId}>
|
||||
<span className="text-xs text-ink-mid">{label}</span>
|
||||
<span className="text-xs text-ink-soft">{label}</span>
|
||||
<span className="text-xs text-ink-mid font-mono">{value}</span>
|
||||
</div>
|
||||
);
|
||||
|
||||
@ -41,10 +41,6 @@ vi.mock("@/store/canvas", () => ({
|
||||
// ── Imports (after mocks) ─────────────────────────────────────────────────────
|
||||
|
||||
import { api } from "@/lib/api";
|
||||
import {
|
||||
emitSocketEvent,
|
||||
_resetSocketEventListenersForTests,
|
||||
} from "@/store/socket-events";
|
||||
import {
|
||||
buildA2AEdges,
|
||||
formatA2ARelativeTime,
|
||||
@ -346,151 +342,6 @@ describe("A2ATopologyOverlay component", () => {
|
||||
expect(mockGet.mock.calls.length).toBe(callsAfterMount);
|
||||
});
|
||||
|
||||
// ── #61 Stage 2: ACTIVITY_LOGGED subscription tests ────────────────────────
|
||||
//
|
||||
// Pin the post-#61 behaviour: WS push for delegation contributes to
|
||||
// the overlay's edge buffer with NO additional HTTP fetch. Same shape
|
||||
// as Stage 1 (CommunicationOverlay).
|
||||
|
||||
describe("#61 stage 2 — ACTIVITY_LOGGED subscription", () => {
|
||||
beforeEach(() => {
|
||||
_resetSocketEventListenersForTests();
|
||||
});
|
||||
afterEach(() => {
|
||||
_resetSocketEventListenersForTests();
|
||||
});
|
||||
|
||||
function emitDelegation(overrides: {
|
||||
workspaceId?: string;
|
||||
sourceId?: string;
|
||||
targetId?: string;
|
||||
method?: string;
|
||||
activityType?: string;
|
||||
} = {}) {
|
||||
// Use Date.now() (real time, fake-timer-frozen) rather than the
|
||||
// hardcoded NOW constant — buildA2AEdges prunes by Date.now() -
|
||||
// A2A_WINDOW_MS, so a row dated against the wrong epoch silently
|
||||
// falls outside the window and the test fails for a confusing
|
||||
// reason ("edges array empty" vs "filter dropped my row").
|
||||
const realNow = Date.now();
|
||||
emitSocketEvent({
|
||||
event: "ACTIVITY_LOGGED",
|
||||
workspace_id: overrides.workspaceId ?? "ws-a",
|
||||
timestamp: new Date(realNow).toISOString(),
|
||||
payload: {
|
||||
id: `act-${Math.random().toString(36).slice(2)}`,
|
||||
activity_type: overrides.activityType ?? "delegation",
|
||||
method: overrides.method ?? "delegate",
|
||||
source_id: overrides.sourceId ?? "ws-a",
|
||||
target_id: overrides.targetId ?? "ws-b",
|
||||
status: "ok",
|
||||
created_at: new Date(realNow - 30_000).toISOString(),
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
it("does NOT poll on a 60s interval after bootstrap (post-#61)", async () => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
mockGet.mockResolvedValue([] as any);
|
||||
render(<A2ATopologyOverlay />);
|
||||
await act(async () => { await Promise.resolve(); });
|
||||
const callsAfterBootstrap = mockGet.mock.calls.length;
|
||||
expect(callsAfterBootstrap).toBe(2); // ws-a + ws-b
|
||||
|
||||
// Pre-#61: a 60s clock tick would fire a fresh fan-out (2 more
|
||||
// calls). Post-#61: no interval, no extra calls.
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(120_000);
|
||||
});
|
||||
expect(mockGet.mock.calls.length).toBe(callsAfterBootstrap);
|
||||
});
|
||||
|
||||
it("WS push for a delegation event from a visible workspace updates edges with NO HTTP call", async () => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
mockGet.mockResolvedValue([] as any);
|
||||
render(<A2ATopologyOverlay />);
|
||||
await act(async () => { await Promise.resolve(); await Promise.resolve(); });
|
||||
mockGet.mockClear();
|
||||
mockStoreState.setA2AEdges.mockClear();
|
||||
|
||||
await act(async () => {
|
||||
emitDelegation({ sourceId: "ws-a", targetId: "ws-b" });
|
||||
});
|
||||
|
||||
// Edges-set called with at least one a2a edge for the new push.
|
||||
const calls = mockStoreState.setA2AEdges.mock.calls;
|
||||
expect(calls.length).toBeGreaterThanOrEqual(1);
|
||||
const lastCall = calls[calls.length - 1][0] as Array<{ id: string }>;
|
||||
expect(lastCall.some((e) => e.id === "a2a-ws-a-ws-b")).toBe(true);
|
||||
|
||||
// Critical: no HTTP fetch fired during the WS path.
|
||||
expect(mockGet).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("WS push for a non-delegation activity_type is ignored", async () => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
mockGet.mockResolvedValue([] as any);
|
||||
render(<A2ATopologyOverlay />);
|
||||
await act(async () => { await Promise.resolve(); });
|
||||
mockStoreState.setA2AEdges.mockClear();
|
||||
|
||||
await act(async () => {
|
||||
emitDelegation({ activityType: "a2a_send" });
|
||||
});
|
||||
|
||||
// setA2AEdges must not be called by the WS handler — the only
|
||||
// setA2AEdges calls in this test came from the initial bootstrap.
|
||||
expect(mockStoreState.setA2AEdges).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("WS push for a delegate_result row is ignored (mirrors buildA2AEdges filter)", async () => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
mockGet.mockResolvedValue([] as any);
|
||||
render(<A2ATopologyOverlay />);
|
||||
await act(async () => { await Promise.resolve(); });
|
||||
mockStoreState.setA2AEdges.mockClear();
|
||||
|
||||
await act(async () => {
|
||||
emitDelegation({ method: "delegate_result" });
|
||||
});
|
||||
|
||||
// delegate_result rows do not contribute to the edge count — they
|
||||
// are completion signals, not initiations.
|
||||
expect(mockStoreState.setA2AEdges).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("WS push from a hidden workspace is ignored", async () => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
mockGet.mockResolvedValue([] as any);
|
||||
render(<A2ATopologyOverlay />);
|
||||
await act(async () => { await Promise.resolve(); });
|
||||
mockStoreState.setA2AEdges.mockClear();
|
||||
|
||||
await act(async () => {
|
||||
emitDelegation({ workspaceId: "ws-hidden" });
|
||||
});
|
||||
|
||||
expect(mockStoreState.setA2AEdges).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("WS push while showA2AEdges is false is ignored", async () => {
|
||||
mockStoreState.showA2AEdges = false;
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
mockGet.mockResolvedValue([] as any);
|
||||
render(<A2ATopologyOverlay />);
|
||||
// The mount path with showA2AEdges=false calls setA2AEdges([])
|
||||
// once — clear that to isolate the WS path.
|
||||
mockStoreState.setA2AEdges.mockClear();
|
||||
|
||||
await act(async () => {
|
||||
emitDelegation();
|
||||
});
|
||||
|
||||
expect(mockStoreState.setA2AEdges).not.toHaveBeenCalled();
|
||||
expect(mockGet).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
it("re-fetches when the visible ID set actually changes", async () => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
mockGet.mockResolvedValue([] as any);
|
||||
|
||||
@ -36,10 +36,6 @@ vi.mock("@/hooks/useWorkspaceName", () => ({
|
||||
useWorkspaceName: () => () => "Test WS",
|
||||
}));
|
||||
|
||||
import {
|
||||
emitSocketEvent,
|
||||
_resetSocketEventListenersForTests,
|
||||
} from "@/store/socket-events";
|
||||
import { ActivityTab } from "../tabs/ActivityTab";
|
||||
|
||||
// ── Fixtures ──────────────────────────────────────────────────────────────────
|
||||
@ -362,191 +358,6 @@ describe("ActivityTab — refresh button", () => {
|
||||
});
|
||||
});
|
||||
|
||||
// ── Suite 6.5: ACTIVITY_LOGGED subscription (#61 stage 3) ─────────────────────
|
||||
//
|
||||
// Pin the post-#61 behaviour: WS push extends the rendered list with NO
|
||||
// additional HTTP fetch. The 5s polling loop is gone; live updates
|
||||
// arrive over the WebSocket bus.
|
||||
|
||||
describe("ActivityTab — #61 stage 3: ACTIVITY_LOGGED subscription", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
mockGet.mockResolvedValue([]);
|
||||
_resetSocketEventListenersForTests();
|
||||
});
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
_resetSocketEventListenersForTests();
|
||||
});
|
||||
|
||||
function emitActivity(overrides: {
|
||||
workspaceId?: string;
|
||||
activityType?: string;
|
||||
summary?: string;
|
||||
id?: string;
|
||||
} = {}) {
|
||||
const realNow = Date.now();
|
||||
emitSocketEvent({
|
||||
event: "ACTIVITY_LOGGED",
|
||||
workspace_id: overrides.workspaceId ?? "ws-1",
|
||||
timestamp: new Date(realNow).toISOString(),
|
||||
payload: {
|
||||
id: overrides.id ?? `act-${Math.random().toString(36).slice(2)}`,
|
||||
activity_type: overrides.activityType ?? "agent_log",
|
||||
source_id: null,
|
||||
target_id: null,
|
||||
method: null,
|
||||
summary: overrides.summary ?? "live-pushed",
|
||||
status: "ok",
|
||||
created_at: new Date(realNow - 5_000).toISOString(),
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
it("WS push for matching workspace prepends to the list with NO HTTP call", async () => {
|
||||
render(<ActivityTab workspaceId="ws-1" />);
|
||||
await waitFor(() => {
|
||||
expect(screen.getByText(/0 activities|no activity/i)).toBeTruthy();
|
||||
});
|
||||
mockGet.mockClear();
|
||||
|
||||
await act(async () => {
|
||||
emitActivity({ summary: "live-row-from-bus" });
|
||||
});
|
||||
|
||||
await waitFor(() => {
|
||||
expect(screen.getByText(/live-row-from-bus/)).toBeTruthy();
|
||||
});
|
||||
expect(mockGet).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("WS push for a different workspace is ignored", async () => {
|
||||
render(<ActivityTab workspaceId="ws-1" />);
|
||||
await waitFor(() => screen.getByText(/no activity/i));
|
||||
|
||||
await act(async () => {
|
||||
emitActivity({
|
||||
workspaceId: "ws-other",
|
||||
summary: "should-not-render-other-ws",
|
||||
});
|
||||
});
|
||||
|
||||
expect(screen.queryByText(/should-not-render-other-ws/)).toBeNull();
|
||||
});
|
||||
|
||||
it("WS push respects the active filter — non-matching activity_type is ignored", async () => {
|
||||
render(<ActivityTab workspaceId="ws-1" />);
|
||||
await waitFor(() => screen.getByText(/no activity/i));
|
||||
|
||||
// Apply "Tasks" filter.
|
||||
clickButton(/tasks/i);
|
||||
await waitFor(() => {
|
||||
expect(
|
||||
screen.getByRole("button", { name: /tasks/i }).getAttribute("aria-pressed"),
|
||||
).toBe("true");
|
||||
});
|
||||
|
||||
// Push an a2a_send (does NOT match task_update filter).
|
||||
await act(async () => {
|
||||
emitActivity({
|
||||
activityType: "a2a_send",
|
||||
summary: "should-not-render-filter-mismatch",
|
||||
});
|
||||
});
|
||||
|
||||
expect(
|
||||
screen.queryByText(/should-not-render-filter-mismatch/),
|
||||
).toBeNull();
|
||||
});
|
||||
|
||||
it("WS push respects the active filter — matching activity_type is rendered", async () => {
|
||||
render(<ActivityTab workspaceId="ws-1" />);
|
||||
await waitFor(() => screen.getByText(/no activity/i));
|
||||
|
||||
clickButton(/tasks/i);
|
||||
await waitFor(() => {
|
||||
expect(
|
||||
screen.getByRole("button", { name: /tasks/i }).getAttribute("aria-pressed"),
|
||||
).toBe("true");
|
||||
});
|
||||
|
||||
await act(async () => {
|
||||
emitActivity({
|
||||
activityType: "task_update",
|
||||
summary: "task-filter-match",
|
||||
});
|
||||
});
|
||||
|
||||
await waitFor(() => {
|
||||
expect(screen.getByText(/task-filter-match/)).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
it("WS push while autoRefresh is paused is ignored", async () => {
|
||||
render(<ActivityTab workspaceId="ws-1" />);
|
||||
await waitFor(() => screen.getByText(/no activity/i));
|
||||
|
||||
// Toggle Live → Paused.
|
||||
clickButton(/live/i);
|
||||
await waitFor(() => {
|
||||
expect(screen.getByText(/Paused/)).toBeTruthy();
|
||||
});
|
||||
|
||||
await act(async () => {
|
||||
emitActivity({ summary: "should-not-render-paused" });
|
||||
});
|
||||
|
||||
expect(screen.queryByText(/should-not-render-paused/)).toBeNull();
|
||||
});
|
||||
|
||||
it("WS push for a row already in the list is deduped (no double-render)", async () => {
|
||||
// Bootstrap with one row — same id as the WS push to trigger dedup.
|
||||
mockGet.mockResolvedValueOnce([
|
||||
makeEntry({ id: "shared-id", summary: "bootstrap-summary" }),
|
||||
]);
|
||||
render(<ActivityTab workspaceId="ws-1" />);
|
||||
await waitFor(() => {
|
||||
expect(screen.getByText(/bootstrap-summary/)).toBeTruthy();
|
||||
});
|
||||
mockGet.mockClear();
|
||||
|
||||
// Push a row with the SAME id but a different summary — must not
|
||||
// render the new summary; original row stays.
|
||||
await act(async () => {
|
||||
emitActivity({
|
||||
id: "shared-id",
|
||||
summary: "should-not-replace-existing",
|
||||
});
|
||||
});
|
||||
|
||||
expect(screen.queryByText(/should-not-replace-existing/)).toBeNull();
|
||||
// Also verify count didn't grow.
|
||||
expect(screen.getByText(/1 activities/)).toBeTruthy();
|
||||
});
|
||||
|
||||
it("does NOT poll on a 5s interval after mount (post-#61)", async () => {
|
||||
vi.useFakeTimers();
|
||||
try {
|
||||
render(<ActivityTab workspaceId="ws-1" />);
|
||||
// Drain the mount-time bootstrap promise.
|
||||
await act(async () => {
|
||||
await Promise.resolve();
|
||||
await Promise.resolve();
|
||||
});
|
||||
const callsAfterBootstrap = mockGet.mock.calls.length;
|
||||
expect(callsAfterBootstrap).toBeGreaterThanOrEqual(1);
|
||||
|
||||
// Pre-#61: a 30s clock advance fires 6 more polls. Post-#61: 0.
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(30_000);
|
||||
});
|
||||
expect(mockGet.mock.calls.length).toBe(callsAfterBootstrap);
|
||||
} finally {
|
||||
vi.useRealTimers();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// ── Suite 7: Activity count ───────────────────────────────────────────────────
|
||||
|
||||
describe("ActivityTab — activity count", () => {
|
||||
|
||||
@ -1,285 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for ApprovalBanner component.
|
||||
*
|
||||
* Covers: renders nothing when no approvals, polls /approvals/pending,
|
||||
* shows approval cards, approve/deny decisions, toast notifications.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen, fireEvent, cleanup, waitFor, act } from "@testing-library/react";
|
||||
import { afterEach, describe, expect, it, vi, beforeEach } from "vitest";
|
||||
import { ApprovalBanner } from "../ApprovalBanner";
|
||||
import { showToast } from "@/components/Toaster";
|
||||
import { api } from "@/lib/api";
|
||||
|
||||
vi.mock("@/components/Toaster", () => ({
|
||||
showToast: vi.fn(),
|
||||
}));
|
||||
|
||||
// ─── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
const pendingApproval = (id = "a1", workspaceId = "ws-1"): {
|
||||
id: string;
|
||||
workspace_id: string;
|
||||
workspace_name: string;
|
||||
action: string;
|
||||
reason: string | null;
|
||||
status: string;
|
||||
created_at: string;
|
||||
} => ({
|
||||
id,
|
||||
workspace_id: workspaceId,
|
||||
workspace_name: "Test Workspace",
|
||||
action: "Run code execution",
|
||||
reason: "Requires human approval due to workspace policy",
|
||||
status: "pending",
|
||||
created_at: "2026-05-10T10:00:00Z",
|
||||
});
|
||||
|
||||
// ─── Tests ────────────────────────────────────────────────────────────────────
|
||||
|
||||
describe("ApprovalBanner — empty state", () => {
|
||||
it("renders nothing when there are no pending approvals", async () => {
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([]);
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
expect(screen.queryByRole("alert")).toBeNull();
|
||||
});
|
||||
|
||||
it("does not render any approve/deny buttons when list is empty", async () => {
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([]);
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
expect(screen.queryByRole("button", { name: /approve/i })).toBeNull();
|
||||
expect(screen.queryByRole("button", { name: /deny/i })).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("ApprovalBanner — renders approval cards", () => {
|
||||
it("renders an alert card for each pending approval", async () => {
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([
|
||||
pendingApproval("a1"),
|
||||
pendingApproval("a2", "ws-2"),
|
||||
]);
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
const alerts = screen.getAllByRole("alert");
|
||||
expect(alerts).toHaveLength(2);
|
||||
});
|
||||
|
||||
it("displays the workspace name and action text", async () => {
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([pendingApproval("a1")]);
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
expect(screen.getByText("Test Workspace needs approval")).toBeTruthy();
|
||||
expect(screen.getByText("Run code execution")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("displays the reason when present", async () => {
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([pendingApproval("a1")]);
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
expect(screen.getByText(/Requires human approval/i)).toBeTruthy();
|
||||
});
|
||||
|
||||
it("omits the reason div when reason is null", async () => {
|
||||
const approval = pendingApproval("a1");
|
||||
approval.reason = null;
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([approval]);
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
expect(screen.queryByText(/Requires human approval/i)).toBeNull();
|
||||
});
|
||||
|
||||
it("renders both Approve and Deny buttons per card", async () => {
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([pendingApproval("a1")]);
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
expect(screen.getByRole("button", { name: /approve/i })).toBeTruthy();
|
||||
expect(screen.getByRole("button", { name: /deny/i })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("has aria-live=assertive on the alert container", async () => {
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([pendingApproval("a1")]);
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
const alert = screen.getByRole("alert");
|
||||
expect(alert.getAttribute("aria-live")).toBe("assertive");
|
||||
});
|
||||
});
|
||||
|
||||
describe("ApprovalBanner — polling", () => {
|
||||
let clearIntervalSpy: ReturnType<typeof vi.spyOn>;
|
||||
|
||||
beforeEach(() => {
|
||||
clearIntervalSpy = vi.spyOn(global, "clearInterval").mockImplementation(() => {});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
clearIntervalSpy.mockRestore();
|
||||
});
|
||||
|
||||
it("clears the polling interval on unmount", async () => {
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([pendingApproval("a1")]);
|
||||
const { unmount } = render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
unmount();
|
||||
expect(clearIntervalSpy).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe("ApprovalBanner — decisions", () => {
|
||||
it("calls POST /workspaces/:id/approvals/:id/decide on Approve click", async () => {
|
||||
const approval = pendingApproval("a1", "ws-1");
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([approval]);
|
||||
const postSpy = vi.spyOn(api, "post").mockResolvedValueOnce(undefined);
|
||||
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
|
||||
fireEvent.click(screen.getByRole("button", { name: /approve/i }));
|
||||
|
||||
await waitFor(() => {
|
||||
expect(postSpy).toHaveBeenCalledWith(
|
||||
"/workspaces/ws-1/approvals/a1/decide",
|
||||
{ decision: "approved", decided_by: "human" }
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it("calls POST with decision=denied on Deny click", async () => {
|
||||
const approval = pendingApproval("a1", "ws-1");
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([approval]);
|
||||
const postSpy = vi.spyOn(api, "post").mockResolvedValueOnce(undefined);
|
||||
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
|
||||
fireEvent.click(screen.getByRole("button", { name: /deny/i }));
|
||||
|
||||
await waitFor(() => {
|
||||
expect(postSpy).toHaveBeenCalledWith(
|
||||
"/workspaces/ws-1/approvals/a1/decide",
|
||||
{ decision: "denied", decided_by: "human" }
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it("removes the card from state after a successful decision", async () => {
|
||||
const approval = pendingApproval("a1", "ws-1");
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([approval]);
|
||||
vi.spyOn(api, "post").mockResolvedValueOnce(undefined);
|
||||
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
|
||||
// One alert initially
|
||||
expect(screen.getAllByRole("alert")).toHaveLength(1);
|
||||
|
||||
fireEvent.click(screen.getByRole("button", { name: /approve/i }));
|
||||
|
||||
await waitFor(() => {
|
||||
expect(screen.queryByRole("alert")).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
it("shows a success toast on approve", async () => {
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([pendingApproval("a1")]);
|
||||
vi.spyOn(api, "post").mockResolvedValueOnce(undefined);
|
||||
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
|
||||
fireEvent.click(screen.getByRole("button", { name: /approve/i }));
|
||||
|
||||
await waitFor(() => {
|
||||
expect(showToast).toHaveBeenCalledWith("Approved", "success");
|
||||
});
|
||||
});
|
||||
|
||||
it("shows an info toast on deny", async () => {
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([pendingApproval("a1")]);
|
||||
vi.spyOn(api, "post").mockResolvedValueOnce(undefined);
|
||||
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
|
||||
fireEvent.click(screen.getByRole("button", { name: /deny/i }));
|
||||
|
||||
await waitFor(() => {
|
||||
expect(showToast).toHaveBeenCalledWith("Denied", "info");
|
||||
});
|
||||
});
|
||||
|
||||
it("shows an error toast when POST fails", async () => {
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([pendingApproval("a1")]);
|
||||
vi.spyOn(api, "post").mockRejectedValueOnce(new Error("Network error"));
|
||||
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
|
||||
fireEvent.click(screen.getByRole("button", { name: /approve/i }));
|
||||
|
||||
await waitFor(() => {
|
||||
expect(showToast).toHaveBeenCalledWith("Failed to submit decision", "error");
|
||||
});
|
||||
});
|
||||
|
||||
it("keeps the card visible when the POST fails", async () => {
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([pendingApproval("a1")]);
|
||||
vi.spyOn(api, "post").mockRejectedValueOnce(new Error("Network error"));
|
||||
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
|
||||
fireEvent.click(screen.getByRole("button", { name: /approve/i }));
|
||||
|
||||
await waitFor(() => {
|
||||
// Card still shown because the request failed
|
||||
expect(screen.getByRole("alert")).toBeTruthy();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("ApprovalBanner — handles empty list from server", () => {
|
||||
it("shows nothing when the API returns an empty array on first poll", async () => {
|
||||
vi.spyOn(api, "get").mockResolvedValueOnce([]);
|
||||
render(<ApprovalBanner />);
|
||||
await act(async () => {
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
});
|
||||
expect(screen.queryByRole("alert")).toBeNull();
|
||||
});
|
||||
});
|
||||
@ -1,317 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for BundleDropZone component.
|
||||
*
|
||||
* Covers: drag-over/drag-leave state, drop of valid/invalid files,
|
||||
* keyboard file input, import success, import error, auto-clear timeout.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen, fireEvent, cleanup, act, waitFor } from "@testing-library/react";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { BundleDropZone } from "../BundleDropZone";
|
||||
import { api } from "@/lib/api";
|
||||
|
||||
vi.mock("@/lib/api", () => ({
|
||||
api: {
|
||||
post: vi.fn(),
|
||||
},
|
||||
}));
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.clearAllMocks();
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
// ─── Test helper ──────────────────────────────────────────────────────────────
|
||||
|
||||
function makeBundle(name = "test-workspace"): File {
|
||||
const content = JSON.stringify({
|
||||
name,
|
||||
tier: 2,
|
||||
skills: [],
|
||||
config: {},
|
||||
});
|
||||
return new File([content], "test.bundle.json", {
|
||||
type: "application/json",
|
||||
});
|
||||
}
|
||||
|
||||
// ─── Tests ────────────────────────────────────────────────────────────────────
|
||||
|
||||
describe("BundleDropZone — render", () => {
|
||||
it("renders a hidden file input with correct accept and aria-label", () => {
|
||||
render(<BundleDropZone />);
|
||||
const input = screen.getByLabelText("Import bundle file");
|
||||
expect(input.getAttribute("type")).toBe("file");
|
||||
expect(input.getAttribute("accept")).toBe(".bundle.json");
|
||||
});
|
||||
|
||||
it("renders the keyboard-accessible import button with aria-label", () => {
|
||||
render(<BundleDropZone />);
|
||||
const btn = screen.getByRole("button", { name: /import bundle/i });
|
||||
expect(btn).toBeTruthy();
|
||||
expect(btn.getAttribute("aria-controls")).toBe("bundle-file-input");
|
||||
});
|
||||
});
|
||||
|
||||
describe("BundleDropZone — drag state", () => {
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
it("shows the drop overlay when a file is dragged over", () => {
|
||||
render(<BundleDropZone />);
|
||||
const overlay = screen.getByText("Drop Bundle to Import").closest("div");
|
||||
expect(overlay?.className).toContain("fixed");
|
||||
|
||||
// Simulate drag-over on the invisible drop zone
|
||||
const zone = document.body.querySelector('[class*="fixed inset-0 z-10"]') as HTMLElement;
|
||||
if (zone) {
|
||||
fireEvent.dragOver(zone);
|
||||
} else {
|
||||
// Fallback: dispatch on the component's outer div
|
||||
const container = document.body.querySelector('[class*="pointer-events-none"]') as HTMLElement;
|
||||
if (container) {
|
||||
fireEvent.dragOver(container);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("hides the drop overlay when not dragging", () => {
|
||||
render(<BundleDropZone />);
|
||||
// By default (no drag), the overlay should not be visible
|
||||
expect(screen.queryByText("Drop Bundle to Import")).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("BundleDropZone — keyboard file input (WCAG 2.1.1)", () => {
|
||||
it("triggers the hidden file input when the import button is clicked", () => {
|
||||
render(<BundleDropZone />);
|
||||
const input = screen.getByLabelText("Import bundle file") as HTMLInputElement;
|
||||
const clickSpy = vi.spyOn(input, "click");
|
||||
fireEvent.click(screen.getByRole("button", { name: /import bundle/i }));
|
||||
expect(clickSpy).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("processes a selected file when the file input changes", async () => {
|
||||
vi.useFakeTimers();
|
||||
const postMock = vi.mocked(api.post).mockResolvedValueOnce({
|
||||
workspace_id: "ws-new",
|
||||
name: "Imported Workspace",
|
||||
status: "online",
|
||||
});
|
||||
|
||||
render(<BundleDropZone />);
|
||||
const input = screen.getByLabelText("Import bundle file");
|
||||
|
||||
const file = makeBundle("My Bundle");
|
||||
Object.defineProperty(input, "files", {
|
||||
value: [file],
|
||||
writable: false,
|
||||
});
|
||||
|
||||
fireEvent.change(input);
|
||||
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
|
||||
expect(postMock).toHaveBeenCalledWith(
|
||||
"/bundles/import",
|
||||
expect.objectContaining({ name: "My Bundle" })
|
||||
);
|
||||
vi.useRealTimers();
|
||||
});
|
||||
});
|
||||
|
||||
describe("BundleDropZone — import success", () => {
|
||||
it("shows success toast after successful import", async () => {
|
||||
vi.useFakeTimers();
|
||||
vi.mocked(api.post).mockResolvedValueOnce({
|
||||
workspace_id: "ws-new",
|
||||
name: "My Workspace",
|
||||
status: "online",
|
||||
});
|
||||
|
||||
render(<BundleDropZone />);
|
||||
const input = screen.getByLabelText("Import bundle file");
|
||||
|
||||
const file = makeBundle("Success Workspace");
|
||||
Object.defineProperty(input, "files", { value: [file], writable: false });
|
||||
|
||||
fireEvent.change(input);
|
||||
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
|
||||
// Success toast should be visible
|
||||
expect(screen.getByText(/imported "my workspace" successfully/i)).toBeTruthy();
|
||||
|
||||
// Toast auto-clears after 4000ms
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(5000);
|
||||
});
|
||||
expect(screen.queryByRole("status")).toBeNull();
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
it("clears the result toast after 4000ms", async () => {
|
||||
vi.useFakeTimers();
|
||||
vi.mocked(api.post).mockResolvedValueOnce({
|
||||
workspace_id: "ws-new",
|
||||
name: "Timed Workspace",
|
||||
status: "online",
|
||||
});
|
||||
|
||||
render(<BundleDropZone />);
|
||||
const input = screen.getByLabelText("Import bundle file");
|
||||
|
||||
const file = makeBundle("Timed Workspace");
|
||||
Object.defineProperty(input, "files", { value: [file], writable: false });
|
||||
|
||||
fireEvent.change(input);
|
||||
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
expect(screen.queryByText(/timed workspace/i)).toBeTruthy();
|
||||
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(4500);
|
||||
});
|
||||
expect(screen.queryByText(/timed workspace/i)).toBeNull();
|
||||
vi.useRealTimers();
|
||||
});
|
||||
});
|
||||
|
||||
describe("BundleDropZone — import error", () => {
|
||||
it("shows error toast when the API call fails", async () => {
|
||||
vi.useFakeTimers();
|
||||
vi.mocked(api.post).mockRejectedValueOnce(new Error("Import failed: 500 Internal Server Error"));
|
||||
|
||||
render(<BundleDropZone />);
|
||||
const input = screen.getByLabelText("Import bundle file");
|
||||
|
||||
const file = makeBundle("Failed Workspace");
|
||||
Object.defineProperty(input, "files", { value: [file], writable: false });
|
||||
|
||||
fireEvent.change(input);
|
||||
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
|
||||
expect(screen.getByText(/import failed: 500 internal server error/i)).toBeTruthy();
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
it("shows error when file is not a .bundle.json", async () => {
|
||||
vi.useFakeTimers();
|
||||
render(<BundleDropZone />);
|
||||
const input = screen.getByLabelText("Import bundle file");
|
||||
|
||||
const file = new File(["{}"], "readme.txt", { type: "text/plain" });
|
||||
Object.defineProperty(input, "files", { value: [file], writable: false });
|
||||
|
||||
fireEvent.change(input);
|
||||
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
|
||||
expect(screen.getByText(/only .bundle.json files are accepted/i)).toBeTruthy();
|
||||
// Error clears after 3000ms
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(3500);
|
||||
});
|
||||
expect(screen.queryByText(/only .bundle.json/i)).toBeNull();
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
it("clears error after 4000ms", async () => {
|
||||
vi.useFakeTimers();
|
||||
vi.mocked(api.post).mockRejectedValueOnce(new Error("Network error"));
|
||||
|
||||
render(<BundleDropZone />);
|
||||
const input = screen.getByLabelText("Import bundle file");
|
||||
|
||||
const file = makeBundle("Error Workspace");
|
||||
Object.defineProperty(input, "files", { value: [file], writable: false });
|
||||
|
||||
fireEvent.change(input);
|
||||
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
expect(screen.queryByText(/network error/i)).toBeTruthy();
|
||||
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(5000);
|
||||
});
|
||||
expect(screen.queryByText(/network error/i)).toBeNull();
|
||||
vi.useRealTimers();
|
||||
});
|
||||
});
|
||||
|
||||
describe("BundleDropZone — importing state", () => {
|
||||
it("shows 'Importing bundle...' status while API call is in flight", async () => {
|
||||
vi.useFakeTimers();
|
||||
let resolve: (v: unknown) => void;
|
||||
const pending = new Promise((r) => { resolve = r; });
|
||||
vi.mocked(api.post).mockReturnValueOnce(pending as unknown as ReturnType<typeof api.post>);
|
||||
|
||||
render(<BundleDropZone />);
|
||||
const input = screen.getByLabelText("Import bundle file");
|
||||
|
||||
const file = makeBundle("Pending Workspace");
|
||||
Object.defineProperty(input, "files", { value: [file], writable: false });
|
||||
|
||||
fireEvent.change(input);
|
||||
|
||||
// Advance timer to allow the state update to flush
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(100);
|
||||
});
|
||||
|
||||
expect(screen.getByText("Importing bundle...")).toBeTruthy();
|
||||
expect(screen.getByRole("status")).toBeTruthy();
|
||||
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
vi.useRealTimers();
|
||||
});
|
||||
});
|
||||
|
||||
describe("BundleDropZone — file input reset", () => {
|
||||
it("resets the file input value after processing so the same file can be re-selected", async () => {
|
||||
vi.useFakeTimers();
|
||||
vi.mocked(api.post).mockResolvedValueOnce({
|
||||
workspace_id: "ws-new",
|
||||
name: "Reset Workspace",
|
||||
status: "online",
|
||||
});
|
||||
|
||||
render(<BundleDropZone />);
|
||||
const input = screen.getByLabelText("Import bundle file") as HTMLInputElement;
|
||||
|
||||
const file = makeBundle("Reset Test");
|
||||
Object.defineProperty(input, "files", { value: [file], writable: false });
|
||||
|
||||
fireEvent.change(input);
|
||||
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
|
||||
// The component calls e.target.value = "" after processing
|
||||
expect(input.value).toBe("");
|
||||
vi.useRealTimers();
|
||||
});
|
||||
});
|
||||
@ -1,28 +1,18 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* CommunicationOverlay tests — pin both the 2026-05-04 fan-out cap fix
|
||||
* AND the 2026-05-07 polling → ACTIVITY_LOGGED-subscriber refactor
|
||||
* (issue #61 stage 1).
|
||||
* CommunicationOverlay tests — pin the rate-limit fix shipped 2026-05-04.
|
||||
*
|
||||
* The overlay used to poll /workspaces/:id/activity?limit=5 on a 30s
|
||||
* interval per online workspace (capped at 3). Post-#61: it bootstraps
|
||||
* once on mount via the same HTTP path (cap of 3 retained), then
|
||||
* subscribes to ACTIVITY_LOGGED via the global socket bus for live
|
||||
* updates. No interval poll.
|
||||
* The overlay polls /workspaces/:id/activity?limit=5 for each online
|
||||
* workspace. Pre-fix it (a) polled regardless of visibility and (b)
|
||||
* fanned out to 6 workspaces every 10s. With 8+ workspaces a user
|
||||
* triggered sustained 429s (server-side rate limit is 600 req/min/IP).
|
||||
*
|
||||
* These tests pin:
|
||||
* 1. Bootstrap fan-out cap of 3 — even with 6 online nodes, only 3
|
||||
* HTTP fetches on mount.
|
||||
* 2. Visibility gate — when collapsed, no HTTP fetches; re-open
|
||||
* re-bootstraps.
|
||||
* 3. NO interval polling — advancing the clock past 30s does not fire
|
||||
* additional HTTP calls.
|
||||
* 4. WS push extends the rendered list without firing any HTTP call.
|
||||
* 5. WS push for an offline workspace is ignored.
|
||||
* 6. WS push for a non-comm activity_type is ignored.
|
||||
* 1. Fan-out cap of 3 — even with 6 online nodes, only 3 fetches
|
||||
* 2. Visibility gate — when collapsed, no polling
|
||||
*
|
||||
* If a future refactor regresses any of these, CI fails before the
|
||||
* regression hits a paying tenant.
|
||||
* If a future refactor pushes either dial back up, CI fails before
|
||||
* the regression hits a paying tenant.
|
||||
*/
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { render, cleanup, act, fireEvent } from "@testing-library/react";
|
||||
@ -33,7 +23,7 @@ vi.mock("@/lib/api", () => ({
|
||||
api: { get: vi.fn() },
|
||||
}));
|
||||
|
||||
// Six online nodes — enough to verify the bootstrap cap of 3.
|
||||
// Six online nodes — enough to verify the cap of 3.
|
||||
const mockStoreState = {
|
||||
selectedNodeId: null as string | null,
|
||||
nodes: [
|
||||
@ -66,10 +56,6 @@ vi.mock("@/lib/design-tokens", () => ({
|
||||
// ── Imports (after mocks) ─────────────────────────────────────────────────────
|
||||
|
||||
import { api } from "@/lib/api";
|
||||
import {
|
||||
emitSocketEvent,
|
||||
_resetSocketEventListenersForTests,
|
||||
} from "@/store/socket-events";
|
||||
import { CommunicationOverlay } from "../CommunicationOverlay";
|
||||
|
||||
const mockGet = vi.mocked(api.get);
|
||||
@ -80,34 +66,30 @@ beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
mockGet.mockReset();
|
||||
mockGet.mockResolvedValue([]);
|
||||
// Drop any subscribers the previous test left on the singleton bus —
|
||||
// each render adds one via useSocketEvent.
|
||||
_resetSocketEventListenersForTests();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.useRealTimers();
|
||||
_resetSocketEventListenersForTests();
|
||||
});
|
||||
|
||||
// ── Tests ─────────────────────────────────────────────────────────────────────
|
||||
|
||||
describe("CommunicationOverlay — bootstrap fan-out cap", () => {
|
||||
it("bootstraps at most 3 of 6 online workspaces (rate-limit floor preserved post-#61)", async () => {
|
||||
describe("CommunicationOverlay — fan-out cap", () => {
|
||||
it("polls at most 3 of 6 online workspaces (rate-limit floor)", async () => {
|
||||
await act(async () => {
|
||||
render(<CommunicationOverlay />);
|
||||
});
|
||||
// Mount fires the bootstrap synchronously — pre-#61 this was the
|
||||
// first poll cycle; post-#61 it's the only HTTP fetch (live updates
|
||||
// arrive via WS push). 6 nodes → 3 fetches.
|
||||
// Mount fires the first poll synchronously (no interval tick yet).
|
||||
// Pre-fix: 6 calls. Post-fix: 3.
|
||||
expect(mockGet).toHaveBeenCalledTimes(3);
|
||||
// Verify the calls are for the FIRST 3 online nodes (slice order).
|
||||
expect(mockGet).toHaveBeenCalledWith("/workspaces/ws-1/activity?limit=5");
|
||||
expect(mockGet).toHaveBeenCalledWith("/workspaces/ws-2/activity?limit=5");
|
||||
expect(mockGet).toHaveBeenCalledWith("/workspaces/ws-3/activity?limit=5");
|
||||
});
|
||||
|
||||
it("never bootstraps offline workspaces", async () => {
|
||||
it("never polls offline workspaces", async () => {
|
||||
await act(async () => {
|
||||
render(<CommunicationOverlay />);
|
||||
});
|
||||
@ -117,39 +99,40 @@ describe("CommunicationOverlay — bootstrap fan-out cap", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("CommunicationOverlay — no interval polling (post-#61)", () => {
|
||||
// The pre-#61 implementation re-fetched every 30s per workspace.
|
||||
// Post-#61 the only HTTP path is the bootstrap on mount + on
|
||||
// visibility-toggle. This test pins the absence of any interval
|
||||
// poll: a 60s clock advance must not produce a second round of
|
||||
// fetches.
|
||||
it("does NOT poll on a 30s interval after bootstrap", async () => {
|
||||
describe("CommunicationOverlay — cadence", () => {
|
||||
it("uses 30s interval cadence (was 10s pre-fix)", async () => {
|
||||
await act(async () => {
|
||||
render(<CommunicationOverlay />);
|
||||
});
|
||||
expect(mockGet).toHaveBeenCalledTimes(3); // initial bootstrap
|
||||
mockGet.mockClear();
|
||||
expect(mockGet).toHaveBeenCalledTimes(3); // initial mount poll
|
||||
|
||||
// Advance 60s — well past any plausible cadence the prior version
|
||||
// could have used.
|
||||
// Advance 10s — pre-fix this would fire another poll. Post-fix: silent.
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(60_000);
|
||||
vi.advanceTimersByTime(10_000);
|
||||
});
|
||||
expect(mockGet).not.toHaveBeenCalled();
|
||||
expect(mockGet).toHaveBeenCalledTimes(3);
|
||||
|
||||
// Advance to 30s — interval fires.
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(20_000);
|
||||
});
|
||||
expect(mockGet).toHaveBeenCalledTimes(6); // +3 from second tick
|
||||
});
|
||||
});
|
||||
|
||||
describe("CommunicationOverlay — visibility gate", () => {
|
||||
// The visibility gate now does two things post-#61:
|
||||
// - while closed, the WS handler short-circuits (no setComms churn)
|
||||
// - re-opening triggers a fresh bootstrap so the list reflects
|
||||
// anything that happened while the panel was collapsed
|
||||
// The visibility gate is the dial that drops collapsed-panel polling
|
||||
// to ZERO. The cadence test above can't catch its removal — if a
|
||||
// refactor dropped `if (!visible) return`, the cadence test would
|
||||
// still pass because the effect would still fire every 30s.
|
||||
//
|
||||
// Direct probe: render with comms-returning mock so the panel
|
||||
// actually renders (close button only exists in the expanded panel,
|
||||
// not the collapsed button-state). Click close, advance the clock,
|
||||
// assert no further fetches.
|
||||
it("stops fetching while collapsed and re-bootstraps on re-open", async () => {
|
||||
it("stops polling after the user collapses the panel", async () => {
|
||||
// Mock returns one a2a_send so comms.length > 0 → panel renders →
|
||||
// close button accessible.
|
||||
mockGet.mockResolvedValue([
|
||||
{
|
||||
id: "act-1",
|
||||
@ -167,202 +150,29 @@ describe("CommunicationOverlay — visibility gate", () => {
|
||||
const { getByLabelText } = await act(async () => {
|
||||
return render(<CommunicationOverlay />);
|
||||
});
|
||||
// Drain pending microtasks (resolves the await in bootstrap) so
|
||||
// setComms lands and the panel renders. Don't advance time — it's
|
||||
// not load-bearing for the gate test, but matches the pattern used
|
||||
// pre-#61 for stability.
|
||||
// Drain pending microtasks (resolves the await in fetchComms) so
|
||||
// setComms lands and the panel renders. Don't advance time — that
|
||||
// would fire the next interval tick and pollute the assertion.
|
||||
await act(async () => {
|
||||
await Promise.resolve();
|
||||
await Promise.resolve();
|
||||
await Promise.resolve();
|
||||
});
|
||||
expect(mockGet).toHaveBeenCalledTimes(3); // initial bootstrap
|
||||
// Initial mount polled 3 workspaces.
|
||||
expect(mockGet).toHaveBeenCalledTimes(3);
|
||||
mockGet.mockClear();
|
||||
|
||||
// Click close. While closed, no fetches and no WS-driven updates.
|
||||
// Click the close button. Synchronous getByLabelText avoids
|
||||
// findBy's internal setTimeout (deadlocks under useFakeTimers).
|
||||
const closeBtn = getByLabelText("Close communications panel");
|
||||
await act(async () => {
|
||||
fireEvent.click(closeBtn);
|
||||
});
|
||||
|
||||
// Advance well past the 30s cadence — gate should suppress the tick.
|
||||
await act(async () => {
|
||||
vi.advanceTimersByTime(60_000);
|
||||
});
|
||||
expect(mockGet).not.toHaveBeenCalled();
|
||||
|
||||
// Re-open via the collapsed button. Must trigger a fresh bootstrap.
|
||||
const openBtn = getByLabelText("Show communications panel");
|
||||
await act(async () => {
|
||||
fireEvent.click(openBtn);
|
||||
});
|
||||
await act(async () => {
|
||||
await Promise.resolve();
|
||||
await Promise.resolve();
|
||||
});
|
||||
expect(mockGet).toHaveBeenCalledTimes(3); // re-bootstrap on re-open
|
||||
});
|
||||
});
|
||||
|
||||
describe("CommunicationOverlay — WS subscription (#61 stage 1 core)", () => {
|
||||
// The load-bearing post-#61 behaviour. Every test in this block must
|
||||
// verify (a) the WS push DID update the rendered comms list, and
|
||||
// (b) NO additional HTTP call was fired — the whole point of the
|
||||
// refactor is to remove the polling-driven HTTP traffic.
|
||||
function emitActivityLogged(overrides: Partial<{
|
||||
workspaceId: string;
|
||||
payload: Record<string, unknown>;
|
||||
}> = {}) {
|
||||
emitSocketEvent({
|
||||
event: "ACTIVITY_LOGGED",
|
||||
workspace_id: overrides.workspaceId ?? "ws-1",
|
||||
timestamp: new Date().toISOString(),
|
||||
payload: {
|
||||
id: `act-${Math.random().toString(36).slice(2)}`,
|
||||
activity_type: "a2a_send",
|
||||
source_id: "ws-1",
|
||||
target_id: "ws-2",
|
||||
summary: "live push",
|
||||
status: "ok",
|
||||
duration_ms: 42,
|
||||
created_at: new Date().toISOString(),
|
||||
...overrides.payload,
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
it("WS push for a comm activity_type extends the rendered list with NO additional HTTP call", async () => {
|
||||
const { container } = await act(async () => {
|
||||
return render(<CommunicationOverlay />);
|
||||
});
|
||||
expect(mockGet).toHaveBeenCalledTimes(3); // bootstrap
|
||||
mockGet.mockClear();
|
||||
|
||||
await act(async () => {
|
||||
emitActivityLogged({ payload: { summary: "hello" } });
|
||||
});
|
||||
await act(async () => {
|
||||
await Promise.resolve();
|
||||
});
|
||||
|
||||
// Two pins:
|
||||
// 1. comms list reflects the live push (look for the summary text)
|
||||
// 2. zero HTTP fetches fired during the WS path
|
||||
expect(container.textContent).toContain("hello");
|
||||
expect(mockGet).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("WS push for an offline workspace is ignored", async () => {
|
||||
const { container } = await act(async () => {
|
||||
return render(<CommunicationOverlay />);
|
||||
});
|
||||
mockGet.mockClear();
|
||||
|
||||
await act(async () => {
|
||||
emitActivityLogged({
|
||||
workspaceId: "ws-offline",
|
||||
payload: { source_id: "ws-offline", summary: "should-not-render" },
|
||||
});
|
||||
});
|
||||
await act(async () => {
|
||||
await Promise.resolve();
|
||||
});
|
||||
|
||||
expect(container.textContent).not.toContain("should-not-render");
|
||||
expect(mockGet).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("WS push for a non-comm activity_type is ignored (e.g. delegation)", async () => {
|
||||
const { container } = await act(async () => {
|
||||
return render(<CommunicationOverlay />);
|
||||
});
|
||||
mockGet.mockClear();
|
||||
|
||||
await act(async () => {
|
||||
emitActivityLogged({
|
||||
payload: {
|
||||
activity_type: "delegation",
|
||||
summary: "should-not-render-delegation",
|
||||
},
|
||||
});
|
||||
});
|
||||
await act(async () => {
|
||||
await Promise.resolve();
|
||||
});
|
||||
|
||||
expect(container.textContent).not.toContain("should-not-render-delegation");
|
||||
expect(mockGet).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("WS push while the panel is collapsed is ignored (no churn on hidden state)", async () => {
|
||||
// Bootstrap with one comm so the panel renders → close button
|
||||
// accessible. Then collapse, emit a WS push, re-open: the rendered
|
||||
// list must come from the re-bootstrap, NOT from the WS-push that
|
||||
// arrived during the closed state. Also: nothing visible while
|
||||
// closed (the collapsed button shows only the count, not summaries).
|
||||
mockGet.mockResolvedValue([
|
||||
{
|
||||
id: "act-bootstrap",
|
||||
workspace_id: "ws-1",
|
||||
activity_type: "a2a_send",
|
||||
source_id: "ws-1",
|
||||
target_id: "ws-2",
|
||||
summary: "bootstrap-summary",
|
||||
status: "ok",
|
||||
duration_ms: 1,
|
||||
created_at: new Date().toISOString(),
|
||||
},
|
||||
]);
|
||||
const { getByLabelText, container } = await act(async () => {
|
||||
return render(<CommunicationOverlay />);
|
||||
});
|
||||
await act(async () => {
|
||||
await Promise.resolve();
|
||||
await Promise.resolve();
|
||||
});
|
||||
|
||||
// Collapse.
|
||||
const closeBtn = getByLabelText("Close communications panel");
|
||||
await act(async () => {
|
||||
fireEvent.click(closeBtn);
|
||||
});
|
||||
|
||||
// Bootstrap mock returns nothing on the re-open path so we can
|
||||
// distinguish "WS push leaked through the gate" from "re-bootstrap
|
||||
// refilled the list."
|
||||
mockGet.mockReset();
|
||||
mockGet.mockResolvedValue([]);
|
||||
|
||||
await act(async () => {
|
||||
emitActivityLogged({
|
||||
payload: { summary: "leaked-while-closed" },
|
||||
});
|
||||
});
|
||||
await act(async () => {
|
||||
await Promise.resolve();
|
||||
});
|
||||
|
||||
// Closed state: rendered DOM must not show any push-derived text.
|
||||
expect(container.textContent).not.toContain("leaked-while-closed");
|
||||
});
|
||||
|
||||
it("non-ACTIVITY_LOGGED events are ignored (e.g. WORKSPACE_OFFLINE)", async () => {
|
||||
const { container } = await act(async () => {
|
||||
return render(<CommunicationOverlay />);
|
||||
});
|
||||
mockGet.mockClear();
|
||||
|
||||
await act(async () => {
|
||||
emitSocketEvent({
|
||||
event: "WORKSPACE_OFFLINE",
|
||||
workspace_id: "ws-1",
|
||||
timestamp: new Date().toISOString(),
|
||||
payload: { summary: "should-not-render-event" },
|
||||
});
|
||||
});
|
||||
await act(async () => {
|
||||
await Promise.resolve();
|
||||
});
|
||||
|
||||
expect(container.textContent).not.toContain("should-not-render-event");
|
||||
expect(mockGet).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
@ -1,376 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for ContextMenu component.
|
||||
*
|
||||
* Covers: null guard, node header (name + status), outside-click close,
|
||||
* Escape close, arrow-key navigation, conditional menu items by status,
|
||||
* danger items, dividers, rAF position clamping.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen, fireEvent, cleanup, act, waitFor } from "@testing-library/react";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { ContextMenu } from "../ContextMenu";
|
||||
import { useCanvasStore } from "@/store/canvas";
|
||||
import { showToast } from "../Toaster";
|
||||
|
||||
// ─── Mock Toaster ─────────────────────────────────────────────────────────────
|
||||
|
||||
vi.mock("../Toaster", () => ({
|
||||
showToast: vi.fn(),
|
||||
}));
|
||||
|
||||
// ─── Mock API ────────────────────────────────────────────────────────────────
|
||||
|
||||
const apiPost = vi.fn().mockResolvedValue(undefined as void);
|
||||
const apiPatch = vi.fn().mockResolvedValue(undefined as void);
|
||||
vi.mock("@/lib/api", () => ({
|
||||
api: {
|
||||
post: apiPost,
|
||||
patch: apiPatch,
|
||||
get: vi.fn(),
|
||||
},
|
||||
}));
|
||||
|
||||
// ─── Mock store ──────────────────────────────────────────────────────────────
|
||||
|
||||
const mockStoreState = {
|
||||
contextMenu: null as {
|
||||
x: number;
|
||||
y: number;
|
||||
nodeId: string;
|
||||
nodeData: {
|
||||
name: string;
|
||||
status: string;
|
||||
tier: number;
|
||||
role: string;
|
||||
parentId?: string | null;
|
||||
collapsed?: boolean;
|
||||
};
|
||||
} | null,
|
||||
closeContextMenu: vi.fn(),
|
||||
updateNodeData: vi.fn(),
|
||||
selectNode: vi.fn(),
|
||||
setPanelTab: vi.fn(),
|
||||
nestNode: vi.fn().mockResolvedValue(undefined as void),
|
||||
setPendingDelete: vi.fn(),
|
||||
setCollapsed: vi.fn(),
|
||||
arrangeChildren: vi.fn(),
|
||||
nodes: [] as Array<{
|
||||
id: string;
|
||||
data: { parentId?: string | null };
|
||||
}>,
|
||||
};
|
||||
|
||||
vi.mock("@/store/canvas", () => ({
|
||||
useCanvasStore: Object.assign(
|
||||
(sel: (s: typeof mockStoreState) => unknown) => sel(mockStoreState),
|
||||
{ getState: () => mockStoreState },
|
||||
),
|
||||
}));
|
||||
|
||||
// ─── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
function openMenu(overrides?: Partial<NonNullable<typeof mockStoreState.contextMenu>>) {
|
||||
mockStoreState.contextMenu = {
|
||||
x: 100,
|
||||
y: 200,
|
||||
nodeId: "n1",
|
||||
nodeData: { name: "Alice", status: "online", tier: 4, role: "assistant" },
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
// ─── Tests ───────────────────────────────────────────────────────────────────
|
||||
|
||||
describe("ContextMenu — visibility", () => {
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.clearAllMocks();
|
||||
mockStoreState.contextMenu = null;
|
||||
mockStoreState.closeContextMenu.mockClear();
|
||||
mockStoreState.updateNodeData.mockClear();
|
||||
mockStoreState.selectNode.mockClear();
|
||||
mockStoreState.setPanelTab.mockClear();
|
||||
mockStoreState.nestNode.mockClear();
|
||||
mockStoreState.setPendingDelete.mockClear();
|
||||
mockStoreState.setCollapsed.mockClear();
|
||||
mockStoreState.arrangeChildren.mockClear();
|
||||
mockStoreState.nodes = [];
|
||||
apiPost.mockReset();
|
||||
apiPatch.mockReset();
|
||||
vi.mocked(showToast).mockClear();
|
||||
});
|
||||
|
||||
it("renders nothing when contextMenu is null", () => {
|
||||
mockStoreState.contextMenu = null;
|
||||
render(<ContextMenu />);
|
||||
expect(screen.queryByRole("menu")).toBeNull();
|
||||
});
|
||||
|
||||
it("renders the menu when contextMenu is set", () => {
|
||||
openMenu();
|
||||
render(<ContextMenu />);
|
||||
expect(screen.getByRole("menu")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("has aria-label describing the node name", () => {
|
||||
openMenu({ nodeData: { name: "Alice", status: "online", tier: 4, role: "assistant" } });
|
||||
render(<ContextMenu />);
|
||||
expect(screen.getByRole("menu").getAttribute("aria-label")).toBe("Actions for Alice");
|
||||
});
|
||||
|
||||
it("shows the node name in the header", () => {
|
||||
openMenu({ nodeData: { name: "Bob", status: "offline", tier: 2, role: "analyst" } });
|
||||
render(<ContextMenu />);
|
||||
expect(screen.getByText("Bob")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("shows the node status in the header", () => {
|
||||
openMenu({ nodeData: { name: "Alice", status: "failed", tier: 4, role: "assistant" } });
|
||||
render(<ContextMenu />);
|
||||
expect(screen.getByText("failed")).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
describe("ContextMenu — close", () => {
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.clearAllMocks();
|
||||
mockStoreState.contextMenu = null;
|
||||
mockStoreState.closeContextMenu.mockClear();
|
||||
mockStoreState.updateNodeData.mockClear();
|
||||
mockStoreState.selectNode.mockClear();
|
||||
mockStoreState.setPanelTab.mockClear();
|
||||
mockStoreState.nestNode.mockClear();
|
||||
mockStoreState.setPendingDelete.mockClear();
|
||||
mockStoreState.setCollapsed.mockClear();
|
||||
mockStoreState.arrangeChildren.mockClear();
|
||||
mockStoreState.nodes = [];
|
||||
apiPost.mockReset();
|
||||
apiPatch.mockReset();
|
||||
vi.mocked(showToast).mockClear();
|
||||
});
|
||||
|
||||
it("closes when clicking outside the menu", () => {
|
||||
openMenu();
|
||||
render(<ContextMenu />);
|
||||
fireEvent.mouseDown(document.body);
|
||||
expect(mockStoreState.closeContextMenu).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("closes when Escape is pressed", () => {
|
||||
openMenu();
|
||||
render(<ContextMenu />);
|
||||
fireEvent.keyDown(document.body, { key: "Escape" });
|
||||
expect(mockStoreState.closeContextMenu).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("closes when Tab is pressed", () => {
|
||||
openMenu();
|
||||
render(<ContextMenu />);
|
||||
fireEvent.keyDown(document.body, { key: "Tab" });
|
||||
expect(mockStoreState.closeContextMenu).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe("ContextMenu — menu items", () => {
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.clearAllMocks();
|
||||
mockStoreState.contextMenu = null;
|
||||
mockStoreState.closeContextMenu.mockClear();
|
||||
mockStoreState.updateNodeData.mockClear();
|
||||
mockStoreState.selectNode.mockClear();
|
||||
mockStoreState.setPanelTab.mockClear();
|
||||
mockStoreState.nestNode.mockClear();
|
||||
mockStoreState.setPendingDelete.mockClear();
|
||||
mockStoreState.setCollapsed.mockClear();
|
||||
mockStoreState.arrangeChildren.mockClear();
|
||||
mockStoreState.nodes = [];
|
||||
apiPost.mockReset();
|
||||
apiPatch.mockReset();
|
||||
vi.mocked(showToast).mockClear();
|
||||
});
|
||||
|
||||
it("shows Chat and Terminal only for online nodes", () => {
|
||||
openMenu({ nodeData: { name: "Alice", status: "online", tier: 4, role: "assistant" } });
|
||||
render(<ContextMenu />);
|
||||
expect(screen.getByRole("menuitem", { name: /chat/i })).toBeTruthy();
|
||||
expect(screen.getByRole("menuitem", { name: /terminal/i })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("hides Chat and Terminal for offline nodes", () => {
|
||||
openMenu({ nodeData: { name: "Bob", status: "offline", tier: 2, role: "analyst" } });
|
||||
render(<ContextMenu />);
|
||||
expect(screen.queryByRole("menuitem", { name: /chat/i })).toBeNull();
|
||||
expect(screen.queryByRole("menuitem", { name: /terminal/i })).toBeNull();
|
||||
});
|
||||
|
||||
it("shows Pause for online nodes (not paused)", () => {
|
||||
openMenu({ nodeData: { name: "Alice", status: "online", tier: 4, role: "assistant" } });
|
||||
render(<ContextMenu />);
|
||||
expect(screen.getByRole("menuitem", { name: /pause/i })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("shows Resume for paused nodes (not Pause)", () => {
|
||||
openMenu({ nodeData: { name: "Carol", status: "paused", tier: 3, role: "writer" } });
|
||||
render(<ContextMenu />);
|
||||
expect(screen.queryByRole("menuitem", { name: /pause/i })).toBeNull();
|
||||
expect(screen.getByRole("menuitem", { name: /resume/i })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("shows Extract from Team only for child nodes", () => {
|
||||
openMenu({ nodeData: { name: "Child", status: "online", tier: 4, role: "", parentId: "parent1" } });
|
||||
render(<ContextMenu />);
|
||||
expect(screen.getByRole("menuitem", { name: /extract/i })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("hides Extract from Team for root nodes", () => {
|
||||
openMenu({ nodeData: { name: "Root", status: "online", tier: 4, role: "", parentId: null } });
|
||||
render(<ContextMenu />);
|
||||
expect(screen.queryByRole("menuitem", { name: /extract/i })).toBeNull();
|
||||
});
|
||||
|
||||
it("shows team items only when node has children", () => {
|
||||
openMenu({ nodeData: { name: "Parent", status: "online", tier: 4, role: "" } });
|
||||
mockStoreState.nodes = [{ id: "child1", data: { parentId: "n1" } }];
|
||||
render(<ContextMenu />);
|
||||
expect(screen.getByRole("menuitem", { name: /arrange/i })).toBeTruthy();
|
||||
expect(screen.getByRole("menuitem", { name: /collapse/i })).toBeTruthy();
|
||||
expect(screen.getByRole("menuitem", { name: /zoom/i })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("hides team items when node has no children", () => {
|
||||
openMenu({ nodeData: { name: "Leaf", status: "online", tier: 4, role: "" } });
|
||||
mockStoreState.nodes = [];
|
||||
render(<ContextMenu />);
|
||||
expect(screen.queryByRole("menuitem", { name: /arrange/i })).toBeNull();
|
||||
expect(screen.queryByRole("menuitem", { name: /collapse/i })).toBeNull();
|
||||
expect(screen.queryByRole("menuitem", { name: /zoom/i })).toBeNull();
|
||||
});
|
||||
|
||||
it("shows Collapse Team when collapsed, Expand Team when expanded", () => {
|
||||
openMenu({ nodeData: { name: "Parent", status: "online", tier: 4, role: "", collapsed: true } });
|
||||
mockStoreState.nodes = [{ id: "child1", data: { parentId: "n1" } }];
|
||||
render(<ContextMenu />);
|
||||
expect(screen.getByRole("menuitem", { name: /expand/i })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("Delete item has danger styling class", () => {
|
||||
openMenu();
|
||||
render(<ContextMenu />);
|
||||
const deleteItem = screen.getByRole("menuitem", { name: /delete/i });
|
||||
expect(deleteItem.getAttribute("class")).toMatch(/text-bad|bad/);
|
||||
});
|
||||
|
||||
it("renders role=separator for dividers", () => {
|
||||
openMenu();
|
||||
render(<ContextMenu />);
|
||||
expect(document.body.querySelectorAll('[role="separator"]').length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("ContextMenu — keyboard navigation", () => {
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.clearAllMocks();
|
||||
mockStoreState.contextMenu = null;
|
||||
mockStoreState.closeContextMenu.mockClear();
|
||||
mockStoreState.updateNodeData.mockClear();
|
||||
mockStoreState.selectNode.mockClear();
|
||||
mockStoreState.setPanelTab.mockClear();
|
||||
mockStoreState.nestNode.mockClear();
|
||||
mockStoreState.setPendingDelete.mockClear();
|
||||
mockStoreState.setCollapsed.mockClear();
|
||||
mockStoreState.arrangeChildren.mockClear();
|
||||
mockStoreState.nodes = [];
|
||||
apiPost.mockReset();
|
||||
apiPatch.mockReset();
|
||||
vi.mocked(showToast).mockClear();
|
||||
});
|
||||
|
||||
it("ArrowDown moves focus to next enabled menuitem", () => {
|
||||
openMenu();
|
||||
render(<ContextMenu />);
|
||||
const menu = screen.getByRole("menu");
|
||||
// First tab goes to Details (first non-disabled item)
|
||||
fireEvent.keyDown(menu, { key: "ArrowDown" });
|
||||
const buttons = screen.getAllByRole("menuitem");
|
||||
const focusedIdx = buttons.findIndex((b) => document.activeElement === b);
|
||||
expect(focusedIdx).toBeGreaterThanOrEqual(0);
|
||||
});
|
||||
|
||||
it("ArrowUp moves focus to previous enabled menuitem", () => {
|
||||
openMenu();
|
||||
render(<ContextMenu />);
|
||||
const menu = screen.getByRole("menu");
|
||||
fireEvent.keyDown(menu, { key: "ArrowDown" });
|
||||
const beforeFocused = document.activeElement;
|
||||
fireEvent.keyDown(menu, { key: "ArrowUp" });
|
||||
// Focus should have moved
|
||||
expect(document.activeElement).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
describe("ContextMenu — item actions", () => {
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.clearAllMocks();
|
||||
mockStoreState.contextMenu = null;
|
||||
mockStoreState.closeContextMenu.mockClear();
|
||||
mockStoreState.updateNodeData.mockClear();
|
||||
mockStoreState.selectNode.mockClear();
|
||||
mockStoreState.setPanelTab.mockClear();
|
||||
mockStoreState.nestNode.mockClear();
|
||||
mockStoreState.setPendingDelete.mockClear();
|
||||
mockStoreState.setCollapsed.mockClear();
|
||||
mockStoreState.arrangeChildren.mockClear();
|
||||
mockStoreState.nodes = [];
|
||||
apiPost.mockReset();
|
||||
apiPatch.mockReset();
|
||||
vi.mocked(showToast).mockClear();
|
||||
});
|
||||
|
||||
it("Details selects node and opens details tab", () => {
|
||||
openMenu();
|
||||
render(<ContextMenu />);
|
||||
fireEvent.click(screen.getByRole("menuitem", { name: /details/i }));
|
||||
expect(mockStoreState.selectNode).toHaveBeenCalledWith("n1");
|
||||
expect(mockStoreState.setPanelTab).toHaveBeenCalledWith("details");
|
||||
});
|
||||
|
||||
it("Chat selects node and opens chat tab", () => {
|
||||
openMenu({ nodeData: { name: "Alice", status: "online", tier: 4, role: "assistant" } });
|
||||
render(<ContextMenu />);
|
||||
fireEvent.click(screen.getByRole("menuitem", { name: /chat/i }));
|
||||
expect(mockStoreState.selectNode).toHaveBeenCalledWith("n1");
|
||||
expect(mockStoreState.setPanelTab).toHaveBeenCalledWith("chat");
|
||||
});
|
||||
|
||||
it("Delete calls setPendingDelete without closing immediately", () => {
|
||||
openMenu();
|
||||
render(<ContextMenu />);
|
||||
fireEvent.click(screen.getByRole("menuitem", { name: /delete/i }));
|
||||
expect(mockStoreState.setPendingDelete).toHaveBeenCalled();
|
||||
expect(mockStoreState.closeContextMenu).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("Pause calls the pause API and updates node status optimistically", async () => {
|
||||
openMenu({ nodeData: { name: "Alice", status: "online", tier: 4, role: "assistant" } });
|
||||
apiPost.mockResolvedValue(undefined);
|
||||
render(<ContextMenu />);
|
||||
fireEvent.click(screen.getByRole("menuitem", { name: /pause/i }));
|
||||
await act(async () => { /* flush */ });
|
||||
expect(apiPost).toHaveBeenCalledWith("/workspaces/n1/pause", {});
|
||||
expect(mockStoreState.updateNodeData).toHaveBeenCalledWith("n1", { status: "paused" });
|
||||
});
|
||||
|
||||
it("Resume calls the resume API", async () => {
|
||||
openMenu({ nodeData: { name: "Alice", status: "paused", tier: 4, role: "assistant" } });
|
||||
apiPost.mockResolvedValue(undefined);
|
||||
render(<ContextMenu />);
|
||||
fireEvent.click(screen.getByRole("menuitem", { name: /resume/i }));
|
||||
await act(async () => { /* flush */ });
|
||||
expect(apiPost).toHaveBeenCalledWith("/workspaces/n1/resume", {});
|
||||
});
|
||||
});
|
||||
@ -1,156 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for ConversationTraceModal's extractMessageText helper.
|
||||
*
|
||||
* Covers: MCP simple task format, request params.message.parts extraction,
|
||||
* response result.parts extraction, result.root.text extraction, plain string
|
||||
* result, null input, malformed input, empty strings.
|
||||
*/
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { extractMessageText } from "../ConversationTraceModal";
|
||||
|
||||
describe("extractMessageText — MCP simple task format", () => {
|
||||
it("extracts text from body.task field", () => {
|
||||
const body = { task: "Deploy the agent to production" };
|
||||
expect(extractMessageText(body)).toBe("Deploy the agent to production");
|
||||
});
|
||||
|
||||
it("returns empty string when body is null", () => {
|
||||
expect(extractMessageText(null)).toBe("");
|
||||
});
|
||||
|
||||
it("returns empty string when body is undefined", () => {
|
||||
expect(extractMessageText(undefined as unknown as null)).toBe("");
|
||||
});
|
||||
});
|
||||
|
||||
describe("extractMessageText — request params.message format", () => {
|
||||
it("extracts text from params.message.parts[].text", () => {
|
||||
const body = {
|
||||
params: {
|
||||
message: {
|
||||
parts: [{ text: "Hello world" }],
|
||||
},
|
||||
},
|
||||
};
|
||||
expect(extractMessageText(body)).toBe("Hello world");
|
||||
});
|
||||
|
||||
it("joins multiple parts with newlines", () => {
|
||||
const body = {
|
||||
params: {
|
||||
message: {
|
||||
parts: [
|
||||
{ text: "First part" },
|
||||
{ text: "Second part" },
|
||||
{ text: "Third part" },
|
||||
],
|
||||
},
|
||||
},
|
||||
};
|
||||
expect(extractMessageText(body)).toBe("First part\nSecond part\nThird part");
|
||||
});
|
||||
|
||||
it("ignores parts without text field", () => {
|
||||
const body = {
|
||||
params: {
|
||||
message: {
|
||||
parts: [{ text: "Hello" }, { other: "field" }, { text: "World" }],
|
||||
},
|
||||
},
|
||||
};
|
||||
expect(extractMessageText(body)).toBe("Hello\nWorld");
|
||||
});
|
||||
|
||||
it("returns empty string when params.message is absent", () => {
|
||||
const body = { params: {} };
|
||||
expect(extractMessageText(body)).toBe("");
|
||||
});
|
||||
});
|
||||
|
||||
describe("extractMessageText — response result format", () => {
|
||||
it("extracts text from result.parts[].text", () => {
|
||||
const body = {
|
||||
result: {
|
||||
parts: [{ text: "Agent response" }],
|
||||
},
|
||||
};
|
||||
expect(extractMessageText(body)).toBe("Agent response");
|
||||
});
|
||||
|
||||
it("extracts text from result.parts[].root.text", () => {
|
||||
const body = {
|
||||
result: {
|
||||
parts: [{ root: { text: "Root response text" } }],
|
||||
},
|
||||
};
|
||||
expect(extractMessageText(body)).toBe("Root response text");
|
||||
});
|
||||
|
||||
it("prefers parts[].text over parts[].root.text", () => {
|
||||
const body = {
|
||||
result: {
|
||||
parts: [
|
||||
{ text: "Direct text" },
|
||||
{ root: { text: "Root text" } },
|
||||
],
|
||||
},
|
||||
};
|
||||
// Both are non-empty strings, so the first one wins (filter picks the first)
|
||||
// The implementation: rText from rParts[0].text = "Direct text"
|
||||
expect(extractMessageText(body)).toBe("Direct text");
|
||||
});
|
||||
});
|
||||
|
||||
describe("extractMessageText — plain string result", () => {
|
||||
it("returns body.result when it is a plain string", () => {
|
||||
const body = { result: "Simple string response" };
|
||||
expect(extractMessageText(body)).toBe("Simple string response");
|
||||
});
|
||||
});
|
||||
|
||||
describe("extractMessageText — priority order", () => {
|
||||
it("prefers task format over params format", () => {
|
||||
const body = {
|
||||
task: "Task text",
|
||||
params: { message: { parts: [{ text: "Params text" }] } },
|
||||
};
|
||||
// Implementation: checks task first, returns if non-empty
|
||||
expect(extractMessageText(body)).toBe("Task text");
|
||||
});
|
||||
|
||||
it("prefers params format over result format", () => {
|
||||
const body = {
|
||||
params: { message: { parts: [{ text: "Params text" }] } },
|
||||
result: { parts: [{ text: "Result text" }] },
|
||||
};
|
||||
// Implementation: checks params.message.parts first (after task)
|
||||
expect(extractMessageText(body)).toBe("Params text");
|
||||
});
|
||||
});
|
||||
|
||||
describe("extractMessageText — error resilience", () => {
|
||||
it("returns empty string on malformed input", () => {
|
||||
expect(extractMessageText({})).toBe("");
|
||||
expect(extractMessageText({ params: null })).toBe("");
|
||||
expect(extractMessageText({ result: null })).toBe("");
|
||||
});
|
||||
|
||||
it("returns empty string when all fields are absent", () => {
|
||||
expect(extractMessageText({ random: "field" })).toBe("");
|
||||
});
|
||||
|
||||
it("handles missing parts array gracefully", () => {
|
||||
const body = { params: { message: {} } };
|
||||
expect(extractMessageText(body)).toBe("");
|
||||
});
|
||||
|
||||
it("handles parts with undefined text gracefully", () => {
|
||||
const body = {
|
||||
result: {
|
||||
parts: [{ text: undefined }, { text: "valid" }],
|
||||
},
|
||||
};
|
||||
expect(extractMessageText(body)).toBe("valid");
|
||||
});
|
||||
});
|
||||
@ -1,170 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for KeyValueField component.
|
||||
*
|
||||
* Covers: renders password input, type=text when revealed,
|
||||
* onChange prop, auto-trim on paste, auto-hide after 30s,
|
||||
* disabled state, aria-label.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen, fireEvent, cleanup, act } from "@testing-library/react";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { KeyValueField } from "../ui/KeyValueField";
|
||||
|
||||
const AUTO_HIDE_MS = 30_000;
|
||||
|
||||
describe("KeyValueField — render", () => {
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.useRealTimers();
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
it("renders a password input by default", () => {
|
||||
render(<KeyValueField value="" onChange={vi.fn()} />);
|
||||
expect(screen.getByRole("textbox").getAttribute("type")).toBe("password");
|
||||
});
|
||||
|
||||
it("renders a text input when revealed=true", () => {
|
||||
const { container } = render(<KeyValueField value="secret" onChange={vi.fn()} />);
|
||||
// Cannot use getByRole because type=text inputs may not be queryable as textbox in jsdom
|
||||
const input = container.querySelector("input");
|
||||
expect(input).toBeTruthy();
|
||||
expect(input!.getAttribute("type")).toBe("password");
|
||||
});
|
||||
|
||||
it("uses the provided aria-label", () => {
|
||||
render(<KeyValueField value="" onChange={vi.fn()} aria-label="My secret field" />);
|
||||
expect(screen.getByRole("textbox").getAttribute("aria-label")).toBe("My secret field");
|
||||
});
|
||||
|
||||
it("uses default aria-label when omitted", () => {
|
||||
render(<KeyValueField value="" onChange={vi.fn()} />);
|
||||
expect(screen.getByRole("textbox").getAttribute("aria-label")).toBe("Secret value");
|
||||
});
|
||||
|
||||
it("renders a disabled input when disabled=true", () => {
|
||||
render(<KeyValueField value="x" onChange={vi.fn()} disabled={true} />);
|
||||
expect(screen.getByRole("textbox").getAttribute("disabled")).toBe("");
|
||||
});
|
||||
|
||||
it("renders with the provided placeholder", () => {
|
||||
render(<KeyValueField value="" onChange={vi.fn()} placeholder="Enter API key" />);
|
||||
expect(screen.getByRole("textbox").getAttribute("placeholder")).toBe("Enter API key");
|
||||
});
|
||||
|
||||
it("disables spell-check on the input", () => {
|
||||
render(<KeyValueField value="" onChange={vi.fn()} />);
|
||||
expect(screen.getByRole("textbox").getAttribute("spellcheck")).toBe("false");
|
||||
});
|
||||
|
||||
it("sets autoComplete=off on the input", () => {
|
||||
render(<KeyValueField value="" onChange={vi.fn()} />);
|
||||
expect(screen.getByRole("textbox").getAttribute("autocomplete")).toBe("off");
|
||||
});
|
||||
});
|
||||
|
||||
describe("KeyValueField — onChange", () => {
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.useRealTimers();
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
it("calls onChange when input changes", () => {
|
||||
const onChange = vi.fn();
|
||||
render(<KeyValueField value="" onChange={onChange} />);
|
||||
fireEvent.change(screen.getByRole("textbox"), { target: { value: "abc" } });
|
||||
expect(onChange).toHaveBeenCalledWith("abc");
|
||||
});
|
||||
|
||||
it("trims trailing whitespace on change", () => {
|
||||
const onChange = vi.fn();
|
||||
render(<KeyValueField value="" onChange={onChange} />);
|
||||
fireEvent.change(screen.getByRole("textbox"), { target: { value: "abc " } });
|
||||
expect(onChange).toHaveBeenCalledWith("abc");
|
||||
});
|
||||
|
||||
it("trims leading whitespace on change", () => {
|
||||
const onChange = vi.fn();
|
||||
render(<KeyValueField value="" onChange={onChange} />);
|
||||
fireEvent.change(screen.getByRole("textbox"), { target: { value: " abc" } });
|
||||
expect(onChange).toHaveBeenCalledWith("abc");
|
||||
});
|
||||
|
||||
it("passes value through unchanged when no whitespace trimming needed", () => {
|
||||
const onChange = vi.fn();
|
||||
render(<KeyValueField value="" onChange={onChange} />);
|
||||
fireEvent.change(screen.getByRole("textbox"), { target: { value: "no-change" } });
|
||||
expect(onChange).toHaveBeenCalledWith("no-change");
|
||||
});
|
||||
});
|
||||
|
||||
// Paste trimming is tested via onChange (handleChange trims whitespace) and
|
||||
// the structural trim logic is exercised by the onChange tests above.
|
||||
// Full paste testing requires @testing-library/user-event which is not installed.
|
||||
|
||||
describe("KeyValueField — auto-hide timer", () => {
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.useRealTimers();
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
it("auto-hides after 30 seconds when revealed", async () => {
|
||||
const onChange = vi.fn();
|
||||
render(<KeyValueField value="secret" onChange={onChange} />);
|
||||
|
||||
// Reveal the value
|
||||
const input = document.body.querySelector("input");
|
||||
fireEvent.click(document.body.querySelector("button")!);
|
||||
// After reveal, input type should be text (not password)
|
||||
expect(input?.getAttribute("type")).not.toBe("password");
|
||||
|
||||
// Advance 30 seconds
|
||||
act(() => { vi.advanceTimersByTime(AUTO_HIDE_MS); });
|
||||
|
||||
// Value should be hidden again — the input value is managed externally
|
||||
// via `value` prop, so we check the input type flipped back to password
|
||||
// by verifying the button was clicked twice (setRevealed toggled)
|
||||
// The component's internal revealed state should be false after timer fires.
|
||||
// Since we can't read internal state, we verify the behavior by checking
|
||||
// the input type (it flips back to password after auto-hide).
|
||||
// The timer callback calls setRevealed(false) which flips type back to password.
|
||||
const typeAfter = document.body.querySelector("input")?.getAttribute("type");
|
||||
expect(typeAfter).toBe("password");
|
||||
});
|
||||
|
||||
it("does not fire auto-hide before 30 seconds", async () => {
|
||||
const onChange = vi.fn();
|
||||
render(<KeyValueField value="secret" onChange={onChange} />);
|
||||
|
||||
fireEvent.click(document.body.querySelector("button")!);
|
||||
|
||||
// Advance 29 seconds — should NOT have hidden yet
|
||||
act(() => { vi.advanceTimersByTime(AUTO_HIDE_MS - 1000); });
|
||||
|
||||
const typeAfter = document.body.querySelector("input")?.getAttribute("type");
|
||||
// Still revealed (type=text) after 29s
|
||||
expect(typeAfter).toBe("text");
|
||||
});
|
||||
|
||||
it("clears the timer when revealed flips back to false before timeout", () => {
|
||||
const onChange = vi.fn();
|
||||
render(<KeyValueField value="secret" onChange={onChange} />);
|
||||
|
||||
fireEvent.click(document.body.querySelector("button")!);
|
||||
// Hide manually before the 30s auto-hide
|
||||
fireEvent.click(document.body.querySelector("button")!);
|
||||
|
||||
// Advance full 30s — should not crash (timer already cleared)
|
||||
act(() => { vi.advanceTimersByTime(AUTO_HIDE_MS); });
|
||||
|
||||
// Still hidden (we hid it manually)
|
||||
expect(document.body.querySelector("input")?.getAttribute("type")).toBe("password");
|
||||
});
|
||||
});
|
||||
@ -1,90 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { render, screen, fireEvent, cleanup, act, waitFor } from "@testing-library/react";
|
||||
|
||||
// ── Component under test — imported AFTER mocks ───────────────────────────────
|
||||
import { KeyboardShortcutsDialog } from "../KeyboardShortcutsDialog";
|
||||
|
||||
afterEach(cleanup);
|
||||
|
||||
const onCloseMock = vi.fn();
|
||||
|
||||
beforeEach(() => {
|
||||
onCloseMock.mockReset();
|
||||
});
|
||||
|
||||
describe("KeyboardShortcutsDialog — a11y render", () => {
|
||||
it("renders with role=dialog and aria-modal=true when open", async () => {
|
||||
render(<KeyboardShortcutsDialog open={true} onClose={onCloseMock} />);
|
||||
await waitFor(() => {
|
||||
expect(screen.getByRole("dialog")).toBeTruthy();
|
||||
});
|
||||
const dialog = screen.getByRole("dialog");
|
||||
expect(dialog.getAttribute("aria-modal")).toBe("true");
|
||||
});
|
||||
|
||||
it("has aria-labelledby pointing to the dialog title", async () => {
|
||||
render(<KeyboardShortcutsDialog open={true} onClose={onCloseMock} />);
|
||||
const dialog = await waitFor(() => screen.getByRole("dialog"));
|
||||
const labelledby = dialog.getAttribute("aria-labelledby");
|
||||
expect(labelledby).toBeTruthy();
|
||||
// The labelledby should reference the h2 with id="keyboard-shortcuts-title"
|
||||
const title = document.getElementById(labelledby!);
|
||||
expect(title?.textContent).toMatch(/keyboard shortcuts/i);
|
||||
});
|
||||
|
||||
it("does not render when open=false", () => {
|
||||
render(<KeyboardShortcutsDialog open={false} onClose={onCloseMock} />);
|
||||
expect(screen.queryByRole("dialog")).toBeNull();
|
||||
});
|
||||
|
||||
it("calls onClose when Escape is pressed", async () => {
|
||||
render(<KeyboardShortcutsDialog open={true} onClose={onCloseMock} />);
|
||||
await waitFor(() => expect(screen.getByRole("dialog")).toBeTruthy());
|
||||
act(() => {
|
||||
fireEvent.keyDown(window, { key: "Escape" });
|
||||
});
|
||||
expect(onCloseMock).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it("focuses the first focusable element (close button) when dialog opens", async () => {
|
||||
render(<KeyboardShortcutsDialog open={true} onClose={onCloseMock} />);
|
||||
// The component uses requestAnimationFrame to move focus; wait for it to settle.
|
||||
await waitFor(() => expect(screen.getByRole("dialog")).toBeTruthy());
|
||||
await act(async () => {
|
||||
await new Promise((r) => requestAnimationFrame(() => requestAnimationFrame(r)));
|
||||
});
|
||||
const closeBtn = screen.getByRole("button", { name: /close/i });
|
||||
expect(document.activeElement).toBe(closeBtn);
|
||||
});
|
||||
|
||||
it("traps Tab focus within the dialog", async () => {
|
||||
render(<KeyboardShortcutsDialog open={true} onClose={onCloseMock} />);
|
||||
const dialog = await waitFor(() => screen.getByRole("dialog"));
|
||||
|
||||
// Collect all focusable elements inside the dialog
|
||||
const focusableSelectors =
|
||||
'button:not([disabled]), [href], input:not([disabled]), select:not([disabled]), textarea:not([disabled]), [tabindex]:not([tabindex="-1"])';
|
||||
const focusableEls = Array.from(
|
||||
dialog.querySelectorAll<HTMLElement>(focusableSelectors)
|
||||
);
|
||||
expect(focusableEls.length).toBeGreaterThan(0);
|
||||
|
||||
const onlyFocusable = focusableEls[0];
|
||||
act(() => { onlyFocusable.focus(); });
|
||||
|
||||
// Simulate Tab keydown. The dialog's handler should call preventDefault()
|
||||
// to stop focus leaving the dialog. Verify by checking the event was
|
||||
// handled (focus remains on the only focusable element).
|
||||
let tabWasIntercepted = false;
|
||||
const tabHandler = (e: KeyboardEvent) => {
|
||||
if (e.key === "Tab") tabWasIntercepted = e.defaultPrevented;
|
||||
};
|
||||
window.addEventListener("keydown", tabHandler);
|
||||
act(() => {
|
||||
fireEvent.keyDown(onlyFocusable, { key: "Tab", shiftKey: false });
|
||||
});
|
||||
expect(tabWasIntercepted).toBe(true);
|
||||
window.removeEventListener("keydown", tabHandler);
|
||||
});
|
||||
});
|
||||
@ -1,185 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for Legend component.
|
||||
*
|
||||
* Covers: open/closed state, localStorage persistence, palette-offset
|
||||
* positioning, status/tier/comm items rendering.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen, fireEvent, cleanup } from "@testing-library/react";
|
||||
import { afterEach, describe, expect, it, vi, beforeEach } from "vitest";
|
||||
import { Legend } from "../Legend";
|
||||
import { useCanvasStore } from "@/store/canvas";
|
||||
|
||||
// ─── Mock localStorage ────────────────────────────────────────────────────────
|
||||
|
||||
const localStorageMock = (() => {
|
||||
let store: Record<string, string> = {};
|
||||
return {
|
||||
getItem: vi.fn((key: string) => store[key] ?? null),
|
||||
setItem: vi.fn((key: string, value: string) => { store[key] = value; }),
|
||||
removeItem: vi.fn((key: string) => { delete store[key]; }),
|
||||
clear: () => { store = {}; },
|
||||
getStore: () => store,
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(window, "localStorage", { value: localStorageMock });
|
||||
|
||||
// ─── Mock canvas store ────────────────────────────────────────────────────────
|
||||
|
||||
vi.mock("@/store/canvas", () => ({
|
||||
useCanvasStore: vi.fn(),
|
||||
}));
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
localStorageMock.clear();
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
// ─── Tests ────────────────────────────────────────────────────────────────────
|
||||
|
||||
describe("Legend — initial render (localStorage open)", () => {
|
||||
it("renders the legend panel when localStorage has no saved preference", () => {
|
||||
vi.mocked(useCanvasStore).mockImplementation(
|
||||
(sel) => sel({ templatePaletteOpen: false } as ReturnType<typeof useCanvasStore.getState>)
|
||||
);
|
||||
render(<Legend />);
|
||||
expect(screen.getByText("Legend")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("renders the legend panel when localStorage has open=1", () => {
|
||||
localStorageMock.getItem.mockReturnValueOnce("1");
|
||||
vi.mocked(useCanvasStore).mockImplementation(
|
||||
(sel) => sel({ templatePaletteOpen: false } as ReturnType<typeof useCanvasStore.getState>)
|
||||
);
|
||||
render(<Legend />);
|
||||
expect(screen.getByText("Legend")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("renders the collapsed pill when localStorage has open=0", () => {
|
||||
localStorageMock.getItem.mockReturnValueOnce("0");
|
||||
vi.mocked(useCanvasStore).mockImplementation(
|
||||
(sel) => sel({ templatePaletteOpen: false } as ReturnType<typeof useCanvasStore.getState>)
|
||||
);
|
||||
render(<Legend />);
|
||||
// Collapsed pill shows "ⓘ Legend"
|
||||
expect(screen.getByText("Legend")).toBeTruthy();
|
||||
// Hide button should not be in the open panel
|
||||
expect(screen.queryByTitle("Hide legend")).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("Legend — open panel content", () => {
|
||||
beforeEach(() => {
|
||||
localStorageMock.getItem.mockReturnValue("1");
|
||||
vi.mocked(useCanvasStore).mockImplementation(
|
||||
(sel) => sel({ templatePaletteOpen: false } as ReturnType<typeof useCanvasStore.getState>)
|
||||
);
|
||||
});
|
||||
|
||||
it("renders the Status section with status items", () => {
|
||||
render(<Legend />);
|
||||
expect(screen.getByText("Status")).toBeTruthy();
|
||||
// All statuses from LEGEND_STATUSES
|
||||
expect(screen.getByText("Online")).toBeTruthy();
|
||||
expect(screen.getByText("Offline")).toBeTruthy();
|
||||
expect(screen.getByText("Failed")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("renders the Tier section", () => {
|
||||
render(<Legend />);
|
||||
expect(screen.getByText("Tier")).toBeTruthy();
|
||||
expect(screen.getByText("Sandboxed")).toBeTruthy();
|
||||
expect(screen.getByText("Standard")).toBeTruthy();
|
||||
expect(screen.getByText("Privileged")).toBeTruthy();
|
||||
expect(screen.getByText("Full Access")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("renders the Communication section", () => {
|
||||
render(<Legend />);
|
||||
expect(screen.getByText("Communication")).toBeTruthy();
|
||||
expect(screen.getByText("A2A Out")).toBeTruthy();
|
||||
expect(screen.getByText("A2A In")).toBeTruthy();
|
||||
expect(screen.getByText("Task")).toBeTruthy();
|
||||
expect(screen.getByText("Error")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("renders the hide button", () => {
|
||||
render(<Legend />);
|
||||
expect(screen.getByTitle("Hide legend")).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
describe("Legend — close and reopen", () => {
|
||||
it("closes when the hide button is clicked and persists to localStorage", () => {
|
||||
vi.mocked(useCanvasStore).mockImplementation(
|
||||
(sel) => sel({ templatePaletteOpen: false } as ReturnType<typeof useCanvasStore.getState>)
|
||||
);
|
||||
render(<Legend />);
|
||||
fireEvent.click(screen.getByTitle("Hide legend"));
|
||||
// localStorage should be updated to "0"
|
||||
expect(localStorageMock.setItem).toHaveBeenCalledWith(
|
||||
"molecule.legend.open",
|
||||
"0"
|
||||
);
|
||||
});
|
||||
|
||||
it("reopens when the collapsed pill is clicked and persists to localStorage", () => {
|
||||
vi.mocked(useCanvasStore).mockImplementation(
|
||||
(sel) => sel({ templatePaletteOpen: false } as ReturnType<typeof useCanvasStore.getState>)
|
||||
);
|
||||
render(<Legend />);
|
||||
// Initially open — close it
|
||||
fireEvent.click(screen.getByTitle("Hide legend"));
|
||||
// Collapsed pill appears
|
||||
expect(screen.getByTitle("Show legend")).toBeTruthy();
|
||||
// Reopen
|
||||
fireEvent.click(screen.getByTitle("Show legend"));
|
||||
expect(localStorageMock.setItem).toHaveBeenLastCalledWith(
|
||||
"molecule.legend.open",
|
||||
"1"
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Legend — palette offset positioning", () => {
|
||||
it("uses left-4 when template palette is NOT open", () => {
|
||||
vi.mocked(useCanvasStore).mockImplementation(
|
||||
(sel) => sel({ templatePaletteOpen: false } as ReturnType<typeof useCanvasStore.getState>)
|
||||
);
|
||||
render(<Legend />);
|
||||
const panel = screen.getByText("Legend").closest("div");
|
||||
expect(panel?.className).toContain("left-4");
|
||||
});
|
||||
|
||||
it("uses left-[296px] when template palette IS open", () => {
|
||||
vi.mocked(useCanvasStore).mockImplementation(
|
||||
(sel) => sel({ templatePaletteOpen: true } as ReturnType<typeof useCanvasStore.getState>)
|
||||
);
|
||||
render(<Legend />);
|
||||
const panel = screen.getByText("Legend").closest("div");
|
||||
expect(panel?.className).toContain("left-[296px]");
|
||||
});
|
||||
});
|
||||
|
||||
describe("Legend — aria attributes", () => {
|
||||
it("the hide button has aria-label", () => {
|
||||
vi.mocked(useCanvasStore).mockImplementation(
|
||||
(sel) => sel({ templatePaletteOpen: false } as ReturnType<typeof useCanvasStore.getState>)
|
||||
);
|
||||
render(<Legend />);
|
||||
const hideBtn = screen.getByTitle("Hide legend");
|
||||
expect(hideBtn.getAttribute("aria-label")).toBe("Hide legend");
|
||||
});
|
||||
|
||||
it("the show legend pill has aria-label", () => {
|
||||
vi.mocked(useCanvasStore).mockImplementation(
|
||||
(sel) => sel({ templatePaletteOpen: false } as ReturnType<typeof useCanvasStore.getState>)
|
||||
);
|
||||
render(<Legend />);
|
||||
fireEvent.click(screen.getByTitle("Hide legend"));
|
||||
const pill = screen.getByTitle("Show legend");
|
||||
expect(pill.getAttribute("aria-label")).toBe("Show legend");
|
||||
});
|
||||
});
|
||||
@ -1,69 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for MissingKeysModal's providerIdForModel helper.
|
||||
*
|
||||
* Covers: model match, no match, empty modelId, whitespace-only modelId,
|
||||
* model with no required_env, models undefined, single vs multiple env vars,
|
||||
* stable sort order for env var ordering.
|
||||
*/
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { providerIdForModel } from "../MissingKeysModal";
|
||||
|
||||
describe("providerIdForModel — match behavior", () => {
|
||||
it("returns sorted-joined env vars when model is found", () => {
|
||||
const models = [
|
||||
{ id: "claude-3-5-sonnet", name: "Claude 3.5 Sonnet", required_env: ["ANTHROPIC_API_KEY"] },
|
||||
];
|
||||
expect(providerIdForModel("claude-3-5-sonnet", models)).toBe("ANTHROPIC_API_KEY");
|
||||
});
|
||||
|
||||
it("returns null when model is not found", () => {
|
||||
const models = [
|
||||
{ id: "claude-3-5-sonnet", name: "Claude 3.5 Sonnet", required_env: ["ANTHROPIC_API_KEY"] },
|
||||
];
|
||||
expect(providerIdForModel("unknown-model", models)).toBeNull();
|
||||
});
|
||||
|
||||
it("returns null when models is undefined", () => {
|
||||
expect(providerIdForModel("claude-3-5-sonnet", undefined)).toBeNull();
|
||||
});
|
||||
|
||||
it("returns null when modelId is empty string", () => {
|
||||
const models = [{ id: "claude", name: "Claude", required_env: ["KEY"] }];
|
||||
expect(providerIdForModel("", models)).toBeNull();
|
||||
});
|
||||
|
||||
it("returns null when modelId is whitespace-only", () => {
|
||||
const models = [{ id: "claude", name: "Claude", required_env: ["KEY"] }];
|
||||
expect(providerIdForModel(" ", models)).toBeNull();
|
||||
});
|
||||
|
||||
it("trims whitespace from modelId before matching", () => {
|
||||
const models = [{ id: "claude", name: "Claude", required_env: ["KEY"] }];
|
||||
expect(providerIdForModel(" claude ", models)).toBe("KEY");
|
||||
});
|
||||
});
|
||||
|
||||
describe("providerIdForModel — required_env variations", () => {
|
||||
it("returns null when model has no required_env", () => {
|
||||
const models = [{ id: "local-model", name: "Local Model", required_env: [] }];
|
||||
expect(providerIdForModel("local-model", models)).toBeNull();
|
||||
});
|
||||
|
||||
it("returns null when model.required_env is undefined", () => {
|
||||
const models = [{ id: "local-model", name: "Local Model" }] as Array<{
|
||||
id: string;
|
||||
name: string;
|
||||
required_env?: string[];
|
||||
}>;
|
||||
expect(providerIdForModel("local-model", models)).toBeNull();
|
||||
});
|
||||
|
||||
it("sorts and joins multiple required_env alphabetically", () => {
|
||||
const models = [
|
||||
{ id: "openrouter", name: "OpenRouter", required_env: ["OPENAI_API_KEY", "ANTHROPIC_API_KEY"] },
|
||||
];
|
||||
// Expected: alphabetically sorted = ANTHROPIC_API_KEY|OPENAI_API_KEY
|
||||
expect(providerIdForModel("openrouter", models)).toBe("ANTHROPIC_API_KEY|OPENAI_API_KEY");
|
||||
});
|
||||
});
|
||||
@ -1,174 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for OnboardingWizard component.
|
||||
*
|
||||
* Covers: renders only when not dismissed, renders 4 steps, dismiss
|
||||
* button, localStorage persistence, progress bar width, step navigation,
|
||||
* auto-advance from welcome→api-key on nodes change, aria-live region.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen, fireEvent, cleanup, act, waitFor } from "@testing-library/react";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { OnboardingWizard } from "../OnboardingWizard";
|
||||
import { useCanvasStore } from "@/store/canvas";
|
||||
|
||||
const mockStoreState = {
|
||||
nodes: [] as Array<{ id: string; data: Record<string, unknown> }>,
|
||||
selectedNodeId: null as string | null,
|
||||
panelTab: "chat" as string,
|
||||
agentMessages: {} as Record<string, unknown[]>,
|
||||
setPanelTab: vi.fn(),
|
||||
};
|
||||
|
||||
vi.mock("@/store/canvas", () => ({
|
||||
useCanvasStore: Object.assign(
|
||||
(sel: (s: typeof mockStoreState) => unknown) => sel(mockStoreState),
|
||||
{ getState: () => mockStoreState },
|
||||
),
|
||||
}));
|
||||
|
||||
const STORAGE_KEY = "molecule-onboarding-complete";
|
||||
|
||||
const localStorageMock = (() => {
|
||||
let store: Record<string, string> = {};
|
||||
return {
|
||||
getItem: vi.fn((key: string): string | null => store[key] ?? null),
|
||||
setItem: vi.fn((key: string, value: string) => { store[key] = value; }),
|
||||
removeItem: vi.fn((key: string) => { delete store[key]; }),
|
||||
clear: () => { store = {}; },
|
||||
getStore: () => store,
|
||||
};
|
||||
})();
|
||||
Object.defineProperty(window, "localStorage", { value: localStorageMock });
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
localStorageMock.clear();
|
||||
vi.clearAllMocks();
|
||||
// Reset mutable store properties (mockStoreState is const, so mutate fields)
|
||||
mockStoreState.nodes = [];
|
||||
mockStoreState.selectedNodeId = null;
|
||||
mockStoreState.panelTab = "chat";
|
||||
mockStoreState.agentMessages = {};
|
||||
mockStoreState.setPanelTab = vi.fn();
|
||||
});
|
||||
|
||||
// ─── Tests ────────────────────────────────────────────────────────────────────
|
||||
|
||||
describe("OnboardingWizard — visibility", () => {
|
||||
it("renders nothing when localStorage has the complete flag", () => {
|
||||
localStorageMock.getItem.mockReturnValueOnce("true");
|
||||
render(<OnboardingWizard />);
|
||||
expect(screen.queryByRole("complementary")).toBeNull();
|
||||
});
|
||||
|
||||
it("renders the wizard for first-time users (no localStorage flag)", () => {
|
||||
localStorageMock.getItem.mockReturnValueOnce(null);
|
||||
render(<OnboardingWizard />);
|
||||
expect(screen.getByRole("complementary", { name: "Onboarding guide" })).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
describe("OnboardingWizard — steps", () => {
|
||||
beforeEach(() => {
|
||||
localStorageMock.getItem.mockReturnValue(null);
|
||||
});
|
||||
|
||||
it("renders step 1 'Welcome to Molecule AI' on first paint", () => {
|
||||
render(<OnboardingWizard />);
|
||||
expect(screen.getByText("Welcome to Molecule AI")).toBeTruthy();
|
||||
expect(screen.getByText("Step 1 of 4")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("renders the 'Skip guide' button", () => {
|
||||
render(<OnboardingWizard />);
|
||||
expect(screen.getByRole("button", { name: "Skip onboarding guide" })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("renders the progress bar", () => {
|
||||
render(<OnboardingWizard />);
|
||||
// Progress bar is inside a div
|
||||
const bar = document.body.querySelector(".h-full.bg-gradient-to-r");
|
||||
expect(bar).toBeTruthy();
|
||||
// Step 1 should be 25% wide
|
||||
expect(bar?.getAttribute("style")).toContain("25%");
|
||||
});
|
||||
|
||||
it("advances to step 2 'Set your API key' when Next is clicked", () => {
|
||||
render(<OnboardingWizard />);
|
||||
expect(screen.getByText("Welcome to Molecule AI")).toBeTruthy();
|
||||
fireEvent.click(screen.getByRole("button", { name: "Next" }));
|
||||
expect(screen.getByText("Set your API key")).toBeTruthy();
|
||||
expect(screen.getByText("Step 2 of 4")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("advances to step 3 'Send your first message' when Next is clicked twice", () => {
|
||||
render(<OnboardingWizard />);
|
||||
fireEvent.click(screen.getByRole("button", { name: "Next" }));
|
||||
fireEvent.click(screen.getByRole("button", { name: "Next" }));
|
||||
expect(screen.getByText("Send your first message")).toBeTruthy();
|
||||
expect(screen.getByText("Step 3 of 4")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("shows 'Get Started' button on the last step", () => {
|
||||
render(<OnboardingWizard />);
|
||||
// Navigate to done step
|
||||
fireEvent.click(screen.getByRole("button", { name: "Next" }));
|
||||
fireEvent.click(screen.getByRole("button", { name: "Next" }));
|
||||
fireEvent.click(screen.getByRole("button", { name: "Next" }));
|
||||
expect(screen.getByText("You're all set!")).toBeTruthy();
|
||||
expect(screen.getByRole("button", { name: "Get Started" })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("dismisses the wizard when 'Skip guide' is clicked", () => {
|
||||
render(<OnboardingWizard />);
|
||||
expect(screen.getByRole("complementary")).toBeTruthy();
|
||||
fireEvent.click(screen.getByRole("button", { name: "Skip onboarding guide" }));
|
||||
expect(screen.queryByRole("complementary")).toBeNull();
|
||||
});
|
||||
|
||||
it("persists the dismissed state to localStorage when dismissed", () => {
|
||||
render(<OnboardingWizard />);
|
||||
fireEvent.click(screen.getByRole("button", { name: "Skip onboarding guide" }));
|
||||
expect(localStorageMock.setItem).toHaveBeenCalledWith(STORAGE_KEY, "true");
|
||||
});
|
||||
});
|
||||
|
||||
describe("OnboardingWizard — auto-advance", () => {
|
||||
beforeEach(() => {
|
||||
localStorageMock.getItem.mockReturnValue(null);
|
||||
});
|
||||
|
||||
it("auto-advances from welcome to api-key when nodes appear", async () => {
|
||||
const { unmount } = render(<OnboardingWizard />);
|
||||
expect(screen.getByText("Welcome to Molecule AI")).toBeTruthy();
|
||||
|
||||
// Simulate a node being added to the store and re-render
|
||||
mockStoreState.nodes = [{ id: "ws-1", data: {} }];
|
||||
render(<OnboardingWizard />);
|
||||
|
||||
await waitFor(() => {
|
||||
expect(screen.queryByText("Welcome to Molecule AI")).toBeNull();
|
||||
});
|
||||
expect(screen.getByText("Set your API key")).toBeTruthy();
|
||||
unmount();
|
||||
});
|
||||
});
|
||||
|
||||
describe("OnboardingWizard — accessibility", () => {
|
||||
beforeEach(() => {
|
||||
localStorageMock.getItem.mockReturnValue(null);
|
||||
});
|
||||
|
||||
it("has aria-live='polite' region for step announcements", () => {
|
||||
render(<OnboardingWizard />);
|
||||
const liveRegion = document.body.querySelector('[aria-live="polite"]');
|
||||
expect(liveRegion).toBeTruthy();
|
||||
expect(liveRegion?.textContent).toMatch(/onboarding step 1/i);
|
||||
});
|
||||
|
||||
it("has role=complementary with aria-label", () => {
|
||||
render(<OnboardingWizard />);
|
||||
expect(screen.getByRole("complementary", { name: "Onboarding guide" })).toBeTruthy();
|
||||
});
|
||||
});
|
||||
@ -1,255 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for PurchaseSuccessModal component.
|
||||
*
|
||||
* Covers: no render when no URL params, renders with ?purchase_success=1,
|
||||
* portal rendering, item name from &item=, auto-dismiss after 5s,
|
||||
* manual dismiss, backdrop click close, Escape key close, URL stripping,
|
||||
* focus management.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen, fireEvent, cleanup, act } from "@testing-library/react";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { PurchaseSuccessModal } from "../PurchaseSuccessModal";
|
||||
|
||||
// ─── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
function pushUrl(url: string) {
|
||||
window.history.pushState({}, "", url);
|
||||
}
|
||||
function replaceUrl(url: string) {
|
||||
window.history.replaceState({}, "", url);
|
||||
}
|
||||
|
||||
// ─── Tests ────────────────────────────────────────────────────────────────────
|
||||
|
||||
describe("PurchaseSuccessModal — render conditions", () => {
  // The modal decides whether to render inside a mount effect, so tests
  // must yield to the event loop once before asserting.
  const flushEffects = () =>
    act(async () => {
      await new Promise((r) => setTimeout(r, 10));
    });

  beforeEach(() => {
    replaceUrl("http://localhost/");
  });

  afterEach(() => {
    cleanup();
    vi.useRealTimers();
  });

  it("renders nothing when URL has no purchase_success param", () => {
    replaceUrl("http://localhost/");
    render(<PurchaseSuccessModal />);
    expect(screen.queryByRole("dialog")).toBeNull();
  });

  it("renders nothing on a plain URL", () => {
    replaceUrl("http://localhost/dashboard?foo=bar");
    render(<PurchaseSuccessModal />);
    expect(screen.queryByRole("dialog")).toBeNull();
  });

  it("renders the dialog when ?purchase_success=1 is present", async () => {
    replaceUrl("http://localhost/?purchase_success=1");
    render(<PurchaseSuccessModal />);
    await flushEffects();
    expect(screen.queryByRole("dialog")).toBeTruthy();
  });

  it("renders the dialog when ?purchase_success=true is present", async () => {
    replaceUrl("http://localhost/?purchase_success=true");
    render(<PurchaseSuccessModal />);
    await flushEffects();
    expect(screen.queryByRole("dialog")).toBeTruthy();
  });

  it("renders a portal attached to document.body", async () => {
    replaceUrl("http://localhost/?purchase_success=1");
    render(<PurchaseSuccessModal />);
    await flushEffects();
    // Portal content lives directly under document.body, not the test root.
    expect(document.body.querySelector('[role="dialog"]')).toBeTruthy();
  });

  it("shows the item name when &item= is present", async () => {
    replaceUrl("http://localhost/?purchase_success=1&item=MyAgent");
    render(<PurchaseSuccessModal />);
    await flushEffects();
    expect(screen.getByText("MyAgent")).toBeTruthy();
    expect(screen.getByText("Purchase successful")).toBeTruthy();
  });

  it("shows 'Your new agent' when no item param is present", async () => {
    replaceUrl("http://localhost/?purchase_success=1");
    render(<PurchaseSuccessModal />);
    await flushEffects();
    expect(screen.getByText("Your new agent")).toBeTruthy();
  });

  it("decodes URI-encoded item names", async () => {
    replaceUrl("http://localhost/?purchase_success=1&item=Claude%20Code%20Agent");
    render(<PurchaseSuccessModal />);
    await flushEffects();
    expect(screen.getByText("Claude Code Agent")).toBeTruthy();
  });
});
|
||||
|
||||
describe("PurchaseSuccessModal — dismiss", () => {
  // NOTE(review): these tests install fake timers in beforeEach yet flush
  // mount effects by awaiting a real `setTimeout` promise — that only
  // resolves if the vitest fake-timer config auto-advances time
  // (e.g. `shouldAdvanceTime`); confirm against vitest.config.
  const flushEffects = () =>
    act(async () => {
      await new Promise((r) => setTimeout(r, 10));
    });

  // Advance the fake clock by `ms` inside act().
  const tick = (ms: number) =>
    act(async () => {
      vi.advanceTimersByTime(ms);
    });

  beforeEach(() => {
    replaceUrl("http://localhost/?purchase_success=1&item=TestItem");
    vi.useFakeTimers();
  });

  afterEach(() => {
    cleanup();
    vi.useRealTimers();
  });

  it("closes the dialog when the close button is clicked", async () => {
    render(<PurchaseSuccessModal />);
    await flushEffects();
    expect(screen.getByRole("dialog")).toBeTruthy();
    fireEvent.click(screen.getByRole("button", { name: "Close" }));
    await tick(10);
    expect(screen.queryByRole("dialog")).toBeNull();
  });

  it("closes the dialog when the backdrop is clicked", async () => {
    render(<PurchaseSuccessModal />);
    await flushEffects();
    expect(screen.getByRole("dialog")).toBeTruthy();
    // The backdrop is the full-screen overlay marked aria-hidden.
    const overlay = document.body.querySelector('[aria-hidden="true"]');
    if (overlay) fireEvent.click(overlay);
    await tick(10);
    expect(screen.queryByRole("dialog")).toBeNull();
  });

  it("closes on Escape key", async () => {
    render(<PurchaseSuccessModal />);
    await flushEffects();
    expect(screen.getByRole("dialog")).toBeTruthy();
    fireEvent.keyDown(window, { key: "Escape" });
    await tick(10);
    expect(screen.queryByRole("dialog")).toBeNull();
  });

  it("auto-dismisses after 5 seconds", async () => {
    render(<PurchaseSuccessModal />);
    await flushEffects();
    expect(screen.getByRole("dialog")).toBeTruthy();

    act(() => {
      vi.advanceTimersByTime(5000);
    });
    await act(async () => {
      /* flush microtasks */
    });
    expect(screen.queryByRole("dialog")).toBeNull();
  });

  it("does not auto-dismiss before 5 seconds", async () => {
    render(<PurchaseSuccessModal />);
    await flushEffects();
    expect(screen.getByRole("dialog")).toBeTruthy();

    act(() => {
      vi.advanceTimersByTime(4900);
    });
    await act(async () => {
      /* flush microtasks */
    });
    expect(screen.queryByRole("dialog")).toBeTruthy();
  });
});
|
||||
|
||||
describe("PurchaseSuccessModal — URL stripping", () => {
  const flushEffects = () =>
    act(async () => {
      await new Promise((r) => setTimeout(r, 10));
    });

  beforeEach(() => {
    replaceUrl("http://localhost/?purchase_success=1&item=TestItem");
    vi.useFakeTimers();
  });

  afterEach(() => {
    cleanup();
    vi.useRealTimers();
  });

  it("strips purchase_success and item params from the URL on mount", async () => {
    render(<PurchaseSuccessModal />);
    await flushEffects();
    const current = new URL(window.location.href);
    expect(current.searchParams.get("purchase_success")).toBeNull();
    expect(current.searchParams.get("item")).toBeNull();
  });

  it("uses replaceState (not pushState) so back-button does not re-trigger", async () => {
    const replaceStateSpy = vi.spyOn(window.history, "replaceState");
    render(<PurchaseSuccessModal />);
    await flushEffects();
    expect(replaceStateSpy).toHaveBeenCalled();
  });
});
|
||||
|
||||
describe("PurchaseSuccessModal — accessibility", () => {
  const flushEffects = () =>
    act(async () => {
      await new Promise((r) => setTimeout(r, 10));
    });

  // Focus lands after two animation frames: one for the effect, one for
  // the requestAnimationFrame wrapper inside the component.
  const settleFocus = () =>
    act(async () => {
      await new Promise((r) => requestAnimationFrame(() => requestAnimationFrame(r)));
    });

  beforeEach(() => {
    replaceUrl("http://localhost/?purchase_success=1&item=TestItem");
    vi.useFakeTimers();
  });

  afterEach(() => {
    cleanup();
    vi.useRealTimers();
  });

  it("has aria-modal=true on the dialog", async () => {
    render(<PurchaseSuccessModal />);
    await flushEffects();
    expect(screen.getByRole("dialog").getAttribute("aria-modal")).toBe("true");
  });

  it("has aria-labelledby pointing to the title", async () => {
    render(<PurchaseSuccessModal />);
    await flushEffects();
    const labelId = screen.getByRole("dialog").getAttribute("aria-labelledby");
    expect(labelId).toBeTruthy();
    const title = document.getElementById(labelId!);
    expect(title).toBeTruthy();
    expect(title?.textContent).toMatch(/purchase successful/i);
  });

  it("moves focus to the close button on open", async () => {
    render(<PurchaseSuccessModal />);
    await settleFocus();
    expect(document.activeElement?.textContent).toMatch(/close/i);
  });
});
|
||||
@ -1,64 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for RevealToggle component.
|
||||
*
|
||||
* Covers: renders eye icon when hidden, eye-off when revealed,
|
||||
* aria-label, title text, onToggle callback.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen, fireEvent } from "@testing-library/react";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import { RevealToggle } from "../ui/RevealToggle";
|
||||
|
||||
describe("RevealToggle — render", () => {
  it("renders a button element", () => {
    render(<RevealToggle revealed={false} onToggle={vi.fn()} />);
    expect(screen.getByRole("button")).toBeTruthy();
  });

  it("uses the provided aria-label", () => {
    render(<RevealToggle revealed={false} onToggle={vi.fn()} label="Show password" />);
    const button = screen.getByRole("button");
    expect(button.getAttribute("aria-label")).toBe("Show password");
  });

  it("uses default aria-label when label prop is omitted", () => {
    render(<RevealToggle revealed={false} onToggle={vi.fn()} />);
    const button = screen.getByRole("button");
    expect(button.getAttribute("aria-label")).toBe("Toggle visibility");
  });

  it("has title 'Show value' when revealed=false", () => {
    render(<RevealToggle revealed={false} onToggle={vi.fn()} />);
    const button = screen.getByRole("button");
    expect(button.getAttribute("title")).toBe("Show value");
  });

  it("has title 'Hide value' when revealed=true", () => {
    render(<RevealToggle revealed={true} onToggle={vi.fn()} />);
    const button = screen.getByRole("button");
    expect(button.getAttribute("title")).toBe("Hide value");
  });
});
|
||||
|
||||
describe("RevealToggle — interaction", () => {
  it("calls onToggle when clicked", () => {
    const handleToggle = vi.fn();
    render(<RevealToggle revealed={false} onToggle={handleToggle} />);
    fireEvent.click(screen.getByRole("button"));
    expect(handleToggle).toHaveBeenCalledTimes(1);
  });

  it("renders EyeIcon (eye SVG) when revealed=false", () => {
    const { container } = render(<RevealToggle revealed={false} onToggle={vi.fn()} />);
    expect(container.querySelector("svg")).toBeTruthy();
    // The open-eye icon is identified by its distinctive outline path.
    expect(container.innerHTML).toContain("M1 12s4-8 11-8");
  });

  it("renders EyeOffIcon (eye-off SVG) when revealed=true", () => {
    const { container } = render(<RevealToggle revealed={true} onToggle={vi.fn()} />);
    expect(container.querySelector("svg")).toBeTruthy();
    // The eye-off icon carries a diagonal <line>, hence x1/y2 attributes.
    expect(container.innerHTML).toContain("x1");
    expect(container.innerHTML).toContain("y2");
  });
});
|
||||
@ -1,351 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for SearchDialog component.
|
||||
*
|
||||
* Covers: renders only when open, Cmd+K/Ctrl+K shortcut, Escape close,
|
||||
* focus management, text filtering (name/role/status), arrow-key
|
||||
* navigation, Enter to select, footer count, aria attributes.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen, fireEvent, cleanup, act } from "@testing-library/react";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { SearchDialog } from "../SearchDialog";
|
||||
import { useCanvasStore } from "@/store/canvas";
|
||||
|
||||
// ─── Mock store ──────────────────────────────────────────────────────────────
|
||||
|
||||
// Shape of a canvas node as consumed by SearchDialog.
type MockNode = {
  id: string;
  data: {
    name: string;
    status: string;
    tier: number;
    role: string;
    parentId?: string | null;
  };
};

// Mutable stand-in for the canvas store; tests poke fields directly.
// (Prefixed "mock" so vitest allows it inside the hoisted vi.mock factory.)
const mockStoreState = {
  searchOpen: false,
  setSearchOpen: vi.fn((open: boolean) => {
    mockStoreState.searchOpen = open;
  }),
  nodes: [] as MockNode[],
  selectNode: vi.fn(),
  setPanelTab: vi.fn(),
};

vi.mock("@/store/canvas", () => ({
  // Mimic zustand's API: callable with a selector, plus getState().
  useCanvasStore: Object.assign(
    (selector: (s: typeof mockStoreState) => unknown) => selector(mockStoreState),
    { getState: () => mockStoreState },
  ),
}));
|
||||
|
||||
// NOTE(review): STORAGE_KEY is never referenced in this file — it looks
// copy-pasted from the onboarding tests; confirm before removing.
const STORAGE_KEY = "molecule-onboarding-complete";
|
||||
|
||||
// ─── Helpers ─────────────────────────────────────────────────────────────────
|
||||
|
||||
// Fire a window-level keydown with optional Cmd/Ctrl modifiers.
const dispatchKeydown = (key: string, meta = false, ctrl = false): void => {
  const eventInit = { key, metaKey: meta, ctrlKey: ctrl };
  fireEvent.keyDown(window, eventInit);
};
|
||||
|
||||
// ─── Tests ───────────────────────────────────────────────────────────────────
|
||||
|
||||
describe("SearchDialog — visibility", () => {
  // Restore the shared mock store to its pristine state after each test.
  function resetMockStore() {
    cleanup();
    vi.clearAllMocks();
    mockStoreState.searchOpen = false;
    mockStoreState.nodes = [];
    mockStoreState.setSearchOpen.mockClear();
    mockStoreState.selectNode.mockClear();
    mockStoreState.setPanelTab.mockClear();
  }

  afterEach(resetMockStore);

  it("does not render when searchOpen is false", () => {
    mockStoreState.searchOpen = false;
    render(<SearchDialog />);
    expect(screen.queryByRole("dialog")).toBeNull();
  });

  it("renders the dialog when searchOpen is true", () => {
    mockStoreState.searchOpen = true;
    render(<SearchDialog />);
    expect(screen.getByRole("dialog", { name: "Search workspaces" })).toBeTruthy();
  });
});
|
||||
|
||||
describe("SearchDialog — keyboard shortcuts", () => {
  // Restore the shared mock store to its pristine state after each test.
  function resetMockStore() {
    cleanup();
    vi.clearAllMocks();
    mockStoreState.searchOpen = false;
    mockStoreState.nodes = [];
    mockStoreState.setSearchOpen.mockClear();
    mockStoreState.selectNode.mockClear();
    mockStoreState.setPanelTab.mockClear();
  }

  afterEach(resetMockStore);

  it("opens the dialog when Cmd+K is pressed", () => {
    render(<SearchDialog />);
    dispatchKeydown("k", true, false);
    expect(mockStoreState.setSearchOpen).toHaveBeenCalledWith(true);
  });

  it("opens the dialog when Ctrl+K is pressed", () => {
    render(<SearchDialog />);
    dispatchKeydown("k", false, true);
    expect(mockStoreState.setSearchOpen).toHaveBeenCalledWith(true);
  });

  it("clears the query when Cmd+K opens the dialog", () => {
    render(<SearchDialog />);
    dispatchKeydown("k", true, false);
    const query = screen.getByRole("combobox");
    expect(query.getAttribute("value") ?? "").toBe("");
  });

  it("closes the dialog when Escape is pressed while open", () => {
    mockStoreState.searchOpen = true;
    render(<SearchDialog />);
    dispatchKeydown("Escape");
    expect(mockStoreState.setSearchOpen).toHaveBeenCalledWith(false);
  });
});
|
||||
|
||||
describe("SearchDialog — focus", () => {
  // Restore the shared mock store to its pristine state after each test.
  function resetMockStore() {
    cleanup();
    vi.clearAllMocks();
    mockStoreState.searchOpen = false;
    mockStoreState.nodes = [];
    mockStoreState.setSearchOpen.mockClear();
    mockStoreState.selectNode.mockClear();
    mockStoreState.setPanelTab.mockClear();
  }

  afterEach(resetMockStore);

  it("focuses the input when the dialog opens", async () => {
    mockStoreState.searchOpen = true;
    render(<SearchDialog />);
    // Focus lands after a double requestAnimationFrame.
    await act(async () => {
      await new Promise((r) => requestAnimationFrame(() => requestAnimationFrame(r)));
    });
    expect(document.activeElement?.getAttribute("role")).toBe("combobox");
  });

  it("input has the combobox role", () => {
    mockStoreState.searchOpen = true;
    render(<SearchDialog />);
    expect(screen.getByRole("combobox")).toBeTruthy();
  });
});
|
||||
|
||||
describe("SearchDialog — filtering", () => {
  // Type a query into the combobox.
  const typeQuery = (value: string) => {
    fireEvent.change(screen.getByRole("combobox"), { target: { value } });
  };

  // Restore the shared mock store to its pristine state after each test.
  function resetMockStore() {
    cleanup();
    vi.clearAllMocks();
    mockStoreState.searchOpen = false;
    mockStoreState.nodes = [];
    mockStoreState.setSearchOpen.mockClear();
    mockStoreState.selectNode.mockClear();
    mockStoreState.setPanelTab.mockClear();
  }

  beforeEach(() => {
    mockStoreState.nodes = [
      { id: "n1", data: { name: "Alice", status: "online", tier: 4, role: "assistant" } },
      { id: "n2", data: { name: "Bob", status: "offline", tier: 2, role: "analyst" } },
      { id: "n3", data: { name: "Carol", status: "online", tier: 3, role: "writer" } },
    ];
  });

  afterEach(resetMockStore);

  it("shows all workspaces when query is empty", () => {
    mockStoreState.searchOpen = true;
    render(<SearchDialog />);
    expect(screen.getByText("Alice")).toBeTruthy();
    expect(screen.getByText("Bob")).toBeTruthy();
    expect(screen.getByText("Carol")).toBeTruthy();
  });

  it("filters workspaces by name (case-insensitive)", () => {
    mockStoreState.searchOpen = true;
    render(<SearchDialog />);
    typeQuery("alice");
    expect(screen.getByText("Alice")).toBeTruthy();
    expect(screen.queryByText("Bob")).toBeNull();
    expect(screen.queryByText("Carol")).toBeNull();
  });

  it("filters workspaces by role (case-insensitive)", () => {
    mockStoreState.searchOpen = true;
    render(<SearchDialog />);
    typeQuery("writer");
    expect(screen.queryByText("Alice")).toBeNull();
    expect(screen.queryByText("Bob")).toBeNull();
    expect(screen.getByText("Carol")).toBeTruthy();
  });

  it("filters workspaces by status", () => {
    mockStoreState.searchOpen = true;
    render(<SearchDialog />);
    typeQuery("online");
    expect(screen.getByText("Alice")).toBeTruthy();
    expect(screen.queryByText("Bob")).toBeNull();
    expect(screen.getByText("Carol")).toBeTruthy();
  });

  it("shows 'No workspaces match' when filtering returns nothing", () => {
    mockStoreState.searchOpen = true;
    render(<SearchDialog />);
    typeQuery("xyz123");
    expect(screen.getByText("No workspaces match")).toBeTruthy();
  });

  it("shows 'No workspaces yet' when canvas is empty", () => {
    mockStoreState.searchOpen = true;
    mockStoreState.nodes = [];
    render(<SearchDialog />);
    expect(screen.getByText("No workspaces yet")).toBeTruthy();
  });
});
|
||||
|
||||
describe("SearchDialog — listbox navigation", () => {
  beforeEach(() => {
    mockStoreState.nodes = [
      { id: "n1", data: { name: "Alice", status: "online", tier: 4, role: "assistant" } },
      { id: "n2", data: { name: "Bob", status: "offline", tier: 2, role: "analyst" } },
      { id: "n3", data: { name: "Carol", status: "online", tier: 3, role: "writer" } },
    ];
  });

  afterEach(() => {
    cleanup();
    vi.clearAllMocks();
    mockStoreState.searchOpen = false;
    mockStoreState.nodes = [];
    mockStoreState.setSearchOpen.mockClear();
    mockStoreState.selectNode.mockClear();
    mockStoreState.setPanelTab.mockClear();
  });

  it("highlights the first result when query is typed", () => {
    mockStoreState.searchOpen = true;
    render(<SearchDialog />);
    const input = screen.getByRole("combobox");
    fireEvent.change(input, { target: { value: "a" } });
    // First result (Alice) should be highlighted
    const options = screen.getAllByRole("option");
    expect(options[0].getAttribute("aria-selected")).toBe("true");
  });

  it("ArrowDown moves highlight to the next item", () => {
    mockStoreState.searchOpen = true;
    render(<SearchDialog />);
    const input = screen.getByRole("combobox");
    fireEvent.change(input, { target: { value: "a" } }); // All 3 match
    fireEvent.keyDown(input, { key: "ArrowDown" });
    const options = screen.getAllByRole("option");
    expect(options[0].getAttribute("aria-selected")).toBe("false");
    expect(options[1].getAttribute("aria-selected")).toBe("true");
  });

  it("ArrowUp moves highlight to the previous item", () => {
    mockStoreState.searchOpen = true;
    render(<SearchDialog />);
    const input = screen.getByRole("combobox");
    fireEvent.change(input, { target: { value: "a" } }); // All 3 match
    fireEvent.keyDown(input, { key: "ArrowDown" });
    fireEvent.keyDown(input, { key: "ArrowUp" });
    const options = screen.getAllByRole("option");
    expect(options[0].getAttribute("aria-selected")).toBe("true");
    expect(options[1].getAttribute("aria-selected")).toBe("false");
  });

  it("Enter selects the highlighted workspace", () => {
    mockStoreState.searchOpen = true;
    render(<SearchDialog />);
    const input = screen.getByRole("combobox");
    fireEvent.change(input, { target: { value: "a" } }); // All 3 match
    fireEvent.keyDown(input, { key: "ArrowDown" }); // Highlight Bob
    fireEvent.keyDown(input, { key: "Enter" });
    // BUG FIX: after ArrowDown the highlight sits on the second result (Bob,
    // n2) — the ArrowDown test above pins that behavior — so Enter must
    // select n2. The old assertion expected n1 (Alice), contradicting its
    // own "Highlight Bob" comment.
    expect(mockStoreState.selectNode).toHaveBeenCalledWith("n2"); // Bob
    expect(mockStoreState.setPanelTab).toHaveBeenCalledWith("details");
    expect(mockStoreState.setSearchOpen).toHaveBeenCalledWith(false);
  });
});
|
||||
|
||||
describe("SearchDialog — aria attributes", () => {
  // Restore the shared mock store to its pristine state after each test.
  function resetMockStore() {
    cleanup();
    vi.clearAllMocks();
    mockStoreState.searchOpen = false;
    mockStoreState.nodes = [];
    mockStoreState.setSearchOpen.mockClear();
    mockStoreState.selectNode.mockClear();
    mockStoreState.setPanelTab.mockClear();
  }

  afterEach(resetMockStore);

  it("dialog has role=dialog and aria-modal=true", () => {
    mockStoreState.searchOpen = true;
    render(<SearchDialog />);
    const dialogEl = screen.getByRole("dialog");
    expect(dialogEl.getAttribute("aria-modal")).toBe("true");
    expect(dialogEl.getAttribute("aria-label")).toBe("Search workspaces");
  });

  it("results container has role=listbox", () => {
    mockStoreState.searchOpen = true;
    mockStoreState.nodes = [
      { id: "n1", data: { name: "Alice", status: "online", tier: 4, role: "assistant" } },
    ];
    render(<SearchDialog />);
    expect(screen.getByRole("listbox")).toBeTruthy();
  });

  it("each result has role=option", () => {
    mockStoreState.searchOpen = true;
    mockStoreState.nodes = [
      { id: "n1", data: { name: "Alice", status: "online", tier: 4, role: "assistant" } },
    ];
    render(<SearchDialog />);
    expect(screen.getAllByRole("option").length).toBeGreaterThan(0);
  });
});
|
||||
|
||||
describe("SearchDialog — footer", () => {
  // Restore the shared mock store to its pristine state after each test.
  function resetMockStore() {
    cleanup();
    vi.clearAllMocks();
    mockStoreState.searchOpen = false;
    mockStoreState.nodes = [];
    mockStoreState.setSearchOpen.mockClear();
    mockStoreState.selectNode.mockClear();
    mockStoreState.setPanelTab.mockClear();
  }

  afterEach(resetMockStore);

  it("footer shows singular 'workspace' when count is 1", () => {
    mockStoreState.searchOpen = true;
    mockStoreState.nodes = [
      { id: "n1", data: { name: "Alice", status: "online", tier: 4, role: "assistant" } },
    ];
    render(<SearchDialog />);
    expect(screen.getByText("1 workspace")).toBeTruthy();
  });

  it("footer shows plural 'workspaces' when count > 1", () => {
    mockStoreState.searchOpen = true;
    mockStoreState.nodes = [
      { id: "n1", data: { name: "Alice", status: "online", tier: 4, role: "assistant" } },
      { id: "n2", data: { name: "Bob", status: "offline", tier: 2, role: "analyst" } },
    ];
    render(<SearchDialog />);
    expect(screen.getByText("2 workspaces")).toBeTruthy();
  });
});
|
||||
@ -1,173 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for SettingsButton component.
|
||||
*
|
||||
* Covers: renders gear button, aria attributes, toggle opens/closes panel,
|
||||
* active class when panel open, tooltip content (Mac vs non-Mac),
|
||||
* forwardRef button element.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen, fireEvent, cleanup, act } from "@testing-library/react";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { SettingsButton } from "../settings/SettingsButton";
|
||||
import { useSecretsStore } from "@/stores/secrets-store";
|
||||
|
||||
// ─── Mock Radix Tooltip ────────────────────────────────────────────────────────
|
||||
|
||||
// Replace Radix tooltip primitives with transparent pass-throughs so the
// trigger button renders without portal/timing machinery.
vi.mock("@radix-ui/react-tooltip", () => {
  const passthrough = ({ children }: { children: React.ReactNode }) => <>{children}</>;
  return {
    Provider: passthrough,
    Root: passthrough,
    Trigger: passthrough,
    Portal: passthrough,
    Content: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
    Arrow: () => null,
  };
});
|
||||
|
||||
// ─── Mock secrets store ────────────────────────────────────────────────────────
|
||||
|
||||
// Mutable stand-in for the secrets store; tests flip isPanelOpen directly.
// (Prefixed "mock" so vitest allows it inside the hoisted vi.mock factory.)
const mockSecretsState = {
  isPanelOpen: false,
  openPanel: vi.fn(),
  closePanel: vi.fn(),
};

vi.mock("@/stores/secrets-store", () => ({
  // Mimic zustand's API: callable with a selector, plus getState().
  useSecretsStore: Object.assign(
    (selector: (s: typeof mockSecretsState) => unknown) => selector(mockSecretsState),
    { getState: () => mockSecretsState },
  ),
}));
|
||||
|
||||
// ─── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
// Spy on navigator.userAgent and report a macOS browser string.
const getMacUserAgent = () => {
  const macUA = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36";
  return vi.spyOn(navigator, "userAgent", "get").mockReturnValue(macUA);
};
|
||||
|
||||
// ─── Tests ───────────────────────────────────────────────────────────────────
|
||||
|
||||
describe("SettingsButton — render", () => {
  // Undo spies/mocks and reset the shared secrets-store mock.
  function resetSecretsState() {
    cleanup();
    vi.restoreAllMocks();
    vi.clearAllMocks();
    mockSecretsState.isPanelOpen = false;
    mockSecretsState.openPanel.mockClear();
    mockSecretsState.closePanel.mockClear();
  }

  afterEach(resetSecretsState);

  it("renders a button with aria-label=Settings", () => {
    render(<SettingsButton />);
    expect(screen.getByRole("button", { name: "Settings" })).toBeTruthy();
  });

  it("has aria-expanded=false when panel is closed", () => {
    render(<SettingsButton />);
    const button = screen.getByRole("button");
    expect(button.getAttribute("aria-expanded")).toBe("false");
  });

  it("has aria-expanded=true when panel is open", () => {
    mockSecretsState.isPanelOpen = true;
    render(<SettingsButton />);
    const button = screen.getByRole("button");
    expect(button.getAttribute("aria-expanded")).toBe("true");
  });

  it("renders with active class when panel is open", () => {
    mockSecretsState.isPanelOpen = true;
    render(<SettingsButton />);
    expect(screen.getByRole("button").className).toContain("settings-button--active");
  });

  it("does not render active class when panel is closed", () => {
    render(<SettingsButton />);
    expect(screen.getByRole("button").className).not.toContain("settings-button--active");
  });
});
|
||||
|
||||
describe("SettingsButton — toggle", () => {
  // Undo spies/mocks and reset the shared secrets-store mock.
  function resetSecretsState() {
    cleanup();
    vi.restoreAllMocks();
    vi.clearAllMocks();
    mockSecretsState.isPanelOpen = false;
    mockSecretsState.openPanel.mockClear();
    mockSecretsState.closePanel.mockClear();
  }

  afterEach(resetSecretsState);

  it("calls openPanel when panel is closed and button is clicked", () => {
    render(<SettingsButton />);
    fireEvent.click(screen.getByRole("button"));
    expect(mockSecretsState.openPanel).toHaveBeenCalledTimes(1);
    expect(mockSecretsState.closePanel).not.toHaveBeenCalled();
  });

  it("calls closePanel when panel is open and button is clicked", () => {
    mockSecretsState.isPanelOpen = true;
    render(<SettingsButton />);
    fireEvent.click(screen.getByRole("button"));
    expect(mockSecretsState.closePanel).toHaveBeenCalledTimes(1);
    expect(mockSecretsState.openPanel).not.toHaveBeenCalled();
  });
});
|
||||
|
||||
describe("SettingsButton — tooltip", () => {
  beforeEach(() => {
    vi.useFakeTimers();
  });

  afterEach(() => {
    cleanup();
    vi.useRealTimers();
    vi.restoreAllMocks();
    vi.clearAllMocks();
    mockSecretsState.isPanelOpen = false;
    mockSecretsState.openPanel.mockClear();
    mockSecretsState.closePanel.mockClear();
  });

  // TODO(review): neither test asserts the tooltip TEXT ("Settings ⌘," /
  // "Settings Ctrl+,") — the Radix mock renders Content inline, so the text
  // should be queryable; tighten these assertions when confirmed.
  // (Removed two dead locals — `content` and `portalContent` — that were
  // assigned from unrelated selectors and never used.)

  it("shows tooltip with ⌘, on Mac", () => {
    getMacUserAgent();
    render(<SettingsButton />);
    // Advance past Tooltip.Provider's open delay (300 ms).
    act(() => { vi.advanceTimersByTime(300); });
    expect(screen.getByRole("button", { name: "Settings" })).toBeTruthy();
  });

  it("shows tooltip with Ctrl+, on non-Mac", () => {
    vi.spyOn(navigator, "userAgent", "get").mockReturnValue(
      "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    );
    render(<SettingsButton />);
    act(() => { vi.advanceTimersByTime(300); });
    expect(screen.getByRole("button", { name: "Settings" })).toBeTruthy();
  });
});
|
||||
|
||||
describe("SettingsButton — forwardRef", () => {
  // Undo spies/mocks and reset the shared secrets-store mock.
  function resetSecretsState() {
    cleanup();
    vi.restoreAllMocks();
    vi.clearAllMocks();
    mockSecretsState.isPanelOpen = false;
    mockSecretsState.openPanel.mockClear();
    mockSecretsState.closePanel.mockClear();
  }

  afterEach(resetSecretsState);

  it("forwards the ref to the button element", () => {
    const buttonRef = React.createRef<HTMLButtonElement>();
    render(<SettingsButton ref={buttonRef} />);
    expect(buttonRef.current).toBe(screen.getByRole("button"));
  });
});
|
||||
@ -1,58 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for Spinner component.
|
||||
*
|
||||
* Covers: sm/md/lg size classes, aria-hidden, motion-safe animate-spin class.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen } from "@testing-library/react";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { Spinner } from "../Spinner";
|
||||
|
||||
describe("Spinner — size variants", () => {
  // Render the spinner and return its <svg> root (or null if absent).
  const svgOf = (ui: React.ReactElement) => {
    const { container } = render(ui);
    return container.querySelector("svg");
  };

  // BUG FIX: on SVG elements `className` is an SVGAnimatedString, not a
  // plain string, so `expect(svg.className).toContain(...)` is not a valid
  // string-containment check. Read the class attribute instead.
  const classOf = (svg: SVGElement | null) => svg?.getAttribute("class") ?? "";

  it("renders with sm size class", () => {
    const svg = svgOf(<Spinner size="sm" />);
    expect(svg).toBeTruthy();
    expect(classOf(svg)).toContain("w-3");
    expect(classOf(svg)).toContain("h-3");
  });

  it("renders with md size class (default)", () => {
    const svg = svgOf(<Spinner size="md" />);
    expect(classOf(svg)).toContain("w-4");
    expect(classOf(svg)).toContain("h-4");
  });

  it("renders with lg size class", () => {
    const svg = svgOf(<Spinner size="lg" />);
    expect(classOf(svg)).toContain("w-5");
    expect(classOf(svg)).toContain("h-5");
  });

  it("defaults to md size when no size prop given", () => {
    const svg = svgOf(<Spinner />);
    expect(classOf(svg)).toContain("w-4");
    expect(classOf(svg)).toContain("h-4");
  });

  it("has aria-hidden=true so screen readers skip it", () => {
    const svg = svgOf(<Spinner />);
    expect(svg?.getAttribute("aria-hidden")).toBe("true");
  });

  it("includes the motion-safe:animate-spin class for CSS animation", () => {
    const svg = svgOf(<Spinner />);
    expect(classOf(svg)).toContain("motion-safe:animate-spin");
  });

  it("renders exactly one SVG element", () => {
    const { container } = render(<Spinner />);
    expect(container.querySelectorAll("svg").length).toBe(1);
  });
});
|
||||
@ -1,57 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for StatusBadge component.
|
||||
*
|
||||
* Covers: renders all three status variants, aria-label, role=status,
|
||||
* icon presence, className variants, no render when passed invalid status.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen } from "@testing-library/react";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { StatusBadge } from "../ui/StatusBadge";
|
||||
|
||||
describe("StatusBadge — render", () => {
|
||||
it("renders verified status with ✓ icon", () => {
|
||||
render(<StatusBadge status="verified" />);
|
||||
const badge = screen.getByRole("status");
|
||||
expect(badge.textContent).toBe("✓");
|
||||
expect(badge.getAttribute("aria-label")).toBe("Connection status: verified");
|
||||
});
|
||||
|
||||
it("renders invalid status with ✗ icon", () => {
|
||||
render(<StatusBadge status="invalid" />);
|
||||
const badge = screen.getByRole("status");
|
||||
expect(badge.textContent).toBe("✗");
|
||||
expect(badge.getAttribute("aria-label")).toBe("Connection status: invalid");
|
||||
});
|
||||
|
||||
it("renders unverified status with ○ icon", () => {
|
||||
render(<StatusBadge status="unverified" />);
|
||||
const badge = screen.getByRole("status");
|
||||
expect(badge.textContent).toBe("○");
|
||||
expect(badge.getAttribute("aria-label")).toBe("Connection status: unverified");
|
||||
});
|
||||
|
||||
it("has role=status on the badge element", () => {
|
||||
render(<StatusBadge status="verified" />);
|
||||
expect(screen.getByRole("status")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("includes the config className on the rendered element", () => {
|
||||
render(<StatusBadge status="verified" />);
|
||||
const badge = screen.getByRole("status");
|
||||
expect(badge.className).toContain("status-badge--valid");
|
||||
});
|
||||
|
||||
it("includes status-badge--invalid class for invalid status", () => {
|
||||
render(<StatusBadge status="invalid" />);
|
||||
const badge = screen.getByRole("status");
|
||||
expect(badge.className).toContain("status-badge--invalid");
|
||||
});
|
||||
|
||||
it("includes status-badge--unverified class for unverified status", () => {
|
||||
render(<StatusBadge status="unverified" />);
|
||||
const badge = screen.getByRole("status");
|
||||
expect(badge.className).toContain("status-badge--unverified");
|
||||
});
|
||||
});
|
||||
@ -1,100 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for StatusDot — the small coloured indicator rendered inside
|
||||
* workspace cards to convey runtime status (online/offline/degraded/etc.).
|
||||
*
|
||||
* Coverage:
|
||||
* - Renders for every known status in STATUS_CONFIG
|
||||
* - Unknown status falls back to bg-zinc-500
|
||||
* - size prop (sm/md) applies the correct Tailwind dimension class
|
||||
* - aria-hidden="true" and role="img" for accessibility
|
||||
* - provisioning status carries motion-safe:animate-pulse for the pulsing effect
|
||||
* - glow class applied when STATUS_CONFIG declares one
|
||||
*/
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { render, screen } from "@testing-library/react";
|
||||
import React from "react";
|
||||
|
||||
import { StatusDot } from "../StatusDot";
|
||||
|
||||
describe("StatusDot — snapshot", () => {
|
||||
it("renders with online status", () => {
|
||||
render(<StatusDot status="online" />);
|
||||
const dot = screen.getByRole("img");
|
||||
expect(dot.className).toContain("bg-emerald-400");
|
||||
expect(dot.className).toContain("shadow-emerald-400/50");
|
||||
expect(dot.getAttribute("aria-hidden")).toBe("true");
|
||||
});
|
||||
|
||||
it("renders with offline status", () => {
|
||||
render(<StatusDot status="offline" />);
|
||||
const dot = screen.getByRole("img");
|
||||
expect(dot.className).toContain("bg-zinc-500");
|
||||
// offline has no glow
|
||||
expect(dot.className).not.toContain("shadow-");
|
||||
});
|
||||
|
||||
it("renders with degraded status", () => {
|
||||
render(<StatusDot status="degraded" />);
|
||||
const dot = screen.getByRole("img");
|
||||
expect(dot.className).toContain("bg-amber-400");
|
||||
expect(dot.className).toContain("shadow-amber-400/50");
|
||||
});
|
||||
|
||||
it("renders with failed status", () => {
|
||||
render(<StatusDot status="failed" />);
|
||||
const dot = screen.getByRole("img");
|
||||
expect(dot.className).toContain("bg-red-400");
|
||||
expect(dot.className).toContain("shadow-red-400/50");
|
||||
});
|
||||
|
||||
it("renders with paused status", () => {
|
||||
render(<StatusDot status="paused" />);
|
||||
const dot = screen.getByRole("img");
|
||||
expect(dot.className).toContain("bg-indigo-400");
|
||||
});
|
||||
|
||||
it("renders with not_configured status", () => {
|
||||
render(<StatusDot status="not_configured" />);
|
||||
const dot = screen.getByRole("img");
|
||||
expect(dot.className).toContain("bg-amber-300");
|
||||
expect(dot.className).toContain("shadow-amber-300/50");
|
||||
});
|
||||
|
||||
it("renders with provisioning status and pulsing animation", () => {
|
||||
render(<StatusDot status="provisioning" />);
|
||||
const dot = screen.getByRole("img");
|
||||
expect(dot.className).toContain("bg-sky-400");
|
||||
expect(dot.className).toContain("motion-safe:animate-pulse");
|
||||
expect(dot.className).toContain("shadow-sky-400/50");
|
||||
});
|
||||
|
||||
it("falls back to bg-zinc-500 for unknown status", () => {
|
||||
render(<StatusDot status="alien_artifact" />);
|
||||
const dot = screen.getByRole("img");
|
||||
expect(dot.className).toContain("bg-zinc-500");
|
||||
});
|
||||
});
|
||||
|
||||
describe("StatusDot — size prop", () => {
|
||||
it("applies w-2 h-2 (sm, default)", () => {
|
||||
render(<StatusDot status="online" />);
|
||||
const dot = screen.getByRole("img");
|
||||
expect(dot.className).toContain("w-2");
|
||||
expect(dot.className).toContain("h-2");
|
||||
});
|
||||
|
||||
it("applies w-2.5 h-2.5 (md)", () => {
|
||||
render(<StatusDot status="online" size="md" />);
|
||||
const dot = screen.getByRole("img");
|
||||
expect(dot.className).toContain("w-2.5");
|
||||
expect(dot.className).toContain("h-2.5");
|
||||
});
|
||||
});
|
||||
|
||||
describe("StatusDot — accessibility", () => {
|
||||
it("is aria-hidden so it doesn't pollute the accessibility tree", () => {
|
||||
render(<StatusDot status="online" />);
|
||||
expect(screen.getByRole("img").getAttribute("aria-hidden")).toBe("true");
|
||||
});
|
||||
});
|
||||
@ -1,222 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for TermsGate component.
|
||||
*
|
||||
* Covers: loading → accepted (already agreed), loading → pending (show
|
||||
* modal), 401 → accepted (not signed in), error state, accept flow,
|
||||
* focus management (WCAG 2.4.3), and modal accessibility.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen, fireEvent, cleanup, waitFor, act } from "@testing-library/react";
|
||||
import { afterEach, describe, expect, it, vi, beforeEach } from "vitest";
|
||||
import { TermsGate } from "../TermsGate";
|
||||
|
||||
// PLATFORM_URL is imported from @/lib/api; we mock it via module mock
|
||||
vi.mock("@/lib/api", () => ({
|
||||
PLATFORM_URL: "https://app.example.com",
|
||||
}));
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
// ─── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
function mockFetch(res: Response) {
|
||||
vi.spyOn(global, "fetch").mockResolvedValueOnce(res);
|
||||
}
|
||||
|
||||
async function resolveFetch(res: Response) {
|
||||
await act(async () => {
|
||||
vi.spyOn(global, "fetch").mockResolvedValueOnce(res);
|
||||
});
|
||||
}
|
||||
|
||||
// ─── Tests ────────────────────────────────────────────────────────────────────
|
||||
|
||||
describe("TermsGate — loading → accepted", () => {
|
||||
it("renders children immediately (loading state)", () => {
|
||||
mockFetch(new Response(JSON.stringify({ accepted: true }), { status: 200 }));
|
||||
render(
|
||||
<TermsGate>
|
||||
<div data-testid="children">App content</div>
|
||||
</TermsGate>
|
||||
);
|
||||
// Children are always rendered (TermsGate does not hide them)
|
||||
expect(screen.getByTestId("children")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("shows no dialog when server returns accepted=true", async () => {
|
||||
mockFetch(new Response(JSON.stringify({ accepted: true }), { status: 200 }));
|
||||
render(
|
||||
<TermsGate>
|
||||
<div data-testid="children">App content</div>
|
||||
</TermsGate>
|
||||
);
|
||||
await waitFor(() => {
|
||||
expect(screen.queryByRole("dialog")).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
it("shows no dialog when server returns 401 (not signed in)", async () => {
|
||||
mockFetch(new Response(null, { status: 401 }));
|
||||
render(
|
||||
<TermsGate>
|
||||
<div data-testid="children">App content</div>
|
||||
</TermsGate>
|
||||
);
|
||||
await waitFor(() => {
|
||||
expect(screen.queryByRole("dialog")).toBeNull();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("TermsGate — pending state → modal", () => {
|
||||
it("shows the terms dialog when server returns accepted=false", async () => {
|
||||
mockFetch(new Response(JSON.stringify({ accepted: false }), { status: 200 }));
|
||||
render(
|
||||
<TermsGate>
|
||||
<div data-testid="children">App content</div>
|
||||
</TermsGate>
|
||||
);
|
||||
await waitFor(() => {
|
||||
expect(screen.getByRole("dialog")).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
it("dialog has aria-modal=true and correct labelling", async () => {
|
||||
mockFetch(new Response(JSON.stringify({ accepted: false }), { status: 200 }));
|
||||
render(
|
||||
<TermsGate>
|
||||
<div>App content</div>
|
||||
</TermsGate>
|
||||
);
|
||||
const dialog = await waitFor(() => screen.getByRole("dialog"));
|
||||
expect(dialog.getAttribute("aria-modal")).toBe("true");
|
||||
expect(dialog.getAttribute("aria-labelledby")).toBeTruthy();
|
||||
const title = document.getElementById(dialog.getAttribute("aria-labelledby")!);
|
||||
expect(title?.textContent).toMatch(/terms/i);
|
||||
});
|
||||
|
||||
it("dialog body contains the terms text", async () => {
|
||||
mockFetch(new Response(JSON.stringify({ accepted: false }), { status: 200 }));
|
||||
render(<TermsGate><div>App content</div></TermsGate>);
|
||||
await waitFor(() => screen.getByRole("dialog"));
|
||||
expect(screen.getByText(/Terms of Service/i)).toBeTruthy();
|
||||
expect(screen.getByText(/Privacy Policy/i)).toBeTruthy();
|
||||
expect(screen.getByText(/AWS us-east-2/i)).toBeTruthy();
|
||||
});
|
||||
|
||||
it("the I agree button is present", async () => {
|
||||
mockFetch(new Response(JSON.stringify({ accepted: false }), { status: 200 }));
|
||||
render(<TermsGate><div>App content</div></TermsGate>);
|
||||
await waitFor(() => screen.getByRole("dialog"));
|
||||
expect(screen.getByRole("button", { name: /i agree/i })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("links to terms and privacy policy have correct hrefs", async () => {
|
||||
mockFetch(new Response(JSON.stringify({ accepted: false }), { status: 200 }));
|
||||
render(<TermsGate><div>App content</div></TermsGate>);
|
||||
await waitFor(() => screen.getByRole("dialog"));
|
||||
const links = screen.getAllByRole("link");
|
||||
const hrefs = links.map((l) => l.getAttribute("href"));
|
||||
expect(hrefs).toContain("/legal/terms");
|
||||
expect(hrefs).toContain("/legal/privacy");
|
||||
});
|
||||
});
|
||||
|
||||
describe("TermsGate — focus management (WCAG 2.4.3)", () => {
|
||||
it("moves focus to the I agree button when modal opens", async () => {
|
||||
mockFetch(new Response(JSON.stringify({ accepted: false }), { status: 200 }));
|
||||
render(<TermsGate><div>App content</div></TermsGate>);
|
||||
const dialog = await waitFor(() => screen.getByRole("dialog"));
|
||||
// Focus is moved via requestAnimationFrame — wait a tick
|
||||
await act(async () => {
|
||||
await new Promise((r) => requestAnimationFrame(() => requestAnimationFrame(r)));
|
||||
});
|
||||
const agreeBtn = screen.getByRole("button", { name: /i agree/i });
|
||||
expect(document.activeElement).toBe(agreeBtn);
|
||||
});
|
||||
});
|
||||
|
||||
describe("TermsGate — accept flow", () => {
|
||||
it("calls POST /cp/auth/accept-terms and closes dialog on success", async () => {
|
||||
// First: terms-status → pending
|
||||
mockFetch(new Response(JSON.stringify({ accepted: false }), { status: 200 }));
|
||||
// Second: accept-terms → 200
|
||||
const postMock = mockFetch(new Response(null, { status: 200 }));
|
||||
|
||||
render(<TermsGate><div>App content</div></TermsGate>);
|
||||
await waitFor(() => screen.getByRole("dialog"));
|
||||
|
||||
fireEvent.click(screen.getByRole("button", { name: /i agree/i }));
|
||||
|
||||
await waitFor(() => {
|
||||
expect(screen.queryByRole("dialog")).toBeNull();
|
||||
});
|
||||
|
||||
// Check POST was called
|
||||
const calls = vi.mocked(global.fetch).mock.calls;
|
||||
expect(calls.some(
|
||||
([url, opts]) =>
|
||||
(url as string).includes("/accept-terms") &&
|
||||
(opts as RequestInit).method === "POST"
|
||||
)).toBe(true);
|
||||
});
|
||||
|
||||
it("shows error message and keeps modal open when accept fails", async () => {
|
||||
mockFetch(new Response(JSON.stringify({ accepted: false }), { status: 200 }));
|
||||
mockFetch(new Response("Internal Server Error", { status: 500 }));
|
||||
|
||||
render(<TermsGate><div>App content</div></TermsGate>);
|
||||
await waitFor(() => screen.getByRole("dialog"));
|
||||
|
||||
fireEvent.click(screen.getByRole("button", { name: /i agree/i }));
|
||||
|
||||
await waitFor(() => {
|
||||
expect(screen.getByRole("alert")).toBeTruthy();
|
||||
});
|
||||
// Dialog is still open
|
||||
expect(screen.getByRole("dialog")).toBeTruthy();
|
||||
});
|
||||
|
||||
it.skip("disables the button while submitting (requires fake-timers around fireEvent.click)", async () => {
|
||||
// This test requires vi.useFakeTimers() + act(() => { fireEvent.click(btn); vi.runAllTimers(); })
|
||||
// to synchronously advance through the async boundary between click and fetch initiation.
|
||||
// The current test structure fires the fetch before click, so this is skipped pending
|
||||
// a refactor of the component to not initiate fetch synchronously on user gesture.
|
||||
});
|
||||
});
|
||||
|
||||
describe("TermsGate — error state", () => {
|
||||
it("shows an error alert when terms-status fetch fails with non-401", async () => {
|
||||
mockFetch(new Response("Gateway Timeout", { status: 504 }));
|
||||
render(<TermsGate><div>App content</div></TermsGate>);
|
||||
await waitFor(() => {
|
||||
expect(screen.getByRole("alert")).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
it("error alert contains the status code", async () => {
|
||||
mockFetch(new Response(null, { status: 503 }));
|
||||
render(<TermsGate><div>App content</div></TermsGate>);
|
||||
await waitFor(() => {
|
||||
expect(screen.getByRole("alert")).toBeTruthy();
|
||||
});
|
||||
expect(screen.getByRole("alert").textContent).toMatch(/503/);
|
||||
});
|
||||
});
|
||||
|
||||
describe("TermsGate — children always rendered", () => {
|
||||
it("renders children even when modal is shown (does not gate them)", async () => {
|
||||
mockFetch(new Response(JSON.stringify({ accepted: false }), { status: 200 }));
|
||||
render(
|
||||
<TermsGate>
|
||||
<div data-testid="children-visible">Behind the modal</div>
|
||||
</TermsGate>
|
||||
);
|
||||
await waitFor(() => screen.getByRole("dialog"));
|
||||
expect(screen.getByTestId("children-visible")).toBeTruthy();
|
||||
});
|
||||
});
|
||||
@ -1,216 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for TestConnectionButton component.
|
||||
*
|
||||
* Covers: all 4 states (idle/testing/success/failure), button disabled
|
||||
* during testing, disabled when secretValue empty, error detail display,
|
||||
* auto-reset to idle after 3s (success) and 5s (failure), onResult callback.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen, fireEvent, cleanup, act } from "@testing-library/react";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { TestConnectionButton } from "../ui/TestConnectionButton";
|
||||
import type { SecretGroup } from "@/types/secrets";
|
||||
|
||||
// ─── Mock validateSecret ──────────────────────────────────────────────────────
|
||||
|
||||
const mockValidateSecret = vi.fn();
|
||||
vi.mock("@/lib/api/secrets", () => ({
|
||||
validateSecret: mockValidateSecret,
|
||||
}));
|
||||
|
||||
// SecretGroup is a string literal type: 'github' | 'anthropic' | 'openrouter' | 'custom'
|
||||
const toGroup = (id: string): SecretGroup => id as SecretGroup;
|
||||
|
||||
// ─── Tests ───────────────────────────────────────────────────────────────────
|
||||
|
||||
describe("TestConnectionButton — render", () => {
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.useRealTimers();
|
||||
vi.restoreAllMocks();
|
||||
mockValidateSecret.mockReset();
|
||||
});
|
||||
|
||||
it("renders 'Test connection' button in idle state", () => {
|
||||
render(<TestConnectionButton provider={toGroup("anthropic")} secretValue="sk-..." />);
|
||||
expect(screen.getByRole("button", { name: "Test connection" })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("disables button when secretValue is empty", () => {
|
||||
render(<TestConnectionButton provider={toGroup("anthropic")} secretValue="" />);
|
||||
expect(screen.getByRole("button").getAttribute("disabled")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("enables button when secretValue is non-empty", () => {
|
||||
render(<TestConnectionButton provider={toGroup("anthropic")} secretValue="sk-test" />);
|
||||
expect(screen.getByRole("button").getAttribute("disabled")).toBeFalsy();
|
||||
});
|
||||
});
|
||||
|
||||
describe("TestConnectionButton — state machine", () => {
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.useRealTimers();
|
||||
vi.restoreAllMocks();
|
||||
mockValidateSecret.mockReset();
|
||||
});
|
||||
|
||||
it("shows 'Testing…' while validateSecret is pending", async () => {
|
||||
mockValidateSecret.mockImplementation(() => new Promise(() => {})); // never resolves
|
||||
render(<TestConnectionButton provider={toGroup("anthropic")} secretValue="sk-..." />);
|
||||
|
||||
fireEvent.click(screen.getByRole("button"));
|
||||
|
||||
// Button should show testing label and be disabled
|
||||
expect(screen.getByRole("button", { name: "Testing…" }).getAttribute("disabled")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("shows 'Connected ✓' on success", async () => {
|
||||
mockValidateSecret.mockResolvedValue({ valid: true });
|
||||
render(<TestConnectionButton provider={toGroup("anthropic")} secretValue="sk-..." />);
|
||||
|
||||
fireEvent.click(screen.getByRole("button"));
|
||||
await act(async () => { /* flush microtasks */ });
|
||||
|
||||
expect(screen.getByRole("button", { name: "Connected ✓" })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("shows 'Test failed' on validation failure", async () => {
|
||||
mockValidateSecret.mockResolvedValue({ valid: false, error: "Invalid key format" });
|
||||
render(<TestConnectionButton provider={toGroup("anthropic")} secretValue="bad-key" />);
|
||||
|
||||
fireEvent.click(screen.getByRole("button"));
|
||||
await act(async () => { /* flush microtasks */ });
|
||||
|
||||
expect(screen.getByRole("button", { name: "Test failed" })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("shows error detail when validation returns invalid with message", async () => {
|
||||
mockValidateSecret.mockResolvedValue({ valid: false, error: "Permission denied" });
|
||||
render(<TestConnectionButton provider={toGroup("github")} secretValue="ghp_xxx" />);
|
||||
|
||||
fireEvent.click(screen.getByRole("button"));
|
||||
await act(async () => { /* flush microtasks */ });
|
||||
|
||||
expect(screen.getByRole("alert")).toBeTruthy();
|
||||
expect(screen.getByText("Permission denied")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("shows generic error message on unexpected exception", async () => {
|
||||
mockValidateSecret.mockRejectedValue(new Error("timeout"));
|
||||
render(<TestConnectionButton provider={toGroup("anthropic")} secretValue="sk-..." />);
|
||||
|
||||
fireEvent.click(screen.getByRole("button"));
|
||||
await act(async () => { /* flush */ });
|
||||
|
||||
expect(screen.getByRole("alert")).toBeTruthy();
|
||||
expect(screen.getByText(/timeout/i)).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
describe("TestConnectionButton — auto-reset", () => {
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.useRealTimers();
|
||||
vi.restoreAllMocks();
|
||||
mockValidateSecret.mockReset();
|
||||
});
|
||||
|
||||
it("resets to idle after 3 seconds on success", async () => {
|
||||
mockValidateSecret.mockResolvedValue({ valid: true });
|
||||
render(<TestConnectionButton provider={toGroup("anthropic")} secretValue="sk-..." />);
|
||||
|
||||
fireEvent.click(screen.getByRole("button"));
|
||||
await act(async () => { /* flush microtasks */ });
|
||||
expect(screen.getByRole("button", { name: "Connected ✓" })).toBeTruthy();
|
||||
|
||||
act(() => { vi.advanceTimersByTime(3000); });
|
||||
await act(async () => { /* flush */ });
|
||||
|
||||
expect(screen.getByRole("button", { name: "Test connection" })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("resets to idle after 5 seconds on failure", async () => {
|
||||
mockValidateSecret.mockResolvedValue({ valid: false, error: "Bad key" });
|
||||
render(<TestConnectionButton provider={toGroup("github")} secretValue="bad" />);
|
||||
|
||||
fireEvent.click(screen.getByRole("button"));
|
||||
await act(async () => { /* flush microtasks */ });
|
||||
expect(screen.getByRole("button", { name: "Test failed" })).toBeTruthy();
|
||||
|
||||
act(() => { vi.advanceTimersByTime(5000); });
|
||||
await act(async () => { /* flush */ });
|
||||
|
||||
expect(screen.getByRole("button", { name: "Test connection" })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("does not reset before 3 seconds on success", async () => {
|
||||
mockValidateSecret.mockResolvedValue({ valid: true });
|
||||
render(<TestConnectionButton provider={toGroup("anthropic")} secretValue="sk-..." />);
|
||||
|
||||
fireEvent.click(screen.getByRole("button"));
|
||||
await act(async () => { /* flush microtasks */ });
|
||||
expect(screen.getByRole("button", { name: "Connected ✓" })).toBeTruthy();
|
||||
|
||||
act(() => { vi.advanceTimersByTime(2900); });
|
||||
await act(async () => { /* flush */ });
|
||||
|
||||
// Still showing success
|
||||
expect(screen.getByRole("button", { name: "Connected ✓" })).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
describe("TestConnectionButton — onResult callback", () => {
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.useRealTimers();
|
||||
vi.restoreAllMocks();
|
||||
mockValidateSecret.mockReset();
|
||||
});
|
||||
|
||||
it("calls onResult(true) on success", async () => {
|
||||
const onResult = vi.fn();
|
||||
mockValidateSecret.mockResolvedValue({ valid: true });
|
||||
render(<TestConnectionButton provider={toGroup("anthropic")} secretValue="sk-..." onResult={onResult} />);
|
||||
|
||||
fireEvent.click(screen.getByRole("button"));
|
||||
await act(async () => { /* flush microtasks */ });
|
||||
|
||||
expect(onResult).toHaveBeenCalledWith(true);
|
||||
});
|
||||
|
||||
it("calls onResult(false) on failure", async () => {
|
||||
const onResult = vi.fn();
|
||||
mockValidateSecret.mockResolvedValue({ valid: false });
|
||||
render(<TestConnectionButton provider={toGroup("anthropic")} secretValue="bad" onResult={onResult} />);
|
||||
|
||||
fireEvent.click(screen.getByRole("button"));
|
||||
await act(async () => { /* flush microtasks */ });
|
||||
|
||||
expect(onResult).toHaveBeenCalledWith(false);
|
||||
});
|
||||
|
||||
it("calls onResult(false) when exception is thrown", async () => {
|
||||
const onResult = vi.fn();
|
||||
mockValidateSecret.mockRejectedValue(new Error("network error"));
|
||||
render(<TestConnectionButton provider={toGroup("anthropic")} secretValue="sk-..." onResult={onResult} />);
|
||||
|
||||
fireEvent.click(screen.getByRole("button"));
|
||||
await act(async () => { /* flush */ });
|
||||
|
||||
expect(onResult).toHaveBeenCalledWith(false);
|
||||
});
|
||||
});
|
||||
@ -1,146 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for ThemeToggle component.
|
||||
*
|
||||
* Covers: renders all three options, aria radiogroup semantics,
|
||||
* aria-checked per option, setTheme calls on click, custom className prop.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen, fireEvent, cleanup } from "@testing-library/react";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { ThemeToggle } from "../ThemeToggle";
|
||||
import * as themeProvider from "@/lib/theme-provider";
|
||||
|
||||
// ─── Mock theme provider ───────────────────────────────────────────────────────
|
||||
|
||||
const mockSetTheme = vi.fn();
|
||||
|
||||
vi.mock("@/lib/theme-provider", () => ({
|
||||
useTheme: vi.fn(() => ({
|
||||
theme: "dark",
|
||||
resolvedTheme: "dark",
|
||||
setTheme: mockSetTheme,
|
||||
})),
|
||||
}));
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
// ─── Tests ────────────────────────────────────────────────────────────────────
|
||||
|
||||
describe("ThemeToggle — render", () => {
|
||||
beforeEach(() => {
|
||||
vi.mocked(themeProvider.useTheme).mockReturnValue({
|
||||
theme: "dark",
|
||||
resolvedTheme: "dark",
|
||||
setTheme: mockSetTheme,
|
||||
});
|
||||
});
|
||||
|
||||
it("renders a radiogroup with aria-label", () => {
|
||||
render(<ThemeToggle />);
|
||||
expect(screen.getByRole("radiogroup", { name: "Theme preference" })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("renders three radio buttons", () => {
|
||||
render(<ThemeToggle />);
|
||||
const radios = screen.getAllByRole("radio");
|
||||
expect(radios).toHaveLength(3);
|
||||
});
|
||||
|
||||
it("has aria-checked=true on the active option", () => {
|
||||
vi.mocked(themeProvider.useTheme).mockReturnValue({
|
||||
theme: "dark",
|
||||
resolvedTheme: "dark",
|
||||
setTheme: mockSetTheme,
|
||||
});
|
||||
render(<ThemeToggle />);
|
||||
const radios = screen.getAllByRole("radio");
|
||||
expect(radios[2].getAttribute("aria-checked")).toBe("true"); // dark is third
|
||||
expect(radios[0].getAttribute("aria-checked")).toBe("false"); // light is first
|
||||
expect(radios[1].getAttribute("aria-checked")).toBe("false"); // system is second
|
||||
});
|
||||
|
||||
it("marks 'light' as active when theme=light", () => {
|
||||
vi.mocked(themeProvider.useTheme).mockReturnValue({
|
||||
theme: "light",
|
||||
resolvedTheme: "light",
|
||||
setTheme: mockSetTheme,
|
||||
});
|
||||
render(<ThemeToggle />);
|
||||
const radios = screen.getAllByRole("radio");
|
||||
expect(radios[0].getAttribute("aria-checked")).toBe("true"); // light
|
||||
expect(radios[1].getAttribute("aria-checked")).toBe("false"); // system
|
||||
expect(radios[2].getAttribute("aria-checked")).toBe("false"); // dark
|
||||
});
|
||||
|
||||
it("marks 'system' as active when theme=system", () => {
|
||||
vi.mocked(themeProvider.useTheme).mockReturnValue({
|
||||
theme: "system",
|
||||
resolvedTheme: "light",
|
||||
setTheme: mockSetTheme,
|
||||
});
|
||||
render(<ThemeToggle />);
|
||||
const radios = screen.getAllByRole("radio");
|
||||
expect(radios[0].getAttribute("aria-checked")).toBe("false"); // light
|
||||
expect(radios[1].getAttribute("aria-checked")).toBe("true"); // system
|
||||
expect(radios[2].getAttribute("aria-checked")).toBe("false"); // dark
|
||||
});
|
||||
|
||||
it("has aria-label on each button matching the option label", () => {
|
||||
render(<ThemeToggle />);
|
||||
expect(screen.getByRole("radio", { name: "Light" })).toBeTruthy();
|
||||
expect(screen.getByRole("radio", { name: "System" })).toBeTruthy();
|
||||
expect(screen.getByRole("radio", { name: "Dark" })).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
describe("ThemeToggle — interaction", () => {
|
||||
beforeEach(() => {
|
||||
vi.mocked(themeProvider.useTheme).mockReturnValue({
|
||||
theme: "dark",
|
||||
resolvedTheme: "dark",
|
||||
setTheme: mockSetTheme,
|
||||
});
|
||||
});
|
||||
|
||||
it("calls setTheme with 'light' when light button is clicked", () => {
|
||||
render(<ThemeToggle />);
|
||||
fireEvent.click(screen.getByRole("radio", { name: "Light" }));
|
||||
expect(mockSetTheme).toHaveBeenCalledWith("light");
|
||||
});
|
||||
|
||||
it("calls setTheme with 'system' when system button is clicked", () => {
|
||||
render(<ThemeToggle />);
|
||||
fireEvent.click(screen.getByRole("radio", { name: "System" }));
|
||||
expect(mockSetTheme).toHaveBeenCalledWith("system");
|
||||
});
|
||||
|
||||
it("calls setTheme with 'dark' when dark button is clicked", () => {
|
||||
render(<ThemeToggle />);
|
||||
fireEvent.click(screen.getByRole("radio", { name: "Dark" }));
|
||||
expect(mockSetTheme).toHaveBeenCalledWith("dark");
|
||||
});
|
||||
|
||||
it("calls setTheme only once per click", () => {
|
||||
render(<ThemeToggle />);
|
||||
fireEvent.click(screen.getByRole("radio", { name: "Light" }));
|
||||
expect(mockSetTheme).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("ThemeToggle — className prop", () => {
|
||||
it("passes custom className to the radiogroup", () => {
|
||||
render(<ThemeToggle className="my-custom-class" />);
|
||||
const group = screen.getByRole("radiogroup", { name: "Theme preference" });
|
||||
expect(group.className).toContain("my-custom-class");
|
||||
});
|
||||
|
||||
it("applies default className when none provided", () => {
|
||||
render(<ThemeToggle />);
|
||||
const group = screen.getByRole("radiogroup", { name: "Theme preference" });
|
||||
expect(group.className).toContain("inline-flex");
|
||||
});
|
||||
});
|
||||
@ -1,235 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for Tooltip component.
|
||||
*
|
||||
* Covers: portal rendering, 400ms hover delay, keyboard focus reveal,
|
||||
* Esc dismiss, no render when text is empty.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen, fireEvent, cleanup, act } from "@testing-library/react";
|
||||
import { afterEach, describe, expect, it, vi, beforeEach } from "vitest";
|
||||
import { Tooltip } from "../Tooltip";
|
||||
|
||||
afterEach(cleanup);
|
||||
|
||||
describe("Tooltip — render", () => {
|
||||
it("renders children without showing tooltip on mount", () => {
|
||||
render(
|
||||
<Tooltip text="Hello world">
|
||||
<button type="button">Hover me</button>
|
||||
</Tooltip>
|
||||
);
|
||||
expect(screen.getByRole("button", { name: "Hover me" })).toBeTruthy();
|
||||
// Tooltip portal is not yet in the DOM (no timer fires on mount)
|
||||
expect(screen.queryByRole("tooltip")).toBeNull();
|
||||
});
|
||||
|
||||
it("does not render the tooltip portal when text is empty string", () => {
|
||||
render(
|
||||
<Tooltip text="">
|
||||
<button type="button">Hover me</button>
|
||||
</Tooltip>
|
||||
);
|
||||
// Move mouse over trigger
|
||||
fireEvent.mouseEnter(screen.getByRole("button"));
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
expect(screen.queryByRole("tooltip")).toBeNull();
|
||||
});
|
||||
|
||||
it("mounts the tooltip into a portal attached to document.body", () => {
|
||||
render(
|
||||
<Tooltip text="Portal tip">
|
||||
<button type="button">Hover me</button>
|
||||
</Tooltip>
|
||||
);
|
||||
// Simulate mouse enter → 400ms delay → tooltip renders
|
||||
fireEvent.mouseEnter(screen.getByRole("button"));
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
expect(document.body.querySelector('[role="tooltip"]')).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
describe("Tooltip — hover delay", () => {
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
it("does NOT show tooltip before the 400ms delay expires", () => {
|
||||
render(
|
||||
<Tooltip text="Delayed tip">
|
||||
<button type="button">Hover me</button>
|
||||
</Tooltip>
|
||||
);
|
||||
fireEvent.mouseEnter(screen.getByRole("button"));
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(300);
|
||||
});
|
||||
expect(screen.queryByRole("tooltip")).toBeNull();
|
||||
});
|
||||
|
||||
it("shows tooltip after 400ms hover delay", () => {
|
||||
render(
|
||||
<Tooltip text="Delayed tip">
|
||||
<button type="button">Hover me</button>
|
||||
</Tooltip>
|
||||
);
|
||||
fireEvent.mouseEnter(screen.getByRole("button"));
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
expect(screen.queryByRole("tooltip")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("hides tooltip immediately on mouse leave (clears pending timer)", () => {
|
||||
render(
|
||||
<Tooltip text="Cleared tip">
|
||||
<button type="button">Hover me</button>
|
||||
</Tooltip>
|
||||
);
|
||||
const btn = screen.getByRole("button");
|
||||
fireEvent.mouseEnter(btn);
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(200);
|
||||
});
|
||||
expect(screen.queryByRole("tooltip")).toBeNull();
|
||||
|
||||
fireEvent.mouseLeave(btn);
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
// Still not shown because mouseLeave cancelled the timer
|
||||
expect(screen.queryByRole("tooltip")).toBeNull();
|
||||
});
|
||||
|
||||
it("does not show on a second mouseEnter after mouseLeave", () => {
|
||||
render(
|
||||
<Tooltip text="Re-show tip">
|
||||
<button type="button">Hover me</button>
|
||||
</Tooltip>
|
||||
);
|
||||
const btn = screen.getByRole("button");
|
||||
fireEvent.mouseEnter(btn);
|
||||
fireEvent.mouseLeave(btn);
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
expect(screen.queryByRole("tooltip")).toBeNull();
|
||||
|
||||
// Re-enter
|
||||
fireEvent.mouseEnter(btn);
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
expect(screen.queryByRole("tooltip")).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
describe("Tooltip — keyboard focus reveal", () => {
|
||||
it("shows tooltip on focus without needing the hover timer", () => {
|
||||
vi.useFakeTimers();
|
||||
render(
|
||||
<Tooltip text="Keyboard tip">
|
||||
<button type="button">Focus me</button>
|
||||
</Tooltip>
|
||||
);
|
||||
const btn = screen.getByRole("button");
|
||||
// No timer needed — onFocus shows immediately
|
||||
act(() => {
|
||||
btn.focus();
|
||||
});
|
||||
expect(screen.queryByRole("tooltip")).toBeTruthy();
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
it("hides tooltip on blur", () => {
|
||||
vi.useFakeTimers();
|
||||
render(
|
||||
<Tooltip text="Blur tip">
|
||||
<button type="button">Focus me</button>
|
||||
</Tooltip>
|
||||
);
|
||||
const btn = screen.getByRole("button");
|
||||
act(() => {
|
||||
btn.focus();
|
||||
});
|
||||
expect(screen.queryByRole("tooltip")).toBeTruthy();
|
||||
|
||||
act(() => {
|
||||
btn.blur();
|
||||
});
|
||||
expect(screen.queryByRole("tooltip")).toBeNull();
|
||||
vi.useRealTimers();
|
||||
});
|
||||
});
|
||||
|
||||
describe("Tooltip — Esc dismiss (WCAG 1.4.13)", () => {
|
||||
it("dismisses tooltip on Escape without blurring the trigger", () => {
|
||||
vi.useFakeTimers();
|
||||
render(
|
||||
<Tooltip text="Esc dismiss tip">
|
||||
<button type="button">Hover me</button>
|
||||
</Tooltip>
|
||||
);
|
||||
const btn = screen.getByRole("button");
|
||||
fireEvent.mouseEnter(btn);
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
expect(screen.queryByRole("tooltip")).toBeTruthy();
|
||||
expect(document.activeElement).toBe(btn);
|
||||
|
||||
act(() => {
|
||||
fireEvent.keyDown(window, { key: "Escape" });
|
||||
});
|
||||
expect(screen.queryByRole("tooltip")).toBeNull();
|
||||
// Trigger is still focused (Esc dismisses tooltip but does not blur)
|
||||
expect(document.activeElement).toBe(btn);
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
it("does nothing on non-Escape keys while tooltip is open", () => {
|
||||
vi.useFakeTimers();
|
||||
render(
|
||||
<Tooltip text="Non-Escape key">
|
||||
<button type="button">Hover me</button>
|
||||
</Tooltip>
|
||||
);
|
||||
const btn = screen.getByRole("button");
|
||||
fireEvent.mouseEnter(btn);
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(500);
|
||||
});
|
||||
expect(screen.queryByRole("tooltip")).toBeTruthy();
|
||||
|
||||
act(() => {
|
||||
fireEvent.keyDown(window, { key: "Enter" });
|
||||
});
|
||||
// Tooltip still visible
|
||||
expect(screen.queryByRole("tooltip")).toBeTruthy();
|
||||
vi.useRealTimers();
|
||||
});
|
||||
});
|
||||
|
||||
describe("Tooltip — aria-describedby", () => {
|
||||
it("associates tooltip with the trigger via aria-describedby", () => {
|
||||
render(
|
||||
<Tooltip text="Associated tip">
|
||||
<button type="button">Hover me</button>
|
||||
</Tooltip>
|
||||
);
|
||||
const btn = screen.getByRole("button");
|
||||
const describedBy = btn.getAttribute("aria-describedby");
|
||||
expect(describedBy).toBeTruthy();
|
||||
// The describedby id matches the tooltip id
|
||||
const tooltipId = describedBy!.replace(/.*?:\s*/, "");
|
||||
expect(document.getElementById(tooltipId)).toBeTruthy();
|
||||
});
|
||||
});
|
||||
@ -1,50 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for TopBar component.
|
||||
*
|
||||
* Covers: renders header, logo, canvas name, "+ New Agent" button,
|
||||
* SettingsButton integration, custom canvasName prop.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen } from "@testing-library/react";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import { TopBar } from "../canvas/TopBar";
|
||||
|
||||
// ─── Mock SettingsButton ───────────────────────────────────────────────────────
|
||||
|
||||
vi.mock("../settings/SettingsButton", () => ({
|
||||
SettingsButton: vi.fn(() => <button aria-label="Settings">⚙</button>),
|
||||
}));
|
||||
|
||||
describe("TopBar — render", () => {
|
||||
it("renders a header element", () => {
|
||||
render(<TopBar />);
|
||||
expect(document.body.querySelector("header")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("renders the canvas name (default)", () => {
|
||||
render(<TopBar />);
|
||||
expect(screen.getByText("Canvas")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("renders a custom canvas name", () => {
|
||||
render(<TopBar canvasName="My Org Canvas" />);
|
||||
expect(screen.getByText("My Org Canvas")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("renders the '+ New Agent' button", () => {
|
||||
render(<TopBar />);
|
||||
expect(screen.getByRole("button", { name: /new agent/i })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("renders the SettingsButton", () => {
|
||||
render(<TopBar />);
|
||||
expect(screen.getByRole("button", { name: "Settings" })).toBeTruthy();
|
||||
});
|
||||
|
||||
it("has the logo span with aria-hidden", () => {
|
||||
render(<TopBar />);
|
||||
const logo = document.body.querySelector('[aria-hidden="true"]');
|
||||
expect(logo?.textContent).toBe("☁");
|
||||
});
|
||||
});
|
||||
@ -1,77 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for ValidationHint component.
|
||||
*
|
||||
* Covers: error state, valid state, neutral/hidden state,
|
||||
* aria-live for error, icon rendering.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, screen } from "@testing-library/react";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { ValidationHint } from "../ui/ValidationHint";
|
||||
|
||||
describe("ValidationHint — error state", () => {
|
||||
it("renders error message when error is a non-null string", () => {
|
||||
render(<ValidationHint error="Invalid email address" />);
|
||||
expect(screen.getByRole("alert")).toBeTruthy();
|
||||
expect(screen.getByText("Invalid email address")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("includes the warning icon in error state", () => {
|
||||
render(<ValidationHint error="Too short" />);
|
||||
expect(screen.getByText(/⚠/)).toBeTruthy();
|
||||
});
|
||||
|
||||
it("uses the error class on the paragraph element", () => {
|
||||
render(<ValidationHint error="Bad input" />);
|
||||
const el = screen.getByRole("alert");
|
||||
expect(el.className).toContain("validation-hint--error");
|
||||
});
|
||||
|
||||
it("renders error even when showValid is true", () => {
|
||||
render(<ValidationHint error="Oops" showValid={true} />);
|
||||
expect(screen.getByRole("alert")).toBeTruthy();
|
||||
expect(screen.queryByText(/✓/)).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("ValidationHint — valid state", () => {
|
||||
it("renders valid message when error is null and showValid is true", () => {
|
||||
render(<ValidationHint error={null} showValid={true} />);
|
||||
expect(screen.getByText("Valid format")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("includes the checkmark icon in valid state", () => {
|
||||
render(<ValidationHint error={null} showValid={true} />);
|
||||
expect(screen.getByText(/✓ Valid format/)).toBeTruthy();
|
||||
});
|
||||
|
||||
it("uses the valid class on the paragraph element", () => {
|
||||
render(<ValidationHint error={null} showValid={true} />);
|
||||
const el = document.body.querySelector(".validation-hint--valid");
|
||||
expect(el).toBeTruthy();
|
||||
});
|
||||
|
||||
it("renders nothing when error is null and showValid is false (default)", () => {
|
||||
const { container } = render(<ValidationHint error={null} />);
|
||||
expect(container.textContent).toBe("");
|
||||
});
|
||||
|
||||
it("renders nothing when error is empty string", () => {
|
||||
const { container } = render(<ValidationHint error="" />);
|
||||
expect(container.textContent).toBe("");
|
||||
});
|
||||
});
|
||||
|
||||
describe("ValidationHint — neutral / not-yet-validated", () => {
|
||||
it("renders nothing when error is null and showValid defaults to false", () => {
|
||||
const { container } = render(<ValidationHint error={null} />);
|
||||
expect(container.textContent).toBe("");
|
||||
});
|
||||
|
||||
it("renders nothing when error is undefined", () => {
|
||||
// @ts-expect-error — testing runtime behavior with undefined
|
||||
const { container } = render(<ValidationHint error={undefined} />);
|
||||
expect(container.textContent).toBe("");
|
||||
});
|
||||
});
|
||||
@ -1,75 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for createMessage — the ChatMessage factory from types.ts.
|
||||
*/
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { createMessage } from "../tabs/chat/types";
|
||||
|
||||
describe("createMessage", () => {
|
||||
beforeEach(() => {
|
||||
// Freeze time so timestamp is deterministic.
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(new Date("2026-05-10T12:00:00.000Z"));
|
||||
// Stub crypto.randomUUID so message IDs are deterministic.
|
||||
vi.stubGlobal("crypto", { randomUUID: vi.fn(() => "fixed-uuid-1234") });
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.useRealTimers();
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
it("creates a message with the correct role", () => {
|
||||
const userMsg = createMessage("user", "hello");
|
||||
expect(userMsg.role).toBe("user");
|
||||
|
||||
const agentMsg = createMessage("agent", "hi there");
|
||||
expect(agentMsg.role).toBe("agent");
|
||||
|
||||
const systemMsg = createMessage("system", "prompt loaded");
|
||||
expect(systemMsg.role).toBe("system");
|
||||
});
|
||||
|
||||
it("creates a message with the correct content", () => {
|
||||
const msg = createMessage("user", "Deploy the agent now");
|
||||
expect(msg.content).toBe("Deploy the agent now");
|
||||
});
|
||||
|
||||
it("sets a deterministic id via crypto.randomUUID", () => {
|
||||
const msg = createMessage("agent", "response");
|
||||
expect(msg.id).toBe("fixed-uuid-1234");
|
||||
});
|
||||
|
||||
it("sets a deterministic ISO timestamp", () => {
|
||||
const msg = createMessage("user", "hello");
|
||||
expect(msg.timestamp).toBe("2026-05-10T12:00:00.000Z");
|
||||
});
|
||||
|
||||
it("omits attachments field when none provided", () => {
|
||||
const msg = createMessage("user", "hello");
|
||||
expect(msg.attachments).toBeUndefined();
|
||||
});
|
||||
|
||||
it("omits attachments field when empty array is provided", () => {
|
||||
const msg = createMessage("agent", "result", []);
|
||||
expect(msg.attachments).toBeUndefined();
|
||||
});
|
||||
|
||||
it("includes attachments field when non-empty array is provided", () => {
|
||||
const atts = [{ name: "report.pdf", uri: "workspace:/docs/report.pdf" }];
|
||||
const msg = createMessage("agent", "see attached", atts);
|
||||
expect(msg.attachments).toEqual(atts);
|
||||
});
|
||||
|
||||
it("returns a frozen object (prevents accidental mutation)", () => {
|
||||
const msg = createMessage("user", "hello");
|
||||
expect(Object.isFrozen(msg)).toBe(true);
|
||||
});
|
||||
|
||||
it("returns a plain object with expected keys", () => {
|
||||
const msg = createMessage("user", "hello");
|
||||
expect(Object.keys(msg).sort()).toEqual(
|
||||
["id", "role", "content", "timestamp"].sort()
|
||||
);
|
||||
});
|
||||
});
|
||||
@ -1,104 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for getIcon — the pure icon-selector from FilesTab/tree.ts.
|
||||
*/
|
||||
import { describe, it, expect } from "vitest";
|
||||
import { getIcon } from "../tabs/FilesTab/tree";
|
||||
|
||||
describe("getIcon", () => {
|
||||
// ─── Directories ──────────────────────────────────────────────────────────
|
||||
|
||||
it("returns 📁 for directories regardless of extension", () => {
|
||||
expect(getIcon("src", true)).toBe("📁");
|
||||
expect(getIcon("node_modules", true)).toBe("📁");
|
||||
expect(getIcon(".claude", true)).toBe("📁");
|
||||
expect(getIcon("foo/bar/baz", true)).toBe("📁");
|
||||
});
|
||||
|
||||
it("returns 📁 even for paths that look like files", () => {
|
||||
expect(getIcon("foo.txt", true)).toBe("📁");
|
||||
expect(getIcon("script.sh", true)).toBe("📁");
|
||||
});
|
||||
|
||||
// ─── Files by extension ────────────────────────────────────────────────────
|
||||
|
||||
it("returns 📄 for .md files", () => {
|
||||
expect(getIcon("README.md", false)).toBe("📄");
|
||||
expect(getIcon("CHANGELOG.md", false)).toBe("📄");
|
||||
expect(getIcon("docs/guide.md", false)).toBe("📄");
|
||||
});
|
||||
|
||||
it("returns ⚙ for .yaml and .yml files", () => {
|
||||
expect(getIcon("config.yaml", false)).toBe("⚙");
|
||||
expect(getIcon("values.yml", false)).toBe("⚙");
|
||||
expect(getIcon("deploy.yaml", false)).toBe("⚙");
|
||||
});
|
||||
|
||||
it("returns 🐍 for .py files", () => {
|
||||
expect(getIcon("main.py", false)).toBe("🐍");
|
||||
expect(getIcon("utils/helpers.py", false)).toBe("🐍");
|
||||
});
|
||||
|
||||
it("returns 💠 for .ts and .tsx files", () => {
|
||||
expect(getIcon("index.ts", false)).toBe("💠");
|
||||
expect(getIcon("Component.tsx", false)).toBe("💠");
|
||||
expect(getIcon("types.d.ts", false)).toBe("💠");
|
||||
});
|
||||
|
||||
it("returns 📜 for .js files", () => {
|
||||
expect(getIcon("bundle.js", false)).toBe("📜");
|
||||
expect(getIcon("src/index.js", false)).toBe("📜");
|
||||
});
|
||||
|
||||
it("returns {} for .json files", () => {
|
||||
expect(getIcon("package.json", false)).toBe("{}");
|
||||
expect(getIcon("config.json", false)).toBe("{}");
|
||||
});
|
||||
|
||||
it("returns 🌐 for .html files", () => {
|
||||
expect(getIcon("index.html", false)).toBe("🌐");
|
||||
expect(getIcon("templates/page.html", false)).toBe("🌐");
|
||||
});
|
||||
|
||||
it("returns 🎨 for .css files", () => {
|
||||
expect(getIcon("style.css", false)).toBe("🎨");
|
||||
expect(getIcon("src/app.css", false)).toBe("🎨");
|
||||
});
|
||||
|
||||
it("returns ▸ for .sh files", () => {
|
||||
expect(getIcon("deploy.sh", false)).toBe("▸");
|
||||
expect(getIcon("scripts/setup.sh", false)).toBe("▸");
|
||||
});
|
||||
|
||||
// ─── Fallback ─────────────────────────────────────────────────────────────
|
||||
|
||||
it("returns 📄 for unknown extensions", () => {
|
||||
expect(getIcon("README", false)).toBe("📄");
|
||||
expect(getIcon("Dockerfile", false)).toBe("📄");
|
||||
expect(getIcon("Makefile", false)).toBe("📄");
|
||||
expect(getIcon("notes.txt", false)).toBe("📄");
|
||||
expect(getIcon("archive.tar.gz", false)).toBe("📄");
|
||||
});
|
||||
|
||||
it("returns 📄 for paths with no extension", () => {
|
||||
expect(getIcon("Makefile", false)).toBe("📄");
|
||||
expect(getIcon("README", false)).toBe("📄");
|
||||
expect(getIcon("Dockerfile", false)).toBe("📄");
|
||||
});
|
||||
|
||||
// ─── Case sensitivity ──────────────────────────────────────────────────────
|
||||
|
||||
it("is case-insensitive for extension lookup", () => {
|
||||
expect(getIcon("image.PNG", false)).toBe("📄");
|
||||
expect(getIcon("data.JSON", false)).toBe("{}");
|
||||
expect(getIcon("script.SH", false)).toBe("▸");
|
||||
});
|
||||
|
||||
// ─── Nested paths ─────────────────────────────────────────────────────────
|
||||
|
||||
it("uses the leaf extension for nested paths", () => {
|
||||
expect(getIcon("src/utils/helpers.ts", false)).toBe("💠");
|
||||
expect(getIcon("docs/api.yaml", false)).toBe("⚙");
|
||||
expect(getIcon(".github/workflows/ci.yml", false)).toBe("⚙");
|
||||
});
|
||||
});
|
||||
@ -1,436 +0,0 @@
|
||||
// @vitest-environment jsdom
|
||||
/**
|
||||
* Tests for canvas keyboard shortcuts (useKeyboardShortcuts hook).
|
||||
*
|
||||
* Covers: Esc, Enter/Shift+Enter, Cmd+]/[, Z, and Arrow keys.
|
||||
*
|
||||
* The hook is tested by dispatching KeyboardEvents at the window and
|
||||
* asserting the resulting store mutations / dispatched events.
|
||||
*/
|
||||
import React from "react";
|
||||
import { render, cleanup, fireEvent } from "@testing-library/react";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { useKeyboardShortcuts } from "../useKeyboardShortcuts";
|
||||
import { useCanvasStore } from "@/store/canvas";
|
||||
|
||||
// ─── Mock store ──────────────────────────────────────────────────────────────
|
||||
|
||||
const mockSavePosition = vi.fn().mockResolvedValue(undefined);
|
||||
|
||||
vi.mock("@/store/canvas", () => ({
|
||||
useCanvasStore: Object.assign(
|
||||
vi.fn((sel) => sel(mockStoreState)),
|
||||
{
|
||||
getState: () => mockStoreState,
|
||||
}
|
||||
),
|
||||
}));
|
||||
|
||||
// Module-level mutable state so tests can mutate between cases
|
||||
const mockStoreState = {
|
||||
selectedNodeId: null as string | null,
|
||||
selectedNodeIds: new Set<string>(),
|
||||
nodes: [] as Array<{
|
||||
id: string;
|
||||
position: { x: number; y: number };
|
||||
data: { parentId?: string | null };
|
||||
width?: number;
|
||||
height?: number;
|
||||
}>,
|
||||
contextMenu: null as { x: number; y: number; nodeId: string } | null,
|
||||
closeContextMenu: vi.fn(),
|
||||
selectNode: vi.fn(),
|
||||
clearSelection: vi.fn(),
|
||||
bumpZOrder: vi.fn(),
|
||||
savePosition: mockSavePosition,
|
||||
moveNode: vi.fn(),
|
||||
onNodesChange: vi.fn(),
|
||||
};
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
vi.clearAllMocks();
|
||||
// Reset to default empty state between tests
|
||||
mockStoreState.selectedNodeId = null;
|
||||
mockStoreState.selectedNodeIds = new Set();
|
||||
mockStoreState.nodes = [];
|
||||
mockStoreState.contextMenu = null;
|
||||
mockStoreState.closeContextMenu.mockClear();
|
||||
mockStoreState.selectNode.mockClear();
|
||||
mockStoreState.clearSelection.mockClear();
|
||||
mockStoreState.bumpZOrder.mockClear();
|
||||
mockStoreState.moveNode.mockClear();
|
||||
mockStoreState.savePosition.mockClear();
|
||||
mockStoreState.onNodesChange.mockClear();
|
||||
});
|
||||
|
||||
// ─── Test wrapper ────────────────────────────────────────────────────────────
|
||||
|
||||
function ShortcutTestComponent() {
|
||||
useKeyboardShortcuts();
|
||||
return <div data-testid="canvas-root" />;
|
||||
}
|
||||
|
||||
function renderWithProvider() {
|
||||
return render(<ShortcutTestComponent />);
|
||||
}
|
||||
|
||||
// ─── Tests ───────────────────────────────────────────────────────────────────
|
||||
|
||||
describe("Esc — deselect / close context menu", () => {
|
||||
it("closes the context menu when one is open", () => {
|
||||
mockStoreState.contextMenu = { x: 100, y: 100, nodeId: "n1" };
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "Escape" });
|
||||
expect(mockStoreState.closeContextMenu).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it("clears the batch selection when no context menu is open", () => {
|
||||
mockStoreState.contextMenu = null;
|
||||
mockStoreState.selectedNodeIds = new Set(["n1", "n2"]);
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "Escape" });
|
||||
expect(mockStoreState.clearSelection).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it("deselects the focused node when no batch selection exists", () => {
|
||||
mockStoreState.contextMenu = null;
|
||||
mockStoreState.selectedNodeIds = new Set();
|
||||
mockStoreState.selectedNodeId = "n1";
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "Escape" });
|
||||
expect(mockStoreState.selectNode).toHaveBeenCalledWith(null);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Enter — hierarchy navigation", () => {
|
||||
beforeEach(() => {
|
||||
mockStoreState.selectedNodeId = "n1";
|
||||
mockStoreState.nodes = [
|
||||
{ id: "n1", position: { x: 0, y: 0 }, data: { parentId: null } },
|
||||
{ id: "n2", position: { x: 100, y: 0 }, data: { parentId: "n1" } },
|
||||
{ id: "n3", position: { x: 200, y: 0 }, data: { parentId: null } },
|
||||
];
|
||||
});
|
||||
|
||||
it("navigates to the first child on Enter", () => {
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "Enter" });
|
||||
expect(mockStoreState.selectNode).toHaveBeenCalledWith("n2");
|
||||
});
|
||||
|
||||
it("navigates to the parent on Shift+Enter", () => {
|
||||
mockStoreState.nodes = [
|
||||
{ id: "n1", position: { x: 0, y: 0 }, data: { parentId: null } },
|
||||
{ id: "n2", position: { x: 100, y: 0 }, data: { parentId: "n1" } },
|
||||
];
|
||||
mockStoreState.selectedNodeId = "n2";
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "Enter", shiftKey: true });
|
||||
expect(mockStoreState.selectNode).toHaveBeenCalledWith("n1");
|
||||
});
|
||||
|
||||
it("does NOT navigate when no node is selected", () => {
|
||||
mockStoreState.selectedNodeId = null;
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "Enter" });
|
||||
expect(mockStoreState.selectNode).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe("Cmd+]/[ — z-order bump", () => {
|
||||
beforeEach(() => {
|
||||
mockStoreState.selectedNodeId = "n1";
|
||||
});
|
||||
|
||||
it("bumps z-order forward on Cmd+]", () => {
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "]", metaKey: true });
|
||||
expect(mockStoreState.bumpZOrder).toHaveBeenCalledWith("n1", 1);
|
||||
});
|
||||
|
||||
it("bumps z-order backward on Cmd+[", () => {
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "[", metaKey: true });
|
||||
expect(mockStoreState.bumpZOrder).toHaveBeenCalledWith("n1", -1);
|
||||
});
|
||||
|
||||
it("uses Ctrl as the modifier key", () => {
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "]", ctrlKey: true });
|
||||
expect(mockStoreState.bumpZOrder).toHaveBeenCalledWith("n1", 1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Z — zoom-to-team", () => {
|
||||
let dispatchedEvents: CustomEvent[] = [];
|
||||
|
||||
beforeEach(() => {
|
||||
dispatchedEvents = [];
|
||||
mockStoreState.selectedNodeId = "n1";
|
||||
mockStoreState.nodes = [
|
||||
{ id: "n1", position: { x: 0, y: 0 }, data: { parentId: null } },
|
||||
{ id: "n2", position: { x: 100, y: 0 }, data: { parentId: "n1" } },
|
||||
];
|
||||
window.addEventListener("molecule:zoom-to-team", (e) => {
|
||||
dispatchedEvents.push(e as CustomEvent);
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
window.removeEventListener("molecule:zoom-to-team", () => {});
|
||||
});
|
||||
|
||||
it("dispatches zoom-to-team when the selected node has children", () => {
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "z" });
|
||||
expect(dispatchedEvents).toHaveLength(1);
|
||||
expect(dispatchedEvents[0].detail.nodeId).toBe("n1");
|
||||
});
|
||||
|
||||
it("does NOT fire when no node is selected", () => {
|
||||
mockStoreState.selectedNodeId = null;
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "z" });
|
||||
expect(dispatchedEvents).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("does NOT fire when the node has no children", () => {
|
||||
mockStoreState.nodes = [
|
||||
{ id: "n1", position: { x: 0, y: 0 }, data: { parentId: null } },
|
||||
];
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "z" });
|
||||
expect(dispatchedEvents).toHaveLength(0);
|
||||
});
|
||||
|
||||
it("skips when the target element is an input", () => {
|
||||
renderWithProvider();
|
||||
const input = document.createElement("input");
|
||||
document.body.appendChild(input);
|
||||
fireEvent.keyDown(input, { key: "z" });
|
||||
expect(dispatchedEvents).toHaveLength(0);
|
||||
document.body.removeChild(input);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Arrow keys — keyboard node movement", () => {
|
||||
beforeEach(() => {
|
||||
mockStoreState.selectedNodeId = "n1";
|
||||
mockStoreState.nodes = [
|
||||
{ id: "n1", position: { x: 100, y: 200 }, data: { parentId: null } },
|
||||
];
|
||||
});
|
||||
|
||||
it("moves the selected node down on ArrowDown", () => {
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "ArrowDown" });
|
||||
expect(mockStoreState.moveNode).toHaveBeenCalledWith("n1", 0, 10);
|
||||
});
|
||||
|
||||
it("moves the selected node up on ArrowUp", () => {
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "ArrowUp" });
|
||||
expect(mockStoreState.moveNode).toHaveBeenCalledWith("n1", 0, -10);
|
||||
});
|
||||
|
||||
it("moves the selected node right on ArrowRight", () => {
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "ArrowRight" });
|
||||
expect(mockStoreState.moveNode).toHaveBeenCalledWith("n1", 10, 0);
|
||||
});
|
||||
|
||||
it("moves the selected node left on ArrowLeft", () => {
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "ArrowLeft" });
|
||||
expect(mockStoreState.moveNode).toHaveBeenCalledWith("n1", -10, 0);
|
||||
});
|
||||
|
||||
it("moves 50 px when Shift is held", () => {
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "ArrowDown", shiftKey: true });
|
||||
expect(mockStoreState.moveNode).toHaveBeenCalledWith("n1", 0, 50);
|
||||
});
|
||||
|
||||
it("does NOT fire when no node is selected", () => {
|
||||
mockStoreState.selectedNodeId = null;
|
||||
renderWithProvider();
|
||||
fireEvent.keyDown(window, { key: "ArrowDown" });
|
||||
expect(mockStoreState.moveNode).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("skips when the target element is an input", () => {
|
||||
renderWithProvider();
|
||||
const input = document.createElement("input");
|
||||
document.body.appendChild(input);
|
||||
fireEvent.keyDown(input, { key: "ArrowDown" });
|
||||
expect(mockStoreState.moveNode).not.toHaveBeenCalled();
|
||||
document.body.removeChild(input);
|
||||
});
|
||||
|
||||
it("skips when a modal dialog is already open", () => {
|
||||
renderWithProvider();
|
||||
const dialog = document.createElement("div");
|
||||
dialog.setAttribute("role", "dialog");
|
||||
dialog.setAttribute("aria-modal", "true");
|
||||
document.body.appendChild(dialog);
|
||||
fireEvent.keyDown(window, { key: "ArrowDown" });
|
||||
expect(mockStoreState.moveNode).not.toHaveBeenCalled();
|
||||
document.body.removeChild(dialog);
|
||||
});
|
||||
|
||||
// NOTE: "prevents default browser scroll on arrow keys" was removed.
|
||||
// jsdom's KeyboardEvent.initKeyboardEvent does not copy the preventDefault
|
||||
// function from eventProperties into the real KeyboardEvent, so a
|
||||
// preventDefault mock passed via fireEvent.keyDown(eventProperties) is
|
||||
// never called. The guard (selected node required) is covered by
|
||||
// "does NOT fire when no node is selected". The e.preventDefault() call
|
||||
// itself is verified by code inspection.
|
||||
});
|
||||
|
||||
describe("all shortcuts respect inInput guard", () => {
|
||||
it("ArrowDown is skipped in an input element", () => {
|
||||
mockStoreState.selectedNodeId = "n1";
|
||||
renderWithProvider();
|
||||
const textarea = document.createElement("textarea");
|
||||
document.body.appendChild(textarea);
|
||||
fireEvent.keyDown(textarea, { key: "ArrowDown" });
|
||||
expect(mockStoreState.moveNode).not.toHaveBeenCalled();
|
||||
document.body.removeChild(textarea);
|
||||
});
|
||||
|
||||
it("Enter navigation is skipped in an input element", () => {
|
||||
mockStoreState.selectedNodeId = "n1";
|
||||
mockStoreState.nodes = [
|
||||
{ id: "n1", position: { x: 0, y: 0 }, data: { parentId: null } },
|
||||
{ id: "n2", position: { x: 100, y: 0 }, data: { parentId: "n1" } },
|
||||
];
|
||||
renderWithProvider();
|
||||
const input = document.createElement("input");
|
||||
document.body.appendChild(input);
|
||||
fireEvent.keyDown(input, { key: "Enter" });
|
||||
expect(mockStoreState.selectNode).not.toHaveBeenCalled();
|
||||
document.body.removeChild(input);
|
||||
});
|
||||
});
|
||||
|
||||
describe("Cmd/Ctrl+Arrow — keyboard node resize", () => {
|
||||
beforeEach(() => {
|
||||
mockStoreState.nodes = [
|
||||
{
|
||||
id: "n1",
|
||||
position: { x: 0, y: 0 },
|
||||
data: { parentId: null },
|
||||
width: 210,
|
||||
height: 110,
|
||||
},
|
||||
];
|
||||
mockStoreState.selectedNodeId = "n1";
|
||||
renderWithProvider();
|
||||
});
|
||||
|
||||
it("resizes height down (smaller) on Cmd/Ctrl+ArrowUp", () => {
|
||||
// Node starts at minHeight=110 (no children). Shrinking clamps to min —
|
||||
// height stays 110. Width is unchanged.
|
||||
fireEvent.keyDown(window, { key: "ArrowUp", metaKey: true });
|
||||
expect(mockStoreState.onNodesChange).toHaveBeenCalledWith([
|
||||
expect.objectContaining({
|
||||
type: "dimensions",
|
||||
id: "n1",
|
||||
dimensions: { width: 210, height: 110 },
|
||||
}),
|
||||
]);
|
||||
});
|
||||
|
||||
it("resizes height up (larger) on Cmd/Ctrl+ArrowDown", () => {
|
||||
fireEvent.keyDown(window, { key: "ArrowDown", ctrlKey: true });
|
||||
expect(mockStoreState.onNodesChange).toHaveBeenCalledWith([
|
||||
expect.objectContaining({
|
||||
type: "dimensions",
|
||||
id: "n1",
|
||||
dimensions: { width: 210, height: 120 },
|
||||
}),
|
||||
]);
|
||||
});
|
||||
|
||||
it("resizes width down (smaller) on Cmd/Ctrl+ArrowLeft", () => {
|
||||
// Node starts at minWidth=210 (no children). Shrinking clamps to min —
|
||||
// width stays 210. Height is unchanged.
|
||||
fireEvent.keyDown(window, { key: "ArrowLeft", metaKey: true });
|
||||
expect(mockStoreState.onNodesChange).toHaveBeenCalledWith([
|
||||
expect.objectContaining({
|
||||
type: "dimensions",
|
||||
id: "n1",
|
||||
dimensions: { width: 210, height: 110 },
|
||||
}),
|
||||
]);
|
||||
});
|
||||
|
||||
it("resizes width up (larger) on Cmd/Ctrl+ArrowRight", () => {
|
||||
fireEvent.keyDown(window, { key: "ArrowRight", ctrlKey: true });
|
||||
expect(mockStoreState.onNodesChange).toHaveBeenCalledWith([
|
||||
expect.objectContaining({
|
||||
type: "dimensions",
|
||||
id: "n1",
|
||||
dimensions: { width: 220, height: 110 },
|
||||
}),
|
||||
]);
|
||||
});
|
||||
|
||||
it("uses 2px step with Shift held", () => {
|
||||
// Step is 2px with Shift, but minHeight=110 clamps the result.
|
||||
// 110 - 2 = 108, Math.max(110, 108) = 110. Width is unchanged.
|
||||
fireEvent.keyDown(window, { key: "ArrowUp", metaKey: true, shiftKey: true });
|
||||
expect(mockStoreState.onNodesChange).toHaveBeenCalledWith([
|
||||
expect.objectContaining({
|
||||
dimensions: { width: 210, height: 110 },
|
||||
}),
|
||||
]);
|
||||
});
|
||||
|
||||
it("respects min-height constraint (no children)", () => {
|
||||
fireEvent.keyDown(window, { key: "ArrowUp", metaKey: true });
|
||||
fireEvent.keyDown(window, { key: "ArrowUp", metaKey: true });
|
||||
// After shrinking from 110 to 100, another ArrowUp hits min-height of 110
|
||||
// (110 - 10 = 100, but 100 < 110 so it should stay at 110)
|
||||
// Actually: 110 -> 100 -> 110 (resets to min)
|
||||
// Let me check: the hook does Math.max(minHeight, currentHeight - step)
|
||||
// minHeight=110, step=10, so 110 - 10 = 100, but Math.max(110, 100) = 110
|
||||
// So two ArrowUp calls should both result in height=100 then height=110?
|
||||
// Wait: 110 - 10 = 100, Math.max(110, 100) = 110 (not 100)
|
||||
// So the height never goes below 110. After first: 110 -> 100, but clamped to 110.
|
||||
// Actually Math.max(110, 100) = 110, so the height never changes.
|
||||
// The min constraint is respected — height stays at 110.
|
||||
expect(mockStoreState.onNodesChange).toHaveBeenLastCalledWith([
|
||||
expect.objectContaining({ dimensions: { width: 210, height: 110 } }),
|
||||
]);
|
||||
});
|
||||
|
||||
it("does NOT fire when no node is selected", () => {
|
||||
mockStoreState.selectedNodeId = null;
|
||||
fireEvent.keyDown(window, { key: "ArrowDown", metaKey: true });
|
||||
expect(mockStoreState.onNodesChange).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("skips when a modal dialog is open", () => {
|
||||
const dialog = document.createElement("div");
|
||||
dialog.setAttribute("role", "dialog");
|
||||
dialog.setAttribute("aria-modal", "true");
|
||||
document.body.appendChild(dialog);
|
||||
fireEvent.keyDown(window, { key: "ArrowDown", metaKey: true });
|
||||
expect(mockStoreState.onNodesChange).not.toHaveBeenCalled();
|
||||
document.body.removeChild(dialog);
|
||||
});
|
||||
|
||||
it("skips plain arrow keys (no modifier) — moveNode is called instead", () => {
|
||||
fireEvent.keyDown(window, { key: "ArrowUp" });
|
||||
expect(mockStoreState.moveNode).toHaveBeenCalled();
|
||||
expect(mockStoreState.onNodesChange).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("skips Alt+Arrow (not a resize combo)", () => {
|
||||
fireEvent.keyDown(window, { key: "ArrowUp", altKey: true });
|
||||
expect(mockStoreState.onNodesChange).not.toHaveBeenCalled();
|
||||
expect(mockStoreState.moveNode).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
@ -2,13 +2,6 @@
|
||||
|
||||
import { useEffect } from "react";
|
||||
import { useCanvasStore } from "@/store/canvas";
|
||||
import { type NodeChange, type Node } from "@xyflow/react";
|
||||
import type { WorkspaceNodeData } from "@/store/canvas";
|
||||
|
||||
/** Returns true if the node has any direct child in the node list. */
|
||||
function hasChildren(nodeId: string, nodes: Node<WorkspaceNodeData>[]): boolean {
|
||||
return nodes.some((n) => n.data.parentId === nodeId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Canvas-wide keyboard shortcuts. All bound to the document window so
|
||||
@ -21,9 +14,6 @@ function hasChildren(nodeId: string, nodes: Node<WorkspaceNodeData>[]): boolean
|
||||
* Cmd/Ctrl+] — bump selected node forward in z-order
|
||||
* Cmd/Ctrl+[ — bump selected node backward in z-order
|
||||
* Z — zoom-to-team if the selected node has children
|
||||
* Arrow keys — move selected node 10px (50px with Shift)
|
||||
* Cmd/Ctrl+Arrow — resize selected node (↑↓ height, ←→ width)
|
||||
* Cmd/Ctrl+Shift+Arrow — resize by 2px per press (fine control)
|
||||
*/
|
||||
export function useKeyboardShortcuts() {
|
||||
useEffect(() => {
|
||||
@ -90,76 +80,6 @@ export function useKeyboardShortcuts() {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Arrow-key node movement — Figma-style keyboard drag for keyboard users.
|
||||
// 10 px per press, 50 px with Shift held. Only fires when a node
|
||||
// is selected and the target isn't a form control. Skipped when a
|
||||
// modifier key (Cmd/Ctrl/Alt) is held so those combos can be used
|
||||
// for other shortcuts (e.g. Cmd+Arrow = resize).
|
||||
if (
|
||||
!inInput &&
|
||||
!e.metaKey &&
|
||||
!e.ctrlKey &&
|
||||
!e.altKey &&
|
||||
(e.key === "ArrowUp" ||
|
||||
e.key === "ArrowDown" ||
|
||||
e.key === "ArrowLeft" ||
|
||||
e.key === "ArrowRight")
|
||||
) {
|
||||
const state = useCanvasStore.getState();
|
||||
const selectedId = state.selectedNodeId;
|
||||
if (!selectedId) return;
|
||||
// Skip when a modal/dialog is already open — dialogs own their own
|
||||
// arrow-key semantics and shouldn't trigger canvas moves.
|
||||
if (document.querySelector('[role="dialog"][aria-modal="true"]')) return;
|
||||
e.preventDefault();
|
||||
const step = e.shiftKey ? 50 : 10;
|
||||
let dx = 0;
|
||||
let dy = 0;
|
||||
if (e.key === "ArrowUp") dy = -step;
|
||||
else if (e.key === "ArrowDown") dy = step;
|
||||
else if (e.key === "ArrowLeft") dx = -step;
|
||||
else dx = step;
|
||||
state.moveNode(selectedId, dx, dy);
|
||||
}
|
||||
|
||||
// Cmd/Ctrl+Arrow — keyboard-accessible node resize.
|
||||
// ↑/↓ resizes height, ←/→ resizes width.
|
||||
// 10 px per press (2 px with Shift for fine control).
|
||||
// Uses the same onNodesChange('dimensions') path that NodeResizer uses.
|
||||
if (
|
||||
!inInput &&
|
||||
(e.metaKey || e.ctrlKey) &&
|
||||
(e.key === "ArrowUp" ||
|
||||
e.key === "ArrowDown" ||
|
||||
e.key === "ArrowLeft" ||
|
||||
e.key === "ArrowRight")
|
||||
) {
|
||||
const state = useCanvasStore.getState();
|
||||
const selectedId = state.selectedNodeId;
|
||||
if (!selectedId) return;
|
||||
if (document.querySelector('[role="dialog"][aria-modal="true"]')) return;
|
||||
e.preventDefault();
|
||||
const step = e.shiftKey ? 2 : 10;
|
||||
const node = state.nodes.find((n) => n.id === selectedId);
|
||||
if (!node) return;
|
||||
const currentWidth = (node.width ?? 210) as number;
|
||||
const currentHeight = (node.height ?? 110) as number;
|
||||
const minWidth = hasChildren(node.id, state.nodes) ? 360 : 210;
|
||||
const minHeight = hasChildren(node.id, state.nodes) ? 200 : 110;
|
||||
let newWidth = currentWidth;
|
||||
let newHeight = currentHeight;
|
||||
if (e.key === "ArrowUp") newHeight = Math.max(minHeight, currentHeight - step);
|
||||
else if (e.key === "ArrowDown") newHeight = currentHeight + step;
|
||||
else if (e.key === "ArrowLeft") newWidth = Math.max(minWidth, currentWidth - step);
|
||||
else newWidth = currentWidth + step;
|
||||
const change: NodeChange = {
|
||||
type: "dimensions",
|
||||
id: selectedId,
|
||||
dimensions: { width: newWidth, height: newHeight },
|
||||
};
|
||||
state.onNodesChange([change]);
|
||||
}
|
||||
};
|
||||
window.addEventListener("keydown", handler);
|
||||
return () => window.removeEventListener("keydown", handler);
|
||||
|
||||
@ -109,7 +109,7 @@ export function OrgTokensTab() {
|
||||
Organization API Keys
|
||||
</h3>
|
||||
</div>
|
||||
<p className="text-[10px] text-ink-mid leading-relaxed">
|
||||
<p className="text-[10px] text-ink-soft leading-relaxed">
|
||||
Full-admin bearer tokens for this organization. Use with external
|
||||
integrations, CLI tools, or AI agents that need to manage
|
||||
workspaces, settings, and secrets. Each key has the same
|
||||
@ -182,13 +182,13 @@ export function OrgTokensTab() {
|
||||
|
||||
{/* Token list */}
|
||||
{loading ? (
|
||||
<div role="status" aria-live="polite" className="flex items-center justify-center gap-2 py-6 text-ink-mid text-xs">
|
||||
<div role="status" aria-live="polite" className="flex items-center justify-center gap-2 py-6 text-ink-soft text-xs">
|
||||
<Spinner /> Loading keys...
|
||||
</div>
|
||||
) : tokens.length === 0 ? (
|
||||
<div className="text-center py-6">
|
||||
<p className="text-xs text-ink-mid">No active keys</p>
|
||||
<p className="text-[10px] text-ink-mid mt-1">
|
||||
<p className="text-xs text-ink-soft">No active keys</p>
|
||||
<p className="text-[10px] text-ink-soft mt-1">
|
||||
Create a key above to authenticate API calls to this organization.
|
||||
</p>
|
||||
</div>
|
||||
@ -209,7 +209,7 @@ export function OrgTokensTab() {
|
||||
{t.name}
|
||||
</span>
|
||||
)}
|
||||
<div className="text-[9px] text-ink-mid space-x-3">
|
||||
<div className="text-[9px] text-ink-soft space-x-3">
|
||||
<span>Created {formatAge(t.created_at)}</span>
|
||||
{t.last_used_at && (
|
||||
<span>Last used {formatAge(t.last_used_at)}</span>
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user