diff --git a/.gitea/scripts/sop-checklist-gate.py b/.gitea/scripts/sop-checklist-gate.py old mode 100755 new mode 100644 diff --git a/.gitea/workflows/audit-force-merge.yml b/.gitea/workflows/audit-force-merge.yml index b3441bca..218d0e0b 100644 --- a/.gitea/workflows/audit-force-merge.yml +++ b/.gitea/workflows/audit-force-merge.yml @@ -52,7 +52,10 @@ jobs: # Declared here rather than fetched from /branch_protections # because that endpoint requires admin write — sop-tier-bot is # read-only by design (least-privilege). + # + # staging branch protection (§F3a/F3b, mc#798): only + # sop-checklist / all-items-acked is required. Unlike main, + # staging does not require sop-tier-check or Secret scan. REQUIRED_CHECKS: | - CI / all-required (pull_request) sop-checklist / all-items-acked (pull_request) run: bash .gitea/scripts/audit-force-merge.sh diff --git a/.gitea/workflows/publish-workspace-server-image.yml b/.gitea/workflows/publish-workspace-server-image.yml index c73b9dd0..057b9462 100644 --- a/.gitea/workflows/publish-workspace-server-image.yml +++ b/.gitea/workflows/publish-workspace-server-image.yml @@ -18,21 +18,8 @@ name: publish-workspace-server-image # :staging- — per-commit digest, stable for canary verify # :staging-latest — tracks most recent build on this branch # -# Production auto-deploy: -# After both platform and tenant images are pushed, deploy-production waits -# for strict required push contexts on the same SHA to go green, then -# calls the production CP redeploy-fleet endpoint with target_tag= -# staging-. Set repo variable or secret PROD_AUTO_DEPLOY_DISABLED=true -# to stop production rollout while keeping image publishing enabled. -# # ECR target: 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/* # Required secrets: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AUTO_SYNC_TOKEN -# -# mc#711: Docker daemon not accessible on ubuntu-latest runner (molecule-canonical-1 -# shows client-only in `docker info` — daemon not running). 
DinD mount is present but -# daemon doesn't respond. Fix: add diagnostic step showing socket info so ops can -# identify which runners have a live daemon. If no daemon is available, the job -# fails fast with actionable output rather than silent deep failure. on: push: @@ -45,10 +32,15 @@ on: - '.gitea/workflows/publish-workspace-server-image.yml' workflow_dispatch: -# No `concurrency:` block here. Gitea 1.22.6 can cancel queued runs despite -# `cancel-in-progress: false`; that is not acceptable for a workflow with a -# production deploy job. Per-SHA image tags are immutable, and staging-latest is -# best-effort last-writer-wins metadata. +# Serialize per-ref so two rapid pushes to the same branch don't race the +# :staging-latest retag; runs for different refs still proceed in parallel, +# which is safe: each push gets a distinct :staging-<sha> tag and :staging-latest is last-write-wins. +# +# cancel-in-progress: false → in-flight builds finish; the next push's +# build queues. This avoids a partially-pushed image. +concurrency: + group: publish-workspace-server-image-${{ github.ref }} + cancel-in-progress: false permissions: contents: read @@ -65,20 +57,23 @@ jobs: - name: Checkout uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - name: Diagnose Docker daemon access + # Health check: verify Docker daemon is accessible before attempting any + # build steps. This fails loudly at step 1 when the runner's docker.sock + # is inaccessible (e.g. permission change, daemon restart, or group-membership + # drift) rather than silently continuing to step 2 where `docker build` + # fails deep in the process with a cryptic ECR auth error that doesn't + # surface the root cause. Also reports the daemon version so the operator + # can correlate with runner host logs. 
+ - name: Verify Docker daemon access run: | set -euo pipefail - echo "::group::Docker daemon diagnosis" - echo "Runner: ${HOSTNAME:-unknown}" - echo "--- Socket info ---" - ls -la /var/run/docker.sock 2>/dev/null || echo "/var/run/docker.sock: not found" - stat /var/run/docker.sock 2>/dev/null || true - echo "--- User info ---" - id - echo "--- docker version ---" - docker version 2>&1 || true - echo "--- docker info (full) ---" - docker info 2>&1 || echo "docker info failed: exit $?" + echo "::group::Docker daemon health check" + docker info 2>&1 | head -5 || { + echo "::error::Docker daemon is not accessible at /var/run/docker.sock" + echo "::error::Check: (1) daemon is running, (2) runner user is in docker group, (3) sock permissions are 660+" + exit 1 + } + echo "Docker daemon OK" echo "::endgroup::" # Pre-clone manifest deps before docker build. @@ -97,12 +92,13 @@ jobs: MOLECULE_GITEA_TOKEN: ${{ secrets.AUTO_SYNC_TOKEN }} run: | set -euo pipefail + if [ -z "${MOLECULE_GITEA_TOKEN}" ]; then + echo "::error::AUTO_SYNC_TOKEN secret is empty" + exit 1 + fi mkdir -p .tenant-bundle-deps - # Strip JSON5 comments before jq parsing — Integration Tester appends - # `// Triggered by ...` which breaks `jq` in clone-manifest.sh. - sed '/^[[:space:]]*\/\//d' manifest.json > .manifest-stripped.json bash scripts/clone-manifest.sh \ - .manifest-stripped.json \ + manifest.json \ .tenant-bundle-deps/workspace-configs-templates \ .tenant-bundle-deps/org-templates \ .tenant-bundle-deps/plugins @@ -119,11 +115,6 @@ jobs: # Build + push platform image (inline ECR auth — mirrors the operator-host # approach; credentials come from GITHUB_SECRET_AWS_ACCESS_KEY_ID / # GITHUB_SECRET_AWS_SECRET_ACCESS_KEY in Gitea Actions). - # docker buildx bake / build required for `imagetools inspect` digest - # capture in the CP pin-update step (RFC internal#229 §X step 4 PR-1). 
- - name: Set up Docker Buildx - uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0 - - name: Build & push platform image to ECR (staging- + staging-latest) env: IMAGE_NAME: ${{ env.IMAGE_NAME }} @@ -139,16 +130,17 @@ jobs: ECR_REGISTRY="${IMAGE_NAME%%/*}" aws ecr get-login-password --region us-east-2 | \ docker login --username AWS --password-stdin "${ECR_REGISTRY}" - docker buildx build \ + docker build \ --file ./workspace-server/Dockerfile \ --build-arg GIT_SHA="${GIT_SHA}" \ - --label "org.opencontainers.image.source=https://git.moleculesai.app/molecule-ai/${REPO}" \ + --label "org.opencontainers.image.source=https://github.com/${REPO}" \ --label "org.opencontainers.image.revision=${GIT_SHA}" \ - --label "org.opencontainers.image.created=$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ - --label "molecule.workflow.run_id=${GITHUB_RUN_ID}" \ + --label "org.opencontainers.image.description=Molecule AI platform — pending canary verify" \ --tag "${IMAGE_NAME}:${TAG_SHA}" \ --tag "${IMAGE_NAME}:${TAG_LATEST}" \ - --push . + . + docker push "${IMAGE_NAME}:${TAG_SHA}" + docker push "${IMAGE_NAME}:${TAG_LATEST}" # Build + push tenant image (Go platform + Next.js canvas in one image). 
- name: Build & push tenant image to ECR (staging- + staging-latest) @@ -166,184 +158,15 @@ jobs: ECR_REGISTRY="${TENANT_IMAGE_NAME%%/*}" aws ecr get-login-password --region us-east-2 | \ docker login --username AWS --password-stdin "${ECR_REGISTRY}" - docker buildx build \ + docker build \ --file ./workspace-server/Dockerfile.tenant \ --build-arg NEXT_PUBLIC_PLATFORM_URL= \ --build-arg GIT_SHA="${GIT_SHA}" \ - --label "org.opencontainers.image.source=https://git.moleculesai.app/molecule-ai/${REPO}" \ + --label "org.opencontainers.image.source=https://github.com/${REPO}" \ --label "org.opencontainers.image.revision=${GIT_SHA}" \ - --label "org.opencontainers.image.created=$(date -u +%Y-%m-%dT%H:%M:%SZ)" \ - --label "molecule.workflow.run_id=${GITHUB_RUN_ID}" \ + --label "org.opencontainers.image.description=Molecule AI tenant platform + canvas — pending canary verify" \ --tag "${TENANT_IMAGE_NAME}:${TAG_SHA}" \ --tag "${TENANT_IMAGE_NAME}:${TAG_LATEST}" \ - --push . - - # bp-exempt: production deploy side-effect; merge is gated by CI / all-required and this job waits for push CI before acting. 
- deploy-production: - name: Production auto-deploy - needs: build-and-push - if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} - runs-on: ubuntu-latest - timeout-minutes: 75 - env: - CP_URL: ${{ vars.PROD_CP_URL || 'https://api.moleculesai.app' }} - CP_ADMIN_API_TOKEN: ${{ secrets.CP_ADMIN_API_TOKEN }} - GITEA_HOST: git.moleculesai.app - GITEA_TOKEN: ${{ secrets.PROD_AUTO_DEPLOY_CONTROL_TOKEN || secrets.AUTO_SYNC_TOKEN }} - PROD_AUTO_DEPLOY_DISABLED: ${{ vars.PROD_AUTO_DEPLOY_DISABLED || secrets.PROD_AUTO_DEPLOY_DISABLED || '' }} - PROD_AUTO_DEPLOY_CANARY_SLUG: ${{ vars.PROD_AUTO_DEPLOY_CANARY_SLUG || 'hongming' }} - PROD_AUTO_DEPLOY_SOAK_SECONDS: ${{ vars.PROD_AUTO_DEPLOY_SOAK_SECONDS || '60' }} - PROD_AUTO_DEPLOY_BATCH_SIZE: ${{ vars.PROD_AUTO_DEPLOY_BATCH_SIZE || '3' }} - PROD_AUTO_DEPLOY_DRY_RUN: ${{ vars.PROD_AUTO_DEPLOY_DRY_RUN || '' }} - PROD_ALLOW_NON_PROD_CP_URL: ${{ vars.PROD_ALLOW_NON_PROD_CP_URL || '' }} - steps: - - name: Checkout - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - - name: Build deploy plan - id: plan - run: | - set -euo pipefail - python3 .gitea/scripts/prod-auto-deploy.py plan > "$RUNNER_TEMP/prod-auto-deploy-plan.json" - jq . "$RUNNER_TEMP/prod-auto-deploy-plan.json" - enabled="$(jq -r '.enabled' "$RUNNER_TEMP/prod-auto-deploy-plan.json")" - echo "enabled=$enabled" >> "$GITHUB_OUTPUT" - if [ "$enabled" != "true" ]; then - reason="$(jq -r '.disabled_reason' "$RUNNER_TEMP/prod-auto-deploy-plan.json")" - echo "::notice::Production auto-deploy disabled: $reason" - { - echo "## Production auto-deploy skipped" - echo "" - echo "Reason: \`$reason\`" - } >> "$GITHUB_STEP_SUMMARY" - exit 0 - fi - if [ -z "${CP_ADMIN_API_TOKEN:-}" ]; then - echo "::error::CP_ADMIN_API_TOKEN secret is required for production auto-deploy." - exit 1 - fi - if [ -z "${GITEA_TOKEN:-}" ]; then - echo "::error::AUTO_SYNC_TOKEN secret is required so production deploy can wait for green CI." 
- exit 1 - fi - - - name: Self-test production deploy helper - if: ${{ steps.plan.outputs.enabled == 'true' }} - run: | - set -euo pipefail - python3 -m pip install --quiet 'pytest==9.0.2' 'PyYAML==6.0.2' - python3 -m pytest .gitea/scripts/tests/test_prod_auto_deploy.py -q - python3 .gitea/scripts/lint-workflow-yaml.py --workflow-dir .gitea/workflows - - - name: Wait for green main CI on this SHA - if: ${{ steps.plan.outputs.enabled == 'true' }} - run: | - set -euo pipefail - python3 .gitea/scripts/prod-auto-deploy.py wait-ci - - - name: Call production CP redeploy-fleet - if: ${{ steps.plan.outputs.enabled == 'true' }} - run: | - set -euo pipefail - python3 .gitea/scripts/prod-auto-deploy.py assert-enabled - PLAN="$RUNNER_TEMP/prod-auto-deploy-plan.json" - TARGET_TAG="$(jq -r '.target_tag' "$PLAN")" - BODY="$(jq -c '.body' "$PLAN")" - - echo "POST $CP_URL/cp/admin/tenants/redeploy-fleet" - echo " target_tag: $TARGET_TAG" - echo " body: $BODY" - - HTTP_RESPONSE="$RUNNER_TEMP/prod-redeploy-response.json" - HTTP_CODE_FILE="$RUNNER_TEMP/prod-redeploy-http-code.txt" - set +e - curl -sS -o "$HTTP_RESPONSE" -w '%{http_code}' \ - -m 1200 \ - -H "Authorization: Bearer $CP_ADMIN_API_TOKEN" \ - -H "Content-Type: application/json" \ - -X POST "$CP_URL/cp/admin/tenants/redeploy-fleet" \ - -d "$BODY" > "$HTTP_CODE_FILE" - set -e - - HTTP_CODE="$(cat "$HTTP_CODE_FILE" 2>/dev/null || echo "000")" - [ -z "$HTTP_CODE" ] && HTTP_CODE="000" - echo "HTTP $HTTP_CODE" - jq '{ok, result_count: (.results // [] | length)}' "$HTTP_RESPONSE" || true - - { - echo "## Production auto-deploy" - echo "" - echo "**Commit:** \`${GITHUB_SHA:0:7}\`" - echo "**Target tag:** \`$TARGET_TAG\`" - echo "**HTTP:** $HTTP_CODE" - echo "" - echo "### Per-tenant result" - echo "" - echo "| Slug | Phase | SSM Status | Exit | Healthz | Error present |" - echo "|------|-------|------------|------|---------|---------------|" - jq -r '.results[]? 
| "| \(.slug) | \(.phase) | \(.ssm_status // "-") | \(.ssm_exit_code) | \(.healthz_ok) | \((.error // "") != "") |"' "$HTTP_RESPONSE" || true - } >> "$GITHUB_STEP_SUMMARY" - - if [ "$HTTP_CODE" != "200" ]; then - echo "::error::redeploy-fleet returned HTTP $HTTP_CODE" - exit 1 - fi - OK="$(jq -r '.ok' "$HTTP_RESPONSE")" - if [ "$OK" != "true" ]; then - echo "::error::redeploy-fleet reported ok=false; production rollout halted." - exit 1 - fi - - - name: Verify reachable tenants report this SHA - if: ${{ steps.plan.outputs.enabled == 'true' }} - env: - TENANT_DOMAIN: moleculesai.app - run: | - set -euo pipefail - RESP="$RUNNER_TEMP/prod-redeploy-response.json" - mapfile -t SLUGS < <(jq -r '.results[]? | .slug' "$RESP") - if [ ${#SLUGS[@]} -eq 0 ]; then - echo "::error::No tenants returned from redeploy-fleet; refusing to mark production deploy verified." - exit 1 - fi - - STALE_COUNT=0 - UNREACHABLE_COUNT=0 - UNHEALTHY_COUNT=0 - for slug in "${SLUGS[@]}"; do - healthz_ok="$(jq -r --arg slug "$slug" '.results[]? | select(.slug == $slug) | .healthz_ok' "$RESP" | tail -1)" - if [ "$healthz_ok" != "true" ]; then - echo "::error::$slug did not report healthz_ok=true in redeploy-fleet response." - UNHEALTHY_COUNT=$((UNHEALTHY_COUNT + 1)) - continue - fi - url="https://${slug}.${TENANT_DOMAIN}/buildinfo" - body="$(curl -sS --max-time 30 --retry 3 --retry-delay 5 --retry-connrefused "$url" || true)" - actual="$(echo "$body" | jq -r '.git_sha // ""' 2>/dev/null || echo "")" - if [ -z "$actual" ]; then - echo "::error::$slug did not return /buildinfo after deploy." 
- UNREACHABLE_COUNT=$((UNREACHABLE_COUNT + 1)) - continue - fi - if [ "$actual" != "$GITHUB_SHA" ]; then - echo "::error::$slug is stale: actual=${actual:0:7}, expected=${GITHUB_SHA:0:7}" - STALE_COUNT=$((STALE_COUNT + 1)) - else - echo "$slug: ${actual:0:7}" - fi - done - - { - echo "" - echo "### Buildinfo verification" - echo "" - echo "Expected SHA: \`${GITHUB_SHA:0:7}\`" - echo "Verified tenants: ${#SLUGS[@]}" - echo "Stale tenants: $STALE_COUNT" - echo "Unhealthy tenants: $UNHEALTHY_COUNT" - echo "Unreachable tenants: $UNREACHABLE_COUNT" - } >> "$GITHUB_STEP_SUMMARY" - - if [ "$STALE_COUNT" -gt 0 ] || [ "$UNHEALTHY_COUNT" -gt 0 ] || [ "$UNREACHABLE_COUNT" -gt 0 ]; then - exit 1 - fi + . + docker push "${TENANT_IMAGE_NAME}:${TAG_SHA}" + docker push "${TENANT_IMAGE_NAME}:${TAG_LATEST}" diff --git a/.gitea/workflows/sop-tier-check.yml b/.gitea/workflows/sop-tier-check.yml index 235ed633..d3f7aefb 100644 --- a/.gitea/workflows/sop-tier-check.yml +++ b/.gitea/workflows/sop-tier-check.yml @@ -28,16 +28,15 @@ # # Environment variables: # SOP_DEBUG=1 — per-API-call diagnostic lines. Default: off. -# SOP_LEGACY_CHECK=1 — revert to OR-gate for this run. Intended for -# emergency use only; burn-in window closed -# 2026-05-17 (internal#189 Phase 1). +# SOP_LEGACY_CHECK=1 — revert to OR-gate for this run. Grace window +# for PRs in-flight when AND-composition deployed. +# Burn-in: remove after 2026-05-17 (7-day window). # -# BURN-IN CLOSED 2026-05-17 (internal#189 Phase 1): The 7-day burn-in -# window closed. continue-on-error: true has been removed from the -# tier-check job; AND-composition is now fully enforced. If you need -# to temporarily re-introduce a mask, file a tracker and follow the -# mc#774 protocol (Tier 2e lint requires a current tracker within -# 2 lines of any continue-on-error: true). +# BURN-IN NOTE (internal#189 Phase 1): continue-on-error: true is set on +# the tier-check job below. 
This prevents AND-composition from blocking +# PRs during the 7-day burn-in. After 2026-05-17: +# 1. Remove `continue-on-error: true` from this job block. +# 2. Update this BURN-IN NOTE comment to mark the window closed. name: sop-tier-check @@ -64,6 +63,9 @@ on: jobs: tier-check: runs-on: ubuntu-latest + # BURN-IN: continue-on-error prevents AND-composition from blocking + # PRs during the 7-day window. Remove after 2026-05-17 (internal#189). + continue-on-error: true permissions: contents: read pull-requests: read @@ -87,7 +89,6 @@ jobs: # runners). The sop-tier-check script has its own fallback as a # third line of defense. continue-on-error: true ensures this step # failing does not block the job. - # mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently. continue-on-error: true run: | # apt-get is the primary method — Ubuntu package mirrors are reliably @@ -108,7 +109,6 @@ jobs: # continue-on-error: true at step level — job-level is ignored by Gitea # Actions (quirk #10, internal runbooks). Belt-and-suspenders with # SOP_FAIL_OPEN=1 + || true below. - # mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently. 
continue-on-error: true env: GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }} diff --git a/canvas/src/components/CommunicationOverlay.tsx b/canvas/src/components/CommunicationOverlay.tsx index 11198d21..88aab5af 100644 --- a/canvas/src/components/CommunicationOverlay.tsx +++ b/canvas/src/components/CommunicationOverlay.tsx @@ -226,7 +226,7 @@ export function CommunicationOverlay() { type="button" onClick={() => setVisible(false)} aria-label="Close communications panel" - className="text-ink-mid hover:text-ink-mid text-xs focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-1 focus-visible:ring-offset-surface" + className="text-ink-mid hover:text-ink-mid text-xs focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-1 focus-visible:ring-offset-surface rounded" > diff --git a/canvas/src/components/ConversationTraceModal.tsx b/canvas/src/components/ConversationTraceModal.tsx index 4bf3a9d4..7789b4c1 100644 --- a/canvas/src/components/ConversationTraceModal.tsx +++ b/canvas/src/components/ConversationTraceModal.tsx @@ -115,7 +115,7 @@ export function ConversationTraceModal({ open, workspaceId: _workspaceId, onClos diff --git a/canvas/src/components/ExternalConnectModal.tsx b/canvas/src/components/ExternalConnectModal.tsx index 14de5d1c..3c4ad33d 100644 --- a/canvas/src/components/ExternalConnectModal.tsx +++ b/canvas/src/components/ExternalConnectModal.tsx @@ -18,7 +18,110 @@ import { useCallback, useState } from "react"; import * as Dialog from "@radix-ui/react-dialog"; -type Tab = "python" | "curl" | "claude" | "mcp" | "hermes" | "codex" | "openclaw" | "kimi" | "fields"; +// ─── Pure fill helpers ──────────────────────────────────────────────────────── +// Each snippet is server-stamped with workspace_id + platform_url but leaves +// AUTH_TOKEN as a placeholder. 
These helpers stamp the real token in so the +// operator's copy-paste is truly ready-to-run. All are pure string ops. + +export function fillPythonSnippet( + snippet: string, + authToken: string, +): string { + return snippet.replace( + 'AUTH_TOKEN = ""', + `AUTH_TOKEN = "${authToken}"`, + ); +} + +export function fillCurlSnippet( + snippet: string, + authToken: string, +): string { + return snippet.replace( + 'WORKSPACE_AUTH_TOKEN=""', + `WORKSPACE_AUTH_TOKEN="${authToken}"`, + ); +} + +export function fillChannelSnippet( + snippet: string | undefined, + authToken: string, +): string | undefined { + return snippet?.replace( + 'MOLECULE_WORKSPACE_TOKENS=', + `MOLECULE_WORKSPACE_TOKENS=${authToken}`, + ); +} + +export function fillUniversalMcpSnippet( + snippet: string | undefined, + authToken: string, +): string | undefined { + return snippet?.replace( + 'MOLECULE_WORKSPACE_TOKEN=""', + `MOLECULE_WORKSPACE_TOKEN="${authToken}"`, + ); +} + +export function fillHermesSnippet( + snippet: string | undefined, + authToken: string, +): string | undefined { + return snippet?.replace( + 'MOLECULE_WORKSPACE_TOKEN=""', + `MOLECULE_WORKSPACE_TOKEN="${authToken}"`, + ); +} + +export function fillCodexSnippet( + snippet: string | undefined, + authToken: string, +): string | undefined { + return snippet?.replace( + 'MOLECULE_WORKSPACE_TOKEN = ""', + `MOLECULE_WORKSPACE_TOKEN = "${authToken}"`, + ); +} + +export function fillOpenClawSnippet( + snippet: string | undefined, + authToken: string, +): string | undefined { + return snippet?.replace( + 'WORKSPACE_TOKEN=""', + `WORKSPACE_TOKEN="${authToken}"`, + ); +} + +/** Build the ordered tab list shown in the modal. Each tab only appears when + * the platform supplies the corresponding snippet. 
*/ +export function buildTabOrder(info: ExternalConnectionInfo): Tab[] { + const tabs: Tab[] = []; + const { filledUniversalMcp, filledChannel, filledHermes, filledCodex, filledOpenClaw } = buildFilledSnippets(info); + if (filledUniversalMcp) tabs.push("mcp"); + tabs.push("python"); + if (filledChannel) tabs.push("claude"); + if (filledHermes) tabs.push("hermes"); + if (filledCodex) tabs.push("codex"); + if (filledOpenClaw) tabs.push("openclaw"); + tabs.push("curl", "fields"); + return tabs; +} + +/** Pre-fill all snippets from an info object. Exposed for testing. */ +export function buildFilledSnippets(info: ExternalConnectionInfo) { + return { + filledPython: fillPythonSnippet(info.python_snippet, info.auth_token), + filledCurl: fillCurlSnippet(info.curl_register_template, info.auth_token), + filledChannel: fillChannelSnippet(info.claude_code_channel_snippet, info.auth_token), + filledUniversalMcp: fillUniversalMcpSnippet(info.universal_mcp_snippet, info.auth_token), + filledHermes: fillHermesSnippet(info.hermes_channel_snippet, info.auth_token), + filledCodex: fillCodexSnippet(info.codex_snippet, info.auth_token), + filledOpenClaw: fillOpenClawSnippet(info.openclaw_snippet, info.auth_token), + }; +} + +type Tab = "python" | "curl" | "claude" | "mcp" | "hermes" | "codex" | "openclaw" | "fields"; export interface ExternalConnectionInfo { workspace_id: string; @@ -58,10 +161,6 @@ export interface ExternalConnectionInfo { // openclaw gateway on loopback. Outbound-tools-only today; push // parity on an external openclaw needs a sessions.steer bridge. openclaw_snippet?: string; - // Kimi CLI setup snippet — self-contained Python heartbeat script - // that keeps a Kimi workspace online in poll mode. Optional for - // backward compat with platforms that haven't shipped the Kimi tab. 
- kimi_snippet?: string; } interface Props { @@ -106,59 +205,7 @@ export function ExternalConnectModal({ info, onClose }: Props) { if (!info) return null; - // Python snippet is stamped server-side with workspace_id + - // platform_url but leaves AUTH_TOKEN as a "" placeholder - // (that's what we're showing in the modal). Fill in the real - // token here so the snippet the operator copies is truly ready-to-run. - const filledPython = info.python_snippet.replace( - 'AUTH_TOKEN = ""', - `AUTH_TOKEN = "${info.auth_token}"`, - ); - const filledCurl = info.curl_register_template.replace( - 'WORKSPACE_AUTH_TOKEN=""', - `WORKSPACE_AUTH_TOKEN="${info.auth_token}"`, - ); - // The channel snippet asks the operator to paste the auth_token into - // the .env file's MOLECULE_WORKSPACE_TOKENS field. Stamp it server-side - // here so the copy-paste-block is truly ready-to-run. - const filledChannel = info.claude_code_channel_snippet?.replace( - 'MOLECULE_WORKSPACE_TOKENS=', - `MOLECULE_WORKSPACE_TOKENS=${info.auth_token}`, - ); - // Universal MCP snippet uses MOLECULE_WORKSPACE_TOKEN as the env-var - // name passed through to molecule-mcp via `claude mcp add ... -- env - // MOLECULE_WORKSPACE_TOKEN=...`. The placeholder must match the - // template's literal — pre-2026-04-30 polish this looked for - // WORKSPACE_AUTH_TOKEN (carryover from the curl tab), which silently - // skipped the substitution and left "" - // visible in the operator's clipboard. - const filledUniversalMcp = info.universal_mcp_snippet?.replace( - 'MOLECULE_WORKSPACE_TOKEN=""', - `MOLECULE_WORKSPACE_TOKEN="${info.auth_token}"`, - ); - // Hermes channel snippet uses MOLECULE_WORKSPACE_TOKEN (same env-var - // name as Universal MCP). Stamp the auth_token in so the operator's - // copy-paste is fully ready-to-run. 
- const filledHermes = info.hermes_channel_snippet?.replace( - 'MOLECULE_WORKSPACE_TOKEN=""', - `MOLECULE_WORKSPACE_TOKEN="${info.auth_token}"`, - ); - // Codex + OpenClaw snippets carry the placeholder inside the - // generated config block (TOML / JSON respectively). Stamp the - // token in so the copy-paste is one less manual edit. - const filledCodex = info.codex_snippet?.replace( - 'MOLECULE_WORKSPACE_TOKEN = ""', - `MOLECULE_WORKSPACE_TOKEN = "${info.auth_token}"`, - ); - const filledOpenClaw = info.openclaw_snippet?.replace( - 'WORKSPACE_TOKEN=""', - `WORKSPACE_TOKEN="${info.auth_token}"`, - ); - // Kimi snippet carries the placeholder inside the shell heredoc. - const filledKimi = info.kimi_snippet?.replace( - 'MOLECULE_WORKSPACE_TOKEN=', - `MOLECULE_WORKSPACE_TOKEN=${info.auth_token}`, - ); + const { filledPython, filledCurl, filledChannel, filledUniversalMcp, filledHermes, filledCodex, filledOpenClaw } = buildFilledSnippets(info); return ( !o && onClose()}> @@ -180,28 +227,7 @@ export function ExternalConnectModal({ info, onClose }: Props) { aria-label="Connection snippet format" className="mt-4 flex gap-1 border-b border-line" > - {(() => { - // Build the tab order dynamically. Claude Code first - // (when offered) since it's the simplest setup; Python - // SDK second (full register+heartbeat+inbound); Universal - // MCP third (any MCP-aware runtime, outbound-only); curl - // for one-shot register; Fields for raw values. - // Tab order: Universal MCP first (default, runtime- - // agnostic primitives), then runtime-specific channel/ - // SDK tabs, then curl + Fields. Each runtime tab only - // appears when the platform supplies the snippet — no - // dead "tab missing snippet" UX. 
- const tabs: Tab[] = []; - if (filledUniversalMcp) tabs.push("mcp"); - tabs.push("python"); - if (filledChannel) tabs.push("claude"); - if (filledHermes) tabs.push("hermes"); - if (filledCodex) tabs.push("codex"); - if (filledOpenClaw) tabs.push("openclaw"); - if (filledKimi) tabs.push("kimi"); - tabs.push("curl", "fields"); - return tabs; - })().map((t) => ( + {buildTabOrder(info).map((t) => ( @@ -397,7 +412,7 @@ function Field({ type="button" onClick={onCopy} disabled={!value} - className="text-xs px-2 py-1 rounded bg-surface-card hover:bg-surface-card text-ink disabled:opacity-40 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-1" + className="text-xs px-2 py-1 rounded bg-surface-card hover:bg-surface-card text-ink disabled:opacity-40 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-1 focus-visible:ring-offset-surface" > {copied ? "Copied!" : "Copy"} diff --git a/canvas/src/components/MemoryInspectorPanel.tsx b/canvas/src/components/MemoryInspectorPanel.tsx index 6655ad37..42b83fd8 100644 --- a/canvas/src/components/MemoryInspectorPanel.tsx +++ b/canvas/src/components/MemoryInspectorPanel.tsx @@ -360,7 +360,7 @@ export function MemoryInspectorPanel({ workspaceId }: Props) { setDebouncedQuery(''); }} aria-label="Clear search" - className="absolute right-2 text-ink-mid hover:text-ink transition-colors text-sm leading-none focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-1" + className="absolute right-2 text-ink-mid hover:text-ink transition-colors text-sm leading-none focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-1 focus-visible:ring-offset-surface rounded" > × @@ -381,7 +381,7 @@ export function MemoryInspectorPanel({ workspaceId }: Props) { type="button" onClick={loadEntries} disabled={pluginUnavailable} - className="px-2 py-1 text-[11px] bg-surface-card 
hover:bg-surface-card text-ink-mid rounded transition-colors disabled:opacity-50 disabled:cursor-not-allowed focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-1" + className="px-2 py-1 text-[11px] bg-surface-card hover:bg-surface-card text-ink-mid rounded transition-colors disabled:opacity-50 disabled:cursor-not-allowed focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-1 focus-visible:ring-offset-surface" aria-label="Refresh memories" > ↻ Refresh @@ -515,7 +515,7 @@ function MemoryEntryRow({ entry, onDelete }: MemoryEntryRowProps) { {/* Header row */} diff --git a/canvas/src/components/MissingKeysModal.tsx b/canvas/src/components/MissingKeysModal.tsx index c9dbc90d..850f656c 100644 --- a/canvas/src/components/MissingKeysModal.tsx +++ b/canvas/src/components/MissingKeysModal.tsx @@ -631,8 +631,9 @@ function AllKeysModal({ // React's commit ordering.