diff --git a/.ci-trigger/RERUN b/.ci-trigger/RERUN new file mode 100644 index 00000000..fe4398c1 --- /dev/null +++ b/.ci-trigger/RERUN @@ -0,0 +1 @@ +CI re-trigger at Tue Apr 21 15:40:21 UTC 2026\n \ No newline at end of file diff --git a/.github/workflows/canary-staging.yml b/.github/workflows/canary-staging.yml new file mode 100644 index 00000000..32cba939 --- /dev/null +++ b/.github/workflows/canary-staging.yml @@ -0,0 +1,153 @@ +name: Canary — staging SaaS smoke (every 30 min) + +# Minimum viable health check: provisions one Hermes workspace on a fresh +# staging org, sends one A2A message, verifies PONG, tears down. ~8 min +# wall clock. Pages on failure by opening a GitHub issue; auto-closes the +# issue on the next green run. +# +# The full-SaaS workflow (e2e-staging-saas.yml) covers the broader surface +# but runs only on provisioning-critical pushes + nightly — this one +# catches drift in the 30-min window between those runs (AMI health, CF +# cert rotation, WorkOS session stability, etc.). +# +# Lean mode: E2E_MODE=canary skips the child workspace + HMA memory + +# peers/activity checks. One parent workspace + one A2A turn is enough +# to signal "SaaS stack end-to-end is alive." + +on: + schedule: + # Every 30 min. Cron on GitHub-hosted runners has a known drift of + # a few minutes under load — that's fine for a canary. + - cron: '*/30 * * * *' + workflow_dispatch: + +# Serialise with the full-SaaS workflow so they don't contend for the +# same org-create quota on staging. Different group key from +# e2e-staging-saas since we don't mind queueing canaries behind one +# full run, but two canaries SHOULD queue against each other. +concurrency: + group: canary-staging + cancel-in-progress: false + +permissions: + # Needed to open / close the alerting issue. 
+ issues: write + contents: read + +jobs: + canary: + name: Canary smoke + runs-on: ubuntu-latest + timeout-minutes: 15 + + env: + MOLECULE_CP_URL: https://staging-api.moleculesai.app + MOLECULE_ADMIN_TOKEN: ${{ secrets.MOLECULE_STAGING_ADMIN_TOKEN }} + E2E_MODE: canary + E2E_RUNTIME: hermes + E2E_RUN_ID: "canary-${{ github.run_id }}" + + steps: + - uses: actions/checkout@v4 + + - name: Verify admin token present + run: | + if [ -z "$MOLECULE_ADMIN_TOKEN" ]; then + echo "::error::MOLECULE_STAGING_ADMIN_TOKEN not set" + exit 2 + fi + + - name: Canary run + id: canary + run: bash tests/e2e/test_staging_full_saas.sh + + # Alerting: open an issue on first failure, auto-close on recovery. + # Title includes a stable marker so multiple consecutive failures + # don't spam — they just add comments to the existing issue. + - name: Open issue on failure + if: failure() + uses: actions/github-script@v7 + with: + script: | + const title = '🔴 Canary failing: staging SaaS smoke'; + const runURL = `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`; + const body = + `Canary run failed at ${new Date().toISOString()}.\n\n` + + `Run: ${runURL}\n\n` + + `This issue auto-closes on the next green canary run. ` + + `Consecutive failures add a comment here rather than a new issue.`; + + // Find an existing open canary issue (stable title match). + const { data: existing } = await github.rest.issues.listForRepo({ + owner: context.repo.owner, repo: context.repo.repo, + state: 'open', labels: 'canary-staging', + per_page: 10, + }); + const match = existing.find(i => i.title === title); + + if (match) { + await github.rest.issues.createComment({ + owner: context.repo.owner, repo: context.repo.repo, + issue_number: match.number, + body: `Canary still failing. 
${runURL}`, + }); + core.info(`Commented on existing issue #${match.number}`); + } else { + await github.rest.issues.create({ + owner: context.repo.owner, repo: context.repo.repo, + title, body, + labels: ['canary-staging', 'bug'], + }); + core.info('Opened new canary failure issue'); + } + + - name: Auto-close canary issue on success + if: success() + uses: actions/github-script@v7 + with: + script: | + const title = '🔴 Canary failing: staging SaaS smoke'; + const { data: open } = await github.rest.issues.listForRepo({ + owner: context.repo.owner, repo: context.repo.repo, + state: 'open', labels: 'canary-staging', + per_page: 10, + }); + const match = open.find(i => i.title === title); + if (match) { + await github.rest.issues.createComment({ + owner: context.repo.owner, repo: context.repo.repo, + issue_number: match.number, + body: `Canary recovered at ${new Date().toISOString()}. Closing.`, + }); + await github.rest.issues.update({ + owner: context.repo.owner, repo: context.repo.repo, + issue_number: match.number, + state: 'closed', + }); + core.info(`Closed recovered canary issue #${match.number}`); + } + + - name: Teardown safety net + if: always() + env: + ADMIN_TOKEN: ${{ secrets.MOLECULE_STAGING_ADMIN_TOKEN }} + run: | + set +e + orgs=$(curl -sS "$MOLECULE_CP_URL/cp/admin/orgs" \ + -H "Authorization: Bearer $ADMIN_TOKEN" 2>/dev/null \ + | python3 -c " + import json, sys + d = json.load(sys.stdin) + today = __import__('datetime').date.today().strftime('%Y%m%d') + candidates = [o['slug'] for o in d.get('orgs', []) + if o.get('slug','').startswith(f'e2e-{today}-canary-') + and o.get('status') not in ('purged',)] + print('\n'.join(candidates)) + " 2>/dev/null) + for slug in $orgs; do + curl -sS -X DELETE "$MOLECULE_CP_URL/cp/admin/tenants/$slug" \ + -H "Authorization: Bearer $ADMIN_TOKEN" \ + -H "Content-Type: application/json" \ + -d "{\"confirm\":\"$slug\"}" >/dev/null || true + done + exit 0 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml 
index fd285434..a8e06781 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,13 +6,21 @@ on: pull_request: branches: [main, staging] +# Cancel in-progress CI runs when a new commit arrives on the same ref. +# This prevents multiple stale runs from queuing behind each other and +# monopolising the self-hosted macOS arm64 runner. +concurrency: + group: ci-${{ github.ref }} + cancel-in-progress: true + jobs: # Detect which paths changed so downstream jobs can skip when only - # docs/markdown files were modified. Uses git diff (no Docker — works - # on macOS self-hosted runners unlike dorny/paths-filter). + # docs/markdown files were modified. Uses plain `git diff` — no macOS + # dependency, so this runs on ubuntu-latest to free the self-hosted + # macOS arm64 runner for jobs that genuinely need it. changes: name: Detect changes - runs-on: [self-hosted, macos, arm64] + runs-on: ubuntu-latest outputs: platform: ${{ steps.check.outputs.platform }} canvas: ${{ steps.check.outputs.canvas }} @@ -24,12 +32,17 @@ jobs: fetch-depth: 0 - id: check run: | - # For push events: diff against previous commit (handles merge commits) - # For PR events: diff against the base branch - if [ "${{ github.event_name }}" = "pull_request" ]; then + # For PR events: diff against the base branch (not HEAD~1 of the branch, + # which may be unrelated after force-pushes). When a push updates a PR, + # both pull_request and push events fire — prefer the PR base so that + # the diff is always computed against the actual merge base, not the + # previous SHA on the branch which may be on a different history line. + BASE="${GITHUB_BASE_REF:-${{ github.event.before }}}" + # GITHUB_BASE_REF is set by GitHub for PR events (the base branch name). + # For pull_request events we use the stored base.sha; for push events + # (or when base.sha is unavailable) fall back to github.event.before. 
+ if [ "${{ github.event_name }}" = "pull_request" ] && [ -n "${{ github.event.pull_request.base.sha }}" ]; then BASE="${{ github.event.pull_request.base.sha }}" - else - BASE="${{ github.event.before }}" fi # Fallback: if BASE is empty or all zeros (new branch), run everything if [ -z "$BASE" ] || echo "$BASE" | grep -qE '^0+$'; then @@ -174,6 +187,8 @@ jobs: needs: changes if: needs.changes.outputs.python == 'true' runs-on: [self-hosted, macos, arm64] + env: + WORKSPACE_ID: test defaults: run: working-directory: workspace diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 02989b4d..a57f1d86 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -23,6 +23,13 @@ on: # Weekly run picks up findings in code that hasn't been touched. - cron: '30 1 * * 0' +# Workflow-level concurrency: only one CodeQL run per branch/PR at a time. +# `cancel-in-progress: false` queues new runs — the 45-min analysis is the +# longest CI occupant and fights the single mac mini runner the hardest. +concurrency: + group: codeql-${{ github.ref }} + cancel-in-progress: false + permissions: actions: read contents: read diff --git a/.github/workflows/e2e-staging-canvas.yml b/.github/workflows/e2e-staging-canvas.yml new file mode 100644 index 00000000..c90794bd --- /dev/null +++ b/.github/workflows/e2e-staging-canvas.yml @@ -0,0 +1,116 @@ +name: E2E Staging Canvas (Playwright) + +# Playwright test suite that provisions a fresh staging org per run and +# verifies every workspace-panel tab renders without crashing. Complements +# e2e-staging-saas.yml (which tests the API shape) by exercising the +# actual browser + canvas bundle against live staging. +# +# Triggers: push to main or PR touching canvas sources + this workflow, +# manual dispatch, and weekly cron to catch browser/runtime drift even +# when canvas is quiet. 
+ +on: + push: + branches: [main] + paths: + - 'canvas/**' + - '.github/workflows/e2e-staging-canvas.yml' + pull_request: + branches: [main] + paths: + - 'canvas/**' + - '.github/workflows/e2e-staging-canvas.yml' + workflow_dispatch: + schedule: + # Weekly on Sunday 08:00 UTC — catches Chrome / Playwright / Next.js + # release-note-shaped regressions that don't ride in with a PR. + - cron: '0 8 * * 0' + +concurrency: + group: e2e-staging-canvas + cancel-in-progress: false + +jobs: + playwright: + name: Canvas tabs E2E + runs-on: ubuntu-latest + timeout-minutes: 40 + + env: + CANVAS_E2E_STAGING: '1' + MOLECULE_CP_URL: https://staging-api.moleculesai.app + MOLECULE_ADMIN_TOKEN: ${{ secrets.MOLECULE_STAGING_ADMIN_TOKEN }} + + defaults: + run: + working-directory: canvas + + steps: + - uses: actions/checkout@v4 + + - name: Verify admin token present + run: | + if [ -z "$MOLECULE_ADMIN_TOKEN" ]; then + echo "::error::Missing MOLECULE_STAGING_ADMIN_TOKEN" + exit 2 + fi + + - name: Set up Node + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'npm' + cache-dependency-path: canvas/package-lock.json + + - name: Install canvas deps + run: npm ci + + - name: Install Playwright browsers + run: npx playwright install --with-deps chromium + + - name: Run staging canvas E2E + run: npx playwright test --config=playwright.staging.config.ts + + - name: Upload Playwright report on failure + if: failure() + uses: actions/upload-artifact@v4 + with: + name: playwright-report-staging + path: canvas/playwright-report-staging/ + retention-days: 14 + + - name: Upload screenshots on failure + if: failure() + uses: actions/upload-artifact@v4 + with: + name: playwright-screenshots + path: canvas/test-results/ + retention-days: 14 + + # Safety-net teardown mirrors the bash-harness workflow — if + # globalTeardown didn't run (worker crash, runner cancel), this + # step sweeps any e2e-canvas-* org tagged with today's date. 
+ - name: Teardown safety net + if: always() + env: + ADMIN_TOKEN: ${{ secrets.MOLECULE_STAGING_ADMIN_TOKEN }} + run: | + set +e + orgs=$(curl -sS "$MOLECULE_CP_URL/cp/admin/orgs" \ + -H "Authorization: Bearer $ADMIN_TOKEN" 2>/dev/null \ + | python3 -c " + import json, sys + d = json.load(sys.stdin) + today = __import__('datetime').date.today().strftime('%Y%m%d') + candidates = [o['slug'] for o in d.get('orgs', []) + if o.get('slug','').startswith(f'e2e-canvas-{today}-') + and o.get('status') not in ('purged',)] + print('\n'.join(candidates)) + " 2>/dev/null) + for slug in $orgs; do + curl -sS -X DELETE "$MOLECULE_CP_URL/cp/admin/tenants/$slug" \ + -H "Authorization: Bearer $ADMIN_TOKEN" \ + -H "Content-Type: application/json" \ + -d "{\"confirm\":\"$slug\"}" >/dev/null || true + done + exit 0 diff --git a/.github/workflows/e2e-staging-saas.yml b/.github/workflows/e2e-staging-saas.yml new file mode 100644 index 00000000..c1e2b878 --- /dev/null +++ b/.github/workflows/e2e-staging-saas.yml @@ -0,0 +1,161 @@ +name: E2E Staging SaaS (full lifecycle) + +# Dedicated workflow that provisions a fresh staging org per run, exercises +# the full workspace lifecycle (register → heartbeat → A2A → delegation → +# HMA memory → activity → peers), then tears down and asserts leak-free. +# +# Why a separate workflow (not folded into ci.yml): +# - The run takes ~20 min (EC2 boot + cloudflared DNS + provision sweeps + +# agent bootstrap), way too slow for every PR. +# - Needs its own concurrency group so two pushes don't fight over the +# same staging org slug prefix. +# - Has its own required secrets (session cookie, admin token) that most +# PRs don't need to read. 
+# +# Triggers: +# - Push to main (regression guard) +# - workflow_dispatch (manual re-run from UI) +# - Nightly cron (catches drift even when no pushes land) +# - Changes to any provisioning-critical file under PR review (opt-in +# via the same paths watcher that e2e-api.yml uses) + +on: + push: + branches: [main] + paths: + - 'workspace-server/internal/handlers/registry.go' + - 'workspace-server/internal/handlers/workspace_provision.go' + - 'workspace-server/internal/handlers/a2a_proxy.go' + - 'workspace-server/internal/middleware/**' + - 'workspace-server/internal/provisioner/**' + - 'tests/e2e/test_staging_full_saas.sh' + - '.github/workflows/e2e-staging-saas.yml' + pull_request: + branches: [main] + paths: + - 'workspace-server/internal/handlers/registry.go' + - 'workspace-server/internal/handlers/workspace_provision.go' + - 'workspace-server/internal/handlers/a2a_proxy.go' + - 'workspace-server/internal/middleware/**' + - 'workspace-server/internal/provisioner/**' + - 'tests/e2e/test_staging_full_saas.sh' + - '.github/workflows/e2e-staging-saas.yml' + workflow_dispatch: + inputs: + runtime: + description: "Runtime to test (hermes | claude-code | langgraph)" + required: false + default: "hermes" + keep_org: + description: "Skip teardown for debugging (only use via manual dispatch!)" + required: false + type: boolean + default: false + schedule: + # 07:00 UTC every day — catches AMI drift, WorkOS cert rotation, + # Cloudflare API regressions, etc. even on quiet days. + - cron: '0 7 * * *' + +# Serialize: staging has a finite per-hour org creation quota. Two pushes +# landing in quick succession should queue, not race. `cancel-in-progress: +# false` mirrors e2e-api.yml — GitHub would otherwise cancel the running +# teardown step and leave orphan EC2s. 
+concurrency: + group: e2e-staging-saas + cancel-in-progress: false + +jobs: + e2e-staging-saas: + name: E2E Staging SaaS + runs-on: ubuntu-latest + timeout-minutes: 30 + permissions: + contents: read + + env: + MOLECULE_CP_URL: https://staging-api.moleculesai.app + # Single admin-bearer secret drives provision + tenant-token + # retrieval + teardown. Configure in + # Settings → Secrets and variables → Actions → Repository secrets. + MOLECULE_ADMIN_TOKEN: ${{ secrets.MOLECULE_STAGING_ADMIN_TOKEN }} + # OpenAI key for workspace LLM calls (section 8 A2A). Without it, + # Hermes runtime crashes at boot with "No provider API key found". + # Configure at Settings → Secrets → Actions → MOLECULE_STAGING_OPENAI_KEY. + E2E_OPENAI_API_KEY: ${{ secrets.MOLECULE_STAGING_OPENAI_KEY }} + E2E_RUNTIME: ${{ github.event.inputs.runtime || 'hermes' }} + E2E_RUN_ID: "${{ github.run_id }}-${{ github.run_attempt }}" + E2E_KEEP_ORG: ${{ github.event.inputs.keep_org && '1' || '0' }} + + steps: + - uses: actions/checkout@v4 + + - name: Verify admin token present + run: | + if [ -z "$MOLECULE_ADMIN_TOKEN" ]; then + echo "::error::MOLECULE_STAGING_ADMIN_TOKEN secret not set (Railway staging CP_ADMIN_API_TOKEN)" + exit 2 + fi + echo "Admin token present ✓" + + - name: Verify OpenAI key present + run: | + if [ -z "$E2E_OPENAI_API_KEY" ]; then + echo "::error::MOLECULE_STAGING_OPENAI_KEY secret not set — workspaces will fail at boot with 'No provider API key found'" + exit 2 + fi + echo "OpenAI key present ✓ (len=${#E2E_OPENAI_API_KEY})" + + - name: CP staging health preflight + run: | + code=$(curl -sS -o /dev/null -w "%{http_code}" --max-time 10 "$MOLECULE_CP_URL/health") + if [ "$code" != "200" ]; then + echo "::error::Staging CP unhealthy (got HTTP $code). Skipping — not a workspace bug." 
+ exit 1 + fi + echo "Staging CP healthy ✓" + + - name: Run full-lifecycle E2E + id: e2e + run: bash tests/e2e/test_staging_full_saas.sh + + # Belt-and-braces teardown: the test script itself installs a trap + # for EXIT/INT/TERM, but if the GH runner itself is cancelled (e.g. + # someone pushes a new commit and workflow concurrency is set to + # cancel), the trap may not fire. This `always()` step runs even on + # cancellation and attempts the delete a second time. The admin + # DELETE endpoint is idempotent so double-invoking is safe. + - name: Teardown safety net (runs on cancel/failure) + if: always() + env: + ADMIN_TOKEN: ${{ secrets.MOLECULE_STAGING_ADMIN_TOKEN }} + run: | + # Best-effort: find any e2e-YYYYMMDD-* orgs matching this run and + # nuke them. Catches the case where the script died before + # exporting its slug. + set +e + orgs=$(curl -sS "$MOLECULE_CP_URL/cp/admin/orgs" \ + -H "Authorization: Bearer $ADMIN_TOKEN" 2>/dev/null \ + | python3 -c " + import json, sys, os + run_id = os.environ.get('GITHUB_RUN_ID', '') + d = json.load(sys.stdin) + today = __import__('datetime').date.today().strftime('%Y%m%d') + # ONLY sweep slugs from *this* CI run. Previously the filter was + # f'e2e-{today}-' which stomped on parallel CI runs AND any manual + # E2E probes a dev was running against staging (incident 2026-04-21 + # 15:02Z: this workflow's safety net deleted an unrelated manual + # run's tenant 1s after it hit 'running'). 
+ prefix = f'e2e-{today}-{run_id}-' if run_id else f'e2e-{today}-' + candidates = [o['slug'] for o in d.get('orgs', []) + if o.get('slug','').startswith(prefix) + and o.get('instance_status') not in ('purged',)] + print('\n'.join(candidates)) + " 2>/dev/null) + for slug in $orgs; do + echo "Safety-net teardown: $slug" + curl -sS -X DELETE "$MOLECULE_CP_URL/cp/admin/tenants/$slug" \ + -H "Authorization: Bearer $ADMIN_TOKEN" \ + -H "Content-Type: application/json" \ + -d "{\"confirm\":\"$slug\"}" >/dev/null || true + done + exit 0 diff --git a/.github/workflows/e2e-staging-sanity.yml b/.github/workflows/e2e-staging-sanity.yml new file mode 100644 index 00000000..6eacac36 --- /dev/null +++ b/.github/workflows/e2e-staging-sanity.yml @@ -0,0 +1,152 @@ +name: E2E Staging Sanity (leak-detection self-check) + +# Periodic assertion that the teardown safety nets in e2e-staging-saas +# and canary-staging actually work. Runs the E2E harness with +# E2E_INTENTIONAL_FAILURE=1, which poisons the tenant admin token after +# the org is provisioned. The workspace-provision step then fails, the +# script exits non-zero, and the EXIT trap + workflow always()-step +# must still tear down cleanly. +# +# A green run means: +# - The script exited non-zero (intentional failure caught) +# - The trap fired teardown +# - The leak-detection poll found zero orphan orgs +# +# A red run means the teardown path itself is broken — act on this the +# same way you'd act on a canary failure (the whole E2E safety net is +# compromised until it's fixed). +# +# Cadence: once a week, Monday 06:00 UTC. Drift-slow, not per-PR — the +# teardown path rarely changes, and a weekly heartbeat is enough to +# catch silent regressions in cleanup code paths. + +on: + schedule: + - cron: '0 6 * * 1' + workflow_dispatch: + +concurrency: + # Shares the group with canary + full so they don't collide on + # staging org-create quota. 
+ group: e2e-staging-sanity + cancel-in-progress: false + +permissions: + issues: write + contents: read + +jobs: + sanity: + name: Intentional-failure teardown sanity + runs-on: ubuntu-latest + timeout-minutes: 20 + + env: + MOLECULE_CP_URL: https://staging-api.moleculesai.app + MOLECULE_ADMIN_TOKEN: ${{ secrets.MOLECULE_STAGING_ADMIN_TOKEN }} + E2E_MODE: canary # lean lifecycle; we only need the org to exist + E2E_RUNTIME: hermes + E2E_RUN_ID: "sanity-${{ github.run_id }}" + E2E_INTENTIONAL_FAILURE: "1" + + steps: + - uses: actions/checkout@v4 + + - name: Verify admin token present + run: | + if [ -z "$MOLECULE_ADMIN_TOKEN" ]; then + echo "::error::MOLECULE_STAGING_ADMIN_TOKEN not set" + exit 2 + fi + + # Inverted assertion: the run MUST fail. If it passes, the + # E2E_INTENTIONAL_FAILURE path is broken (token not being + # poisoned correctly, or the harness silently recovered). + - name: Run harness — expecting exit !=0 + id: harness + run: | + set +e + bash tests/e2e/test_staging_full_saas.sh + rc=$? + echo "harness_rc=$rc" >> "$GITHUB_OUTPUT" + # The only acceptable outcomes: + # 1 — harness failed mid-run, teardown ran, leak-check passed + # (exit 4 means teardown left a leak — that's the real bug + # this sanity check exists to catch) + if [ "$rc" = "1" ]; then + echo "✓ Harness failed as expected (rc=1); teardown trap ran, leak-check passed" + exit 0 + elif [ "$rc" = "0" ]; then + echo "::error::Harness succeeded under E2E_INTENTIONAL_FAILURE=1 — the poisoning path is broken" + exit 1 + elif [ "$rc" = "4" ]; then + echo "::error::LEAK DETECTED (rc=4) — teardown failed to clean up the org. Safety net broken." + exit 4 + else + echo "::error::Unexpected rc=$rc — neither clean-failure nor leak. Investigate harness." 
+ exit 1 + fi + + - name: Open issue if safety net is broken + if: failure() + uses: actions/github-script@v7 + with: + script: | + const title = "🚨 E2E teardown safety net broken"; + const runURL = `https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}`; + const body = + `The weekly sanity run (E2E_INTENTIONAL_FAILURE=1) did not exit ` + + `as expected. This means one of:\n` + + ` - poisoning didn't actually cause failure (test harness regression), OR\n` + + ` - teardown left an orphan org (leak detection caught a real bug)\n\n` + + `Run: ${runURL}\n\n` + + `This is higher priority than a canary failure — the whole ` + + `E2E safety net can't be trusted until this is resolved.`; + + const { data: existing } = await github.rest.issues.listForRepo({ + owner: context.repo.owner, repo: context.repo.repo, + state: 'open', labels: 'e2e-safety-net', + }); + const match = existing.find(i => i.title === title); + if (match) { + await github.rest.issues.createComment({ + owner: context.repo.owner, repo: context.repo.repo, + issue_number: match.number, + body: `Still broken. ${runURL}`, + }); + } else { + await github.rest.issues.create({ + owner: context.repo.owner, repo: context.repo.repo, + title, body, + labels: ['e2e-safety-net', 'bug', 'priority-high'], + }); + } + + # Belt-and-braces: if teardown left anything behind, nuke it here + # so we don't bleed staging quota. Different label from the + # always()-steps in the other workflows so sanity-only orgs get + # cleaned up by sanity runs. 
+ - name: Teardown safety net + if: always() + env: + ADMIN_TOKEN: ${{ secrets.MOLECULE_STAGING_ADMIN_TOKEN }} + run: | + set +e + orgs=$(curl -sS "$MOLECULE_CP_URL/cp/admin/orgs" \ + -H "Authorization: Bearer $ADMIN_TOKEN" 2>/dev/null \ + | python3 -c " + import json, sys + d = json.load(sys.stdin) + today = __import__('datetime').date.today().strftime('%Y%m%d') + candidates = [o['slug'] for o in d.get('orgs', []) + if o.get('slug','').startswith(f'e2e-{today}-sanity-') + and o.get('status') not in ('purged',)] + print('\n'.join(candidates)) + " 2>/dev/null) + for slug in $orgs; do + curl -sS -X DELETE "$MOLECULE_CP_URL/cp/admin/tenants/$slug" \ + -H "Authorization: Bearer $ADMIN_TOKEN" \ + -H "Content-Type: application/json" \ + -d "{\"confirm\":\"$slug\"}" >/dev/null || true + done + exit 0 diff --git a/canvas/Dockerfile b/canvas/Dockerfile index f530e0ec..f871bd07 100644 --- a/canvas/Dockerfile +++ b/canvas/Dockerfile @@ -20,4 +20,7 @@ COPY --from=builder /app/public ./public EXPOSE 3000 ENV PORT=3000 ENV HOSTNAME="0.0.0.0" +# Non-root runtime — node image defaults to root, explicitly drop. +RUN addgroup -g 1000 canvas && adduser -u 1000 -G canvas -s /bin/sh -D canvas +USER canvas CMD ["node", "server.js"] diff --git a/canvas/e2e/context-menu-delete.spec.ts b/canvas/e2e/context-menu-delete.spec.ts new file mode 100644 index 00000000..4b437323 --- /dev/null +++ b/canvas/e2e/context-menu-delete.spec.ts @@ -0,0 +1,131 @@ +import { test, expect } from "@playwright/test"; + +/** + * Playwright E2E for context-menu → delete confirm flow. + * Regression test for the portal/race bug fixed in PR #1133: + * clicking "Delete" in the context menu did nothing because the + * portal-rendered ConfirmDialog was closed by the menu's outside-click + * handler before onConfirm could fire. + * + * The fix hoists dialog state to the canvas store via `setPendingDelete`, + * which survives ContextMenu unmount. 
This test exercises the full + * interaction in a real browser environment. + * + * Requires: platform on :8080, canvas on :3000. + */ +const API = process.env.E2E_API_URL ?? "http://localhost:8080"; + +test.describe("Context Menu → Delete Confirm", () => { + test.beforeEach(async ({ request }) => { + // Ensure at least one workspace exists so the menu can be triggered + const res = await request.get(`${API}/workspaces`); + const workspaces = (await res.json()) as Array<{ id: string; name: string }>; + if (workspaces.length === 0) { + test.skip("No workspaces on canvas — cannot test context menu"); + } + }); + + test("Delete button opens ConfirmDialog and clicking Confirm deletes the workspace", async ({ + page, + request, + }) => { + // 1. Create a workspace to delete (leaf node — no children, no cascade) + const create = await request.post(`${API}/workspaces`, { + data: { name: "E2E Delete Test", tier: 1, runtime: "claude-code" }, + headers: { "Content-Type": "application/json" }, + }); + const workspace = (await create.json()) as { id: string; name: string }; + const wsId = workspace.id; + + // Register so the node appears online on the canvas + await request.post(`${API}/registry/register`, { + data: { + id: wsId, + url: `http://localhost:9999`, + agent_card: { name: "E2E Delete Test", skills: [] }, + }, + headers: { "Content-Type": "application/json" }, + }); + + // 2. Open the canvas and wait for the workspace node + await page.goto("/", { waitUntil: "networkidle" }); + await page.waitForTimeout(2000); // allow WS to appear + + // Find the workspace node on the canvas + const node = page.locator(`.react-flow__node`).filter({ hasText: "E2E Delete Test" }).first(); + await expect(node).toBeVisible({ timeout: 10000 }); + + // 3. 
Right-click to open context menu + await node.click({ button: "right" }); + const menu = page.locator('[role="menu"]').first(); + await expect(menu).toBeVisible({ timeout: 3000 }); + await expect(menu).toHaveAttribute("aria-label", /E2E Delete Test/i); + + // 4. Click "Delete" — should open the ConfirmDialog (not close silently) + const deleteBtn = menu.getByRole("menuitem").filter({ hasText: /Delete/i }); + await expect(deleteBtn).toBeVisible(); + await deleteBtn.click(); + + // 5. ConfirmDialog should appear (portal renders into document.body) + const dialog = page.locator('[role="dialog"]'); + await expect(dialog).toBeVisible({ timeout: 3000 }); + await expect(dialog).toContainText(/delete/i); + await expect(dialog.getByRole("button", { name: /confirm|delete/i })).toBeVisible(); + + // 6. Click Confirm — workspace should be deleted + await dialog.getByRole("button", { name: /confirm|delete/i }).first().click(); + + // 7. Dialog should close + await expect(dialog).not.toBeVisible({ timeout: 3000 }); + + // 8. Node should disappear from canvas + await expect( + page.locator(`.react-flow__node`).filter({ hasText: "E2E Delete Test" }) + ).not.toBeVisible({ timeout: 5000 }); + + // 9. 
API confirms workspace is gone + const getRes = await request.get(`${API}/workspaces/${wsId}`); + expect(getRes.status()).toBeGreaterThanOrEqual(400); // 404 or similar + }); + + test("Cancel closes the dialog and the workspace remains", async ({ page, request }) => { + const res = await request.get(`${API}/workspaces`); + const workspaces = (await res.json()) as Array<{ id: string; name: string }>; + if (workspaces.length === 0) { + test.skip("No workspaces"); + } + + const ws = workspaces[0]; + + // Register if not already + await request.post(`${API}/registry/register`, { + data: { id: ws.id, url: `http://localhost:9999`, agent_card: { name: ws.name, skills: [] } }, + headers: { "Content-Type": "application/json" }, + }); + + await page.goto("/", { waitUntil: "networkidle" }); + await page.waitForTimeout(2000); + + const node = page.locator(`.react-flow__node`).filter({ hasText: ws.name }).first(); + await node.click({ button: "right" }); + + const menu = page.locator('[role="menu"]').first(); + await expect(menu).toBeVisible(); + + // Get workspace name before we click Delete (can't easily look it up after) + const wsName = ws.name; + + await menu.getByRole("menuitem").filter({ hasText: /Delete/i }).click(); + const dialog = page.locator('[role="dialog"]'); + await expect(dialog).toBeVisible({ timeout: 3000 }); + + // Cancel + await dialog.getByRole("button", { name: /cancel/i }).first().click(); + await expect(dialog).not.toBeVisible({ timeout: 3000 }); + + // Node still on canvas + await expect( + page.locator(`.react-flow__node`).filter({ hasText: wsName }).first() + ).toBeVisible({ timeout: 5000 }); + }); +}); \ No newline at end of file diff --git a/canvas/e2e/staging-setup.ts b/canvas/e2e/staging-setup.ts new file mode 100644 index 00000000..598fb877 --- /dev/null +++ b/canvas/e2e/staging-setup.ts @@ -0,0 +1,199 @@ +/** + * Playwright global setup for the staging canvas E2E. 
+ * + * Provisions a fresh staging org per run (POST /cp/admin/orgs), fetches + * the per-tenant admin token, provisions one hermes workspace, waits + * for online, then exports: + * + * STAGING_TENANT_URL https://<slug>.moleculesai.app + * STAGING_WORKSPACE_ID UUID of the hermes workspace + * STAGING_TENANT_TOKEN per-tenant admin bearer (for spec requests) + * STAGING_SLUG org slug (used by teardown) + * + * Required env: + * MOLECULE_CP_URL default: https://staging-api.moleculesai.app + * MOLECULE_ADMIN_TOKEN CP admin bearer (Railway staging + * CP_ADMIN_API_TOKEN). Drives provision + + * tenant-token retrieval + teardown via a + * single credential. + */ + +import type { FullConfig } from "@playwright/test"; +import { writeFileSync } from "fs"; +import { join } from "path"; + +const CP_URL = process.env.MOLECULE_CP_URL || "https://staging-api.moleculesai.app"; +const ADMIN_TOKEN = process.env.MOLECULE_ADMIN_TOKEN; +const STAGING = process.env.CANVAS_E2E_STAGING === "1"; + +const PROVISION_TIMEOUT_MS = 15 * 60 * 1000; +const WORKSPACE_ONLINE_TIMEOUT_MS = 10 * 60 * 1000; +const TLS_TIMEOUT_MS = 3 * 60 * 1000; + +async function jsonFetch( + url: string, + init: RequestInit = {}, +): Promise<{ status: number; body: any }> { + const res = await fetch(url, { + ...init, + headers: { "Content-Type": "application/json", ...(init.headers || {}) }, + }); + let body: any = null; + try { + body = await res.json(); + } catch { + /* non-JSON */ + } + return { status: res.status, body }; +} + +async function waitFor<T>( + op: () => Promise<T | null>, + deadlineMs: number, + intervalMs: number, + desc: string, +): Promise<T> { + const deadline = Date.now() + deadlineMs; + while (Date.now() < deadline) { + const v = await op(); + if (v !== null) return v; + await new Promise((r) => setTimeout(r, intervalMs)); + } + throw new Error(`${desc}: timed out after ${Math.round(deadlineMs / 1000)}s`); +} + +function makeSlug(): string { + const y = new Date().toISOString().slice(0, 10).replace(/-/g, ""); + 
const rand = Math.random().toString(36).slice(2, 8); + return `e2e-canvas-${y}-${rand}`.slice(0, 32); +} + +export default async function globalSetup(_config: FullConfig): Promise { + if (!STAGING) { + console.log("[staging-setup] CANVAS_E2E_STAGING not set, skipping"); + return; + } + if (!ADMIN_TOKEN) { + throw new Error( + "MOLECULE_ADMIN_TOKEN required (Railway staging CP_ADMIN_API_TOKEN)", + ); + } + + const slug = makeSlug(); + const adminAuth = { Authorization: `Bearer ${ADMIN_TOKEN}` }; + console.log(`[staging-setup] Using slug=${slug}`); + + // 1. Create org via admin endpoint — no WorkOS session needed + const create = await jsonFetch(`${CP_URL}/cp/admin/orgs`, { + method: "POST", + headers: adminAuth, + body: JSON.stringify({ + slug, + name: `E2E Canvas ${slug}`, + owner_user_id: `e2e-runner:${slug}`, + }), + }); + if (create.status >= 400) { + throw new Error( + `POST /cp/admin/orgs ${create.status}: ${JSON.stringify(create.body)}`, + ); + } + console.log(`[staging-setup] Org created: ${slug}`); + + // 2. Wait for tenant running (admin-orgs list is the status source) + await waitFor( + async () => { + const r = await jsonFetch(`${CP_URL}/cp/admin/orgs`, { headers: adminAuth }); + if (r.status !== 200) return null; + const row = (r.body?.orgs || []).find((o: any) => o.slug === slug); + if (!row) return null; + if (row.status === "running") return true; + if (row.status === "failed") throw new Error(`provision failed: ${slug}`); + return null; + }, + PROVISION_TIMEOUT_MS, + 15_000, + "tenant provision", + ); + console.log(`[staging-setup] Tenant running`); + + // 3. 
Fetch per-tenant admin token + const tokRes = await jsonFetch( + `${CP_URL}/cp/admin/orgs/${slug}/admin-token`, + { headers: adminAuth }, + ); + if (tokRes.status !== 200 || !tokRes.body?.admin_token) { + throw new Error( + `tenant-token fetch ${tokRes.status}: ${JSON.stringify(tokRes.body)}`, + ); + } + const tenantToken: string = tokRes.body.admin_token; + const tenantURL = `https://${slug}.moleculesai.app`; + console.log(`[staging-setup] Tenant URL: ${tenantURL}`); + + // 4. TLS readiness + await waitFor( + async () => { + try { + const res = await fetch(`${tenantURL}/health`, { + signal: AbortSignal.timeout(5000), + }); + return res.ok ? true : null; + } catch { + return null; + } + }, + TLS_TIMEOUT_MS, + 5_000, + "tenant TLS", + ); + + // 5. Provision workspace + const tenantAuth = { Authorization: `Bearer ${tenantToken}` }; + const ws = await jsonFetch(`${tenantURL}/workspaces`, { + method: "POST", + headers: tenantAuth, + body: JSON.stringify({ + name: "E2E Canvas Test", + runtime: "hermes", + tier: 2, + model: "gpt-4o", + }), + }); + if (ws.status >= 400 || !ws.body?.id) { + throw new Error(`Workspace create ${ws.status}: ${JSON.stringify(ws.body)}`); + } + const workspaceId = ws.body.id as string; + console.log(`[staging-setup] Workspace created: ${workspaceId}`); + + // 6. Wait for workspace online + await waitFor( + async () => { + const r = await jsonFetch(`${tenantURL}/workspaces/${workspaceId}`, { + headers: tenantAuth, + }); + if (r.status !== 200) return null; + if (r.body?.status === "online") return true; + if (r.body?.status === "failed") { + throw new Error(`Workspace failed: ${r.body.last_sample_error || ""}`); + } + return null; + }, + WORKSPACE_ONLINE_TIMEOUT_MS, + 10_000, + "workspace online", + ); + console.log(`[staging-setup] Workspace online`); + + // 7. 
Hand state off to tests + teardown + const stateFile = join(process.cwd(), ".playwright-staging-state.json"); + writeFileSync( + stateFile, + JSON.stringify({ slug, tenantURL, workspaceId, tenantToken }, null, 2), + ); + process.env.STAGING_SLUG = slug; + process.env.STAGING_TENANT_URL = tenantURL; + process.env.STAGING_WORKSPACE_ID = workspaceId; + process.env.STAGING_TENANT_TOKEN = tenantToken; + console.log(`[staging-setup] Ready — ${stateFile}`); +} diff --git a/canvas/e2e/staging-tabs.spec.ts b/canvas/e2e/staging-tabs.spec.ts new file mode 100644 index 00000000..412953a5 --- /dev/null +++ b/canvas/e2e/staging-tabs.spec.ts @@ -0,0 +1,151 @@ +/** + * Staging canvas E2E — opens each of the 13 workspace-panel tabs against a + * fresh staging org provisioned in the global setup. Asserts each tab + * renders without throwing and captures a screenshot for visual review. + * + * Auth model: the tenant platform's AdminAuth middleware accepts a bearer + * token OR a WorkOS session cookie. Playwright can't mint a WorkOS + * session, so we feed the per-tenant admin token (fetched in global + * setup via GET /cp/admin/orgs/:slug/admin-token) as an Authorization: + * Bearer header via context.setExtraHTTPHeaders(). Every browser + * request inherits the header. + * + * Known SaaS gaps — documented in #1369 and allowed to render errored + * content without failing the test (the gate is "no hard crash, no + * 'Failed to load' toast"): + * - Files tab: empty (platform can't docker exec into a remote EC2) + * - Terminal tab: WS connect fails + * - Peers tab: 401 without workspace-scoped token + */ + +import { test, expect } from "@playwright/test"; + +// Tab ids as declared in canvas/src/components/SidePanel.tsx TABS. 
+const TAB_IDS = [ + "chat", + "activity", + "details", + "skills", + "terminal", + "config", + "schedule", + "channels", + "files", + "memory", + "traces", + "events", + "audit", +] as const; + +const STAGING = process.env.CANVAS_E2E_STAGING === "1"; + +test.skip(!STAGING, "CANVAS_E2E_STAGING not set — skipping staging-only tests"); + +test.describe("staging canvas tabs", () => { + test("each workspace-panel tab renders without error", async ({ + page, + context, + }) => { + const tenantURL = process.env.STAGING_TENANT_URL; + const tenantToken = process.env.STAGING_TENANT_TOKEN; + const workspaceId = process.env.STAGING_WORKSPACE_ID; + + if (!tenantURL || !tenantToken || !workspaceId) { + throw new Error( + "staging-setup.ts did not export STAGING_TENANT_URL / STAGING_TENANT_TOKEN / STAGING_WORKSPACE_ID — did global setup run?", + ); + } + + // Attach the per-tenant admin bearer to every outbound request. + // The tenant platform's AdminAuth middleware accepts this; no + // WorkOS session needed. + await context.setExtraHTTPHeaders({ + Authorization: `Bearer ${tenantToken}`, + }); + + const consoleErrors: string[] = []; + page.on("console", (msg) => { + if (msg.type() === "error") { + consoleErrors.push(msg.text()); + } + }); + + await page.goto(tenantURL, { waitUntil: "networkidle" }); + + // Canvas hydration races WebSocket connect + /workspaces fetch. + // Wait for the tablist element (appears after a workspace is + // selected) or the hydration-error banner — whichever wins first. + await page.waitForSelector( + '[role="tablist"], [data-testid="hydration-error"]', + { timeout: 45_000 }, + ); + + const hydrationErr = await page + .locator('[data-testid="hydration-error"]') + .count(); + expect( + hydrationErr, + "canvas hydration failed — check staging CP + tenant reachability", + ).toBe(0); + + // Click the workspace node to open the side panel. 
Try a data + // attribute first, fall back to a generic role-based selector so + // the test doesn't break when the node-card markup changes. + const byDataAttr = page.locator(`[data-workspace-id="${workspaceId}"]`).first(); + if ((await byDataAttr.count()) > 0) { + await byDataAttr.click({ timeout: 10_000 }); + } else { + const firstNode = page + .locator('[role="button"][aria-label*="Workspace" i]') + .first(); + await firstNode.click({ timeout: 10_000 }); + } + + await page.waitForSelector('[role="tablist"]', { timeout: 15_000 }); + + for (const tabId of TAB_IDS) { + await test.step(`tab: ${tabId}`, async () => { + const tabButton = page.locator(`#tab-${tabId}`); + await expect( + tabButton, + `tab-${tabId} button missing — TABS list may have drifted`, + ).toBeVisible({ timeout: 5_000 }); + await tabButton.click(); + + const panel = page.locator(`#panel-${tabId}`); + await expect(panel, `panel for ${tabId} never rendered`).toBeVisible({ + timeout: 10_000, + }); + + // "Failed to load" toast = hard crash. Known SaaS-mode gaps + // (Files empty, Terminal disconnected, Peers 401) surface as + // in-panel content, not toasts. + const errorToasts = await page + .locator('[role="alert"]:has-text("Failed to load")') + .count(); + expect(errorToasts, `tab ${tabId}: "Failed to load" toast`).toBe(0); + + await page.screenshot({ + path: `test-results/staging-tab-${tabId}.png`, + fullPage: false, + }); + }); + } + + // Aggregate console-error budget. Known-noisy sources whitelisted: + // Sentry, Vercel analytics, WS reconnects (expected on SaaS + // terminal), favicon 404 (cosmetic). 
+ const appErrors = consoleErrors.filter( + (msg) => + !msg.includes("sentry") && + !msg.includes("vercel") && + !msg.includes("WebSocket") && + !msg.includes("favicon") && + !msg.includes("molecule-icon.png"), // another cosmetic 404 + ); + expect( + appErrors, + `unexpected console errors:\n${appErrors.join("\n")}`, + ).toHaveLength(0); + }); +}); diff --git a/canvas/e2e/staging-teardown.ts b/canvas/e2e/staging-teardown.ts new file mode 100644 index 00000000..b573cb2d --- /dev/null +++ b/canvas/e2e/staging-teardown.ts @@ -0,0 +1,66 @@ +/** + * Playwright global teardown — deletes the staging org provisioned by + * staging-setup.ts via DELETE /cp/admin/tenants/:slug. Runs on success AND + * failure (Playwright calls globalTeardown regardless). + * + * The workflow's always()-step safety net also catches orphan orgs + * tagged with the run ID, so this is the primary cleanup and the + * workflow step is the belt-and-braces backup. + */ + +import { existsSync, readFileSync, unlinkSync } from "fs"; +import { join } from "path"; + +const CP_URL = process.env.MOLECULE_CP_URL || "https://staging-api.moleculesai.app"; +const ADMIN_TOKEN = process.env.MOLECULE_ADMIN_TOKEN; +const STAGING = process.env.CANVAS_E2E_STAGING === "1"; + +export default async function globalTeardown(): Promise { + if (!STAGING) return; + if (!ADMIN_TOKEN) { + console.warn("[staging-teardown] no MOLECULE_ADMIN_TOKEN, skipping"); + return; + } + + const stateFile = join(process.cwd(), ".playwright-staging-state.json"); + if (!existsSync(stateFile)) { + console.warn("[staging-teardown] no state file — setup must have failed before org create; nothing to tear down"); + return; + } + + let slug: string; + try { + const state = JSON.parse(readFileSync(stateFile, "utf-8")); + slug = state.slug; + } catch (e) { + console.warn(`[staging-teardown] state file unreadable: ${e}`); + return; + } + + console.log(`[staging-teardown] Deleting org ${slug}...`); + try { + const res = await 
fetch(`${CP_URL}/cp/admin/tenants/${slug}`, { + method: "DELETE", + headers: { + Authorization: `Bearer ${ADMIN_TOKEN}`, + "Content-Type": "application/json", + }, + body: JSON.stringify({ confirm: slug }), + }); + if (res.ok) { + console.log(`[staging-teardown] ${slug} deleted`); + } else { + console.warn( + `[staging-teardown] DELETE returned ${res.status} (may already be gone)`, + ); + } + } catch (e) { + console.warn(`[staging-teardown] DELETE failed: ${e}`); + } + + try { + unlinkSync(stateFile); + } catch { + /* non-fatal */ + } +} diff --git a/canvas/package-lock.json b/canvas/package-lock.json index f4defc1f..c7f76c9e 100644 --- a/canvas/package-lock.json +++ b/canvas/package-lock.json @@ -80,6 +80,7 @@ "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/helper-validator-identifier": "^7.28.5", "js-tokens": "^4.0.0", @@ -95,6 +96,7 @@ "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=6.9.0" } @@ -197,7 +199,6 @@ } ], "license": "MIT", - "peer": true, "engines": { "node": ">=18" }, @@ -221,11 +222,31 @@ } ], "license": "MIT", - "peer": true, "engines": { "node": ">=18" } }, + "node_modules/@emnapi/core": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.10.0.tgz", + "integrity": "sha512-yq6OkJ4p82CAfPl0u9mQebQHKPJkY7WrIuk205cTYnYe+k2Z8YBh11FrbRG/H6ihirqcacOgl2BIO8oyMQLeXw==", + "dev": true, + "optional": true, + "peer": true, + "dependencies": { + "@emnapi/wasi-threads": "1.2.1", + "tslib": "^2.4.0" + } + }, + "node_modules/@emnapi/runtime": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.10.0.tgz", + "integrity": "sha512-ewvYlk86xUoGI0zQRNq/mC+16R1QeDlKQy21Ki3oSYXNgLb45GV1P6A0M+/s6nyCuNDqe5VpaY84BzXGwVbwFA==", + 
"optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, "node_modules/@emnapi/wasi-threads": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.2.1.tgz", @@ -980,7 +1001,6 @@ "integrity": "sha512-PG6q63nQg5c9rIi4/Z5lR5IVF7yU5MqmKaPOe0HSc0O2cX1fPi96sUQu5j7eo4gKCkB2AnNGoWt7y4/Xx3Kcqg==", "devOptional": true, "license": "Apache-2.0", - "peer": true, "dependencies": { "playwright": "1.59.1" }, @@ -1829,6 +1849,27 @@ "node": ">=14.0.0" } }, + "node_modules/@rolldown/binding-wasm32-wasi/node_modules/@emnapi/core": { + "version": "1.9.2", + "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.9.2.tgz", + "integrity": "sha512-UC+ZhH3XtczQYfOlu3lNEkdW/p4dsJ1r/bP7H8+rhao3TTTMO1ATq/4DdIi23XuGoFY+Cz0JmCbdVl0hz9jZcA==", + "dev": true, + "optional": true, + "dependencies": { + "@emnapi/wasi-threads": "1.2.1", + "tslib": "^2.4.0" + } + }, + "node_modules/@rolldown/binding-wasm32-wasi/node_modules/@emnapi/runtime": { + "version": "1.9.2", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.9.2.tgz", + "integrity": "sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw==", + "dev": true, + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, "node_modules/@rolldown/binding-win32-arm64-msvc": { "version": "1.0.0-rc.15", "resolved": "https://registry.npmjs.org/@rolldown/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-1.0.0-rc.15.tgz", @@ -1990,7 +2031,8 @@ "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz", "integrity": "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/@types/chai": { "version": "5.2.3", @@ -2113,7 +2155,6 @@ "integrity": "sha512-wGdMcf+vPYM6jikpS/qhg6WiqSV/OhG+jeeHT/KlVqxYfD40iYJf9/AE1uQxVWFvU7MipKRkRv8NSHiCGgPr8Q==", "dev": true, "license": "MIT", - "peer": 
true, "dependencies": { "undici-types": "~6.21.0" } @@ -2123,7 +2164,6 @@ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz", "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", "license": "MIT", - "peer": true, "dependencies": { "csstype": "^3.2.2" } @@ -2134,7 +2174,6 @@ "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", "devOptional": true, "license": "MIT", - "peer": true, "peerDependencies": { "@types/react": "^19.2.0" } @@ -2372,6 +2411,7 @@ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=8" } @@ -2382,6 +2422,7 @@ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=10" }, @@ -2557,7 +2598,6 @@ } ], "license": "MIT", - "peer": true, "dependencies": { "baseline-browser-mapping": "^2.10.12", "caniuse-lite": "^1.0.30001782", @@ -2874,7 +2914,6 @@ "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", "license": "ISC", - "peer": true, "engines": { "node": ">=12" } @@ -3039,7 +3078,8 @@ "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz", "integrity": "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/dunder-proto": { "version": "1.0.1", @@ -3651,7 +3691,6 @@ "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", "license": "MIT", - 
"peer": true, "bin": { "jiti": "bin/jiti.js" } @@ -3661,7 +3700,8 @@ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", "dev": true, - "license": "MIT" + "license": "MIT", + "peer": true }, "node_modules/jsdom": { "version": "25.0.1", @@ -3669,7 +3709,6 @@ "integrity": "sha512-8i7LzZj7BF8uplX+ZyOlIz86V6TAsSs+np6m1kpW9u0JWi4z/1t+FzcK1aek+ybTnAC4KhBL4uXCNT0wcUIeCw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "cssstyle": "^4.1.0", "data-urls": "^5.0.0", @@ -4007,6 +4046,7 @@ "integrity": "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==", "dev": true, "license": "MIT", + "peer": true, "bin": { "lz-string": "bin/bin.js" } @@ -5234,7 +5274,6 @@ } ], "license": "MIT", - "peer": true, "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", @@ -5391,6 +5430,7 @@ "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "ansi-regex": "^5.0.1", "ansi-styles": "^5.0.0", @@ -5445,7 +5485,6 @@ "resolved": "https://registry.npmjs.org/react/-/react-19.2.5.tgz", "integrity": "sha512-llUJLzz1zTUBrskt2pwZgLq59AemifIftw4aB7JxOqf1HY2FDaGDxgwpAPVzHU1kdWabH7FauP4i1oEeer2WCA==", "license": "MIT", - "peer": true, "engines": { "node": ">=0.10.0" } @@ -5455,7 +5494,6 @@ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.5.tgz", "integrity": "sha512-J5bAZz+DXMMwW/wV3xzKke59Af6CHY7G4uYLN1OvBcKEsWOs4pQExj86BBKamxl/Ik5bx9whOrvBlSDfWzgSag==", "license": "MIT", - "peer": true, "dependencies": { "scheduler": "^0.27.0" }, @@ -5468,7 +5506,8 @@ "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", "dev": true, - "license": "MIT" 
+ "license": "MIT", + "peer": true }, "node_modules/react-markdown": { "version": "10.1.0", @@ -6017,7 +6056,6 @@ "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.19.tgz", "integrity": "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==", "license": "MIT", - "peer": true, "dependencies": { "@alloc/quick-lru": "^5.2.0", "arg": "^5.0.2", @@ -6139,7 +6177,6 @@ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -6478,7 +6515,6 @@ "integrity": "sha512-dbU7/iLVa8KZALJyLOBOQ88nOXtNG8vxKuOT4I2mD+Ya70KPceF4IAmDsmU0h1Qsn5bPrvsY9HJstCRh3hG6Uw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "lightningcss": "^1.32.0", "picomatch": "^4.0.4", diff --git a/canvas/playwright.staging.config.ts b/canvas/playwright.staging.config.ts new file mode 100644 index 00000000..62dec331 --- /dev/null +++ b/canvas/playwright.staging.config.ts @@ -0,0 +1,50 @@ +/** + * Playwright config for staging canvas E2E. + * + * Separate from playwright.config.ts (local dev) so: + * - globalSetup / globalTeardown don't run for every local `pnpm test` + * - Retries + timeouts can be longer (staging is remote + shared) + * - baseURL is dynamic (set by globalSetup → STAGING_TENANT_URL) + * + * Invoked by the e2e-staging-canvas GH Actions workflow: + * npx playwright test --config=playwright.staging.config.ts + */ + +import { defineConfig } from "@playwright/test"; + +export default defineConfig({ + testDir: "./e2e", + // Only the staging-*.spec.ts files run under this config. The smoke + + // unit specs (chat-separation, filestab-smoke, etc.) stay on the local + // config so they don't hit staging. 
+ testMatch: /staging-.*\.spec\.ts/, + // Global setup provisions the org; budget generously because EC2 boot + // is ~5 min and can drift to 10+ on cold AMI days. + timeout: 120_000, + expect: { timeout: 15_000 }, + fullyParallel: false, + // A transient network blip shouldn't cost us the whole run. Two retries + // mean up to 3 attempts — staging flakes fall within that budget. + retries: 2, + // One worker: the setup provisions exactly one org/workspace, and + // parallel specs would fight over the shared workspace selector state. + workers: 1, + globalSetup: "./e2e/staging-setup.ts", + globalTeardown: "./e2e/staging-teardown.ts", + use: { + // STAGING_TENANT_URL gets written to process.env in global setup, but + // Playwright resolves baseURL before setup runs. We read it inside + // each spec instead — don't hard-code here. + headless: true, + screenshot: "only-on-failure", + video: "retain-on-failure", + trace: "retain-on-failure", + navigationTimeout: 45_000, + actionTimeout: 15_000, + }, + reporter: [ + ["list"], + ["html", { outputFolder: "playwright-report-staging", open: "never" }], + ], + projects: [{ name: "chromium", use: { browserName: "chromium" } }], +}); diff --git a/canvas/src/app/__tests__/orgs-page.test.tsx b/canvas/src/app/__tests__/orgs-page.test.tsx index 430aa8f0..4cc794f6 100644 --- a/canvas/src/app/__tests__/orgs-page.test.tsx +++ b/canvas/src/app/__tests__/orgs-page.test.tsx @@ -15,7 +15,8 @@ * - Polling: provisioning orgs schedule a 5s refresh (fake timers) */ import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; -import { render, screen, waitFor, cleanup } from "@testing-library/react"; +import { act } from "react"; +import { render, screen, cleanup } from "@testing-library/react"; // ── Hoisted mocks ──────────────────────────────────────────────────────────── // vi.mock factories are hoisted above imports; any captured references must @@ -36,6 +37,12 @@ vi.mock("@/lib/api", () => ({ PLATFORM_URL: 
"https://cp.test", })); +// Mock TermsGate to a pass-through so it doesn't make network calls that +// consume the mockFetch queue. OrgsPage wraps its content in TermsGate. +vi.mock("@/components/TermsGate", () => ({ + TermsGate: ({ children }: { children: React.ReactNode }) => children, +})); + const mockFetch = vi.fn(); globalThis.fetch = mockFetch as unknown as typeof fetch; @@ -79,12 +86,27 @@ function setLocation(href: string) { } beforeEach(() => { + // Always reset to real timers first. If a previous polling test failed + // before its finally-block ran, fake timers would still be active and + // vi.useFakeTimers() in the polling tests would be a no-op — causing + // setTimeout(0) to hang and the test to time out. + vi.useRealTimers(); + // Now install fake timers for this test's deterministic timing. + vi.useFakeTimers(); vi.clearAllMocks(); + // Reset mock return values so each test starts fresh. + // The mock functions (vi.fn) persist across tests; only their + // per-call behavior is reset here. + mockFetchSession.mockReset(); + mockFetch.mockReset(); setLocation("https://moleculesai.app/orgs"); }); afterEach(() => { cleanup(); + // Restore real timers so subsequent tests (and vitest internals) + // aren't polluted by fake timer state from a previous test. 
+ vi.useRealTimers(); }); // ── Tests ──────────────────────────────────────────────────────────────────── @@ -93,7 +115,8 @@ describe("/orgs — auth guard", () => { it("redirects to login when session is null", async () => { mockFetchSession.mockResolvedValueOnce(null); render(); - await waitFor(() => expect(mockRedirectToLogin).toHaveBeenCalled()); + await vi.advanceTimersByTimeAsync(50); + expect(mockRedirectToLogin).toHaveBeenCalled(); // Must not attempt to fetch /cp/orgs before auth is established expect(mockFetch).not.toHaveBeenCalledWith( expect.stringContaining("/cp/orgs"), @@ -104,20 +127,22 @@ describe("/orgs — auth guard", () => { describe("/orgs — error state", () => { it("shows error + Retry button when /cp/orgs fails", async () => { - mockFetchSession.mockResolvedValueOnce({ userId: "u-1" }); + mockFetchSession.mockResolvedValue({ userId: "u-1" }); mockFetch.mockResolvedValueOnce(notOk(500, "db down")); render(); - await waitFor(() => expect(screen.getByText(/Error:/)).toBeTruthy()); + await act(async () => { await vi.advanceTimersByTimeAsync(50); }); + expect(screen.getByText(/Error:/)).toBeTruthy(); expect(screen.getByRole("button", { name: /retry/i })).toBeTruthy(); }); }); describe("/orgs — empty list", () => { it("renders EmptyState with CreateOrgForm when user has zero orgs", async () => { - mockFetchSession.mockResolvedValueOnce({ userId: "u-1" }); + mockFetchSession.mockResolvedValue({ userId: "u-1" }); mockFetch.mockResolvedValueOnce(okJson({ orgs: [] })); render(); - await waitFor(() => expect(screen.getByText(/don't have any organizations/i)).toBeTruthy()); + await act(async () => { await vi.advanceTimersByTimeAsync(50); }); + expect(screen.getByText(/don't have any organizations/i)).toBeTruthy(); expect(screen.getByRole("button", { name: /create organization/i })).toBeTruthy(); }); }); @@ -126,7 +151,7 @@ describe("/orgs — CTAs by status", () => { const session = { userId: "u-1" }; it("running → Open link targets {slug}.moleculesai.app", 
async () => { - mockFetchSession.mockResolvedValueOnce(session); + mockFetchSession.mockResolvedValue(session); mockFetch.mockResolvedValueOnce( okJson({ orgs: [ @@ -143,12 +168,13 @@ describe("/orgs — CTAs by status", () => { }) ); render(); - const link = (await screen.findByRole("link", { name: /open/i })) as HTMLAnchorElement; + await act(async () => { await vi.advanceTimersByTimeAsync(50); }); + const link = screen.getByRole("link", { name: /open/i }) as HTMLAnchorElement; expect(link.href).toBe("https://acme.moleculesai.app/"); }); it("awaiting_payment → Complete payment link to /pricing?org=", async () => { - mockFetchSession.mockResolvedValueOnce(session); + mockFetchSession.mockResolvedValue(session); mockFetch.mockResolvedValueOnce( okJson({ orgs: [ @@ -165,14 +191,15 @@ describe("/orgs — CTAs by status", () => { }) ); render(); - const link = (await screen.findByRole("link", { + await act(async () => { await vi.advanceTimersByTimeAsync(50); }); + const link = screen.getByRole("link", { name: /complete payment/i, - })) as HTMLAnchorElement; + }) as HTMLAnchorElement; expect(link.getAttribute("href")).toBe("/pricing?org=beta-co"); }); it("failed → mailto support link", async () => { - mockFetchSession.mockResolvedValueOnce(session); + mockFetchSession.mockResolvedValue(session); mockFetch.mockResolvedValueOnce( okJson({ orgs: [ @@ -189,9 +216,10 @@ describe("/orgs — CTAs by status", () => { }) ); render(); - const link = (await screen.findByRole("link", { + await act(async () => { await vi.advanceTimersByTimeAsync(50); }); + const link = screen.getByRole("link", { name: /contact support/i, - })) as HTMLAnchorElement; + }) as HTMLAnchorElement; expect(link.getAttribute("href")).toBe("mailto:support@moleculesai.app"); }); }); @@ -200,7 +228,7 @@ describe("/orgs — post-checkout banner", () => { it("renders CheckoutBanner when ?checkout=success and scrubs the URL", async () => { setLocation("https://moleculesai.app/orgs?checkout=success"); const replaceState = 
vi.spyOn(window.history, "replaceState"); - mockFetchSession.mockResolvedValueOnce({ userId: "u-1" }); + mockFetchSession.mockResolvedValue({ userId: "u-1" }); mockFetch.mockResolvedValueOnce( okJson({ orgs: [ @@ -217,7 +245,8 @@ describe("/orgs — post-checkout banner", () => { }) ); render(); - expect(await screen.findByText(/Payment confirmed/i)).toBeTruthy(); + await act(async () => { await vi.advanceTimersByTimeAsync(50); }); + expect(screen.getByText(/Payment confirmed/i)).toBeTruthy(); // URL must be rewritten to drop the ?checkout flag so reload doesn't re-show the banner expect(replaceState).toHaveBeenCalled(); const callArgs = replaceState.mock.calls[0]; @@ -225,22 +254,21 @@ describe("/orgs — post-checkout banner", () => { }); it("does NOT render CheckoutBanner without ?checkout=success", async () => { - mockFetchSession.mockResolvedValueOnce({ userId: "u-1" }); + mockFetchSession.mockResolvedValue({ userId: "u-1" }); mockFetch.mockResolvedValueOnce(okJson({ orgs: [] })); render(); - await waitFor(() => - expect(screen.getByText(/don't have any organizations/i)).toBeTruthy() - ); + await act(async () => { await vi.advanceTimersByTimeAsync(50); }); + expect(screen.getByText(/don't have any organizations/i)).toBeTruthy(); expect(screen.queryByText(/Payment confirmed/i)).toBeNull(); }); }); describe("/orgs — fetch includes credentials + timeout signal", () => { it("/cp/orgs fetch is called with credentials:include and an AbortSignal", async () => { - mockFetchSession.mockResolvedValueOnce({ userId: "u-1" }); + mockFetchSession.mockResolvedValue({ userId: "u-1" }); mockFetch.mockResolvedValueOnce(okJson({ orgs: [] })); render(); - await waitFor(() => expect(mockFetch).toHaveBeenCalled()); + await act(async () => { await vi.advanceTimersByTimeAsync(50); }); const callArgs = mockFetch.mock.calls.find((c) => String(c[0]).includes("/cp/orgs") ); @@ -258,111 +286,98 @@ describe("/orgs — fetch includes credentials + timeout signal", () => { describe("/orgs — 
polling of in-flight orgs", () => { it("schedules a 5s refetch when at least one org is provisioning", async () => { - vi.useFakeTimers({ shouldAdvanceTime: true }); - try { - mockFetchSession.mockResolvedValue({ userId: "u-1" }); - mockFetch.mockResolvedValueOnce( - okJson({ - orgs: [ - { - id: "o-1", - slug: "acme", - name: "Acme", - plan: "pro", - status: "provisioning", - created_at: "", - updated_at: "", - }, - ], - }) - ); - // Second fetch (the poll refresh) returns a running org so we can - // observe the state flip — and to let the test stop re-scheduling. - mockFetch.mockResolvedValueOnce( - okJson({ - orgs: [ - { - id: "o-1", - slug: "acme", - name: "Acme", - plan: "pro", - status: "running", - created_at: "", - updated_at: "", - }, - ], - }) - ); + // beforeEach already set up fake timers; advance time to fire the 5s poll. + mockFetchSession.mockResolvedValue({ userId: "u-1" }); + // First /cp/orgs returns provisioning orgs so a poll is scheduled. + // Second returns running orgs to observe the state flip stop re-scheduling. 
+ mockFetch.mockResolvedValueOnce( + okJson({ + orgs: [ + { + id: "o-1", + slug: "acme", + name: "Acme", + plan: "pro", + status: "provisioning", + created_at: "", + updated_at: "", + }, + ], + }) + ); + mockFetch.mockResolvedValueOnce( + okJson({ + orgs: [ + { + id: "o-1", + slug: "acme", + name: "Acme", + plan: "pro", + status: "running", + created_at: "", + updated_at: "", + }, + ], + }) + ); - render(); - // First fetch resolves - await vi.waitFor(() => expect(mockFetch).toHaveBeenCalledTimes(1)); - // Advance past the 5s scheduled refresh - await vi.advanceTimersByTimeAsync(5_100); - // Second fetch is the poll refresh - await vi.waitFor(() => expect(mockFetch).toHaveBeenCalledTimes(2)); - } finally { - vi.useRealTimers(); - } + render(); + await vi.advanceTimersByTimeAsync(5_100); + // First /cp/orgs + second poll /cp/orgs + expect(mockFetch).toHaveBeenCalledTimes(2); }); it("does NOT schedule a refetch when all orgs are running", async () => { - vi.useFakeTimers({ shouldAdvanceTime: true }); - try { - mockFetchSession.mockResolvedValue({ userId: "u-1" }); - mockFetch.mockResolvedValueOnce( - okJson({ - orgs: [ - { - id: "o-1", - slug: "acme", - name: "Acme", - plan: "pro", - status: "running", - created_at: "", - updated_at: "", - }, - ], - }) - ); - render(); - await vi.waitFor(() => expect(mockFetch).toHaveBeenCalledTimes(1)); - // Advance well past the 5s poll window — no second fetch must fire - await vi.advanceTimersByTimeAsync(10_000); - expect(mockFetch).toHaveBeenCalledTimes(1); - } finally { - vi.useRealTimers(); - } + // beforeEach already set up fake timers. 
+ mockFetchSession.mockResolvedValue({ userId: "u-1" }); + mockFetch.mockResolvedValueOnce( + okJson({ + orgs: [ + { + id: "o-1", + slug: "acme", + name: "Acme", + plan: "pro", + status: "running", + created_at: "", + updated_at: "", + }, + ], + }) + ); + render(); + await vi.advanceTimersByTimeAsync(10_000); + // Only the initial /cp/orgs — no poll fires (stillMoving = false) + expect(mockFetch).toHaveBeenCalledTimes(1); }); it("clears the poll timer on unmount — no fetch after unmount", async () => { - vi.useFakeTimers({ shouldAdvanceTime: true }); - try { - mockFetchSession.mockResolvedValue({ userId: "u-1" }); - mockFetch.mockResolvedValueOnce( - okJson({ - orgs: [ - { - id: "o-1", - slug: "acme", - name: "Acme", - plan: "pro", - status: "awaiting_payment", - created_at: "", - updated_at: "", - }, - ], - }) - ); - const { unmount } = render(); - await vi.waitFor(() => expect(mockFetch).toHaveBeenCalledTimes(1)); - // Tear down BEFORE the 5s timer fires - unmount(); - await vi.advanceTimersByTimeAsync(10_000); - // Fetch count must stay at 1 — the cleanup cleared the timer - expect(mockFetch).toHaveBeenCalledTimes(1); - } finally { - vi.useRealTimers(); - } + // beforeEach already set up fake timers. + mockFetchSession.mockResolvedValue({ userId: "u-1" }); + mockFetch.mockResolvedValueOnce( + okJson({ + orgs: [ + { + id: "o-1", + slug: "acme", + name: "Acme", + plan: "pro", + status: "awaiting_payment", + created_at: "", + updated_at: "", + }, + ], + }) + ); + const { unmount } = render(); + // Flush microtasks so the effect runs and schedules the 5s poll before we unmount. 
+ await vi.advanceTimersByTimeAsync(0); + // Now the effect has run (scheduling the poll) but not the poll itself + expect(mockFetch).toHaveBeenCalledTimes(1); + // Tear down — cleanup must clear the 5s timer + unmount(); + // Advance timers — the cleanup cleared the 5s timer, so no poll fires + await vi.advanceTimersByTimeAsync(10_000); + expect(mockFetch).toHaveBeenCalledTimes(1); }); }); diff --git a/canvas/src/app/orgs/page.tsx b/canvas/src/app/orgs/page.tsx index 5f1787d6..e8163e24 100644 --- a/canvas/src/app/orgs/page.tsx +++ b/canvas/src/app/orgs/page.tsx @@ -154,7 +154,7 @@ function CheckoutBanner() {

✓ Payment confirmed. Your workspace is spinning up now — this page - refreshes automatically when it's ready. + refreshes automatically when it's ready.

); @@ -318,7 +318,7 @@ function EmptyState({ banner }: { banner?: React.ReactNode }) { {banner}

- You don't have any organizations yet. Create one to get started — your + You don't have any organizations yet. Create one to get started — your workspace spins up automatically once billing is set up.

@@ -352,7 +352,8 @@ function CreateOrgForm({ onCreated }: { onCreated: (slug: string) => void }) { }); if (!res.ok) { const body = await res.text(); - throw new Error(`${res.status}: ${body}`); + console.error(`[orgs] create ${res.status}: ${body}`); + throw new Error(`Failed to create organization (${res.status})`); } onCreated(slug); } catch (e) { diff --git a/canvas/src/components/BatchActionBar.tsx b/canvas/src/components/BatchActionBar.tsx index 5421fbe2..f207e843 100644 --- a/canvas/src/components/BatchActionBar.tsx +++ b/canvas/src/components/BatchActionBar.tsx @@ -1,6 +1,6 @@ "use client"; -import { useState } from "react"; +import { useEffect, useState } from "react"; import { createPortal } from "react-dom"; import { useCanvasStore } from "@/store/canvas"; import { ConfirmDialog } from "./ConfirmDialog"; @@ -17,14 +17,32 @@ export function BatchActionBar() { const [pending, setPending] = useState(null); const [busy, setBusy] = useState(false); + // Retry survivorship (QA pr-949 follow-up): when a batch action partial-fails + // and leaves a single survivor id, the default `count < 2` gate unmounts the + // bar and forces per-node context-menu retry. Track "active failure" so the + // bar stays mounted with a single item and the user can click the same action + // button to retry without re-selecting. Resets on success or Escape/clear. + const [hasFailedBatch, setHasFailedBatch] = useState(false); const count = selectedNodeIds.size; - if (count < 2) return null; + // Reset failure flag when the user clears selection (Escape / ✕ button). + useEffect(() => { + if (count === 0 && hasFailedBatch) setHasFailedBatch(false); + }, [count, hasFailedBatch]); + // Hide when nothing is selected. Hide for single-node selection UNLESS a + // partial-failure left a survivor awaiting retry. 
+ if (count === 0) return null; + if (count < 2 && !hasFailedBatch) return null; + + // Message copy must handle both multi (count >= 2) and single-survivor retry + // (count === 1 && hasFailedBatch). Use a helper so we render singular form + // only when there is exactly one survivor to act on. + const plural = (n: number) => (n === 1 ? "workspace" : "workspaces"); const confirmMessages: Record, string> = { - restart: `Restart ${count} workspace${count !== 1 ? "s" : ""}? Each will briefly go offline while it restarts.`, - pause: `Pause ${count} workspace${count !== 1 ? "s" : ""}? Their containers will be stopped.`, - delete: `Permanently delete ${count} workspace${count !== 1 ? "s" : ""}? This cannot be undone.`, + restart: `Restart ${count} ${plural(count)}? Each will briefly go offline while it restarts.`, + pause: `Pause ${count} ${plural(count)}? Their containers will be stopped.`, + delete: `Permanently delete ${count} ${plural(count)}? This cannot be undone.`, }; const confirmLabels: Record, string> = { @@ -40,10 +58,18 @@ export function BatchActionBar() { if (pending === "restart") await batchRestart(); if (pending === "pause") await batchPause(); if (pending === "delete") await batchDelete(); - showToast(`${pending.charAt(0).toUpperCase() + pending.slice(1)} applied to ${count} workspace${count !== 1 ? "s" : ""}`, "success"); + // Reaching here means every store call fulfilled (the store throws on + // any partial failure), so `count` is the actual success count. + showToast(`${pending.charAt(0).toUpperCase() + pending.slice(1)} applied to ${count} ${plural(count)}`, "success"); + setHasFailedBatch(false); clearSelection(); - } catch { - showToast(`Batch ${pending} failed`, "error"); + } catch (e) { + const msg = e instanceof Error && e.message ? 
e.message : `Batch ${pending} failed`; + showToast(msg, "error"); + // Leave the failed IDs selected (the store preserved them) so the user + // can retry without re-selecting, and set hasFailedBatch so the bar + // stays mounted even if a single survivor remains. + setHasFailedBatch(true); } finally { setBusy(false); setPending(null); diff --git a/canvas/src/components/Canvas.tsx b/canvas/src/components/Canvas.tsx index 16335608..0cb3c3de 100644 --- a/canvas/src/components/Canvas.tsx +++ b/canvas/src/components/Canvas.tsx @@ -87,11 +87,23 @@ function CanvasInner() { const onNodeDrag: OnNodeDrag> = useCallback( (_event, node) => { - const intersecting = getIntersectingNodes(node); - const target = intersecting.find( - (n) => n.id !== node.id && !isDescendant(node.id, n.id) - ); - setDragOverNode(target?.id ?? null); + // Only consider nodes within a proximity threshold as nest targets. + // Without this check, getIntersectingNodes returns any node whose bounding + // boxes overlap — which can be hundreds of pixels away on a sparse canvas, + // causing accidental nesting when the user drags a node across the board. + const thresholdPx = 100; + const threshold = thresholdPx * thresholdPx; // compare squared distances + let nearest: { id: string; dist: number } | null = null; + for (const candidate of getIntersectingNodes(node)) { + if (candidate.id === node.id || isDescendant(node.id, candidate.id)) continue; + const dx = candidate.position.x - node.position.x; + const dy = candidate.position.y - node.position.y; + const dist2 = dx * dx + dy * dy; + if (dist2 <= threshold && (!nearest || dist2 < nearest.dist)) { + nearest = { id: candidate.id, dist: dist2 }; + } + } + setDragOverNode(nearest?.id ?? 
null); }, [getIntersectingNodes, isDescendant, setDragOverNode] ); @@ -117,6 +129,12 @@ function CanvasInner() { } }, [pendingDelete, setPendingDelete, removeNode]); + // Cascade guard: include child count in the warning message when the workspace + // has children, so the user understands the blast radius before clicking Delete All. + const cascadeMessage = pendingDelete?.hasChildren + ? `⚠️ Deleting "${pendingDelete.name}" will permanently delete all child workspaces and their data. This cannot be undone.` + : null; + const onNodeDragStop: OnNodeDrag> = useCallback( (_event, node) => { const { dragOverNodeId, nodes: allNodes } = useCanvasStore.getState(); @@ -381,9 +399,11 @@ function CanvasInner() { {/* Confirmation dialog for workspace delete — driven by store */} setPendingDelete(null)} diff --git a/canvas/src/components/ConsoleModal.tsx b/canvas/src/components/ConsoleModal.tsx new file mode 100644 index 00000000..1a43ca10 --- /dev/null +++ b/canvas/src/components/ConsoleModal.tsx @@ -0,0 +1,160 @@ +"use client"; + +import { useEffect, useState } from "react"; +import { createPortal } from "react-dom"; +import { api } from "@/lib/api"; +import { showToast } from "@/components/Toaster"; + +interface Props { + workspaceId: string; + workspaceName?: string; + open: boolean; + onClose: () => void; +} + +interface ConsoleResponse { + output: string; + instance_id?: string; +} + +// ConsoleModal renders the EC2 serial console output for a workspace. +// Used by the "View Logs" button on failed/stuck workspaces so operators +// can see the actual cloud-init + runtime startup trace without SSH or +// AWS console access. The tenant platform proxies to the control plane; +// this component just consumes GET /workspaces/:id/console. 
+export function ConsoleModal({ workspaceId, workspaceName, open, onClose }: Props) { + const [output, setOutput] = useState(null); + const [loading, setLoading] = useState(false); + const [error, setError] = useState(null); + const [mounted, setMounted] = useState(false); + + useEffect(() => { + setMounted(true); + }, []); + + useEffect(() => { + if (!open) return; + let ignore = false; + setLoading(true); + setError(null); + setOutput(null); + api + .get(`/workspaces/${workspaceId}/console`) + .then((data) => { + if (ignore) return; + setOutput(data.output || ""); + }) + .catch((e) => { + if (ignore) return; + // 501 = deployment without a control plane (local docker-compose). + // 404 = EC2 instance has been terminated. Match with word-boundary + // regex so a status code appearing inside an unrelated number + // ("15012") doesn't false-match. + const msg = e instanceof Error ? e.message : "Failed to load console output"; + if (/\b501\b/.test(msg)) { + setError("Console output is only available on cloud (SaaS) deployments."); + } else if (/\b404\b/.test(msg)) { + setError("No EC2 instance found for this workspace — it may have been terminated."); + } else { + setError(msg); + } + }) + .finally(() => { + if (!ignore) setLoading(false); + }); + return () => { + ignore = true; + }; + }, [open, workspaceId]); + + useEffect(() => { + if (!open) return; + const handler = (e: KeyboardEvent) => { + if (e.key === "Escape") onClose(); + }; + window.addEventListener("keydown", handler); + return () => window.removeEventListener("keydown", handler); + }, [open, onClose]); + + if (!open || !mounted) return null; + + return createPortal( +
+
+
+
+
+

+ EC2 console output +

+ {workspaceName && ( +
+ {workspaceName} +
+ )} +
+ +
+ +
+ {loading && ( +
+ Loading console output… +
+ )} + {!loading && error && ( +
+ {error} +
+ )} + {!loading && !error && output !== null && ( +
+              {output || "(console output is empty — the instance may still be booting)"}
+            
+ )} +
+ +
+ {output && ( + + )} + +
+
+
, + document.body, + ); +} diff --git a/canvas/src/components/ContextMenu.tsx b/canvas/src/components/ContextMenu.tsx index 4211b7b4..f9010293 100644 --- a/canvas/src/components/ContextMenu.tsx +++ b/canvas/src/components/ContextMenu.tsx @@ -1,6 +1,6 @@ "use client"; -import { useCallback, useEffect, useRef, useState } from "react"; +import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { useCanvasStore, type WorkspaceNodeData } from "@/store/canvas"; import { api } from "@/lib/api"; import { showToast } from "./Toaster"; @@ -18,13 +18,22 @@ interface MenuItem { export function ContextMenu() { const contextMenu = useCanvasStore((s) => s.contextMenu); const closeContextMenu = useCanvasStore((s) => s.closeContextMenu); - const removeNode = useCanvasStore((s) => s.removeNode); const updateNodeData = useCanvasStore((s) => s.updateNodeData); const selectNode = useCanvasStore((s) => s.selectNode); const setPanelTab = useCanvasStore((s) => s.setPanelTab); const nestNode = useCanvasStore((s) => s.nestNode); const contextNodeId = contextMenu?.nodeId ?? null; - const hasChildren = useCanvasStore((s) => contextNodeId ? s.nodes.some((n) => n.data.parentId === contextNodeId) : false); + // Select the full nodes array (stable reference across unrelated store + // updates) and derive children via useMemo. Filtering inside the + // selector returned a new array every call, which Zustand's + // useSyncExternalStore saw as "snapshot changed" → schedule + // re-render → loop → React error #185. See canvas-store-snapshots. + const nodes = useCanvasStore((s) => s.nodes); + const children = useMemo( + () => (contextNodeId ? 
nodes.filter((n) => n.data.parentId === contextNodeId) : []), + [nodes, contextNodeId], + ); + const hasChildren = children.length > 0; const setPendingDelete = useCanvasStore((s) => s.setPendingDelete); const ref = useRef(null); const [actionLoading, setActionLoading] = useState(false); @@ -165,7 +174,7 @@ export function ContextMenu() { // it survives ContextMenu unmount. Closing the menu here avoids the // prior race where the portal dialog's Confirm click was treated as // "outside" by the menu's outside-click handler. - setPendingDelete({ id: contextMenu.nodeId, name: contextMenu.nodeData.name }); + setPendingDelete({ id: contextMenu.nodeId, name: contextMenu.nodeData.name, hasChildren, children: children.map(c => ({ id: c.id, name: c.data.name })) }); closeContextMenu(); }, [contextMenu, setPendingDelete, closeContextMenu]); diff --git a/canvas/src/components/DeleteCascadeConfirmDialog.tsx b/canvas/src/components/DeleteCascadeConfirmDialog.tsx new file mode 100644 index 00000000..e31114b7 --- /dev/null +++ b/canvas/src/components/DeleteCascadeConfirmDialog.tsx @@ -0,0 +1,167 @@ +"use client"; + +import { useEffect, useRef, useState } from "react"; +import { createPortal } from "react-dom"; + +interface Child { + id: string; + name: string; +} + +interface Props { + name: string; + children: Child[]; + checked: boolean; + onCheckedChange: (v: boolean) => void; + onConfirm: () => void; + onCancel: () => void; +} + +/** + * Cascade-delete confirmation dialog. + * + * When a workspace has children, the operator must explicitly tick + * "I understand this will cascade" before Delete All activates. This + * prevents accidental mass-deletion when ?confirm=true is always sent. + * + * Per WCAG 2.1 SC 2.4.3: focus moves to dialog on open. + * Per WCAG 2.1 SC 3.3.2: labels associated with inputs. 
+ */ +export function DeleteCascadeConfirmDialog({ + name, + children, + checked, + onCheckedChange, + onConfirm, + onCancel, +}: Props) { + const dialogRef = useRef(null); + const [mounted, setMounted] = useState(false); + + useEffect(() => { + setMounted(true); + }, []); + + // Focus first interactive element when dialog opens (WCAG 2.4.3) + useEffect(() => { + if (!mounted) return; + const raf = requestAnimationFrame(() => { + dialogRef.current?.querySelector("button")?.focus(); + }); + return () => cancelAnimationFrame(raf); + }, [mounted]); + + // Keyboard: Escape cancels, Enter confirms (only when enabled), Tab trapped + useEffect(() => { + const handler = (e: KeyboardEvent) => { + if (e.key === "Escape") { onCancel(); return; } + if (e.key === "Enter" && checked) { onConfirm(); return; } + if (e.key === "Tab" && dialogRef.current) { + const focusable = Array.from( + dialogRef.current.querySelectorAll( + 'button, [href], input, select, textarea, [tabindex]:not([tabindex="-1"])' + ) + ).filter((el) => !el.hasAttribute("disabled")); + if (focusable.length === 0) { e.preventDefault(); return; } + const first = focusable[0]; + const last = focusable[focusable.length - 1]; + if (e.shiftKey) { + if (document.activeElement === first) { e.preventDefault(); last.focus(); } + } else { + if (document.activeElement === last) { e.preventDefault(); first.focus(); } + } + } + }; + window.addEventListener("keydown", handler); + return () => window.removeEventListener("keydown", handler); + }, [onCancel, onConfirm, checked]); + + if (!mounted) return null; + + return createPortal( +
+ {/* Backdrop */} +
+ + {/* Dialog */} +
+
+

+ Delete Workspace and Children +

+
+ +
+ {/* Warning */} +
+
+ + + + +
+

+ "{name}" has{" "} + {children.length} child{" "} + {children.length === 1 ? "workspace" : "workspaces"}: +

+
+ + {/* Child list */} +
    + {children.map((c) => ( +
  • {c.name}
  • + ))} +
+ + {/* Cascade warning */} +
+

+ Deleting will cascade — all child workspaces and their data will be permanently removed. This cannot be undone. +

+
+ + {/* Checkbox guard */} + +
+ +
+ + +
+
+
, + document.body + ); +} \ No newline at end of file diff --git a/canvas/src/components/MemoryInspectorPanel.tsx b/canvas/src/components/MemoryInspectorPanel.tsx index eac67c65..52f24991 100644 --- a/canvas/src/components/MemoryInspectorPanel.tsx +++ b/canvas/src/components/MemoryInspectorPanel.tsx @@ -6,26 +6,24 @@ import { ConfirmDialog } from "@/components/ConfirmDialog"; // ── Types ───────────────────────────────────────────────────────────────────── -interface MemoryEntry { - key: string; - value: unknown; - version: number; - /** Omitted by the API when there is no TTL (Go omitempty) */ - expires_at?: string; - updated_at: string; +/** Memory entry returned by GET /workspaces/:id/memories */ +export interface MemoryEntry { + id: string; + workspace_id: string; + content: string; + scope: "LOCAL" | "TEAM" | "GLOBAL"; + namespace: string; + created_at: string; /** * Semantic similarity score (0–1). Only present when the API is queried - * with ?q= and the pgvector backend has been deployed (issue #776). + * with ?q= and the pgvector backend has been deployed. * Absent on plain list fetches — renders gracefully without a badge. */ similarity_score?: number; } -interface WriteResult { - status: string; - key: string; - version: number; -} +type Scope = "LOCAL" | "TEAM" | "GLOBAL"; +const SCOPES: Scope[] = ["LOCAL", "TEAM", "GLOBAL"]; interface Props { workspaceId: string; @@ -34,16 +32,10 @@ interface Props { // ── Helpers ─────────────────────────────────────────────────────────────────── /** - * Sanitise a memory key for use in an HTML id attribute. - * HTML IDs must not contain whitespace; many non-alphanumeric characters also - * cause selector or ARIA failures. Replace every non-alphanumeric character - * with a hyphen, collapse consecutive hyphens, then strip leading/trailing ones. + * Sanitise a memory id for use in an HTML id attribute. 
*/ -function sanitizeId(key: string): string { - return key - .replace(/[^a-zA-Z0-9]/g, "-") - .replace(/-+/g, "-") - .replace(/^-|-$/g, ""); +function sanitizeId(id: string): string { + return id.replace(/[^a-zA-Z0-9]/g, "-"); } function formatRelativeTime(iso: string): string { @@ -54,7 +46,7 @@ function formatRelativeTime(iso: string): string { return new Date(iso).toLocaleDateString(); } -// ── Skeleton rows — shown during re-fetches when entries already exist ──────── +// ── Skeleton rows ────────────────────────────────────────────────────────────── function MemorySkeletonRows() { return ( @@ -79,20 +71,16 @@ function MemorySkeletonRows() { // ── Component ───────────────────────────────────────────────────────────────── export function MemoryInspectorPanel({ workspaceId }: Props) { + const [activeScope, setActiveScope] = useState("LOCAL"); + const [activeNamespace, setActiveNamespace] = useState(""); const [entries, setEntries] = useState([]); const [loading, setLoading] = useState(true); const [error, setError] = useState(null); - // ── Search state ──────────────────────────────────────────────────────────── - /** Raw input value — updated on every keystroke. */ + // ── Search state (debounced) ──────────────────────────────────────────────── const [searchQuery, setSearchQuery] = useState(""); - /** - * Debounced value — drives the API fetch. - * Lags searchQuery by 300 ms to avoid hammering the endpoint on every key. - */ const [debouncedQuery, setDebouncedQuery] = useState(""); - // 300 ms debounce: cancel previous timer whenever searchQuery changes. 
useEffect(() => { const timer = setTimeout( () => setDebouncedQuery(searchQuery.trim()), @@ -101,14 +89,8 @@ export function MemoryInspectorPanel({ workspaceId }: Props) { return () => clearTimeout(timer); }, [searchQuery]); - // ── Expand/edit/delete state (keyed by entry.key — primitives, no new objects) - - const [expandedKey, setExpandedKey] = useState(null); - const [editingKey, setEditingKey] = useState(null); - const [editValue, setEditValue] = useState(""); - const [editError, setEditError] = useState(null); - const [saving, setSaving] = useState(false); - const [pendingDeleteKey, setPendingDeleteKey] = useState(null); + // ── Delete state ───────────────────────────────────────────────────────────── + const [pendingDeleteId, setPendingDeleteId] = useState(null); // ── Data loading ──────────────────────────────────────────────────────────── @@ -116,12 +98,15 @@ export function MemoryInspectorPanel({ workspaceId }: Props) { setLoading(true); setError(null); try { - const url = debouncedQuery - ? `/workspaces/${workspaceId}/memory?q=${encodeURIComponent(debouncedQuery)}` - : `/workspaces/${workspaceId}/memory`; + const params = new URLSearchParams(); + params.set("scope", activeScope); + if (debouncedQuery) params.set("q", debouncedQuery); + if (activeNamespace) params.set("namespace", activeNamespace); + + const url = `/workspaces/${workspaceId}/memories?${params.toString()}`; const data = await api.get(url); + // When a semantic query is active, sort by similarity_score descending. - // Entries without a score (older backend) fall to the end gracefully. const sorted = debouncedQuery ? [...data].sort( (a, b) => (b.similarity_score ?? 0) - (a.similarity_score ?? 0) @@ -129,123 +114,70 @@ export function MemoryInspectorPanel({ workspaceId }: Props) { : data; setEntries(sorted); } catch (e) { - setError(e instanceof Error ? e.message : "Failed to load memory entries"); + setError(e instanceof Error ? 
e.message : "Failed to load memories"); setEntries([]); } finally { setLoading(false); } - }, [workspaceId, debouncedQuery]); + }, [workspaceId, activeScope, debouncedQuery, activeNamespace]); useEffect(() => { loadEntries(); }, [loadEntries]); - // ── Edit handlers ─────────────────────────────────────────────────────────── - - const startEdit = useCallback((entry: MemoryEntry) => { - setEditingKey(entry.key); - setEditValue(JSON.stringify(entry.value, null, 2)); - setEditError(null); - }, []); - - const cancelEdit = useCallback(() => { - setEditingKey(null); - setEditValue(""); - setEditError(null); - }, []); - - const saveEdit = useCallback( - async (entry: MemoryEntry) => { - let parsed: unknown; - try { - parsed = JSON.parse(editValue); - } catch { - setEditError("Invalid JSON — fix the syntax before saving"); - return; - } - - setSaving(true); - setEditError(null); - - // Optimistic update — capture rollback snapshot before mutating - const snapshot = entries; - setEntries((prev) => - prev.map((e) => - e.key === entry.key - ? { - ...e, - value: parsed, - version: e.version + 1, - updated_at: new Date().toISOString(), - } - : e - ) - ); - setEditingKey(null); - setEditValue(""); - - try { - await api.post(`/workspaces/${workspaceId}/memory`, { - key: entry.key, - value: parsed, - if_match_version: entry.version, - }); - } catch (e) { - // Roll back optimistic update on any error - setEntries(snapshot); - setEditingKey(entry.key); - setEditValue(JSON.stringify(entry.value, null, 2)); - const msg = e instanceof Error ? e.message : "Save failed"; - if (msg.includes("409") || msg.toLowerCase().includes("mismatch")) { - setEditError( - "Version conflict — entry changed elsewhere. Reload to see latest." 
- ); - } else { - setEditError(msg); - } - } finally { - setSaving(false); - } - }, - [entries, editValue, workspaceId] - ); - // ── Delete handlers ───────────────────────────────────────────────────────── const confirmDelete = useCallback(async () => { - if (!pendingDeleteKey) return; - const key = pendingDeleteKey; - setPendingDeleteKey(null); + if (!pendingDeleteId) return; + const id = pendingDeleteId; + setPendingDeleteId(null); // Optimistic removal - setEntries((prev) => prev.filter((e) => e.key !== key)); - if (expandedKey === key) setExpandedKey(null); + setEntries((prev) => prev.filter((e) => e.id !== id)); try { - await api.del( - `/workspaces/${workspaceId}/memory/${encodeURIComponent(key)}` - ); + await api.del(`/workspaces/${workspaceId}/memories/${encodeURIComponent(id)}`); } catch (e) { setError(e instanceof Error ? e.message : "Delete failed — reloading..."); await loadEntries(); } - }, [pendingDeleteKey, expandedKey, workspaceId, loadEntries]); + }, [pendingDeleteId, workspaceId, loadEntries]); // ── Render ────────────────────────────────────────────────────────────────── - // Full-screen loader — only on the very first fetch (no entries cached yet). if (loading && entries.length === 0 && !error) { return (
- Loading memory… + Loading memories…
); } return (
- {/* Search bar */} + {/* Scope tabs */}
+
+ {SCOPES.map((scope) => ( + + ))} +
+
+ + {/* Search bar + namespace filter */} +
{/* Magnifying glass icon */} setSearchQuery(e.target.value)} placeholder="Semantic search…" - aria-label="Search memory entries" + aria-label="Search memories" className="w-full bg-zinc-900 border border-zinc-700/60 focus:border-blue-500/60 rounded-lg pl-8 pr-7 py-1.5 text-[11px] text-zinc-200 placeholder-zinc-600 focus:outline-none transition-colors" /> - {/* Clear button — only shown when there is a query */} {searchQuery && ( )}
+ + {/* Namespace filter */} +
+ + setActiveNamespace(e.target.value)} + placeholder="all namespaces" + aria-label="Filter by namespace" + className="flex-1 bg-zinc-900 border border-zinc-700/60 focus:border-blue-500/60 rounded px-2 py-1 text-[11px] text-zinc-200 placeholder-zinc-600 focus:outline-none transition-colors min-w-0" + /> +
{/* Toolbar */} @@ -290,13 +236,13 @@ export function MemoryInspectorPanel({ workspaceId }: Props) { {debouncedQuery ? `${entries.length} result${entries.length !== 1 ? "s" : ""}` : entries.length === 1 - ? "1 entry" - : `${entries.length} entries`} + ? "1 memory" + : `${entries.length} memories`} @@ -316,11 +262,9 @@ export function MemoryInspectorPanel({ workspaceId }: Props) { {/* Content */}
{loading ? ( - /* Skeleton rows — visible during search-transition re-fetches */ ) : entries.length === 0 ? ( debouncedQuery ? ( - /* Search-specific empty state */

@@ -341,56 +285,40 @@ export function MemoryInspectorPanel({ workspaceId }: Props) {

) : ( - /* Default empty state */
-

No memory entries yet

+

No {activeScope} memories

- Memory entries will appear here when the workspace writes to its KV - store. + {activeScope === "LOCAL" + ? "This workspace has not written any local memories yet." + : activeScope === "TEAM" + ? "No team memories shared with this workspace yet." + : "No global memories exist yet."}

) ) : (
- {entries.map((entry) => { - const isExpanded = expandedKey === entry.key; - const isEditing = editingKey === entry.key; - return ( - { - const next = isExpanded ? null : entry.key; - setExpandedKey(next); - if (!next && isEditing) cancelEdit(); - }} - onEditValueChange={setEditValue} - onStartEdit={() => startEdit(entry)} - onSave={() => saveEdit(entry)} - onCancelEdit={cancelEdit} - onDelete={() => setPendingDeleteKey(entry.key)} - /> - ); - })} + {entries.map((entry) => ( + setPendingDeleteId(entry.id)} + /> + ))}
)}
{/* Delete confirmation dialog */} setPendingDeleteKey(null)} + onCancel={() => setPendingDeleteId(null)} />
); @@ -400,155 +328,97 @@ export function MemoryInspectorPanel({ workspaceId }: Props) { interface MemoryEntryRowProps { entry: MemoryEntry; - isExpanded: boolean; - isEditing: boolean; - editValue: string; - editError: string | null; - saving: boolean; - onToggle: () => void; - onEditValueChange: (v: string) => void; - onStartEdit: () => void; - onSave: () => void; - onCancelEdit: () => void; onDelete: () => void; } -function MemoryEntryRow({ - entry, - isExpanded, - isEditing, - editValue, - editError, - saving, - onToggle, - onEditValueChange, - onStartEdit, - onSave, - onCancelEdit, - onDelete, -}: MemoryEntryRowProps) { - const bodyId = `mem-body-${sanitizeId(entry.key)}`; +function MemoryEntryRow({ entry, onDelete }: MemoryEntryRowProps) { + const [expanded, setExpanded] = useState(false); + const bodyId = `mem-body-${sanitizeId(entry.id)}`; + return (
- {/* Header row — click to expand/collapse */} + {/* Header row */} {/* Expanded body */} - {isExpanded && ( + {expanded && (
- {entry.expires_at && ( -

- Expires: {new Date(entry.expires_at).toLocaleString()} -

- )} - - {isEditing ? ( - /* Edit mode */ -
-