diff --git a/.gitea/workflows/publish-runtime-autobump.yml b/.gitea/workflows/publish-runtime-autobump.yml
new file mode 100644
index 00000000..85afdafd
--- /dev/null
+++ b/.gitea/workflows/publish-runtime-autobump.yml
@@ -0,0 +1,100 @@
+name: publish-runtime-autobump
+
+# Auto-bump-on-workspace-edit half of the publish pipeline.
+#
+# Why this file exists (issue #351):
+#   Gitea Actions does not correctly disambiguate `paths:` from `tags:`
+#   when both are bundled under a single `on.push` key. The result is
+#   that tag pushes get filtered out and `publish-runtime.yml` never
+#   fires — `action_run` rows: 0. This was unnoticed pre-2026-05-11
+#   because PYPI_TOKEN was absent (publishes would have failed anyway).
+#
+# Split design:
+#   - publish-runtime.yml          : on.push.tags only (the publisher)
+#   - publish-runtime-autobump.yml : on.push.branches+paths (this file — the version-bumper)
+#
+# This file computes the next version from PyPI's latest, pushes a
+# `runtime-v$VERSION` tag, and exits. The tag push then triggers
+# publish-runtime.yml via its tags-only trigger.
+#
+# Concurrency: shares the `publish-runtime` group with publish-runtime.yml
+# so concurrent workspace pushes serialize at the bump step. Without
+# this, two pushes minutes apart could both read PyPI latest=0.1.129
+# and try to tag 0.1.130 simultaneously, only one of which would land.
+
+on:
+  push:
+    branches:
+      - main
+      - staging
+    paths:
+      - "workspace/**"
+
+permissions:
+  contents: write # required to push tags back
+
+concurrency:
+  group: publish-runtime
+  cancel-in-progress: false
+
+jobs:
+  autobump-and-tag:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+        with:
+          # Fetch full tag list so the bump logic can sanity-check against
+          # what's already in this repo (catches collision with prior
+          # manual tag pushes).
+          fetch-depth: 0
+
+      - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
+        with:
+          python-version: "3.11"
+
+      - name: Compute next version from PyPI latest
+        id: bump
+        run: |
+          set -eu
+          LATEST=$(curl -fsS --retry 3 https://pypi.org/pypi/molecule-ai-workspace-runtime/json \
+            | python -c "import sys,json; print(json.load(sys.stdin)['info']['version'])")
+          MAJOR=$(echo "$LATEST" | cut -d. -f1)
+          MINOR=$(echo "$LATEST" | cut -d. -f2)
+          PATCH=$(echo "$LATEST" | cut -d. -f3)
+          VERSION="${MAJOR}.${MINOR}.$((PATCH+1))"
+          echo "PyPI latest=$LATEST -> next=$VERSION"
+          if ! echo "$VERSION" | grep -qE '^[0-9]+\.[0-9]+\.[0-9]+$'; then
+            echo "::error::computed version $VERSION does not match PEP 440 X.Y.Z"
+            exit 1
+          fi
+          if git tag --list | grep -qx "runtime-v$VERSION"; then
+            echo "::error::tag runtime-v$VERSION already exists in this repo. Manual intervention required (PyPI and Gitea tag history are out of sync)."
+            exit 1
+          fi
+          echo "version=$VERSION" >> "$GITHUB_OUTPUT"
+
+      - name: Push runtime-v${{ steps.bump.outputs.version }} tag
+        env:
+          DISPATCH_TOKEN: ${{ secrets.DISPATCH_TOKEN }}
+          VERSION: ${{ steps.bump.outputs.version }}
+          GITEA_URL: https://git.moleculesai.app
+        run: |
+          set -eu
+          if [ -z "$DISPATCH_TOKEN" ]; then
+            echo "::error::DISPATCH_TOKEN secret is not set — needed to push the tag back to molecule-core."
+            exit 1
+          fi
+          git config user.name "publish-runtime autobump"
+          git config user.email "publish-runtime@moleculesai.app"
+          git tag -a "runtime-v$VERSION" \
+            -m "Auto-bump on workspace/** edit on $GITHUB_REF" \
+            -m "Triggered by: $GITHUB_REF @ $GITHUB_SHA" \
+            -m "publish-runtime.yml will pick up this tag and upload to PyPI"
+          # Push via DISPATCH_TOKEN (a Gitea PAT). Using the bot identity
+          # ensures the resulting tag-push event is dispatched to
+          # publish-runtime.yml; act_runner's default GITHUB_TOKEN cannot
+          # trigger downstream workflows.
+          # (fix: dropped a stray scheme-less `git remote set-url` that was immediately overwritten below)
+          git remote set-url origin "https://x-access-token:${DISPATCH_TOKEN}@${GITEA_URL#https://}/molecule-ai/molecule-core.git"
+          git push origin "runtime-v$VERSION"
+          echo "✓ pushed runtime-v$VERSION — publish-runtime.yml should fire next"
diff --git a/.gitea/workflows/publish-runtime.yml b/.gitea/workflows/publish-runtime.yml
index 36c861e8..cefd9259 100644
--- a/.gitea/workflows/publish-runtime.yml
+++ b/.gitea/workflows/publish-runtime.yml
@@ -12,7 +12,24 @@ name: publish-runtime
 # - Replaced `github.ref_name` (GitHub-only) with `${GITHUB_REF#refs/tags/}`
 #   — Gitea Actions exposes github.ref (the full ref) but not ref_name
 # - Dropped `merge_group` trigger (Gitea has no merge queue)
-# - Dropped `staging` branch trigger (no staging branch exists in this repo)
+#
+# 2026-05-10 (issue #348): originally restored `staging`/`main` branch +
+# `workspace/**` path-filter trigger in PR #349.
+#
+# 2026-05-11 (issue #351): REVERTED the branches+paths trigger from THIS
+# file. Bundling `paths` with `tags` under a single `on.push` key caused
+# Gitea Actions to never dispatch the workflow for tag-push events (0
+# runs in `action_run` for workflow_id='publish-runtime.yml' since the
+# port, including the runtime-v1.0.0 tag — which is why PyPI is still at
+# 0.1.129 despite a v1.0.0 Gitea tag existing).
+#
+# The auto-bump-on-workspace-edit trigger now lives in
+# `.gitea/workflows/publish-runtime-autobump.yml`. That file computes the
+# next version from PyPI's latest and pushes a `runtime-v$VERSION` tag,
+# which THIS file then picks up via the tags-only trigger below.
+#
+# This decoupling means Gitea's path-vs-tag evaluator never has to
+# disambiguate — each file has a single unambiguous trigger shape.
 #
 # PyPI publishing: requires PYPI_TOKEN repository secret (or org-level secret).
 # Set via: repo Settings → Actions → Variables and Secrets → New Secret.
@@ -26,11 +43,17 @@ on:
     tags:
       - "runtime-v*"
   workflow_dispatch:
-    inputs:
-      version:
-        description: "Version to publish (e.g. 0.1.6). Required for manual dispatch."
-        required: true
-        type: string
+    # 2026-05-11 (root cause of #351 / 0 runs ever):
+    # Gitea 1.22.6's workflow parser rejects `workflow_dispatch.inputs.version`
+    # with "unknown on type" — it mis-treats the inputs sub-keys as top-level
+    # `on:` event types. Log line:
+    #   actions/workflows.go:DetectWorkflows() [W] ignore invalid workflow
+    #   "publish-runtime.yml": unknown on type: map["version": {...}]
+    # That `[W] ignore invalid workflow` is silent UX — the workflow never
+    # registers, so it never fires for ANY event (push.tags included).
+    # Removing the inputs block restores parsing. Manual dispatch from the
+    # Gitea UI now triggers the PyPI auto-bump fallback in `Derive version`
+    # below (no `inputs.version` to read).
 
 permissions:
   contents: read
@@ -55,20 +78,15 @@ jobs:
           python-version: "3.11"
           cache: pip
 
-      - name: Derive version (tag, manual input, or PyPI auto-bump)
+      - name: Derive version (tag or PyPI auto-bump)
         id: version
         run: |
-          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
-            VERSION="${{ inputs.version }}"
-          elif echo "$GITHUB_REF" | grep -q "^refs/tags/runtime-v"; then
+          if echo "$GITHUB_REF" | grep -q "^refs/tags/runtime-v"; then
             # Tag is `runtime-vX.Y.Z` — strip the prefix.
             VERSION="${GITHUB_REF#refs/tags/runtime-v}"
           else
-            # Fallback: derive from PyPI latest + patch bump.
-            # (The staging-push auto-bump trigger is dropped on Gitea —
-            # no staging branch exists. This fallback path is kept for
-            # robustness if a future automation uses workflow_dispatch without
-            # an explicit version input.)
+            # workflow_dispatch path (no inputs supported on Gitea 1.22.6) or
+            # any other non-tag trigger: derive from PyPI latest + patch bump.
             LATEST=$(curl -fsS --retry 3 https://pypi.org/pypi/molecule-ai-workspace-runtime/json \
               | python -c "import sys,json; print(json.load(sys.stdin)['info']['version'])")
             MAJOR=$(echo "$LATEST" | cut -d. -f1)
@@ -121,6 +139,14 @@ jobs:
           /tmp/smoke/bin/python "$GITHUB_WORKSPACE/scripts/wheel_smoke.py"
 
       - name: Publish to PyPI
+        # working-directory matches the preceding Build/Verify steps. Without
+        # this, twine runs from the default workspace checkout dir where
+        # `dist/` doesn't exist and fails with:
+        #   ERROR InvalidDistribution: Cannot find file (or expand pattern): 'dist/*'
+        # Caught on the first-ever successful dispatch of this workflow
+        # (run 5097, 2026-05-11 02:08Z) — every other step in the publish
+        # job already had this working-directory; Publish was missing it.
+        working-directory: ${{ runner.temp }}/runtime-build
         env:
           # PYPI_TOKEN: repository secret scoped to molecule-ai-workspace-runtime.
           # Set via: Settings → Actions → Variables and Secrets → New Secret.
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 1c1aab97..550e1d30 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -365,7 +365,7 @@ jobs:
         cache: pip
         cache-dependency-path: workspace/requirements.txt
       - if: needs.changes.outputs.python == 'true'
-        run: pip install -r requirements.txt pytest pytest-asyncio pytest-cov
+        run: pip install -r requirements.txt pytest pytest-asyncio pytest-cov 'sqlalchemy>=2.0.0'
       # Coverage flags + fail-under floor moved into workspace/pytest.ini
       # (issue #1817) so local `pytest` and CI use identical config.
       - if: needs.changes.outputs.python == 'true'
diff --git a/.github/workflows/publish-workspace-server-image.yml b/.github/workflows/publish-workspace-server-image.yml
deleted file mode 100644
index 7d981c93..00000000
--- a/.github/workflows/publish-workspace-server-image.yml
+++ /dev/null
@@ -1,278 +0,0 @@
-name: publish-workspace-server-image
-
-# Builds and pushes Docker images to GHCR on staging or main pushes.
-# EC2 tenant instances pull the tenant image from GHCR. -# -# Branch / tag policy (see Compute tags step for the per-branch logic): -# -# staging push → builds image, tags :staging- + :staging-latest. -# staging-CP pins TENANT_IMAGE=:staging-latest, so it -# picks up staging-branch code automatically. This is -# what makes staging-CP actually test staging-branch -# code instead of "yesterday's main" — pre-fix, this -# workflow only ran on main, so staging tenants -# silently served stale code (#2308 fix RFC #2312 -# landed on staging but never reached tenants because -# staging→main was wedged on path-filter parity bugs). -# -# main push → builds image, tags :staging- + :staging-latest -# (same as before). canary-verify.yml retags -# :staging- → :latest after canary tenants -# green-light the digest. The :staging-latest retag -# on main push is intentional: when main lands AFTER a -# staging push, staging-CP gets the post-promote code -# (which equals what it had + any merge resolution), -# so the canary-on-staging-CP step still runs against -# the prod-bound digest. -# -# In the steady state both branches refresh :staging-latest; the -# semantic is "most recent staging-or-main build of tenant code." -# Drift between the two is bounded by the staging→main auto-promote -# cadence and is corrected on the next staging push. - -on: - push: - branches: [main] - paths: - - 'workspace-server/**' - - 'canvas/**' - - 'manifest.json' - - 'scripts/**' - - '.github/workflows/publish-workspace-server-image.yml' - workflow_dispatch: - -# Serialize per-branch so two rapid staging pushes don't race the same -# :staging-latest tag retag. Allow staging and main to run in parallel -# (different github.ref → different concurrency group) since they -# produce different :staging- tags and last-write-wins on -# :staging-latest is acceptable across branches (the post-promote -# main code equals current staging code in a healthy flow). 
-# -# cancel-in-progress: false → in-flight builds finish; the next push's -# build queues. This avoids a partially-pushed image and keeps the -# canary fleet pin (:staging-) consistent with what was actually -# tested at canary-verify time. -concurrency: - group: publish-workspace-server-image-${{ github.ref }} - cancel-in-progress: false - -permissions: - contents: read - packages: write - -env: - IMAGE_NAME: 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/platform - TENANT_IMAGE_NAME: 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/platform-tenant - -jobs: - build-and-push: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - # github-app-auth sibling-checkout removed 2026-05-07 (#157): - # plugin was dropped + workspace-server/Dockerfile no longer - # COPYs it. - - # ECR auth + buildx setup are now inline in each build step - # below (Task #173, 2026-05-07). - # - # Why moved inline: aws-actions/configure-aws-credentials@v4 + - # aws-actions/amazon-ecr-login@v2 + docker/setup-buildx-action - # all left auth state in places that the actual `docker push` - # couldn't see on Gitea Actions: - # - The actions wrote to a step-scoped DOCKER_CONFIG path - # that didn't survive into subsequent shell steps. - # - Buildx couldn't bridge the runner container ↔ - # operator-host docker daemon auth gap (401 on the - # docker-container driver, "no basic auth credentials" - # with the action-driven login). - # - # Doing AWS+ECR auth inline (`aws ecr get-login-password | - # docker login`) in the same shell step as `docker build` + - # `docker push` is the operator-host manual approach, mapped - # 1:1 into CI. Auth state is guaranteed to live in the env that - # `docker push` actually runs from. 
- # - # Post-suspension target is the operator's ECR org - # (153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/*), - # which already hosts platform-tenant + workspace-template-* + - # runner-base images. AWS creds come from the - # AWS_ACCESS_KEY_ID/SECRET secrets bound to the molecule-cp - # IAM user. Closes #161. - - - name: Compute tags - id: tags - run: | - echo "sha=${GITHUB_SHA::7}" >> "$GITHUB_OUTPUT" - - # Health check: verify Docker daemon is accessible before attempting any - # build steps. This fails loudly at step 1 when the runner's docker.sock - # is inaccessible rather than silently continuing to the build step - # where docker build fails deep in ECR auth with a cryptic error. - - name: Verify Docker daemon access - run: | - set -euo pipefail - echo "::group::Docker daemon health check" - docker info 2>&1 | head -5 || { - echo "::error::Docker daemon is not accessible at /var/run/docker.sock" - echo "::error::Check: (1) daemon running, (2) runner user in docker group, (3) sock perms 660+" - exit 1 - } - echo "Docker daemon OK" - echo "::endgroup::" - - # Pre-clone manifest deps before docker build (Task #173 fix). - # - # Why pre-clone: post-2026-05-06, every workspace-template-* repo on - # Gitea (codex, crewai, deepagents, gemini-cli, langgraph) plus all - # 7 org-template-* repos are private. The pre-fix Dockerfile.tenant - # ran `git clone` inside an in-image stage, which had no auth path - # — every CI build failed with "fatal: could not read Username for - # https://git.moleculesai.app". For weeks, every workspace-server - # rebuild required a manual operator-host push. Now we clone in the - # trusted CI context (where AUTO_SYNC_TOKEN is naturally available) - # and Dockerfile.tenant just COPYs from .tenant-bundle-deps/. - # - # Token shape: AUTO_SYNC_TOKEN is the devops-engineer persona PAT - # (see /etc/molecule-bootstrap/agent-secrets.env). 
Per saved memory - # `feedback_per_agent_gitea_identity_default`, every CI surface uses - # a per-persona token, never the founder PAT. clone-manifest.sh - # embeds it as basic-auth (oauth2:) for the duration of the - # clones, then strips .git directories — the token never enters - # the resulting image. - # - # Idempotent: if a re-run finds populated dirs, clone-manifest.sh - # skips them; safe to retrigger via path-filter or workflow_dispatch. - - name: Pre-clone manifest deps - env: - MOLECULE_GITEA_TOKEN: ${{ secrets.AUTO_SYNC_TOKEN }} - run: | - set -euo pipefail - if [ -z "${MOLECULE_GITEA_TOKEN}" ]; then - echo "::error::AUTO_SYNC_TOKEN secret is empty — register the devops-engineer persona PAT in repo Actions secrets" - exit 1 - fi - mkdir -p .tenant-bundle-deps - bash scripts/clone-manifest.sh \ - manifest.json \ - .tenant-bundle-deps/workspace-configs-templates \ - .tenant-bundle-deps/org-templates \ - .tenant-bundle-deps/plugins - # Sanity-check counts so a silent partial clone fails fast - # instead of producing a half-empty image. - ws_count=$(find .tenant-bundle-deps/workspace-configs-templates -mindepth 1 -maxdepth 1 -type d | wc -l) - org_count=$(find .tenant-bundle-deps/org-templates -mindepth 1 -maxdepth 1 -type d | wc -l) - plugins_count=$(find .tenant-bundle-deps/plugins -mindepth 1 -maxdepth 1 -type d | wc -l) - echo "Cloned: ws=$ws_count org=$org_count plugins=$plugins_count" - # Counts are derived from manifest.json (9 ws / 7 org / 21 - # plugins as of 2026-05-07). If manifest.json grows but the - # clone step regresses silently, the find above caps at the - # actual disk state — but clone-manifest.sh's own EXPECTED vs - # CLONED check (line ~95) is the authoritative fail-fast. - - # Canary-gated release flow: - # - This step always publishes :staging- + :staging-latest. 
- # - On staging push, staging-CP picks up :staging-latest immediately - # (its TENANT_IMAGE pin is :staging-latest) — so staging-branch - # code reaches staging tenants without waiting for main. - # - On main push, canary-verify.yml runs smoke tests against - # canary tenants (which pin :staging-), and on green retags - # :staging- → :latest. Prod tenants pull :latest. - # - On red, :latest stays on the prior good digest — prod is safe. - # - # Why :staging-latest is retagged on main push too: when main lands - # after a staging promote, staging-CP gets the post-promote code so - # the canary-on-staging-CP step still runs against the prod-bound - # digest. In a healthy flow the post-promote main code == the - # current staging code, so this is effectively a no-op except for - # the canary fleet pin handoff. - # - # Pre-fix history: this workflow used to only trigger on main. That - # meant staging-CP served "yesterday's main" indefinitely whenever - # staging→main was wedged. The 2026-04-30 dogfooding session - # surfaced this when RFC #2312 (chat upload HTTP-forward) landed on - # staging but staging tenants kept failing chat upload because they - # were running pre-RFC code. Adding the staging trigger above closes - # that gap. Earlier 2026-04-24 incident: a static :staging- pin - # drifted 10 days behind staging — same class of bug, different - # mechanism. ECR repo molecule-ai/platform created 2026-05-07. - # Build + push platform image with plain `docker` (no buildx). - # GIT_SHA bakes into the Go binary via -ldflags so /buildinfo - # returns it at runtime — see Dockerfile + buildinfo/buildinfo.go. - # The OCI revision label below carries the same value for registry - # tooling; the duplication is intentional. 
- - name: Build & push platform image to ECR (staging- + staging-latest) - env: - IMAGE_NAME: ${{ env.IMAGE_NAME }} - TAG_SHA: staging-${{ steps.tags.outputs.sha }} - TAG_LATEST: staging-latest - GIT_SHA: ${{ github.sha }} - REPO: ${{ github.repository }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: us-east-2 - run: | - set -euo pipefail - # ECR auth in-step so config.json is populated in the same - # shell env that runs `docker push`. ECR get-login-password - # tokens last 12h, plenty for a single-step build+push. - ECR_REGISTRY="${IMAGE_NAME%%/*}" - aws ecr get-login-password --region us-east-2 | \ - docker login --username AWS --password-stdin "${ECR_REGISTRY}" - docker build \ - --file ./workspace-server/Dockerfile \ - --build-arg GIT_SHA="${GIT_SHA}" \ - --label "org.opencontainers.image.source=https://github.com/${REPO}" \ - --label "org.opencontainers.image.revision=${GIT_SHA}" \ - --label "org.opencontainers.image.description=Molecule AI platform (Go API server) — pending canary verify" \ - --tag "${IMAGE_NAME}:${TAG_SHA}" \ - --tag "${IMAGE_NAME}:${TAG_LATEST}" \ - . - docker push "${IMAGE_NAME}:${TAG_SHA}" - docker push "${IMAGE_NAME}:${TAG_LATEST}" - - # Canvas uses same-origin fetches. The tenant Go platform - # reverse-proxies /cp/* to the SaaS CP via its CP_UPSTREAM_URL - # env; the tenant's /canvas/viewport, /approvals/pending, - # /org/templates etc. live on the tenant platform itself. - # Both legs share one origin (the tenant subdomain) so - # PLATFORM_URL="" forces canvas to fetch paths as relative, - # which land same-origin. - # - # Self-hosted / private-label deployments override this at - # build time with a specific backend (e.g. local dev: - # NEXT_PUBLIC_PLATFORM_URL=http://localhost:8080). 
- - name: Build & push tenant image to ECR (staging- + staging-latest) - env: - TENANT_IMAGE_NAME: ${{ env.TENANT_IMAGE_NAME }} - TAG_SHA: staging-${{ steps.tags.outputs.sha }} - TAG_LATEST: staging-latest - GIT_SHA: ${{ github.sha }} - REPO: ${{ github.repository }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: us-east-2 - run: | - set -euo pipefail - # Re-login: the platform-image step's docker login wrote to - # the same config.json, so this is technically redundant — but - # making each push step self-contained keeps the workflow - # robust to step reordering / future extraction. - ECR_REGISTRY="${TENANT_IMAGE_NAME%%/*}" - aws ecr get-login-password --region us-east-2 | \ - docker login --username AWS --password-stdin "${ECR_REGISTRY}" - docker build \ - --file ./workspace-server/Dockerfile.tenant \ - --build-arg NEXT_PUBLIC_PLATFORM_URL= \ - --build-arg GIT_SHA="${GIT_SHA}" \ - --label "org.opencontainers.image.source=https://github.com/${REPO}" \ - --label "org.opencontainers.image.revision=${GIT_SHA}" \ - --label "org.opencontainers.image.description=Molecule AI tenant platform + canvas — pending canary verify" \ - --tag "${TENANT_IMAGE_NAME}:${TAG_SHA}" \ - --tag "${TENANT_IMAGE_NAME}:${TAG_LATEST}" \ - . - docker push "${TENANT_IMAGE_NAME}:${TAG_SHA}" - docker push "${TENANT_IMAGE_NAME}:${TAG_LATEST}" - diff --git a/canvas/src/app/layout.tsx b/canvas/src/app/layout.tsx index 21ec7962..04786994 100644 --- a/canvas/src/app/layout.tsx +++ b/canvas/src/app/layout.tsx @@ -1,6 +1,22 @@ import type { Metadata } from "next"; +import { Inter, JetBrains_Mono } from "next/font/google"; import { cookies, headers } from "next/headers"; import "./globals.css"; + +// Self-hosted at build time → CSP-safe (font-src 'self' covers them +// because Next.js serves the .woff2 from /_next/static). 
Exposed as +// CSS variables so the mobile palette can reference them without +// importing this module. +const interFont = Inter({ + subsets: ["latin"], + display: "swap", + variable: "--font-inter", +}); +const monoFont = JetBrains_Mono({ + subsets: ["latin"], + display: "swap", + variable: "--font-jetbrains", +}); import { AuthGate } from "@/components/AuthGate"; import { CookieConsent } from "@/components/CookieConsent"; import { PurchaseSuccessModal } from "@/components/PurchaseSuccessModal"; @@ -79,7 +95,7 @@ export default async function RootLayout({ dangerouslySetInnerHTML={{ __html: themeBootScript }} /> - + {/* AuthGate is a client component; it checks the session on mount and bounces anonymous users to the control plane's login page diff --git a/canvas/src/app/page.tsx b/canvas/src/app/page.tsx index 0bf8f62c..28cb37d9 100644 --- a/canvas/src/app/page.tsx +++ b/canvas/src/app/page.tsx @@ -4,6 +4,7 @@ import { useEffect, useState } from "react"; import { Canvas } from "@/components/Canvas"; import { Legend } from "@/components/Legend"; import { CommunicationOverlay } from "@/components/CommunicationOverlay"; +import { MobileApp } from "@/components/mobile/MobileApp"; import { Spinner } from "@/components/Spinner"; import { connectSocket, disconnectSocket } from "@/store/socket"; import { useCanvasStore } from "@/store/canvas"; @@ -14,6 +15,23 @@ export default function Home() { const hydrationError = useCanvasStore((s) => s.hydrationError); const setHydrationError = useCanvasStore((s) => s.setHydrationError); const [hydrating, setHydrating] = useState(true); + // < 640px viewport renders the dedicated mobile shell instead of the + // desktop canvas. Tri-state: `null` until matchMedia has resolved, + // then `true|false`. While null we keep the existing loading spinner + // up — that way mobile devices never flash the desktop tree (which + // they would if we defaulted to `false` and only flipped post-mount). 
+ const [isMobile, setIsMobile] = useState(null); + useEffect(() => { + if (typeof window === "undefined" || !window.matchMedia) { + setIsMobile(false); + return; + } + const mq = window.matchMedia("(max-width: 639px)"); + const update = () => setIsMobile(mq.matches); + update(); + mq.addEventListener("change", update); + return () => mq.removeEventListener("change", update); + }, []); // Distinct from hydrationError: platform-down is its own UX path // (different copy, different action — the user's next step is to // check local services, not to retry the API call). Tracked @@ -51,7 +69,10 @@ export default function Home() { }; }, []); - if (hydrating) { + // Hold the spinner while data hydrates OR while the viewport + // resolution hasn't settled yet (avoids a desktop-tree flash on + // mobile devices between SSR-paint and matchMedia). + if (hydrating || isMobile === null) { return (
@@ -66,6 +87,32 @@ export default function Home() { return ; } + if (isMobile) { + return ( + <> + + {hydrationError && ( +
+

{hydrationError}

+ +
+ )} + + ); + } + return ( <> diff --git a/canvas/src/components/Canvas.tsx b/canvas/src/components/Canvas.tsx index 5983b72f..888343b0 100644 --- a/canvas/src/components/Canvas.tsx +++ b/canvas/src/components/Canvas.tsx @@ -308,7 +308,9 @@ function CanvasInner() { showInteractive={false} />