chore(simplify): share FALLBACK_POLL_MS as the tombstone TTL + trim verbose comments

Simplify pass on top of #2069 fix:

- Export FALLBACK_POLL_MS from canvas/src/store/socket.ts and import
  it as TOMBSTONE_TTL_MS in deleteTombstones.ts. Single source of
  truth — tuning one without the other would silently re-open the
  hydrate-races-delete window. Required-fix per simplify reviewer.
- Compress deleteTombstones.ts docstring from 30 lines to 10 — keep
  the "what + why module-level"; drop the long-form problem
  description (issue #2069 carries it).
- Compress canvas.ts call-site comments at removeSubtree (4 lines →
  2) and hydrate (2 lines → 2 but tighter).
- Don't reassign the workspaces parameter inside hydrate — use a
  const `live` and thread it through the two downstream calls
  (computeAutoLayout, buildNodesAndEdges). Same effect, no lint
  smell.
- Trim the canvas.test.ts integration-test preamble.

No behaviour change.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
rabbitblood 2026-04-26 13:52:49 -07:00
parent 7bb0bc39a2
commit 8c69a98da2
4 changed files with 20 additions and 41 deletions

View File

@ -552,11 +552,7 @@ describe("removeSubtree", () => {
}
});
// #2069: a `GET /workspaces` that was IN-FLIGHT before the DELETE
// completed can land AFTER removeSubtree, hydrate the store with a
// stale snapshot, and resurrect deleted nodes. The tombstone path
// (deleteTombstones.ts) makes hydrate skip ids deleted within the
// last 10s. Lock the contract end-to-end.
// #2069: lock the tombstone path end-to-end at the store level.
it("hydrate cannot resurrect ids that removeSubtree just dropped (#2069)", () => {
useCanvasStore.getState().removeSubtree("root");
expect(useCanvasStore.getState().nodes.map((n) => n.id).sort())

View File

@ -833,10 +833,8 @@ export const useCanvasStore = create<CanvasState>((set, get) => ({
}
}
}
// Mark every removed id with a tombstone so an in-flight GET
// /workspaces response that lands AFTER this can't resurrect them
// via hydrate (#2069). Tombstones expire after the WS-fallback
// poll cadence so legitimate re-imports flow normally.
// Tombstone removed ids so an in-flight GET /workspaces can't
// resurrect them via hydrate (#2069).
markDeleted(removed);
set({
nodes: nodes.filter((n) => !removed.has(n.id)),
@ -849,10 +847,10 @@ export const useCanvasStore = create<CanvasState>((set, get) => ({
},
hydrate: (workspaces: WorkspaceData[]) => {
// Filter out workspaces whose ids match a fresh tombstone — they
// were just deleted locally and this snapshot is stale (#2069).
workspaces = workspaces.filter((w) => !wasRecentlyDeleted(w.id));
const layoutOverrides = computeAutoLayout(workspaces);
// Drop ids tombstoned by a recent removeSubtree (#2069 — stale
// in-flight GET /workspaces).
const live = workspaces.filter((w) => !wasRecentlyDeleted(w.id));
const layoutOverrides = computeAutoLayout(live);
// Carry the live measured/grown parent sizes from the existing
// store into the rebuild. buildNodesAndEdges runs an auto-rescue
// pass on each child to detach orphans whose stored relative
@ -873,7 +871,7 @@ export const useCanvasStore = create<CanvasState>((set, get) => ({
}
}
const { nodes, edges } = buildNodesAndEdges(
workspaces,
live,
layoutOverrides,
currentParentSizes,
);

View File

@ -1,34 +1,19 @@
/**
* Transient "recently deleted" map used by canvas.ts to short-circuit
* the hydrate-races-cascade-delete window described in #2069.
* Transient "recently deleted" map keyed by workspace id.
*
* Problem
* -------
* `removeSubtree(rootId)` drops a parent + descendants locally after
* `DELETE /workspaces/:id?confirm=true` returns 200. `hydrate()` is
* server-authoritative and rebuilds the entire node array from whatever
* `/workspaces` returns. If a `GET /workspaces` was IN-FLIGHT before the
* DELETE completed, its response (still containing the deleted subtree)
* can land AFTER our local `removeSubtree`, hydrate the store with the
* stale snapshot, and re-introduce the deleted nodes on the canvas.
 * `removeSubtree` calls `markDeleted(ids)` on every removal; `hydrate`
 * calls `wasRecentlyDeleted(id)` to filter out incoming workspaces
 * whose ids match a fresh tombstone — which prevents an in-flight
 * GET /workspaces from resurrecting just-deleted nodes via hydrate.
*
* Fix
* ---
* `removeSubtree` calls `markDeleted(ids)` to record a tombstone for every
* removed id. `hydrate` calls `wasRecentlyDeleted(id)` to filter out any
* incoming workspace whose id matches a fresh tombstone. After
* `TOMBSTONE_TTL_MS`, the entry expires and a legitimately-recreated id
* (template re-import, undo, manual recreate) flows through normally.
*
 * GC happens lazily at every read AND at write time so the map stays
 * bounded — no separate timer / interval / unmount plumbing.
*
 * Module-level (not store state) so it doesn't trigger React Flow
 * re-renders and isn't part of the public store surface. The store is a
 * singleton, so module identity equals store identity for this purpose.
* TTL is shared with the WS-fallback poll cadence so a single
* round-trip is covered. Module-level (not store state) so it doesn't
* trigger React Flow re-renders. (#2069)
*/
const TOMBSTONE_TTL_MS = 10_000; // matches the 10s WS-fallback poll cadence
import { FALLBACK_POLL_MS } from "./socket";
const TOMBSTONE_TTL_MS = FALLBACK_POLL_MS;
const tombstones = new Map<string, number>();

View File

@ -63,7 +63,7 @@ export class RehydrateDedup {
* network truly is down. The dedup gate inside rehydrate() collapses
* this against the post-onopen rehydrate, so reconnect doesn't pay
* for a duplicate fetch. */
const FALLBACK_POLL_MS = 10_000;
export const FALLBACK_POLL_MS = 10_000;
class ReconnectingSocket {
private ws: WebSocket | null = null;