fix(canvas): tombstone deleted ids so in-flight hydrate can't resurrect them (#2069)

Closes #2069. removeSubtree dropped a parent + descendants locally
after DELETE returned 200, but a GET /workspaces request that was
IN-FLIGHT before the DELETE completed could land AFTER and hydrate
the store with a stale snapshot — re-introducing the deleted nodes
on the canvas until the next 10s fallback poll corrected it.

New module canvas/src/store/deleteTombstones.ts holds a transient
process-lifetime Map<id, deletedAt>. removeSubtree calls
markDeleted(removedIds); hydrate calls wasRecentlyDeleted(id) to
filter the incoming workspaces. TTL is 10s — matches the WS-fallback
poll cadence so a single round-trip is covered, after which a
legitimately re-imported id flows through normally.

GC happens lazily at every read AND at write time so the map stays
bounded — no separate timer / interval / unmount plumbing.

Tests:
- canvas/src/store/__tests__/deleteTombstones.test.ts: 7 cases
  covering immediate flag, never-marked, TTL boundary (9999ms vs
  10001ms), GC-on-read, GC-on-write, re-mark resets timestamp,
  iterable input.
- canvas/src/store/__tests__/canvas.test.ts: end-to-end "hydrate
  cannot resurrect ids that removeSubtree just dropped (#2069)"
  exercises the full chain at the store level.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
rabbitblood 2026-04-26 13:48:15 -07:00
parent 263012249c
commit 7bb0bc39a2
4 changed files with 192 additions and 0 deletions

View File

@ -6,6 +6,7 @@ global.fetch = vi.fn(() =>
);
import { useCanvasStore, summarizeWorkspaceCapabilities } from "../canvas";
import { __resetTombstonesForTest } from "../deleteTombstones";
import type { WorkspaceData, WSMessage } from "../socket";
// Helper to build a WorkspaceData object with sensible defaults
@ -52,6 +53,10 @@ beforeEach(() => {
searchOpen: false,
viewport: { x: 0, y: 0, zoom: 1 },
});
// Tombstones leak across tests because the module-level map is
// process-lifetime by design. Reset between tests so a delete in one
// test doesn't shadow a hydrate in the next.
__resetTombstonesForTest();
vi.clearAllMocks();
});
@ -546,6 +551,32 @@ describe("removeSubtree", () => {
expect(["root", "mid", "leaf", "sibling"]).not.toContain(e.target);
}
});
// #2069 regression lock: a `GET /workspaces` already in flight when the
// DELETE resolves can land AFTER removeSubtree and hand hydrate a stale
// snapshot that still contains the deleted subtree. The tombstone path
// (deleteTombstones.ts) must make hydrate skip every id deleted within
// the last 10s — exercised here end-to-end at the store level.
it("hydrate cannot resurrect ids that removeSubtree just dropped (#2069)", () => {
useCanvasStore.getState().removeSubtree("root");
const afterDelete = useCanvasStore.getState().nodes.map((n) => n.id).sort();
expect(afterDelete).toEqual(["unrelated"]);
// Replay the stale in-flight response landing after the delete: it
// carries every original workspace, deleted subtree included.
const staleSnapshot = [
makeWS({ id: "root" }),
makeWS({ id: "mid", parent_id: "root" }),
makeWS({ id: "leaf", parent_id: "mid" }),
makeWS({ id: "sibling", parent_id: "root" }),
makeWS({ id: "unrelated" }),
];
useCanvasStore.getState().hydrate(staleSnapshot);
// The tombstoned ids must stay gone; only `unrelated` may survive.
const afterHydrate = useCanvasStore.getState().nodes.map((n) => n.id).sort();
expect(afterHydrate).toEqual(["unrelated"]);
});
});
// ---------- isDescendant ----------

View File

@ -0,0 +1,82 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import {
__resetTombstonesForTest,
__tombstoneCountForTest,
markDeleted,
wasRecentlyDeleted,
} from "../deleteTombstones";
// The module pins its TTL at 10s. Freeze the clock so both expiry paths
// (GC-on-read and GC-on-write) can be driven deterministically instead
// of sleeping through real time.
describe("deleteTombstones (#2069)", () => {
beforeEach(() => {
__resetTombstonesForTest();
vi.useFakeTimers();
vi.setSystemTime(new Date("2026-04-26T20:00:00Z"));
});
afterEach(() => {
vi.useRealTimers();
__resetTombstonesForTest();
});
it("flags ids as recently deleted immediately after markDeleted", () => {
markDeleted(["root-1", "child-a"]);
for (const id of ["root-1", "child-a"]) {
expect(wasRecentlyDeleted(id)).toBe(true);
}
});
it("returns false for ids that were never marked", () => {
markDeleted(["root-1"]);
expect(wasRecentlyDeleted("never-deleted")).toBe(false);
});
it("expires tombstones after the 10s TTL", () => {
markDeleted(["root-1"]);
expect(wasRecentlyDeleted("root-1")).toBe(true);
// 1ms shy of the boundary: still tombstoned.
vi.advanceTimersByTime(9_999);
expect(wasRecentlyDeleted("root-1")).toBe(true);
// 10_001ms total: past the TTL, the id flows through again.
vi.advanceTimersByTime(2);
expect(wasRecentlyDeleted("root-1")).toBe(false);
});
it("evicts expired entries on read so the map stays bounded", () => {
markDeleted(["root-1"]);
expect(__tombstoneCountForTest()).toBe(1);
vi.advanceTimersByTime(11_000);
// A plain read is enough to evict — there is no background GC pass.
wasRecentlyDeleted("root-1");
expect(__tombstoneCountForTest()).toBe(0);
});
it("evicts expired entries on write so the map stays bounded across long sessions", () => {
markDeleted(["root-1"]);
expect(__tombstoneCountForTest()).toBe(1);
vi.advanceTimersByTime(11_000);
// Write-path GC runs before the insert: stale root-1 goes out,
// root-2 comes in — net size 1, not 2.
markDeleted(["root-2"]);
expect(__tombstoneCountForTest()).toBe(1);
expect(wasRecentlyDeleted("root-1")).toBe(false);
expect(wasRecentlyDeleted("root-2")).toBe(true);
});
it("resets the deletedAt timestamp when the same id is marked again", () => {
markDeleted(["root-1"]);
vi.advanceTimersByTime(8_000);
// Re-marking an already-tombstoned id restarts its TTL from now.
markDeleted(["root-1"]);
vi.advanceTimersByTime(8_000);
// 16s past the FIRST mark but only 8s past the second: still live.
expect(wasRecentlyDeleted("root-1")).toBe(true);
});
it("accepts any iterable, not just arrays", () => {
const ids = new Set(["root-1", "root-2"]);
markDeleted(ids);
expect(wasRecentlyDeleted("root-1")).toBe(true);
expect(wasRecentlyDeleted("root-2")).toBe(true);
});
});

View File

@ -9,6 +9,7 @@ import { api } from "@/lib/api";
import { showToast } from "@/components/Toaster";
import type { WorkspaceData, WSMessage } from "./socket";
import { handleCanvasEvent } from "./canvas-events";
import { markDeleted, wasRecentlyDeleted } from "./deleteTombstones";
import {
buildNodesAndEdges,
computeAutoLayout,
@ -832,6 +833,11 @@ export const useCanvasStore = create<CanvasState>((set, get) => ({
}
}
}
// Mark every removed id with a tombstone so an in-flight GET
// /workspaces response that lands AFTER this can't resurrect them
// via hydrate (#2069). Tombstones expire after the WS-fallback
// poll cadence so legitimate re-imports flow normally.
markDeleted(removed);
set({
nodes: nodes.filter((n) => !removed.has(n.id)),
edges: edges.filter((e) => !removed.has(e.source) && !removed.has(e.target)),
@ -843,6 +849,9 @@ export const useCanvasStore = create<CanvasState>((set, get) => ({
},
hydrate: (workspaces: WorkspaceData[]) => {
// Filter out workspaces whose ids match a fresh tombstone — they
// were just deleted locally and this snapshot is stale (#2069).
workspaces = workspaces.filter((w) => !wasRecentlyDeleted(w.id));
const layoutOverrides = computeAutoLayout(workspaces);
// Carry the live measured/grown parent sizes from the existing
// store into the rebuild. buildNodesAndEdges runs an auto-rescue

View File

@ -0,0 +1,70 @@
/**
* Transient "recently deleted" map used by canvas.ts to short-circuit
* the hydrate-races-cascade-delete window described in #2069.
*
* Problem
* -------
* `removeSubtree(rootId)` drops a parent + descendants locally after
* `DELETE /workspaces/:id?confirm=true` returns 200. `hydrate()` is
* server-authoritative and rebuilds the entire node array from whatever
* `/workspaces` returns. If a `GET /workspaces` was IN-FLIGHT before the
* DELETE completed, its response (still containing the deleted subtree)
* can land AFTER our local `removeSubtree`, hydrate the store with the
* stale snapshot, and re-introduce the deleted nodes on the canvas.
*
* Fix
* ---
* `removeSubtree` calls `markDeleted(ids)` to record a tombstone for every
* removed id. `hydrate` calls `wasRecentlyDeleted(id)` to filter out any
* incoming workspace whose id matches a fresh tombstone. After
* `TOMBSTONE_TTL_MS`, the entry expires and a legitimately-recreated id
* (template re-import, undo, manual recreate) flows through normally.
*
* GC happens lazily at every read AND at write time so the map stays
 * bounded — no separate timer / interval / unmount plumbing.
*
* Module-level (not store state) so it doesn't trigger React Flow
* re-renders and isn't part of the public store surface. The store is a
 * singleton, so module identity is equivalent to store identity for this purpose.
*/
// Tombstones live exactly as long as the 10s WS-fallback poll cadence:
// one in-flight GET round-trip is covered, after which a legitimately
// re-created id hydrates normally.
const TOMBSTONE_TTL_MS = 10_000;

// id -> Date.now() at deletion time. Module-level by design (see the
// header comment above).
const tombstones = new Map<string, number>();

/**
 * Drop every entry whose TTL has elapsed as of `now`. Deleting during
 * Map iteration is safe per the ES spec, so a single pass suffices.
 */
function gcExpired(now: number): void {
for (const [id, stampedAt] of tombstones) {
if (now - stampedAt >= TOMBSTONE_TTL_MS) tombstones.delete(id);
}
}

/**
 * Record a fresh tombstone for each id; re-marking an id restarts its
 * TTL. Runs a lazy GC first so the map stays bounded in long sessions.
 */
export function markDeleted(ids: Iterable<string>): void {
const now = Date.now();
gcExpired(now);
for (const id of ids) tombstones.set(id, now);
}

/**
 * True iff `id` was marked deleted within the last TOMBSTONE_TTL_MS.
 * An expired entry is evicted on the spot (lazy GC-on-read).
 */
export function wasRecentlyDeleted(id: string): boolean {
const stampedAt = tombstones.get(id);
if (stampedAt === undefined) return false;
const expired = Date.now() - stampedAt >= TOMBSTONE_TTL_MS;
if (expired) tombstones.delete(id);
return !expired;
}

/** Test-only: wipe all tombstones between tests. Production code must
 * never call this — the map is intentionally process-lifetime. */
export function __resetTombstonesForTest(): void {
tombstones.clear();
}

/** Test-only: the current number of live tombstone entries. */
export function __tombstoneCountForTest(): number {
return tombstones.size;
}