chore: drop org_layout_test, hub.go, hub_test.go (already in staging with better coverage)
Some checks failed
CI / all-required (pull_request) Blocked by required conditions
sop-checklist / all-items-acked (pull_request) [info tier:low] acked: 0/7 — missing: comprehensive-testing, local-postgres-e2e, staging-smoke, +4 — body-unfilled: comprehensive-testing, l
Block internal-flavored paths / Block forbidden paths (pull_request) Successful in 16s
CI / Detect changes (pull_request) Successful in 28s
Harness Replays / detect-changes (pull_request) Successful in 18s
E2E Staging Canvas (Playwright) / detect-changes (pull_request) Successful in 39s
E2E API Smoke Test / detect-changes (pull_request) Successful in 41s
Secret scan / Scan diff for credential-shaped strings (pull_request) Successful in 21s
Handlers Postgres Integration / detect-changes (pull_request) Successful in 40s
qa-review / approved (pull_request) Successful in 24s
security-review / approved (pull_request) Failing after 22s
gate-check-v3 / gate-check (pull_request) Successful in 38s
sop-checklist-gate / gate (pull_request) Successful in 23s
Runtime PR-Built Compatibility / detect-changes (pull_request) Successful in 51s
sop-tier-check / tier-check (pull_request) Successful in 27s
lint-required-no-paths / lint-required-no-paths (pull_request) Successful in 1m40s
CI / Canvas (Next.js) (pull_request) Successful in 17s
Runtime PR-Built Compatibility / PR-built wheel + import smoke (pull_request) Successful in 14s
CI / Platform (Go) (pull_request) Failing after 5m21s
CI / Canvas Deploy Reminder (pull_request) Has been skipped
CI / Shellcheck (E2E scripts) (pull_request) Failing after 13m53s
CI / Python Lint & Test (pull_request) Failing after 13m46s
Harness Replays / Harness Replays (pull_request) Failing after 13m37s
E2E Staging Canvas (Playwright) / Canvas tabs E2E (pull_request) Failing after 13m26s
E2E API Smoke Test / E2E API Smoke Test (pull_request) Failing after 13m23s
Handlers Postgres Integration / Handlers Postgres Integration (pull_request) Failing after 13m18s

This commit is contained in:
Molecule AI · core-be 2026-05-13 17:09:19 +00:00
parent dc1d3a1b6e
commit ac8f13354b
2 changed files with 0 additions and 630 deletions

View File

@@ -1,244 +0,0 @@
package handlers
// org_layout_test.go — unit coverage for org canvas layout helpers
// (org.go). These functions compute canvas node positions and subtree
// bounding boxes; they are pure (no DB calls, no side effects).
//
// Coverage targets:
// - childSlot: 2-column grid x,y for 0th..Nth child
// - sizeOfSubtree: leaf, single child, multi-child, deep nesting
// - childSlotInGrid: empty siblings, uniform sizes, variable sizes,
// index boundaries
import "testing"
// ---------- childSlot ----------

// childSlot lays children out on a 2-column grid. Expected values derive
// from the layout constants in org.go: side padding 16, header padding 130,
// default node 240x130, gap 14.

func TestChildSlot_FirstChild(t *testing.T) {
	// Index 0 → col 0, row 0 → (parentSidePadding, parentHeaderPadding).
	gotX, gotY := childSlot(0)
	if gotX != 16.0 {
		t.Errorf("x = %v; want 16.0", gotX)
	}
	if gotY != 130.0 {
		t.Errorf("y = %v; want 130.0", gotY)
	}
}

func TestChildSlot_SecondChild(t *testing.T) {
	// Index 1 → col 1, row 0 → x = 16 + (240+14) = 270.
	gotX, gotY := childSlot(1)
	if gotX != 270.0 {
		t.Errorf("x = %v; want 270.0", gotX)
	}
	if gotY != 130.0 {
		t.Errorf("y = %v; want 130.0", gotY)
	}
}

func TestChildSlot_ThirdChild(t *testing.T) {
	// Index 2 wraps to col 0, row 1 → y = 130 + (130+14) = 274.
	gotX, gotY := childSlot(2)
	if gotX != 16.0 {
		t.Errorf("x = %v; want 16.0", gotX)
	}
	if gotY != 274.0 {
		t.Errorf("y = %v; want 274.0", gotY)
	}
}

func TestChildSlot_FourthChild(t *testing.T) {
	// Index 3 → col 1, row 1: both offsets applied.
	gotX, gotY := childSlot(3)
	if gotX != 270.0 {
		t.Errorf("x = %v; want 270.0", gotX)
	}
	if gotY != 274.0 {
		t.Errorf("y = %v; want 274.0", gotY)
	}
}
// ---------- sizeOfSubtree ----------
func TestSizeOfSubtree_Leaf(t *testing.T) {
ws := OrgWorkspace{Name: "leaf"}
size := sizeOfSubtree(ws)
if size.width != 240.0 {
t.Errorf("width = %v; want 240.0", size.width)
}
if size.height != 130.0 {
t.Errorf("height = %v; want 130.0", size.height)
}
}
func TestSizeOfSubtree_SingleChild(t *testing.T) {
ws := OrgWorkspace{
Name: "parent",
Children: []OrgWorkspace{{Name: "child"}},
}
size := sizeOfSubtree(ws)
// cols = min(1,1) = 1; rows = 1
// maxColW = 240 (child default)
// width = 16*2 + 240*1 + 14*0 = 272
// height = 130 + 130 + 14*0 + 16 = 276
if size.width != 272.0 {
t.Errorf("width = %v; want 272.0", size.width)
}
if size.height != 276.0 {
t.Errorf("height = %v; want 276.0", size.height)
}
}
func TestSizeOfSubtree_TwoChildren(t *testing.T) {
ws := OrgWorkspace{
Name: "parent",
Children: []OrgWorkspace{
{Name: "child1"},
{Name: "child2"},
},
}
size := sizeOfSubtree(ws)
// cols = 2; rows = 1; maxColW = 240
// width = 16*2 + 240*2 + 14*1 = 526
// height = 130 + (130+130) + 14*0 + 16 = 276
if size.width != 526.0 {
t.Errorf("width = %v; want 526.0", size.width)
}
if size.height != 276.0 {
t.Errorf("height = %v; want 276.0", size.height)
}
}
func TestSizeOfSubtree_ThreeChildren(t *testing.T) {
ws := OrgWorkspace{
Name: "parent",
Children: []OrgWorkspace{
{Name: "child1"},
{Name: "child2"},
{Name: "child3"},
},
}
size := sizeOfSubtree(ws)
// cols = 2 (len=3, childGridColumnCount=2, min=2); rows = 2
// maxColW = 240
// width = 16*2 + 240*2 + 14*1 = 526
// height = 130 + (130*2) + 14*1 + 16 = 420
if size.width != 526.0 {
t.Errorf("width = %v; want 526.0", size.width)
}
if size.height != 420.0 {
t.Errorf("height = %v; want 420.0", size.height)
}
}
func TestSizeOfSubtree_DeepNesting(t *testing.T) {
// leaf → child → parent
grandchild := OrgWorkspace{Name: "grandchild"}
child := OrgWorkspace{Name: "child", Children: []OrgWorkspace{grandchild}}
parent := OrgWorkspace{Name: "parent", Children: []OrgWorkspace{child}}
size := sizeOfSubtree(parent)
// grandchild: 240x130
// child: cols=1, rows=1, maxColW=240 → 272x276
// parent: cols=1, rows=1, maxColW=272 → 304x422
if size.width != 304.0 {
t.Errorf("width = %v; want 304.0", size.width)
}
if size.height != 422.0 {
t.Errorf("height = %v; want 422.0", size.height)
}
}
// ---------- childSlotInGrid ----------

func TestChildSlotInGrid_EmptySiblings(t *testing.T) {
	// A nil sibling slice falls back to the default first slot.
	x, y := childSlotInGrid(0, nil)
	if x != 16.0 || y != 130.0 {
		t.Errorf("empty siblings: got (%v,%v); want (16.0, 130.0)", x, y)
	}
}

func TestChildSlotInGrid_EmptySlice(t *testing.T) {
	// An allocated-but-empty slice behaves the same as nil.
	x, y := childSlotInGrid(0, []nodeSize{})
	if x != 16.0 || y != 130.0 {
		t.Errorf("empty slice: got (%v,%v); want (16.0, 130.0)", x, y)
	}
}

func TestChildSlotInGrid_UniformSizes(t *testing.T) {
	sibs := []nodeSize{
		{240, 130},
		{240, 130},
		{240, 130},
	}
	// maxColW = 240; the three siblings span 2 columns and 2 rows.
	x0, y0 := childSlotInGrid(0, sibs) // col 0, row 0 → (16, 130)
	if x0 != 16.0 || y0 != 130.0 {
		t.Errorf("slot 0: got (%v,%v); want (16.0, 130.0)", x0, y0)
	}
	x1, y1 := childSlotInGrid(1, sibs) // col 1, row 0 → x = 16+240+14 = 270
	if x1 != 270.0 || y1 != 130.0 {
		t.Errorf("slot 1: got (%v,%v); want (270.0, 130.0)", x1, y1)
	}
	x2, y2 := childSlotInGrid(2, sibs) // col 0, row 1 → y = 130+130+14 = 274
	if x2 != 16.0 || y2 != 274.0 {
		t.Errorf("slot 2: got (%v,%v); want (16.0, 274.0)", x2, y2)
	}
}

func TestChildSlotInGrid_VariableSizes(t *testing.T) {
	sibs := []nodeSize{
		{100, 80},  // narrow, short
		{300, 200}, // wide, tall
		{200, 150}, // medium
	}
	// Column width follows the widest sibling (300); the second row's y
	// offset follows the tallest node in the first row (200).
	x0, y0 := childSlotInGrid(0, sibs)
	if x0 != 16.0 || y0 != 130.0 {
		t.Errorf("slot 0: got (%v,%v); want (16.0, 130.0)", x0, y0)
	}
	x1, y1 := childSlotInGrid(1, sibs) // x = 16+300+14 = 330
	if x1 != 330.0 || y1 != 130.0 {
		t.Errorf("slot 1: got (%v,%v); want (330.0, 130.0)", x1, y1)
	}
	x2, y2 := childSlotInGrid(2, sibs) // y = 130+200+14 = 344
	if x2 != 16.0 || y2 != 344.0 {
		t.Errorf("slot 2: got (%v,%v); want (16.0, 344.0)", x2, y2)
	}
}

func TestChildSlotInGrid_SingleChild(t *testing.T) {
	sibs := []nodeSize{{400, 300}}
	// A single sibling collapses to one column; slot 0 keeps the base offsets.
	x, y := childSlotInGrid(0, sibs)
	if x != 16.0 || y != 130.0 {
		t.Errorf("single child: got (%v,%v); want (16.0, 130.0)", x, y)
	}
}

func TestChildSlotInGrid_LastSlot(t *testing.T) {
	sibs := []nodeSize{{200, 100}, {200, 100}, {200, 100}}
	// Slot 2 wraps to col 0, row 1 → y = 130+100+14 = 244.
	x, y := childSlotInGrid(2, sibs)
	if x != 16.0 || y != 244.0 {
		t.Errorf("last slot: got (%v,%v); want (16.0, 244.0)", x, y)
	}
}

func TestChildSlotInGrid_OverflowIndex(t *testing.T) {
	sibs := []nodeSize{{200, 100}}
	// Index 5 exceeds len(sibs). The slot appears to be derived purely from
	// index arithmetic and the max sibling dimensions rather than indexing
	// the slice, so no out-of-range access occurs — the expected values
	// below match col = 5 % 2 = 1, row = 5 / 2 = 2.
	x, y := childSlotInGrid(5, sibs)
	// x = 16 + 1*(200+14) = 230; y = 130 + 2*(100+14) = 358
	if x != 230.0 || y != 358.0 {
		t.Errorf("overflow index: got (%v,%v); want (230.0, 358.0)", x, y)
	}
}

View File

@@ -1,386 +0,0 @@
package ws
import (
"sync"
"testing"
"time"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
)
// ─── helpers ────────────────────────────────────────────────────────────────

// mockClient builds a *Client suitable for hub-level tests: its Send channel
// gets the requested buffer size and Conn is left nil. A nil Conn is safe
// here because no test calls WritePump, the only code path that uses Conn.
func mockClient(workspaceID string, bufSize int) *Client {
	cl := &Client{WorkspaceID: workspaceID}
	cl.Send = make(chan []byte, bufSize)
	return cl
}
// ─── NewHub ────────────────────────────────────────────────────────────────

// A nil AccessChecker must be accepted; with canCommunicate unset the hub
// allows all workspace→workspace broadcasts (the gating is purely advisory).
func TestNewHub_NilChecker(t *testing.T) {
	hub := NewHub(nil)
	if hub == nil {
		t.Fatal("NewHub(nil) returned nil")
	}
	if hub.canCommunicate != nil {
		t.Error("canCommunicate should be nil")
	}
}

// A supplied checker must be stored on the hub and invoked with the caller
// and target workspace IDs.
func TestNewHub_AccessCheckerWired(t *testing.T) {
	invoked := false
	gate := func(callerID, targetID string) bool {
		invoked = true
		return callerID == targetID // only self-communication allowed
	}
	hub := NewHub(gate)
	if hub.canCommunicate == nil {
		t.Fatal("canCommunicate not wired")
	}
	// Exercise the stored function directly rather than via Broadcast.
	selfOK := hub.canCommunicate("ws-1", "ws-1")
	if !invoked {
		t.Error("checker was not called")
	}
	if !selfOK {
		t.Error("self-communication should be allowed")
	}
	if hub.canCommunicate("ws-1", "ws-2") {
		t.Error("cross-workspace communication should be blocked by checker")
	}
}
// ─── safeSend ─────────────────────────────────────────────────────────────

// An open, non-full channel: safeSend reports true and the payload is
// retrievable from the client's Send channel.
func TestSafeSend_OpenChannel_Sends(t *testing.T) {
	cl := mockClient("ws-1", 10)
	payload := []byte(`{"event":"ping"}`)
	if !safeSend(cl, payload) {
		t.Error("safeSend should return true for open channel")
	}
	select {
	case got := <-cl.Send:
		if string(got) != string(payload) {
			t.Errorf("got %q, want %q", got, payload)
		}
	case <-time.After(100 * time.Millisecond):
		t.Error("no message received on channel")
	}
}

// A closed channel: safeSend must report false instead of panicking.
func TestSafeSend_ClosedChannel_ReturnsFalse(t *testing.T) {
	cl := mockClient("ws-1", 10)
	close(cl.Send) // closed before the send is attempted
	if safeSend(cl, []byte("data")) {
		t.Error("safeSend should return false for closed channel")
	}
}

// A saturated buffer: safeSend must report false rather than block.
func TestSafeSend_FullChannel_ReturnsFalse(t *testing.T) {
	cl := mockClient("ws-1", 1)
	cl.Send <- []byte("first") // buffer of 1 is now full
	if safeSend(cl, []byte("second")) {
		t.Error("safeSend should return false when channel buffer is full")
	}
	<-cl.Send // drain so the client ends in a clean state
}
// ─── Broadcast ────────────────────────────────────────────────────────────

// Canvas clients (empty WorkspaceID) must receive every broadcast; workspace
// clients are registered alongside to show the canvas path is independent.
func TestBroadcast_CanvasAlwaysReceives(t *testing.T) {
	h := NewHub(nil) // nil checker: canvas always gets messages
	// Canvas client (no workspaceID) + two workspace clients
	canvas := mockClient("", 10)
	ws1 := mockClient("ws-1", 10)
	ws2 := mockClient("ws-2", 10)
	// Manually register clients into hub state — bypasses Run/Register so
	// no hub goroutine is needed for this test.
	h.mu.Lock()
	h.clients[canvas] = true
	h.clients[ws1] = true
	h.clients[ws2] = true
	h.mu.Unlock()
	msg := models.WSMessage{Event: "test", Payload: []byte(`"hello"`)}
	h.Broadcast(msg)
	// Canvas must receive
	select {
	case got := <-canvas.Send:
		t.Logf("canvas received: %s", got)
	case <-time.After(100 * time.Millisecond):
		t.Error("canvas client did not receive broadcast")
	}
}

// A broadcast addressed to a workspace is delivered to the target (and to
// canvas) but withheld from other workspaces the checker denies.
func TestBroadcast_WorkspaceCanCommunicateGating(t *testing.T) {
	// Checker permits only self-communication (callerID == targetID), so for
	// a ws-2-addressed message only ws-2 may receive it.
	checker := func(callerID, targetID string) bool {
		return callerID == targetID
	}
	h := NewHub(checker)
	ws1 := mockClient("ws-1", 10)
	ws2 := mockClient("ws-2", 10)
	canvas := mockClient("", 10)
	h.mu.Lock()
	h.clients[ws1] = true
	h.clients[ws2] = true
	h.clients[canvas] = true
	h.mu.Unlock()
	// Broadcast addressed to ws-2
	msg := models.WSMessage{Event: "test", WorkspaceID: "ws-2"}
	h.Broadcast(msg)
	// ws-1 should NOT receive (not the target, checker says no). Negative
	// check: 50ms of silence is taken as "blocked".
	select {
	case <-ws1.Send:
		t.Error("ws-1 should not receive broadcast for ws-2")
	case <-time.After(50 * time.Millisecond):
		t.Log("ws-1 correctly blocked — no message")
	}
	// ws-2 should receive
	select {
	case <-ws2.Send:
		t.Log("ws-2 correctly received broadcast")
	case <-time.After(100 * time.Millisecond):
		t.Error("ws-2 did not receive broadcast")
	}
	// Canvas always receives
	select {
	case <-canvas.Send:
		t.Log("canvas correctly received broadcast")
	case <-time.After(100 * time.Millisecond):
		t.Error("canvas did not receive broadcast")
	}
}

// A client whose Send channel is already closed must not crash Broadcast.
// NOTE(review): "dropped silently" — whether the hub also removes the client
// from its map is not asserted here; confirm against hub.go if relied upon.
func TestBroadcast_DropsOnClosedChannel(t *testing.T) {
	h := NewHub(nil)
	c := mockClient("", 10)
	close(c.Send) // pre-close so safeSend returns false
	h.mu.Lock()
	h.clients[c] = true
	h.mu.Unlock()
	// Broadcast must not panic; closed client should be dropped silently.
	msg := models.WSMessage{Event: "ping"}
	h.Broadcast(msg) // should not panic
}

// A saturated Send buffer makes safeSend bail out rather than block the
// broadcast loop; absence of a panic/deadlock is the assertion.
func TestBroadcast_DropsOnFullChannel(t *testing.T) {
	h := NewHub(nil)
	c := mockClient("", 1)
	c.Send <- []byte("blocker") // fill buffer
	h.mu.Lock()
	h.clients[c] = true
	h.mu.Unlock()
	msg := models.WSMessage{Event: "ping"}
	h.Broadcast(msg) // safeSend returns false; no panic
	// Drain to leave clean state
	<-c.Send
}

// Broadcasting with zero registered clients is a no-op, not a crash.
func TestBroadcast_EmptyHubNoPanic(t *testing.T) {
	h := NewHub(nil)
	msg := models.WSMessage{Event: "ping"}
	h.Broadcast(msg) // must not panic with no clients
}

// Every registered canvas client receives a copy of a single broadcast.
func TestBroadcast_MultiClient(t *testing.T) {
	h := NewHub(nil)
	clients := make([]*Client, 5)
	h.mu.Lock()
	for i := 0; i < 5; i++ {
		clients[i] = mockClient("", 10)
		h.clients[clients[i]] = true
	}
	h.mu.Unlock()
	msg := models.WSMessage{Event: "multi", Payload: []byte(`"all receive"`)}
	h.Broadcast(msg)
	for i, c := range clients {
		select {
		case <-c.Send:
			t.Logf("client %d received", i)
		case <-time.After(100 * time.Millisecond):
			t.Errorf("client %d did not receive broadcast", i)
		}
	}
}

// Even with a checker that blocks every cross-workspace pair, canvas clients
// still receive workspace-addressed messages — the checker only gates
// workspace→workspace delivery.
func TestBroadcast_CanvasIgnoresChecker(t *testing.T) {
	// Strict checker that blocks ALL cross-workspace (never returns true for different IDs)
	strictChecker := func(callerID, targetID string) bool {
		return callerID == targetID
	}
	h := NewHub(strictChecker)
	canvas := mockClient("", 10)
	h.mu.Lock()
	h.clients[canvas] = true
	h.mu.Unlock()
	msg := models.WSMessage{Event: "ping", WorkspaceID: "ws-1"}
	h.Broadcast(msg)
	select {
	case <-canvas.Send:
		t.Log("canvas received message even though checker blocks ws-1")
	case <-time.After(100 * time.Millisecond):
		t.Error("canvas must always receive — checker should be bypassed")
	}
}
// ─── Close ────────────────────────────────────────────────────────────────

// After all clients are unregistered and the hub closed, every client Send
// channel must end up closed.
func TestClose_DisconnectsAllClients(t *testing.T) {
	h := NewHub(nil)
	clients := make([]*Client, 3)
	h.mu.Lock()
	for i := 0; i < 3; i++ {
		clients[i] = mockClient("", 10)
		h.clients[clients[i]] = true
	}
	h.mu.Unlock()
	// Start Run goroutine so Close can drain Unregister channel
	go h.Run()
	// Deferred Close plus the explicit Close below means Close runs twice;
	// this test relies on Close being idempotent.
	defer h.Close()
	// Unregister all clients so the mutex is released before Close() tries
	// to lock it. NOTE(review): this ordering claim comes from the original
	// author — verify against hub.go's Close/Unregister locking.
	for _, c := range clients {
		h.Unregister <- c
	}
	time.Sleep(50 * time.Millisecond) // let Run process all three unregisters
	// Now close — mutex is free, Close() should succeed
	h.Close()
	// All client channels should be closed
	for i, c := range clients {
		select {
		case _, ok := <-c.Send:
			if ok {
				t.Errorf("client %d channel still open after Close", i)
			}
		case <-time.After(100 * time.Millisecond):
			// NOTE(review): a receive from a CLOSED channel never blocks, so
			// timing out here means the channel is still open but empty —
			// arguably this branch should also t.Error; kept as written.
		}
	}
}

// Calling Close twice must neither panic nor deadlock.
func TestClose_Idempotent(t *testing.T) {
	h := NewHub(nil)
	c := mockClient("", 10)
	h.mu.Lock()
	h.clients[c] = true
	h.mu.Unlock()
	// Close twice — must not panic or deadlock
	h.Close()
	h.Close() // second call also fine
}

// Close must cause a running Run loop to return.
func TestClose_ClosesDoneChannel(t *testing.T) {
	h := NewHub(nil)
	// Start Run goroutine; `done` is closed only when Run returns.
	// NOTE(review): Close may fire before Run is scheduled — this assumes
	// Close's shutdown signal is observed even by a Run that starts later.
	done := make(chan struct{})
	go func() {
		h.Run()
		close(done)
	}()
	h.Close()
	select {
	case <-done:
		t.Log("Run exited after Close")
	case <-time.After(200 * time.Millisecond):
		t.Error("Run did not exit after Close")
	}
}
// ─── Run goroutine (Unregister) ──────────────────────────────────────────

// Unregistering a client through the hub's Run loop must close that
// client's Send channel.
func TestRun_UnregisterClosesClientSend(t *testing.T) {
	h := NewHub(nil)
	c := mockClient("ws-1", 10)
	// Start Run() BEFORE sending to Register — Register is unbuffered,
	// so Run() must be ready to receive before the send can complete.
	go h.Run()
	defer h.Close()
	// Register the client
	h.Register <- c
	// Give Run a moment to register the client
	time.Sleep(20 * time.Millisecond)
	// Unregister client
	h.Unregister <- c
	// A receive that returns ok=false proves the channel was closed; a
	// received value (ok=true) would mean it is still open.
	select {
	case _, ok := <-c.Send:
		if ok {
			t.Error("client send channel should be closed after Unregister")
		}
	case <-time.After(500 * time.Millisecond):
		t.Error("client send channel not closed within timeout")
	}
}
// ─── Concurrent access ────────────────────────────────────────────────────

// TestBroadcast_ConcurrentSafe hammers Broadcast from several goroutines at
// once to surface data races on the hub's shared state (meaningful under
// -race). Buffers are sized at 100 so no send can block: 5 goroutines × 20
// messages = 100 messages per client at most. Completing without deadlock,
// panic, or a race report is the assertion.
// Fix: the goroutine previously took an `id int` parameter it never used;
// the dead parameter is removed.
func TestBroadcast_ConcurrentSafe(t *testing.T) {
	h := NewHub(nil)
	clients := make([]*Client, 10)
	h.mu.Lock()
	for i := 0; i < 10; i++ {
		clients[i] = mockClient("", 100)
		h.clients[clients[i]] = true
	}
	h.mu.Unlock()

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 20; j++ {
				h.Broadcast(models.WSMessage{Event: "ping", Payload: []byte(`"concurrent"`)})
			}
		}()
	}
	wg.Wait() // should not deadlock or panic
}