From 9ceda9d81f18efc76e44de994daa5320284c7815 Mon Sep 17 00:00:00 2001 From: Hongming Wang Date: Tue, 5 May 2026 19:05:03 -0700 Subject: [PATCH 1/2] refactor(events): migrate 18 files to typed EventType constants (RFC #2945 PR-B-1) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mechanical migration of bare event-name strings in BroadcastOnly / RecordAndBroadcast call sites to the typed constants from internal/events/types.go (RFC #2945 PR-B). Wire format unchanged (both shapes serialize to identical WSMessage.Event literals); pinned by TestAllEventTypes_IsSnapshot in #2965. Migrated (18 files, scope: handlers/, scheduler/, registry/, bundle/, channels/): - handlers/{approvals,a2a_proxy_helpers,a2a_queue,activity,agent, delegation,external_rotate,org_import,registry,workspace, workspace_bootstrap,workspace_crud,workspace_provision_shared, workspace_restart}.go - channels/manager.go (caught by hostile-reviewer pass — initial scope missed channels/, found via grep on the post-migration tree) - scheduler/scheduler.go - registry/provisiontimeout.go - bundle/importer.go Hostile self-review (3 weakest spots, addressed) ------------------------------------------------ 1. Missed call sites — initial scope omitted channels/. Post-migration `grep -rEn 'BroadcastOnly\([^,]+,[^,]*"[A-Z_]+"|RecordAndBroadcast\([^,]+,[^,]*"[A-Z_]+"' internal/` found 2 stragglers in channels/manager.go. Migrated. Final grep on the same pattern returns only the docstring example in types.go (intentional). 2. gofmt drift — auto-import injection produced non-canonical import ordering. `gofmt -w` applied ONLY to the 18 modified files (NOT the whole tree, to avoid sweeping unrelated pre-existing drift into this PR's diff). Three pre-existing un-gofmt'd files in handlers/ (a2a_proxy.go, a2a_proxy_test.go, a2a_queue_test.go) left as-is — they're unchanged by this PR and their drift predates it. 3. 
Wire format — paranoia check: do the constants serialize to the exact strings consumers (canvas TS, hermes plugin, anything parsing WSMessage.Event) expect? Yes. Pinned by the snapshot test. The migration is name-only; not a single character of wire output changes. Verified - go build ./... clean - go vet ./internal/... clean - gofmt -l on the 5 migrated package dirs: only pre-existing files - Full tests: handlers/, channels/, scheduler/, registry/, events/, bundle/ all green (5 ok, 0 fail) PR-B-2 (canvas TS mirror + cross-language parity gate) remains as the final piece of RFC #2945 PR-B. Tracked separately so this PR stays mechanical + reviewable. Refs RFC #2945, PR #2965 (PR-B types). Co-Authored-By: Claude Opus 4.7 (1M context) --- workspace-server/internal/bundle/importer.go | 4 +- workspace-server/internal/channels/manager.go | 21 ++++----- .../internal/handlers/a2a_proxy_helpers.go | 16 ++++--- .../internal/handlers/a2a_queue.go | 3 +- .../internal/handlers/activity.go | 4 +- workspace-server/internal/handlers/agent.go | 30 ++++++------- .../internal/handlers/approvals.go | 4 +- .../internal/handlers/delegation.go | 16 +++---- .../internal/handlers/external_rotate.go | 3 +- .../internal/handlers/org_import.go | 6 ++- .../internal/handlers/registry.go | 20 ++++----- .../internal/handlers/workspace.go | 43 +++++++++---------- .../internal/handlers/workspace_bootstrap.go | 3 +- .../internal/handlers/workspace_crud.go | 8 ++-- .../handlers/workspace_provision_shared.go | 3 +- .../internal/handlers/workspace_restart.go | 11 ++--- .../internal/registry/provisiontimeout.go | 3 +- .../internal/scheduler/scheduler.go | 5 ++- 18 files changed, 108 insertions(+), 95 deletions(-) diff --git a/workspace-server/internal/bundle/importer.go b/workspace-server/internal/bundle/importer.go index 43ec618c..f61c7a98 100644 --- a/workspace-server/internal/bundle/importer.go +++ b/workspace-server/internal/bundle/importer.go @@ -51,7 +51,7 @@ func Import( return result } - _ = 
broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", wsID, map[string]interface{}{ + _ = broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisioning), wsID, map[string]interface{}{ "name": b.Name, "tier": b.Tier, "source_bundle_id": b.ID, @@ -142,7 +142,7 @@ func markFailed(ctx context.Context, wsID string, broadcaster *events.Broadcaste db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, last_sample_error = $2, updated_at = now() WHERE id = $3`, models.StatusFailed, msg, wsID) - broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISION_FAILED", wsID, map[string]interface{}{ + broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisionFailed), wsID, map[string]interface{}{ "error": msg, }) } diff --git a/workspace-server/internal/channels/manager.go b/workspace-server/internal/channels/manager.go index 0991d520..3085de35 100644 --- a/workspace-server/internal/channels/manager.go +++ b/workspace-server/internal/channels/manager.go @@ -10,6 +10,7 @@ import ( "time" "github.com/Molecule-AI/molecule-monorepo/platform/internal/db" + "github.com/Molecule-AI/molecule-monorepo/platform/internal/events" ) const ( @@ -304,14 +305,14 @@ func (m *Manager) HandleInbound(ctx context.Context, ch ChannelRow, msg *Inbound "parts": []map[string]interface{}{{"kind": "text", "text": msg.Text}}, }, "metadata": map[string]interface{}{ - "source": ch.ChannelType, - "channel_id": ch.ID, - "chat_id": msg.ChatID, - "user_id": msg.UserID, - "username": msg.Username, - "message_id": msg.MessageID, - "history": history, - "extra": msg.Metadata, + "source": ch.ChannelType, + "channel_id": ch.ID, + "chat_id": msg.ChatID, + "user_id": msg.UserID, + "username": msg.Username, + "message_id": msg.MessageID, + "history": history, + "extra": msg.Metadata, }, }, }) @@ -383,7 +384,7 @@ func (m *Manager) HandleInbound(ctx context.Context, ch ChannelRow, msg *Inbound // Broadcast event if m.broadcaster != nil { - m.broadcaster.RecordAndBroadcast(ctx, 
"CHANNEL_MESSAGE", ch.WorkspaceID, map[string]interface{}{ + m.broadcaster.RecordAndBroadcast(ctx, string(events.EventChannelMessage), ch.WorkspaceID, map[string]interface{}{ "channel_id": ch.ID, "channel_type": ch.ChannelType, "username": msg.Username, @@ -427,7 +428,7 @@ func (m *Manager) SendOutbound(ctx context.Context, channelID string, text strin } if m.broadcaster != nil { - m.broadcaster.RecordAndBroadcast(ctx, "CHANNEL_MESSAGE", ch.WorkspaceID, map[string]interface{}{ + m.broadcaster.RecordAndBroadcast(ctx, string(events.EventChannelMessage), ch.WorkspaceID, map[string]interface{}{ "channel_id": ch.ID, "channel_type": ch.ChannelType, "direction": "outbound", diff --git a/workspace-server/internal/handlers/a2a_proxy_helpers.go b/workspace-server/internal/handlers/a2a_proxy_helpers.go index 10e9efc6..a0c7e0c6 100644 --- a/workspace-server/internal/handlers/a2a_proxy_helpers.go +++ b/workspace-server/internal/handlers/a2a_proxy_helpers.go @@ -14,10 +14,12 @@ import ( "time" "github.com/Molecule-AI/molecule-monorepo/platform/internal/db" + "github.com/Molecule-AI/molecule-monorepo/platform/internal/events" "github.com/Molecule-AI/molecule-monorepo/platform/internal/models" "github.com/Molecule-AI/molecule-monorepo/platform/internal/wsauth" "github.com/gin-gonic/gin" ) + // proxyDispatchBuildError is a sentinel wrapper for failures inside // http.NewRequestWithContext. handleA2ADispatchError unwraps it to emit the // "failed to create proxy request" 500 instead of the standard 502/503 paths. 
@@ -90,10 +92,10 @@ func (h *WorkspaceHandler) handleA2ADispatchError(ctx context.Context, workspace Status: http.StatusServiceUnavailable, Headers: map[string]string{"Retry-After": strconv.Itoa(busyRetryAfterSeconds)}, Response: gin.H{ - "error": "workspace agent busy — adapter handles retry (native_session)", - "busy": true, - "retry_after": busyRetryAfterSeconds, - "native_session": true, + "error": "workspace agent busy — adapter handles retry (native_session)", + "busy": true, + "retry_after": busyRetryAfterSeconds, + "native_session": true, }, } } @@ -149,7 +151,7 @@ func (h *WorkspaceHandler) handleA2ADispatchError(ctx context.Context, workspace // Provisioner selection (mutually exclusive in production): // - h.provisioner != nil → local Docker deployment; IsRunning does docker inspect. // - h.cpProv != nil → SaaS / EC2 deployment; IsRunning calls CP's -// /cp/workspaces/:id/status to read the EC2 state. +// /cp/workspaces/:id/status to read the EC2 state. // // Pre-fix this function ONLY consulted h.provisioner — for SaaS tenants // (h.provisioner=nil, h.cpProv=set) it short-circuited to false on every @@ -191,7 +193,7 @@ func (h *WorkspaceHandler) maybeMarkContainerDead(ctx context.Context, workspace log.Printf("ProxyA2A: failed to mark workspace %s offline: %v", workspaceID, err) } db.ClearWorkspaceKeys(ctx, workspaceID) - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_OFFLINE", workspaceID, map[string]interface{}{}) + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOffline), workspaceID, map[string]interface{}{}) go h.RestartByID(workspaceID) return true } @@ -272,7 +274,7 @@ func (h *WorkspaceHandler) logA2ASuccess(ctx context.Context, workspaceID, calle }(ctx) if callerID == "" && statusCode < 400 { - h.broadcaster.BroadcastOnly(workspaceID, "A2A_RESPONSE", map[string]interface{}{ + h.broadcaster.BroadcastOnly(workspaceID, string(events.EventA2AResponse), map[string]interface{}{ "response_body": json.RawMessage(respBody), "method": 
a2aMethod, "duration_ms": durationMs, diff --git a/workspace-server/internal/handlers/a2a_queue.go b/workspace-server/internal/handlers/a2a_queue.go index 6026b8f6..dd012e02 100644 --- a/workspace-server/internal/handlers/a2a_queue.go +++ b/workspace-server/internal/handlers/a2a_queue.go @@ -21,6 +21,7 @@ import ( "time" "github.com/Molecule-AI/molecule-monorepo/platform/internal/db" + "github.com/Molecule-AI/molecule-monorepo/platform/internal/events" ) // extractIdempotencyKey pulls params.message.messageId out of an A2A JSON-RPC @@ -435,7 +436,7 @@ func (h *WorkspaceHandler) stitchDrainResponseToDelegation(ctx context.Context, // "⏸ queued" line to "✓ completed" in real time. Without this the // transition only surfaces after the user reloads or polls activity. if h.broadcaster != nil { - h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_COMPLETE", sourceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationComplete), sourceID, map[string]interface{}{ "delegation_id": delegationID, "target_id": targetID, "response_preview": truncate(responseText, 200), diff --git a/workspace-server/internal/handlers/activity.go b/workspace-server/internal/handlers/activity.go index db63b155..cb533935 100644 --- a/workspace-server/internal/handlers/activity.go +++ b/workspace-server/internal/handlers/activity.go @@ -55,7 +55,7 @@ func NewActivityHandler(b *events.Broadcaster) *ActivityHandler { func (h *ActivityHandler) List(c *gin.Context) { workspaceID := c.Param("id") activityType := c.Query("type") - source := c.Query("source") // "canvas" = source_id IS NULL, "agent" = source_id IS NOT NULL + source := c.Query("source") // "canvas" = source_id IS NULL, "agent" = source_id IS NOT NULL peerID := c.Query("peer_id") // optional UUID — restrict to rows where this peer is sender OR target limitStr := c.DefaultQuery("limit", "100") sinceSecsStr := c.Query("since_secs") @@ -650,7 +650,7 @@ func LogActivity(ctx context.Context, broadcaster 
events.EventEmitter, params Ac if respStr != nil { payload["response_body"] = json.RawMessage(respJSON) } - broadcaster.BroadcastOnly(params.WorkspaceID, "ACTIVITY_LOGGED", payload) + broadcaster.BroadcastOnly(params.WorkspaceID, string(events.EventActivityLogged), payload) } } diff --git a/workspace-server/internal/handlers/agent.go b/workspace-server/internal/handlers/agent.go index 9daa0927..f98afd93 100644 --- a/workspace-server/internal/handlers/agent.go +++ b/workspace-server/internal/handlers/agent.go @@ -69,7 +69,7 @@ func (h *AgentHandler) Assign(c *gin.Context) { return } - h.broadcaster.RecordAndBroadcast(ctx, "AGENT_ASSIGNED", workspaceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventAgentAssigned), workspaceID, map[string]interface{}{ "agent_id": agentID, "model": body.Model, }) @@ -118,7 +118,7 @@ func (h *AgentHandler) Replace(c *gin.Context) { return } - h.broadcaster.RecordAndBroadcast(ctx, "AGENT_REPLACED", workspaceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventAgentReplaced), workspaceID, map[string]interface{}{ "agent_id": agentID, "model": body.Model, "old_model": oldModel, @@ -148,7 +148,7 @@ func (h *AgentHandler) Remove(c *gin.Context) { return } - h.broadcaster.RecordAndBroadcast(ctx, "AGENT_REMOVED", workspaceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventAgentRemoved), workspaceID, map[string]interface{}{ "agent_id": agentID, "model": model, }) @@ -215,21 +215,21 @@ func (h *AgentHandler) Move(c *gin.Context) { } // Broadcast on both workspaces - h.broadcaster.RecordAndBroadcast(ctx, "AGENT_MOVED", sourceID, map[string]interface{}{ - "agent_id": agentID, - "model": model, - "target_workspace_id": body.TargetWorkspaceID, + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventAgentMoved), sourceID, map[string]interface{}{ + "agent_id": agentID, + "model": model, + "target_workspace_id": body.TargetWorkspaceID, }) - 
h.broadcaster.RecordAndBroadcast(ctx, "AGENT_MOVED", body.TargetWorkspaceID, map[string]interface{}{ - "agent_id": agentID, - "model": model, - "source_workspace_id": sourceID, + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventAgentMoved), body.TargetWorkspaceID, map[string]interface{}{ + "agent_id": agentID, + "model": model, + "source_workspace_id": sourceID, }) c.JSON(http.StatusOK, gin.H{ - "agent_id": agentID, - "model": model, - "from_workspace": sourceID, - "to_workspace": body.TargetWorkspaceID, + "agent_id": agentID, + "model": model, + "from_workspace": sourceID, + "to_workspace": body.TargetWorkspaceID, }) } diff --git a/workspace-server/internal/handlers/approvals.go b/workspace-server/internal/handlers/approvals.go index 4b394c7e..1f091afa 100644 --- a/workspace-server/internal/handlers/approvals.go +++ b/workspace-server/internal/handlers/approvals.go @@ -51,7 +51,7 @@ func (h *ApprovalsHandler) Create(c *gin.Context) { return } - h.broadcaster.RecordAndBroadcast(ctx, "APPROVAL_REQUESTED", workspaceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventApprovalRequested), workspaceID, map[string]interface{}{ "approval_id": approvalID, "action": body.Action, "reason": body.Reason, @@ -62,7 +62,7 @@ func (h *ApprovalsHandler) Create(c *gin.Context) { var parentID *string db.DB.QueryRowContext(ctx, `SELECT parent_id FROM workspaces WHERE id = $1`, workspaceID).Scan(&parentID) if parentID != nil { - h.broadcaster.RecordAndBroadcast(ctx, "APPROVAL_ESCALATED", *parentID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventApprovalEscalated), *parentID, map[string]interface{}{ "approval_id": approvalID, "from_workspace_id": workspaceID, "action": body.Action, diff --git a/workspace-server/internal/handlers/delegation.go b/workspace-server/internal/handlers/delegation.go index d9b66884..247fc35f 100644 --- a/workspace-server/internal/handlers/delegation.go +++ 
b/workspace-server/internal/handlers/delegation.go @@ -164,7 +164,7 @@ func (h *DelegationHandler) Delegate(c *gin.Context) { go h.executeDelegation(sourceID, body.TargetID, delegationID, a2aBody) // Broadcast event so canvas shows delegation in real-time - h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_SENT", sourceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationSent), sourceID, map[string]interface{}{ "delegation_id": delegationID, "target_id": body.TargetID, "task_preview": truncate(body.Task, 100), @@ -317,7 +317,7 @@ func (h *DelegationHandler) executeDelegation(sourceID, targetID, delegationID s // Update status: pending → dispatched h.updateDelegationStatus(sourceID, delegationID, "dispatched", "") - h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_STATUS", sourceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationStatus), sourceID, map[string]interface{}{ "delegation_id": delegationID, "target_id": targetID, "status": "dispatched", }) @@ -352,7 +352,7 @@ func (h *DelegationHandler) executeDelegation(sourceID, targetID, delegationID s log.Printf("Delegation %s: failed to insert error log: %v", delegationID, err) } - h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_FAILED", sourceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationFailed), sourceID, map[string]interface{}{ "delegation_id": delegationID, "target_id": targetID, "error": proxyErr.Error(), }) // RFC #2829 PR-2 result-push (see UpdateStatus for rationale). 
@@ -388,7 +388,7 @@ func (h *DelegationHandler) executeDelegation(sourceID, targetID, delegationID s `, sourceID, sourceID, targetID, "Delegation queued — target at capacity", string(queuedJSON)); err != nil { log.Printf("Delegation %s: failed to insert queued log: %v", delegationID, err) } - h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_STATUS", sourceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationStatus), sourceID, map[string]interface{}{ "delegation_id": delegationID, "target_id": targetID, "status": "queued", }) return @@ -420,7 +420,7 @@ func (h *DelegationHandler) executeDelegation(sourceID, targetID, delegationID s // delegation_ledger_integration_test.go. recordLedgerStatus(ctx, delegationID, "completed", "", responseText) h.updateDelegationStatus(sourceID, delegationID, "completed", "") - h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_COMPLETE", sourceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationComplete), sourceID, map[string]interface{}{ "delegation_id": delegationID, "target_id": targetID, "response_preview": truncate(responseText, 200), @@ -503,7 +503,7 @@ func (h *DelegationHandler) Record(c *gin.Context) { recordLedgerInsert(ctx, sourceID, body.TargetID, body.DelegationID, body.Task, "") recordLedgerStatus(ctx, body.DelegationID, "dispatched", "", "") - h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_SENT", sourceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationSent), sourceID, map[string]interface{}{ "delegation_id": body.DelegationID, "target_id": body.TargetID, "task_preview": truncate(body.Task, 100), @@ -558,7 +558,7 @@ func (h *DelegationHandler) UpdateStatus(c *gin.Context) { `, sourceID, sourceID, "Delegation completed ("+truncate(body.ResponsePreview, 80)+")", string(respJSON)); err != nil { log.Printf("Delegation UpdateStatus: result insert failed for %s: %v", delegationID, err) } - 
h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_COMPLETE", sourceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationComplete), sourceID, map[string]interface{}{ "delegation_id": delegationID, "response_preview": truncate(body.ResponsePreview, 200), }) @@ -570,7 +570,7 @@ func (h *DelegationHandler) UpdateStatus(c *gin.Context) { // the result instead of holding open an HTTP connection. pushDelegationResultToInbox(ctx, sourceID, delegationID, "completed", body.ResponsePreview, "") } else { - h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_FAILED", sourceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationFailed), sourceID, map[string]interface{}{ "delegation_id": delegationID, "error": body.Error, }) diff --git a/workspace-server/internal/handlers/external_rotate.go b/workspace-server/internal/handlers/external_rotate.go index 887bddd5..ce029958 100644 --- a/workspace-server/internal/handlers/external_rotate.go +++ b/workspace-server/internal/handlers/external_rotate.go @@ -8,6 +8,7 @@ import ( "net/http" "github.com/Molecule-AI/molecule-monorepo/platform/internal/db" + "github.com/Molecule-AI/molecule-monorepo/platform/internal/events" "github.com/Molecule-AI/molecule-monorepo/platform/internal/wsauth" "github.com/gin-gonic/gin" ) @@ -100,7 +101,7 @@ func (h *WorkspaceHandler) RotateExternalCredentials(c *gin.Context) { // see when credentials were rotated. No PII; the token plaintext // is NOT logged. 
if h.broadcaster != nil { - h.broadcaster.RecordAndBroadcast(ctx, "EXTERNAL_CREDENTIALS_ROTATED", id, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventExternalCredentialsRotated), id, map[string]interface{}{ "workspace_id": id, }) } diff --git a/workspace-server/internal/handlers/org_import.go b/workspace-server/internal/handlers/org_import.go index 3dfe2fbd..60cac720 100644 --- a/workspace-server/internal/handlers/org_import.go +++ b/workspace-server/internal/handlers/org_import.go @@ -20,12 +20,14 @@ import ( "github.com/Molecule-AI/molecule-monorepo/platform/internal/channels" "github.com/Molecule-AI/molecule-monorepo/platform/internal/crypto" "github.com/Molecule-AI/molecule-monorepo/platform/internal/db" + "github.com/Molecule-AI/molecule-monorepo/platform/internal/events" "github.com/Molecule-AI/molecule-monorepo/platform/internal/models" "github.com/Molecule-AI/molecule-monorepo/platform/internal/provisioner" "github.com/Molecule-AI/molecule-monorepo/platform/internal/provlog" "github.com/Molecule-AI/molecule-monorepo/platform/internal/scheduler" "github.com/google/uuid" ) + // createWorkspaceTree recursively materialises an OrgWorkspace (and its // descendants) into the workspaces + canvas_layouts tables and kicks off // Docker provisioning. absX/absY are THIS workspace's absolute canvas @@ -227,7 +229,7 @@ func (h *OrgHandler) createWorkspaceTree(ws OrgWorkspace, parentID *string, absX if parentID != nil { payload["parent_id"] = *parentID } - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", id, payload) + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisioning), id, payload) // Seed initial memories from workspace config or defaults (issue #1050). 
// Per-workspace initial_memories override defaults; if workspace has none, @@ -243,7 +245,7 @@ func (h *OrgHandler) createWorkspaceTree(ws OrgWorkspace, parentID *string, absX if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, url = $2 WHERE id = $3`, models.StatusOnline, ws.URL, id); err != nil { log.Printf("Org import: external workspace status update failed for %s: %v", ws.Name, err) } - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", id, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOnline), id, map[string]interface{}{ "name": ws.Name, "external": true, }) } else if h.workspace.HasProvisioner() { diff --git a/workspace-server/internal/handlers/registry.go b/workspace-server/internal/handlers/registry.go index 8960170c..84333985 100644 --- a/workspace-server/internal/handlers/registry.go +++ b/workspace-server/internal/handlers/registry.go @@ -414,7 +414,7 @@ func (h *RegistryHandler) Register(c *gin.Context) { } // Broadcast WORKSPACE_ONLINE - if err := h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", payload.ID, map[string]interface{}{ + if err := h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOnline), payload.ID, map[string]interface{}{ "url": cachedURL, "agent_card": payload.AgentCard, "delivery_mode": effectiveMode, @@ -572,7 +572,7 @@ func (h *RegistryHandler) Heartbeat(c *gin.Context) { // Broadcast current task update only when it changed (avoid spamming on every heartbeat) if payload.CurrentTask != prevTask { - h.broadcaster.BroadcastOnly(payload.WorkspaceID, "TASK_UPDATED", map[string]interface{}{ + h.broadcaster.BroadcastOnly(payload.WorkspaceID, string(events.EventTaskUpdated), map[string]interface{}{ "current_task": payload.CurrentTask, "active_tasks": payload.ActiveTasks, }) @@ -593,7 +593,7 @@ func (h *RegistryHandler) Heartbeat(c *gin.Context) { // so per-heartbeat cost is one in-memory channel send per active // SSE subscriber and one WS hub 
fan-out. At 30s heartbeat cadence // this is far below any noise floor on either path. - h.broadcaster.BroadcastOnly(payload.WorkspaceID, "WORKSPACE_HEARTBEAT", map[string]interface{}{ + h.broadcaster.BroadcastOnly(payload.WorkspaceID, string(events.EventWorkspaceHeartbeat), map[string]interface{}{ "active_tasks": payload.ActiveTasks, "uptime_seconds": payload.UptimeSeconds, }) @@ -678,7 +678,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea if err != nil { log.Printf("Heartbeat: failed to mark %s degraded (wedged): %v", payload.WorkspaceID, err) } - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_DEGRADED", payload.WorkspaceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceDegraded), payload.WorkspaceID, map[string]interface{}{ "runtime_state": "wedged", "sample_error": payload.SampleError, }) @@ -699,7 +699,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2`, models.StatusDegraded, payload.WorkspaceID); err != nil { log.Printf("Heartbeat: failed to mark %s degraded: %v", payload.WorkspaceID, err) } - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_DEGRADED", payload.WorkspaceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceDegraded), payload.WorkspaceID, map[string]interface{}{ "error_rate": payload.ErrorRate, "sample_error": payload.SampleError, }) @@ -718,7 +718,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2`, models.StatusOnline, payload.WorkspaceID); err != nil { log.Printf("Heartbeat: failed to recover %s to online: %v", payload.WorkspaceID, err) } - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", payload.WorkspaceID, 
map[string]interface{}{}) + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOnline), payload.WorkspaceID, map[string]interface{}{}) } // Recovery: if workspace was offline but is now sending heartbeats, bring it back online. @@ -728,7 +728,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2 AND status = 'offline'`, models.StatusOnline, payload.WorkspaceID); err != nil { log.Printf("Heartbeat: failed to recover %s from offline: %v", payload.WorkspaceID, err) } - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", payload.WorkspaceID, map[string]interface{}{}) + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOnline), payload.WorkspaceID, map[string]interface{}{}) } // Auto-recovery: if a workspace is marked "provisioning" but is actively sending @@ -743,7 +743,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea } else { log.Printf("Heartbeat: transitioned %s from provisioning to online (heartbeat received)", payload.WorkspaceID) } - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", payload.WorkspaceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOnline), payload.WorkspaceID, map[string]interface{}{ "recovered_from": currentStatus, }) } @@ -771,7 +771,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea } else { log.Printf("Heartbeat: transitioned %s from awaiting_agent to online (heartbeat received)", payload.WorkspaceID) } - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", payload.WorkspaceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOnline), payload.WorkspaceID, map[string]interface{}{ "recovered_from": currentStatus, }) } @@ -820,7 +820,7 @@ func (h *RegistryHandler) UpdateCard(c *gin.Context) { return } - 
h.broadcaster.RecordAndBroadcast(c.Request.Context(), "AGENT_CARD_UPDATED", payload.WorkspaceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(c.Request.Context(), string(events.EventAgentCardUpdated), payload.WorkspaceID, map[string]interface{}{ "agent_card": payload.AgentCard, }) diff --git a/workspace-server/internal/handlers/workspace.go b/workspace-server/internal/handlers/workspace.go index cf210342..a163cee9 100644 --- a/workspace-server/internal/handlers/workspace.go +++ b/workspace-server/internal/handlers/workspace.go @@ -112,7 +112,6 @@ func (h *WorkspaceHandler) SetCPProvisioner(cp provisioner.CPProvisionerAPI) { h.cpProv = cp } - // SetEnvMutators wires a provisionhook.Registry into the handler. Plugins // living in separate repos register on the same Registry instance during // boot (see cmd/server/main.go) and main.go calls this setter once before @@ -361,7 +360,7 @@ func (h *WorkspaceHandler) Create(c *gin.Context) { // populate the Runtime pill on the side panel immediately — without it // the node lives as "runtime: unknown" until something refetches the // workspace row (which nothing does during provisioning). 
- h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", id, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisioning), id, map[string]interface{}{ "name": payload.Name, "tier": payload.Tier, "runtime": payload.Runtime, @@ -388,7 +387,7 @@ func (h *WorkspaceHandler) Create(c *gin.Context) { if err := db.CacheURL(ctx, id, payload.URL); err != nil { log.Printf("External workspace: failed to cache URL for %s: %v", id, err) } - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", id, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOnline), id, map[string]interface{}{ "name": payload.Name, "external": true, }) } else { @@ -407,7 +406,7 @@ func (h *WorkspaceHandler) Create(c *gin.Context) { } else { connectionToken = tok } - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_AWAITING_AGENT", id, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceAwaitingAgent), id, map[string]interface{}{ "name": payload.Name, "external": true, }) } @@ -539,24 +538,24 @@ func scanWorkspaceRow(rows interface { } ws := map[string]interface{}{ - "id": id, - "name": name, - "tier": tier, - "status": status, - "url": url, - "parent_id": parentID, - "active_tasks": activeTasks, - "max_concurrent_tasks": maxConcurrentTasks, - "last_error_rate": errorRate, - "last_sample_error": sampleError, - "uptime_seconds": uptimeSeconds, - "current_task": currentTask, - "runtime": runtime, - "workspace_dir": nilIfEmpty(workspaceDir), - "monthly_spend": monthlySpend, - "x": x, - "y": y, - "collapsed": collapsed, + "id": id, + "name": name, + "tier": tier, + "status": status, + "url": url, + "parent_id": parentID, + "active_tasks": activeTasks, + "max_concurrent_tasks": maxConcurrentTasks, + "last_error_rate": errorRate, + "last_sample_error": sampleError, + "uptime_seconds": uptimeSeconds, + "current_task": currentTask, + "runtime": runtime, + "workspace_dir": 
nilIfEmpty(workspaceDir), + "monthly_spend": monthlySpend, + "x": x, + "y": y, + "collapsed": collapsed, } // budget_limit: nil when no limit set, int64 otherwise diff --git a/workspace-server/internal/handlers/workspace_bootstrap.go b/workspace-server/internal/handlers/workspace_bootstrap.go index 7c84473e..2928ffd0 100644 --- a/workspace-server/internal/handlers/workspace_bootstrap.go +++ b/workspace-server/internal/handlers/workspace_bootstrap.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/Molecule-AI/molecule-monorepo/platform/internal/db" + "github.com/Molecule-AI/molecule-monorepo/platform/internal/events" "github.com/Molecule-AI/molecule-monorepo/platform/internal/models" "github.com/gin-gonic/gin" ) @@ -85,7 +86,7 @@ func (h *WorkspaceHandler) BootstrapFailed(c *gin.Context) { return } - h.broadcaster.RecordAndBroadcast(c.Request.Context(), "WORKSPACE_PROVISION_FAILED", id, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(c.Request.Context(), string(events.EventWorkspaceProvisionFailed), id, map[string]interface{}{ "error": errMsg, "log_tail": tail, "source": "bootstrap_watcher", diff --git a/workspace-server/internal/handlers/workspace_crud.go b/workspace-server/internal/handlers/workspace_crud.go index 4e58804f..200356b1 100644 --- a/workspace-server/internal/handlers/workspace_crud.go +++ b/workspace-server/internal/handlers/workspace_crud.go @@ -16,12 +16,14 @@ import ( "time" "github.com/Molecule-AI/molecule-monorepo/platform/internal/db" + "github.com/Molecule-AI/molecule-monorepo/platform/internal/events" "github.com/Molecule-AI/molecule-monorepo/platform/internal/models" "github.com/Molecule-AI/molecule-monorepo/platform/internal/wsauth" "github.com/gin-gonic/gin" "github.com/google/uuid" "github.com/lib/pq" ) + // State handles GET /workspaces/:id/state — minimal status payload for // remote-agent polling (Phase 30.4). 
Returns `{status, paused, deleted, // workspace_id}` so a remote agent can detect pause/resume/delete @@ -380,7 +382,7 @@ func (h *WorkspaceHandler) Delete(c *gin.Context) { pq.Array(allIDs)); err != nil { log.Printf("Delete token revocation error for %s: %v", id, err) } -// #1027: cascade-disable all schedules for the deleted workspaces so + // #1027: cascade-disable all schedules for the deleted workspaces so // the scheduler never fires a cron into a removed container. if _, err := db.DB.ExecContext(ctx, `UPDATE workspace_schedules SET enabled = false, updated_at = now() @@ -466,14 +468,14 @@ func (h *WorkspaceHandler) Delete(c *gin.Context) { // leaving other WS clients ignorant of the cascade. The DB // row is already 'removed' so it's recoverable, but the // inconsistency is avoidable. - h.broadcaster.RecordAndBroadcast(cleanupCtx, "WORKSPACE_REMOVED", descID, map[string]interface{}{}) + h.broadcaster.RecordAndBroadcast(cleanupCtx, string(events.EventWorkspaceRemoved), descID, map[string]interface{}{}) } stopAndRemove(id) db.ClearWorkspaceKeys(cleanupCtx, id) restartStates.Delete(id) // #2269: same as descendants above - h.broadcaster.RecordAndBroadcast(cleanupCtx, "WORKSPACE_REMOVED", id, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(cleanupCtx, string(events.EventWorkspaceRemoved), id, map[string]interface{}{ "cascade_deleted": len(descendantIDs), }) diff --git a/workspace-server/internal/handlers/workspace_provision_shared.go b/workspace-server/internal/handlers/workspace_provision_shared.go index 00e00bd0..e879521a 100644 --- a/workspace-server/internal/handlers/workspace_provision_shared.go +++ b/workspace-server/internal/handlers/workspace_provision_shared.go @@ -41,6 +41,7 @@ import ( "path/filepath" "github.com/Molecule-AI/molecule-monorepo/platform/internal/db" + "github.com/Molecule-AI/molecule-monorepo/platform/internal/events" "github.com/Molecule-AI/molecule-monorepo/platform/internal/models" 
"github.com/Molecule-AI/molecule-monorepo/platform/internal/provisioner" "github.com/Molecule-AI/molecule-monorepo/platform/internal/wsauth" @@ -212,7 +213,7 @@ func (h *WorkspaceHandler) markProvisionFailed(ctx context.Context, workspaceID, } else if _, hasErr := extra["error"]; !hasErr { extra["error"] = msg } - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISION_FAILED", workspaceID, extra) + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisionFailed), workspaceID, extra) if _, dbErr := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $3, last_sample_error = $2, updated_at = now() WHERE id = $1`, workspaceID, msg, models.StatusFailed); dbErr != nil { diff --git a/workspace-server/internal/handlers/workspace_restart.go b/workspace-server/internal/handlers/workspace_restart.go index c5712be5..42b25f3a 100644 --- a/workspace-server/internal/handlers/workspace_restart.go +++ b/workspace-server/internal/handlers/workspace_restart.go @@ -11,6 +11,7 @@ import ( "time" "github.com/Molecule-AI/molecule-monorepo/platform/internal/db" + "github.com/Molecule-AI/molecule-monorepo/platform/internal/events" "github.com/Molecule-AI/molecule-monorepo/platform/internal/models" "github.com/Molecule-AI/molecule-monorepo/platform/internal/provlog" "github.com/gin-gonic/gin" @@ -147,7 +148,7 @@ func (h *WorkspaceHandler) Restart(c *gin.Context) { // Reset to provisioning db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, url = '', updated_at = now() WHERE id = $2`, models.StatusProvisioning, id) - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", id, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisioning), id, map[string]interface{}{ "name": wsName, "tier": tier, "runtime": containerRuntime, @@ -341,7 +342,7 @@ func (h *WorkspaceHandler) HibernateWorkspace(ctx context.Context, workspaceID s } db.ClearWorkspaceKeys(ctx, workspaceID) - h.broadcaster.RecordAndBroadcast(ctx, 
"WORKSPACE_HIBERNATED", workspaceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceHibernated), workspaceID, map[string]interface{}{ "name": wsName, "tier": tier, }) @@ -552,7 +553,7 @@ func (h *WorkspaceHandler) runRestartCycle(workspaceID string) { db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, url = '', updated_at = now() WHERE id = $2`, models.StatusProvisioning, workspaceID) - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", workspaceID, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisioning), workspaceID, map[string]interface{}{ "name": wsName, "tier": tier, "runtime": dbRuntime, }) @@ -640,7 +641,7 @@ func (h *WorkspaceHandler) Pause(c *gin.Context) { db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, url = '', updated_at = now() WHERE id = $2`, models.StatusPaused, ws.id) db.ClearWorkspaceKeys(ctx, ws.id) - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PAUSED", ws.id, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspacePaused), ws.id, map[string]interface{}{ "name": ws.name, }) } @@ -709,7 +710,7 @@ func (h *WorkspaceHandler) Resume(c *gin.Context) { for _, ws := range toResume { db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2`, models.StatusProvisioning, ws.id) - h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", ws.id, map[string]interface{}{ + h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisioning), ws.id, map[string]interface{}{ "name": ws.name, "tier": ws.tier, "runtime": ws.runtime, }) payload := models.CreateWorkspacePayload{Name: ws.name, Tier: ws.tier, Runtime: ws.runtime} diff --git a/workspace-server/internal/registry/provisiontimeout.go b/workspace-server/internal/registry/provisiontimeout.go index 1b35798e..46b9e157 100644 --- a/workspace-server/internal/registry/provisiontimeout.go +++ 
b/workspace-server/internal/registry/provisiontimeout.go @@ -8,6 +8,7 @@ import ( "time" "github.com/Molecule-AI/molecule-monorepo/platform/internal/db" + "github.com/Molecule-AI/molecule-monorepo/platform/internal/events" "github.com/Molecule-AI/molecule-monorepo/platform/internal/models" ) @@ -197,7 +198,7 @@ func sweepStuckProvisioning(ctx context.Context, emitter ProvisionTimeoutEmitter // A separate event type was considered but the UI reaction is // identical either way — operators who need to distinguish can // tell from the `source` payload field. - if emitErr := emitter.RecordAndBroadcast(ctx, "WORKSPACE_PROVISION_FAILED", c.id, map[string]interface{}{ + if emitErr := emitter.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisionFailed), c.id, map[string]interface{}{ "error": msg, "timeout_secs": timeoutSec, "runtime": c.runtime, diff --git a/workspace-server/internal/scheduler/scheduler.go b/workspace-server/internal/scheduler/scheduler.go index e098586d..a7a969ca 100644 --- a/workspace-server/internal/scheduler/scheduler.go +++ b/workspace-server/internal/scheduler/scheduler.go @@ -14,6 +14,7 @@ import ( cronlib "github.com/robfig/cron/v3" "github.com/Molecule-AI/molecule-monorepo/platform/internal/db" + "github.com/Molecule-AI/molecule-monorepo/platform/internal/events" "github.com/Molecule-AI/molecule-monorepo/platform/internal/metrics" "github.com/Molecule-AI/molecule-monorepo/platform/internal/supervised" ) @@ -541,7 +542,7 @@ func (s *Scheduler) fireSchedule(ctx context.Context, sched scheduleRow) { insertCancel() if s.broadcaster != nil { - s.broadcaster.RecordAndBroadcast(ctx, "CRON_EXECUTED", sched.WorkspaceID, map[string]interface{}{ + s.broadcaster.RecordAndBroadcast(ctx, string(events.EventCronExecuted), sched.WorkspaceID, map[string]interface{}{ "schedule_id": sched.ID, "schedule_name": sched.Name, "status": lastStatus, @@ -618,7 +619,7 @@ func (s *Scheduler) recordSkipped(ctx context.Context, sched scheduleRow, active skipInsCancel() 
if s.broadcaster != nil { - _ = s.broadcaster.RecordAndBroadcast(ctx, "CRON_SKIPPED", sched.WorkspaceID, map[string]interface{}{ + _ = s.broadcaster.RecordAndBroadcast(ctx, string(events.EventCronSkipped), sched.WorkspaceID, map[string]interface{}{ "schedule_id": sched.ID, "schedule_name": sched.Name, "reason": reason, From ab1acff2d294f2c4526bcdab52f0e6bc7501bc78 Mon Sep 17 00:00:00 2001 From: Hongming Wang Date: Tue, 5 May 2026 20:30:25 -0700 Subject: [PATCH 2/2] ux(canvas/files): drag-drop upload to target folder (#2999 PR-D) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit User asked for VSCode-style drag-drop upload (#2999): "drag local to upload to target folder just like vscode does". Today the only upload path is the toolbar's Upload button (folder picker). Drag-drop lets users grab files from Finder/Explorer and drop them directly on a specific subdirectory in the tree. 1. New `uploadDataTransferItems(items, targetDir)` in `useFilesApi` — walks the HTML5 DataTransferItemList via `webkitGetAsEntry()`, recursing folders to a flat (relativePath, file) list, then PUTs each via the existing /files/ endpoint. The walker (also exported via `__testables`) calls `readEntries()` in a loop until empty so multi-batch folders (browsers cap each call at ~100 entries) aren't silently truncated. 2. `uploadFiles` (folder-picker path) gained an optional `targetDir` parameter. Same prefixing semantics so future surfaces (e.g. an "upload here" toolbar button on a row) can reuse it. 3. `FileTree` directory rows gained `onDragOver` / `onDragEnter` / `onDragLeave` / `onDrop` handlers + a hover-target highlight (accent-tinted background + outline). dragLeave uses `currentTarget.contains(relatedTarget)` to suppress the flicker that fires when the cursor crosses any child of the row (icon, label, ✕ button) — without this the highlight strobes on every sub-element transition. 4. 
`FilesTab` wraps the tree column in an outer drop zone for "drop on root" — drops outside any specific subdir row land at root. The empty-state placeholder copy now includes a "drag files here to upload" hint when the active root is /configs (the only writable root today). 5. Both the row drop and the root drop are gated on `root === "/configs"` (the same gate that already blocks the toolbar's New / Upload / Clear). Other roots ignore the drag entirely (no highlight, no drop), so the user doesn't get a misleading drag affordance followed by a "switch root" toast. `dragDropUpload.test.tsx` (9 tests, two layers): Walker tests (pure function, no DOM): - `walkEntry` collects a single dropped file with correct relpath. - `walkEntry` walks a folder + preserves folder name in the path. - **Multi-batch loop**: a fake reader that emits two batches of 2 + an empty terminator must yield 4 files. A walker that called readEntries once would see only 2 — this is the load-bearing assertion against silent folder truncation. - Nested directories: outer/inner/file.md → "outer/inner/file.md". FileTree drag-drop wiring (DOM): - `dragover` on a directory row preventDefault's (load-bearing — without it the drop event never fires). - `drop` on a directory row fires `onDropToTarget(path, items)`. - `drop` on a FILE row does NOT fire (only directories are valid drop targets). - `drop` with no DataTransferItems does NOT fire (defensive guard against text-only drags). - `dragenter` adds the highlight class to the directory row. 1. The 1MB per-file size cap is inherited from the existing `uploadFiles`. A user dropping a 5MB skill bundle silently skips the file (the loop's `continue` on `file.size > 1_000_000`). Same behavior as the toolbar Upload, so consistent if not great. Surfacing skipped-files would be a UX improvement tracked separately — not load-bearing for this PR. 2. 
Drop-zone highlight on the column wrapper uses an outline that sits inside the column's overflow-y-auto scroll container. If the user drags onto a row that's mid-scroll, the highlight may clip slightly at the scroll boundary. Cosmetic only; the drop still works. 3. The `?root=` query is NOT passed on the underlying writeFile call (matches the existing uploadFiles behavior). On a backend without #2999 PR-A, this means uploads always land in /configs regardless of selected root — but we already gated drop on `root === "/configs"` so the practical effect is nil today. Once PR-A merges and the canvas threads ?root= through writes (separate follow-up), drops on /home etc. would be enableable by lifting the canDelete-style gate. - `npx tsc --noEmit` clean - 177/177 canvas tab tests pass - Manual on local dev: drag a file from Finder onto /configs/skills row → file appears under /configs/skills/. Drag a folder of 3 files onto root area → 3 files uploaded with folder structure preserved. Drag onto /home tree → no highlight, no drop. Refs #2999. Pairs with PR-A (backend EIC) — without PR-A the tree is empty on SaaS and there's nothing to drop ONTO; PR-D still works on self-hosted today. 
🤖 Generated with [Claude Code](https://claude.com/claude-code) --- canvas/src/components/tabs/FilesTab.tsx | 73 +++++- .../src/components/tabs/FilesTab/FileTree.tsx | 97 +++++++- .../__tests__/FileTreeContextMenu.test.tsx | 3 +- .../__tests__/dragDropUpload.test.tsx | 212 ++++++++++++++++++ .../components/tabs/FilesTab/useFilesApi.ts | 151 ++++++++++++- 5 files changed, 517 insertions(+), 19 deletions(-) create mode 100644 canvas/src/components/tabs/FilesTab/__tests__/dragDropUpload.test.tsx diff --git a/canvas/src/components/tabs/FilesTab.tsx b/canvas/src/components/tabs/FilesTab.tsx index 79059be5..e1ee3bfd 100644 --- a/canvas/src/components/tabs/FilesTab.tsx +++ b/canvas/src/components/tabs/FilesTab.tsx @@ -81,9 +81,33 @@ function PlatformOwnedFilesTab({ workspaceId }: { workspaceId: string }) { downloadFileByPath, downloadAllFiles, uploadFiles, + uploadDataTransferItems, deleteAllFiles, } = useFilesApi(workspaceId, root); + // PR-D: track whether the user is currently dragging files OVER + // the root area (not over a specific subdir row). Used to show + // the "Drop to upload to root" highlight on the tree column. + const [rootDragHover, setRootDragHover] = useState(false); + + const handleDropToTarget = ( + targetDir: string, + items: DataTransferItemList, + ) => { + // canDelete is the gate proxy — same constraint as the toolbar + // Upload button (today only /configs is writable from the canvas + // surface). Without this check, dropping on /home would post + // through /workspaces//files/, which the backend would + // reject only after an HTTP round-trip. Fail fast. + if (root !== "/configs") { + setError( + `Upload only allowed in /configs (current root: ${root}). 
Switch root or use Upload button.`, + ); + return; + } + void uploadDataTransferItems(items, targetDir); + }; + const tree = useMemo(() => buildTree(files), [files]); const openFile = async (path: string) => { @@ -224,8 +248,46 @@ function PlatformOwnedFilesTab({ workspaceId }: { workspaceId: string }) { )}
- {/* File tree */} -
+ {/* File tree column. PR-D: outer div is the drop zone for + "drop on root" — when the user drags into the column area + (not over a specific subdir row), the drop targets the + current root directory. Subdirectory rows in + stop propagation on their own drop event so a drop on + /configs/skills doesn't ALSO fire root-area drop. */} +
{ + // Only highlight + accept the drop when uploads are + // actually allowed for the current root. Without this + // check the user gets a misleading drag affordance, + // drops, then sees the toolbar's "switch root" toast — + // bad UX. + if (root !== "/configs") return; + e.preventDefault(); + e.dataTransfer.dropEffect = "copy"; + }} + onDragEnter={(e) => { + if (root !== "/configs") return; + e.preventDefault(); + setRootDragHover(true); + }} + onDragLeave={(e) => { + const next = e.relatedTarget as Node | null; + if (!next || !(e.currentTarget as HTMLElement).contains(next)) { + setRootDragHover(false); + } + }} + onDrop={(e) => { + if (root !== "/configs") return; + e.preventDefault(); + setRootDragHover(false); + if (e.dataTransfer.items?.length) { + handleDropToTarget("", e.dataTransfer.items); + } + }} + > {/* New file input */} {showNewFile && (
@@ -243,7 +305,11 @@ function PlatformOwnedFilesTab({ workspaceId }: { workspaceId: string }) { {files.length === 0 ? (
- No config files yet + {rootDragHover + ? "Drop to upload to root" + : root === "/configs" + ? "No config files yet — drag files here to upload" + : "No config files yet"}
) : ( {}} onDownload={downloadFileByPath} canDelete={root === "/configs"} + onDropToTarget={handleDropToTarget} expandedDirs={expandedDirs} onToggleDir={toggleDir} loadingDir={loadingDir} diff --git a/canvas/src/components/tabs/FilesTab/FileTree.tsx b/canvas/src/components/tabs/FilesTab/FileTree.tsx index 32d56ebe..50bc0760 100644 --- a/canvas/src/components/tabs/FilesTab/FileTree.tsx +++ b/canvas/src/components/tabs/FilesTab/FileTree.tsx @@ -14,16 +14,21 @@ interface TreeCallbacks { * context-menu item's `disabled` flag so the user gets the same * affordance as the toolbar (which gates Clear/New on /configs). */ canDelete: boolean; + /** PR-D: drop files/folders from the OS onto this row. targetDir + * is the directory path (relative to the active root) under which + * the dropped contents should land; "" means root. */ + onDropToTarget?: (targetDir: string, items: DataTransferItemList) => void; expandedDirs: Set; onToggleDir: (path: string) => void; loadingDir: string | null; } /** - * FileTree renders the workspace tree + owns the right-click - * context-menu state. Lifting the menu state to the tree (vs each - * row) means only one menu is open at a time — opening a new row's - * menu auto-closes the prior one. Same UX as VSCode / Theia. + * FileTree renders the workspace tree + owns the right-click context + * menu (PR-C) and the drop-target hover state (PR-D). Lifting the + * menu state here (vs each row) means only one menu open at a time — + * opening a new row's menu auto-closes the prior one. Same UX as + * VSCode / Theia. */ export function FileTree({ nodes, @@ -32,6 +37,7 @@ export function FileTree({ onDelete, onDownload, canDelete, + onDropToTarget, expandedDirs, onToggleDir, loadingDir, @@ -42,12 +48,17 @@ export function FileTree({ y: number; items: MenuItem[]; } | null>(null); + // PR-D: hover-target highlight state for drag-drop. Lifted next to + // the menu state so both shared-across-rows interactions live in + // one place. 
+ const [hoverDir, setHoverDir] = useState(null); const openContextMenu = (e: React.MouseEvent, node: TreeNode) => { e.preventDefault(); // Items composed per-row so the available actions reflect the - // node type (files get Download; directories don't have a - // useful per-tree download — the Export toolbar covers bulk). + // node type (files get Open + Download; directories get Delete + // only since "open a directory in the editor" doesn't apply + // and "Export folder" is the toolbar's job). const items: MenuItem[] = []; if (!node.isDir) { items.push({ @@ -76,12 +87,20 @@ export function FileTree({ // Single state lifted to the top-level tree; nested s // (rendered for expanded directories below) do NOT instantiate - // their own menus — they call the SAME openContextMenu via prop - // drilling. This keeps "only one menu open" the structural - // invariant rather than a render-order coincidence. + // their own menus or drop-targets — they call back via prop + // drilling. This keeps "only one menu open" + "only one drop + // target highlighted" as structural invariants rather than + // render-order coincidences. const childCallbacks: TreeCallbacks = { - selectedPath, onSelect, onDelete, onDownload, canDelete, - expandedDirs, onToggleDir, loadingDir, + selectedPath, + onSelect, + onDelete, + onDownload, + canDelete, + onDropToTarget, + expandedDirs, + onToggleDir, + loadingDir, }; return ( @@ -91,6 +110,8 @@ export function FileTree({ key={`${node.path}:${node.isDir ? 
"dir" : "file"}`} node={node} openContextMenu={openContextMenu} + hoverDir={hoverDir} + setHoverDir={setHoverDir} depth={depth} {...childCallbacks} /> @@ -114,28 +135,79 @@ function TreeItem({ onDelete, onDownload, canDelete, + onDropToTarget, expandedDirs, onToggleDir, loadingDir, depth, openContextMenu, + hoverDir, + setHoverDir, }: TreeCallbacks & { node: TreeNode; depth: number; openContextMenu: (e: React.MouseEvent, node: TreeNode) => void; + hoverDir: string | null; + setHoverDir: (p: string | null) => void; }) { const isSelected = selectedPath === node.path; const expanded = expandedDirs.has(node.path); const isLoading = loadingDir === node.path; + const isDropTarget = node.isDir && hoverDir === node.path; + + // PR-D drag handlers — only directory rows are valid drop targets + // (dropping a file ON another file is ambiguous; treat it as + // dropping in the parent dir, which the root area handles). When a + // drag enters a directory row, mark it the hover target. When the + // cursor leaves to a non-child element, clear it. drop fires the + // upload callback with the row's path. + const dragProps = node.isDir && onDropToTarget + ? { + onDragOver: (e: React.DragEvent) => { + // preventDefault is REQUIRED to opt this element into the + // drop target list — without it, browsers refuse to fire + // the drop event regardless of the drop handler. + e.preventDefault(); + e.dataTransfer.dropEffect = "copy"; + }, + onDragEnter: (e: React.DragEvent) => { + e.preventDefault(); + setHoverDir(node.path); + }, + onDragLeave: (e: React.DragEvent) => { + // Only clear hover when leaving to an element OUTSIDE this + // row — bare leave-events fire for every child crossed + // (the icon, the label, the ✕ button). Without the + // contains() check the highlight flickers. 
+ const next = e.relatedTarget as Node | null; + if (!next || !(e.currentTarget as HTMLElement).contains(next)) { + setHoverDir(null); + } + }, + onDrop: (e: React.DragEvent) => { + e.preventDefault(); + e.stopPropagation(); + setHoverDir(null); + if (e.dataTransfer.items?.length) { + onDropToTarget(node.path, e.dataTransfer.items); + } + }, + } + : {}; if (node.isDir) { return (
onToggleDir(node.path)} onContextMenu={(e) => openContextMenu(e, node)} + {...dragProps} > {isLoading ? "…" : expanded ? "▼" : "▶"} 📁 @@ -159,6 +231,7 @@ function TreeItem({ onDelete={onDelete} onDownload={onDownload} canDelete={canDelete} + onDropToTarget={onDropToTarget} expandedDirs={expandedDirs} onToggleDir={onToggleDir} loadingDir={loadingDir} diff --git a/canvas/src/components/tabs/FilesTab/__tests__/FileTreeContextMenu.test.tsx b/canvas/src/components/tabs/FilesTab/__tests__/FileTreeContextMenu.test.tsx index 73a4c4b1..059c49f7 100644 --- a/canvas/src/components/tabs/FilesTab/__tests__/FileTreeContextMenu.test.tsx +++ b/canvas/src/components/tabs/FilesTab/__tests__/FileTreeContextMenu.test.tsx @@ -28,12 +28,13 @@ import type { TreeNode } from "../tree"; afterEach(cleanup); -const file: TreeNode = { name: "config.yaml", path: "config.yaml", isDir: false, children: [] }; +const file: TreeNode = { name: "config.yaml", path: "config.yaml", isDir: false, children: [], size: 0 }; const dir: TreeNode = { name: "skills", path: "skills", isDir: true, children: [], + size: 0, }; function renderTree(props: Partial> = {}) { diff --git a/canvas/src/components/tabs/FilesTab/__tests__/dragDropUpload.test.tsx b/canvas/src/components/tabs/FilesTab/__tests__/dragDropUpload.test.tsx new file mode 100644 index 00000000..ee3cbd38 --- /dev/null +++ b/canvas/src/components/tabs/FilesTab/__tests__/dragDropUpload.test.tsx @@ -0,0 +1,212 @@ +// @vitest-environment jsdom +// +// Pins the drag-drop upload added in PR-D of issue #2999. +// Two layers of coverage: +// +// 1. The pure walker (collectFileEntries / walkEntry) — pins the +// recursion shape against silent folder truncation. Browsers +// return up to ~100 entries per readEntries() call; if the loop +// stops early, large folder uploads silently drop files. We +// simulate a multi-batch reader to discriminate. +// +// 2. 
FileTree directory-row drop handlers — pins that dragover/drop +// events fire onDropToTarget with the directory's path + the +// drop's DataTransferItemList. + +import { describe, it, expect, vi, afterEach } from "vitest"; +import { render, screen, cleanup, fireEvent } from "@testing-library/react"; +import React from "react"; +import { FileTree } from "../FileTree"; +import type { TreeNode } from "../tree"; +import { __testables } from "../useFilesApi"; + +afterEach(cleanup); + +// ---- Walker tests ---- + +/** + * Build a fake FileSystemEntry tree we can hand to walkEntry. The + * shape mimics what webkitGetAsEntry returns from a real OS drag — + * directory entries expose createReader, file entries expose file(). + */ +function fakeFileEntry(name: string, content = "x"): { + isFile: true; + isDirectory: false; + name: string; + fullPath: string; + file: (cb: (f: File) => void) => void; +} { + return { + isFile: true, + isDirectory: false, + name, + fullPath: "/" + name, + file: (cb) => cb(new File([content], name, { type: "text/plain" })), + }; +} + +function fakeDirEntry( + name: string, + childBatches: ReturnType[][], +): { + isFile: false; + isDirectory: true; + name: string; + fullPath: string; + createReader: () => { readEntries: (cb: (entries: unknown[]) => void) => void }; +} { + let i = 0; + return { + isFile: false, + isDirectory: true, + name, + fullPath: "/" + name, + createReader: () => ({ + readEntries: (cb) => { + // Mimic browser semantics: emit one batch per call, then + // an empty array to signal end-of-stream. A walker that + // calls readEntries only once would silently truncate at + // the first batch. 
+ if (i < childBatches.length) { + cb(childBatches[i++]); + } else { + cb([]); + } + }, + }), + }; +} + +describe("walkEntry — folder-recursion drop walker", () => { + it("collects a single dropped file", async () => { + const out: { file: File; relativePath: string }[] = []; + await __testables.walkEntry(fakeFileEntry("README.md") as never, "", out); + expect(out.length).toBe(1); + expect(out[0].relativePath).toBe("README.md"); + expect(out[0].file.name).toBe("README.md"); + }); + + it("walks a folder and preserves the relative path under the folder name", async () => { + const out: { file: File; relativePath: string }[] = []; + const folder = fakeDirEntry("skills", [ + [fakeFileEntry("a.md"), fakeFileEntry("b.md")], + ]); + await __testables.walkEntry(folder as never, "", out); + expect(out.map((e) => e.relativePath).sort()).toEqual([ + "skills/a.md", + "skills/b.md", + ]); + }); + + it("loops readEntries until empty so a multi-batch folder isn't truncated", async () => { + // Browsers limit each readEntries() call to ~100 entries. Our + // walker MUST call it again until an empty batch is returned. + // Fake reader emits two batches of 2 + an implicit empty → 4 + // total. A buggy walker that only takes the first batch would + // see only 2. + const out: { file: File; relativePath: string }[] = []; + const folder = fakeDirEntry("big", [ + [fakeFileEntry("1.txt"), fakeFileEntry("2.txt")], + [fakeFileEntry("3.txt"), fakeFileEntry("4.txt")], + ]); + await __testables.walkEntry(folder as never, "", out); + expect(out.length).toBe(4); + }); + + it("walks nested directories and accumulates the full path", async () => { + const out: { file: File; relativePath: string }[] = []; + const inner = fakeDirEntry("web-search", [[fakeFileEntry("SKILL.md")]]); + // Outer dir whose first batch contains a sub-dir entry. 
+ const outer = { + isFile: false, + isDirectory: true, + name: "skills", + fullPath: "/skills", + createReader: () => { + let i = 0; + return { + readEntries: (cb: (entries: unknown[]) => void) => { + if (i++ === 0) cb([inner]); + else cb([]); + }, + }; + }, + }; + await __testables.walkEntry(outer as never, "", out); + expect(out.length).toBe(1); + expect(out[0].relativePath).toBe("skills/web-search/SKILL.md"); + }); +}); + +// ---- FileTree drag-drop wiring ---- + +const file: TreeNode = { name: "config.yaml", path: "config.yaml", isDir: false, children: [], size: 0 }; +const skillsDir: TreeNode = { name: "skills", path: "skills", isDir: true, children: [], size: 0 }; + +function renderTree(props: Partial> = {}) { + // PR-D test defaults must include PR-C's onDownload + canDelete now + // that they're required on the TreeCallbacks shape (the rebase + // surfaced this — the merged tree depends on both feature sets). + const defaults: React.ComponentProps = { + nodes: [file, skillsDir], + selectedPath: null, + onSelect: vi.fn(), + onDelete: vi.fn(), + onDownload: vi.fn(), + canDelete: true, + onDropToTarget: vi.fn(), + expandedDirs: new Set(), + onToggleDir: vi.fn(), + loadingDir: null, + }; + const merged = { ...defaults, ...props }; + return { ...render(), props: merged }; +} + +describe("FileTree directory-row drag-drop", () => { + it("dragover on a directory row preventDefault's so the drop will fire", () => { + renderTree(); + const row = screen.getByText("skills"); + const dragOver = new Event("dragover", { bubbles: true, cancelable: true }); + Object.defineProperty(dragOver, "dataTransfer", { + value: { dropEffect: "" }, + }); + row.parentElement!.dispatchEvent(dragOver); + // preventDefault registers via the React handler — without it + // the drop event would never fire, so this assertion is the + // load-bearing one. 
+ expect(dragOver.defaultPrevented).toBe(true); + }); + + it("drop on a directory row fires onDropToTarget with that path + the items list", () => { + const { props } = renderTree(); + const row = screen.getByText("skills").parentElement!; + const fakeItems = { length: 1, 0: { kind: "file" } } as unknown as DataTransferItemList; + fireEvent.drop(row, { dataTransfer: { items: fakeItems } }); + expect(props.onDropToTarget).toHaveBeenCalledWith("skills", fakeItems); + }); + + it("drop on a FILE row does NOT fire onDropToTarget (only directories are valid targets)", () => { + const { props } = renderTree(); + const fileRow = screen.getByText("config.yaml").parentElement!; + const fakeItems = { length: 1, 0: { kind: "file" } } as unknown as DataTransferItemList; + fireEvent.drop(fileRow, { dataTransfer: { items: fakeItems } }); + expect(props.onDropToTarget).not.toHaveBeenCalled(); + }); + + it("drop with no DataTransferItems does NOT fire onDropToTarget", () => { + const { props } = renderTree(); + const row = screen.getByText("skills").parentElement!; + fireEvent.drop(row, { dataTransfer: { items: { length: 0 } } }); + expect(props.onDropToTarget).not.toHaveBeenCalled(); + }); + + it("dragenter sets the drop-target highlight on the directory row", () => { + renderTree(); + const row = screen.getByText("skills").parentElement!; + fireEvent.dragEnter(row, { dataTransfer: {} }); + // Highlight class is the discriminator — without dragenter + // wiring the row stays in its hover-only style. 
+ expect(row.className).toMatch(/bg-accent|outline-accent/); + }); +}); diff --git a/canvas/src/components/tabs/FilesTab/useFilesApi.ts b/canvas/src/components/tabs/FilesTab/useFilesApi.ts index b1aabbf6..83713540 100644 --- a/canvas/src/components/tabs/FilesTab/useFilesApi.ts +++ b/canvas/src/components/tabs/FilesTab/useFilesApi.ts @@ -151,16 +151,20 @@ export function useFilesApi(workspaceId: string, root: string) { }, [files, workspaceId]); const uploadFiles = useCallback( - async (fileList: FileList) => { + async (fileList: FileList, targetDir = "") => { let uploaded = 0; for (const file of Array.from(fileList)) { const path = file.webkitRelativePath || file.name; const parts = path.split("/"); + // For folder picker: webkitRelativePath is "/a/b.txt" + // — strip the picked-folder prefix so files land flat under the + // workspace's target dir, not under a redundant outer folder. const relPath = parts.length > 1 ? parts.slice(1).join("/") : parts[0]; + const finalPath = targetDir ? `${targetDir}/${relPath}` : relPath; if (file.size > 1_000_000) continue; try { const content = await file.text(); - await api.put(`/workspaces/${workspaceId}/files/${relPath}`, { content }); + await api.put(`/workspaces/${workspaceId}/files/${finalPath}`, { content }); uploaded++; } catch { /* skip binary */ @@ -168,7 +172,7 @@ export function useFilesApi(workspaceId: string, root: string) { } if (uploaded > 0) { useCanvasStore.getState().updateNodeData(workspaceId, { needsRestart: true }); - showToast(`Uploaded ${uploaded} files`, "success"); + showToast(`Uploaded ${uploaded} files${targetDir ? ` to ${targetDir}` : ""}`, "success"); loadFiles(); } return uploaded; @@ -176,6 +180,58 @@ export function useFilesApi(workspaceId: string, root: string) { [workspaceId, loadFiles] ); + /** + * Upload files dragged from the OS via the HTML5 DataTransferItemList + * API. 
Unlike the folder-picker path (uploadFiles), this preserves + * the dropped folder structure under `targetDir` — drag a "skills/" + * folder onto the /configs/skills row and you get + * /configs/skills/skills/* (the OUTER folder name is preserved + * because the user explicitly chose to drop a NAMED folder, unlike + * the folder-picker which always wraps the picked dir). + * + * Walks FileSystemDirectoryEntry recursively via webkitGetAsEntry. + * VSCode/JupyterLab use the same primitive — there's no other + * portable browser API for "drag a folder from OS". `webkit*` + * naming is a Chromium relic; Firefox + Safari implement the same + * surface. + * + * Returns the number of files uploaded so the caller can show a + * tally / fail toast. + */ + const uploadDataTransferItems = useCallback( + async (items: DataTransferItemList, targetDir = "") => { + const fileEntries = collectFileEntries(items); + let uploaded = 0; + for (const { file, relativePath } of await fileEntries) { + if (file.size > 1_000_000) continue; + const finalPath = targetDir + ? `${targetDir}/${relativePath}` + : relativePath; + try { + const content = await file.text(); + await api.put(`/workspaces/${workspaceId}/files/${finalPath}`, { + content, + }); + uploaded++; + } catch { + /* skip binary */ + } + } + if (uploaded > 0) { + useCanvasStore + .getState() + .updateNodeData(workspaceId, { needsRestart: true }); + showToast( + `Uploaded ${uploaded} file${uploaded === 1 ? "" : "s"}${targetDir ? 
` to ${targetDir}` : ""}`, + "success", + ); + loadFiles(); + } + return uploaded; + }, + [workspaceId, loadFiles], + ); + const deleteAllFiles = useCallback(async () => { let deleted = 0; for (const f of files) { @@ -205,6 +261,95 @@ export function useFilesApi(workspaceId: string, root: string) { downloadFileByPath, downloadAllFiles, uploadFiles, + uploadDataTransferItems, deleteAllFiles, }; } + +// ----- DataTransfer entry walker (PR-D) --------------------------------- + +/** + * Minimal subset of the FileSystem Entry API surface we use. The DOM + * lib types this as FileSystemEntry / FileSystemFileEntry / + * FileSystemDirectoryEntry but the relevant methods are callback- + * based. Keep the shape narrow + explicit so the recursion below + * type-checks without pulling in the full DOM lib types. + */ +interface FSEntry { + isFile: boolean; + isDirectory: boolean; + name: string; + fullPath: string; + file?(success: (f: File) => void, fail?: (e: unknown) => void): void; + createReader?(): { readEntries(success: (entries: FSEntry[]) => void): void }; +} + +interface CollectedEntry { + file: File; + /** Path relative to the dropped root (e.g. "skills/web-search/SKILL.md" + * for a dropped "skills/" folder containing web-search/SKILL.md). */ + relativePath: string; +} + +/** + * Walk a DataTransferItemList, returning every file entry as a flat + * array keyed by the path relative to the originally-dropped item. + * Folders dropped from the OS expand recursively; loose files + * passthrough with name as the relative path. + * + * Skips items where webkitGetAsEntry() returns null — that's how + * the browser signals a non-file payload (e.g. a dragged URL or + * text snippet). 
+ */
+async function collectFileEntries(
+  items: DataTransferItemList,
+): Promise<CollectedEntry[]> {
+  const out: CollectedEntry[] = [];
+  for (let i = 0; i < items.length; i++) {
+    const item = items[i];
+    if (item.kind !== "file") continue;
+    // webkitGetAsEntry is the standardised name; older Firefox used
+    // getAsEntry. Both Chromium + Firefox + Safari ship the webkit-
+    // prefixed variant today. There's no non-prefixed alternative.
+    const entry = (item as DataTransferItem & {
+      webkitGetAsEntry?: () => FSEntry | null;
+    }).webkitGetAsEntry?.();
+    if (!entry) continue;
+    await walkEntry(entry, "", out);
+  }
+  return out;
+}
+
+async function walkEntry(
+  entry: FSEntry,
+  prefix: string,
+  out: CollectedEntry[],
+): Promise<void> {
+  const name = entry.name;
+  const relPath = prefix ? `${prefix}/${name}` : name;
+  if (entry.isFile && entry.file) {
+    const file = await new Promise<File>((resolve, reject) => {
+      entry.file!(resolve, reject);
+    });
+    out.push({ file, relativePath: relPath });
+    return;
+  }
+  if (entry.isDirectory && entry.createReader) {
+    const reader = entry.createReader();
+    // readEntries returns up to ~100 at a time on Chromium; loop
+    // until empty so large folders aren't truncated.
+    let batch: FSEntry[] = [];
+    do {
+      batch = await new Promise<FSEntry[]>((resolve) =>
+        reader.readEntries(resolve),
+      );
+      for (const child of batch) {
+        await walkEntry(child, relPath, out);
+      }
+    } while (batch.length > 0);
+  }
+}
+
+// Exported for direct testing — the recursion + readEntries batching
+// is the part most likely to silently truncate a real folder upload.
+export const __testables = { collectFileEntries, walkEntry };