Merge pull request #1198 from Molecule-AI/staging

staging → main: ConsoleModal + Peers nits (#1197)
This commit is contained in:
Hongming Wang 2026-04-20 18:04:07 -07:00 committed by GitHub
commit f9f037bd09
31 changed files with 395 additions and 103 deletions

View File

@ -45,12 +45,14 @@ export function ConsoleModal({ workspaceId, workspaceName, open, onClose }: Prop
})
.catch((e) => {
if (ignore) return;
// 501 = deployment without a control plane (local docker-compose);
// we render a friendlier message than "501 Not Implemented".
// 501 = deployment without a control plane (local docker-compose).
// 404 = EC2 instance has been terminated. Match with word-boundary
// regex so a status code appearing inside an unrelated number
// ("15012") doesn't false-match.
const msg = e instanceof Error ? e.message : "Failed to load console output";
if (/501/.test(msg)) {
if (/\b501\b/.test(msg)) {
setError("Console output is only available on cloud (SaaS) deployments.");
} else if (/404/.test(msg)) {
} else if (/\b404\b/.test(msg)) {
setError("No EC2 instance found for this workspace — it may have been terminated.");
} else {
setError(msg);

View File

@ -268,6 +268,10 @@ export function DetailsTab({ workspaceId, data }: Props) {
<Section title={`Peers (${peers.length})`}>
{peersError ? (
<p className="text-xs text-red-400">{peersError}</p>
) : peers.length === 0 && data.status !== "online" && data.status !== "degraded" ? (
<p className="text-xs text-zinc-500">
Peers are only discoverable while the workspace is online.
</p>
) : peers.length === 0 ? (
<p className="text-xs text-zinc-500">No reachable peers</p>
) : (

View File

@ -0,0 +1,69 @@
# Phase 30 Launch Plan — Chrome DevTools MCP SEO Campaign
**Owner:** Marketing Lead
**Status:** Draft — CTAs + GA date TBD (blocked on engineering)
**Last updated:** 2026-04-20
---
## Campaign Status
| Deliverable | Owner | Status |
|-------------|-------|--------|
| SEO brief | Marketing Lead | ✅ Complete |
| Blog post | Marketing Lead | ✅ Complete |
| Keywords (P0/P1) | Marketing Lead | ✅ Confirmed |
| Keywords doc | Orchestrator | ✅ Created |
| Social distribution | Social Media Brand / Content Marketer | ⏳ Pending (both busy) |
| CTA links | Engineering | ⏳ TBD |
| GA date | Engineering | ⏳ TBD |
| SEO indexing | SEO Analyst | ⚠️ Unverified |
| Launch announcement | Content Marketer | ⏳ Pending |
---
## Confirmed Content
- **Brief:** `docs/marketing/briefs/2026-04-20-chrome-devtools-mcp-seo-brief.md`
- **Blog post:** `docs/marketing/blog/2026-04-20-how-to-add-browser-automation-to-ai-agents-with-mcp.md`
- **P0 keywords:** "MCP browser automation", "Chrome DevTools MCP"
- **P1 keywords:** "AI agent browser control", "MCP protocol tutorial"
---
## Pending Actions
### CTA Links + GA Date
**Blocked on:** Engineering
**Action required:** Engineering to provide:
1. Final CTA URL for the blog post (e.g. demo, signup, docs link)
2. GA date for the Chrome DevTools MCP feature
**If blocked:** Marketing Lead to escalate to PM for GA timeline.
### SEO Indexing
**Owner:** SEO Analyst
**Status:** Unverified — SEO Analyst reported completion but files not confirmed real.
**Action required:** Once SEO Analyst confirms files, verify in Google Search Console that P0 keywords are indexed. Do not mark indexing complete until confirmed.
### Social Distribution
**Owner:** Social Media Brand (interim) / Content Marketer (primary)
**Action required:** Draft social posts using P0 keywords. Route to blog post CTA once engineering provides link.
### Launch Announcement
**Owner:** Content Marketer
**Action required:** Write and schedule announcement for launch day. Use confirmed keywords and blog post as source.
---
## Open Questions
1. **GA date:** Is there a confirmed ship date for Chrome DevTools MCP?
2. **CTA link:** What is the primary conversion target for the blog post?
3. **SEO Analyst output:** Where did their deliverables actually land?
---
## Next Checkpoint
Review pending items in next marketing lead sync. Escalate blockers to PM if engineering CTAs + GA date are not provided within 24 hours.

View File

@ -0,0 +1,35 @@
# Chrome DevTools MCP — SEO Keyword Brief
**Campaign:** Phase 30 Chrome DevTools MCP SEO launch
**Date:** 2026-04-20
**Owner:** Marketing Lead + SEO Analyst
**Status:** Keywords confirmed — content live
## Primary Keywords (P0)
| Keyword | Intent | Target |
|---------|--------|--------|
| `MCP browser automation` | Informational / Tutorial | Blog post H1 + first 100 words |
| `Chrome DevTools MCP` | Informational / Product | Blog post H2 + meta description |
## Secondary Keywords (P1)
| Keyword | Intent | Target |
|---------|--------|--------|
| `AI agent browser control` | Informational | Blog body sections |
| `MCP protocol tutorial` | Tutorial / How-to | Blog post anchor sections |
## Keyword Strategy
- **P0 keywords** are locked. Both must appear in the blog post title, H1, and first 100 words.
- **P1 keywords** should appear naturally in body content and subheadings.
- Avoid generic marketing language in headings — this is a developer audience.
## Confirmed Deliverables
- **Brief:** `docs/marketing/briefs/2026-04-20-chrome-devtools-mcp-seo-brief.md`
- **Blog post:** `docs/marketing/blog/2026-04-20-how-to-add-browser-automation-to-ai-agents-with-mcp.md`
## SEO Analyst Note
SEO Analyst reported 6 campaign actions complete. File paths `docs/blog/...` and `docs/marketing/seo/keywords.md` — the latter is now confirmed real (this file). The `docs/blog/...` path has been superseded by the confirmed `docs/marketing/blog/...` location. All other SEO Analyst deliverables should be verified before treating as complete.

View File

@ -0,0 +1,65 @@
# Cognee Architecture Deep-Dive — Workspace Isolation
**Date:** 2026-04-20
**Issue:** Molecule-AI/molecule-core#1146
**Research by:** Research Lead
**Status:** Complete
---
## Executive Summary
Cognee has **dataset-level isolation primitives** but **no storage-layer enforcement** and **no native `workspace_id` support** in its MCP tool interface. Cross-workspace isolation is caller-controlled, not enforced by the storage layer.
---
## Isolation Layer Analysis
| Layer | Mechanism | Enforced? | Risk |
|-------|-----------|-----------|------|
| Storage (Postgres) | No RLS, no schema namespacing | ❌ None | High |
| App — dataset | `dataset_name` passed per tool call | ⚠️ Caller-controlled | Medium |
| App — user | `get_default_user()` internal resolver only | ⚠️ Soft | Medium |
| MCP `workspace_id` param | Not present in cognee-mcp interface | ❌ N/A | High |
---
## Key Findings
1. **Storage layer:** No Postgres row-level security (RLS), no schema-level tenant separation. Any admin with DB access can read any tenant's data.
2. **Dataset isolation:** Cognee uses `dataset_name` as a logical namespace, but it's passed by the caller per tool call — not enforced server-side. A misconfigured or malicious caller could read/write across datasets.
3. **MCP interface:** `cognee-mcp` does not expose `workspace_id` as a first-class parameter. Workspaces would need to be mapped to dataset names externally.
4. **User isolation:** `get_default_user()` resolves users internally without verifiable enforcement at the data layer.
---
## Migration Implications
Adopting Cognee as the memory substrate requires an **auth bridge**:
- The bridge wraps cognee-mcp and injects a `workspace_id` → `dataset_name` mapping
- All tool calls are routed through the bridge, which enforces tenant context
- Estimated effort: **~100–200 LOC** for the MCP proxy wrapper
- This is a pragmatic path — the bridge provides the isolation Cognee's storage layer lacks
---
## Recommendation
**Attempt the auth bridge prototype first (1–2 days of engineering):**
1. Build MCP proxy that maps workspace_id to dataset_name on each call
2. Validate that cross-workspace calls are correctly rejected
3. If clean → adopt Cognee for Phase 9
4. If complex → build native with storage-layer enforcement
**Do not proceed with Phase 9 proprietary memory investment until bridge prototype is evaluated.**
---
## Sources
- Cognee GitHub: https://github.com/topoteretes/cognee
- Preliminary eval: /workspace/repo/docs/research/cognee-isolation-eval.md

View File

@ -0,0 +1,37 @@
# Cognee Workspace Isolation Evaluation
**Date:** 2026-04-20
**Issue:** Molecule-AI/molecule-core#1146
**Status:** Preliminary — needs deeper architecture review
## Summary
Cognee (Apache-2.0, by Topoteretes UG) is an open-source AI memory engine with a shipped MCP component. It has direct overlap with Molecule AI's Phase 9 hierarchical memory architecture.
## Workspace Isolation Assessment
**Signal: Partial/Positive**
Cognee's GitHub README explicitly lists "agentic user/tenant isolation, traceability, OTEL collector, audit trails" as a core architectural feature.
This is a positive signal. However:
- The README mention does not specify the technical mechanism (namespace-level separation? separate vector DB instances per tenant? row-level security in a shared DB?)
- The cognee-mcp MCP component's handling of multi-workspace contexts is not documented in the surface-level readme
**Verdict:** Cognee claims tenant isolation. Further due diligence required before treating this as confirmed.
## Next Steps
1. **Deep-dive into cognee architecture docs** — check if isolation is enforced at the storage layer (separate DB/collection per workspace), application layer (row-level), or both
2. **Test cognee-mcp with a multi-workspace scenario** — the MCP tool interface should reveal whether workspace_id is a first-class parameter
3. **Check cognee's GitHub issues/discussions** — any community reports of cross-tenant data leakage?
4. **Evaluate migration path** — if Cognee is adopted, what's involved in migrating existing Phase 9 work?
## Recommendation
Proceed with Phase 9 build-vs-buy review. Cognee is a credible candidate — isolation is claimed but mechanism needs verification. The Phase 9 halt stands until this is resolved.
## Sources
- https://github.com/topoteretes/cognee (README, 2026-04-20)
- /workspace/repo/research/cognee-memo.md

View File

@ -303,7 +303,7 @@ func (h *ActivityHandler) Report(c *gin.Context) {
Metadata interface{} `json:"metadata"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}

View File

@ -27,7 +27,7 @@ func (h *AgentHandler) Assign(c *gin.Context) {
Model string `json:"model" binding:"required"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -86,7 +86,7 @@ func (h *AgentHandler) Replace(c *gin.Context) {
Model string `json:"model" binding:"required"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -165,7 +165,7 @@ func (h *AgentHandler) Move(c *gin.Context) {
TargetWorkspaceID string `json:"target_workspace_id" binding:"required"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}

View File

@ -30,7 +30,7 @@ func (h *ApprovalsHandler) Create(c *gin.Context) {
Context map[string]interface{} `json:"context"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -170,7 +170,7 @@ func (h *ApprovalsHandler) Decide(c *gin.Context) {
DecidedBy string `json:"decided_by"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}

View File

@ -141,7 +141,7 @@ func (h *ArtifactsHandler) Create(c *gin.Context) {
var req createArtifactsRepoRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -302,7 +302,7 @@ func (h *ArtifactsHandler) Fork(c *gin.Context) {
var req forkArtifactsRepoRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -367,7 +367,7 @@ func (h *ArtifactsHandler) Token(c *gin.Context) {
var req artifactsTokenRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}

View File

@ -90,7 +90,7 @@ func (h *BudgetHandler) PatchBudget(c *gin.Context) {
// so we unmarshal into a raw map first.
var raw map[string]interface{}
if err := c.ShouldBindJSON(&raw); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}

View File

@ -35,7 +35,7 @@ func (h *BundleHandler) Export(c *gin.Context) {
b, err := bundle.Export(ctx, workspaceID, h.configsDir, h.docker)
if err != nil {
c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
c.JSON(http.StatusNotFound, gin.H{"error": "bundle not found"})
return
}
@ -46,7 +46,7 @@ func (h *BundleHandler) Export(c *gin.Context) {
func (h *BundleHandler) Import(c *gin.Context) {
var b bundle.Bundle
if err := c.ShouldBindJSON(&b); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid bundle"})
return
}

View File

@ -136,7 +136,7 @@ func (h *ChannelHandler) Create(c *gin.Context) {
}
if err := adapter.ValidateConfig(body.Config); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid config: " + err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid channel config"})
return
}
@ -294,7 +294,8 @@ func (h *ChannelHandler) Send(c *gin.Context) {
}
if err := h.manager.SendOutbound(ctx, channelID, body.Text); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
log.Printf("Channels: send outbound failed for channel %s: %v", channelID, err)
c.JSON(http.StatusInternalServerError, gin.H{"error": "send failed"})
return
}
@ -307,7 +308,8 @@ func (h *ChannelHandler) Test(c *gin.Context) {
ctx := c.Request.Context()
if err := h.manager.SendOutbound(ctx, channelID, "🔔 Molecule AI channel test — connection successful!"); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
log.Printf("Channels: test message failed for channel %s: %v", channelID, err)
c.JSON(http.StatusInternalServerError, gin.H{"error": "test message failed"})
return
}
@ -436,7 +438,7 @@ func (h *ChannelHandler) Webhook(c *gin.Context) {
// Parse the webhook first to get the chat_id
msg, err := adapter.ParseWebhook(c, nil)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": "parse error: " + err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "webhook parse failed"})
return
}
if msg == nil {

View File

@ -71,7 +71,7 @@ func (h *CheckpointsHandler) Upsert(c *gin.Context) {
Payload json.RawMessage `json:"payload"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}

View File

@ -114,7 +114,7 @@ func (h *DelegationHandler) Delegate(c *gin.Context) {
// the 400 response and returns the error so the caller can return.
func bindDelegateRequest(c *gin.Context, body *delegateRequest) error {
if err := c.ShouldBindJSON(body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid delegation request"})
return err
}
if _, err := uuid.Parse(body.TargetID); err != nil {
@ -344,7 +344,7 @@ func (h *DelegationHandler) Record(c *gin.Context) {
DelegationID string `json:"delegation_id" binding:"required"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
if _, err := uuid.Parse(body.TargetID); err != nil {
@ -392,7 +392,7 @@ func (h *DelegationHandler) UpdateStatus(c *gin.Context) {
ResponsePreview string `json:"response_preview,omitempty"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
if body.Status != "completed" && body.Status != "failed" {

View File

@ -302,7 +302,7 @@ func (h *DiscoveryHandler) CheckAccess(c *gin.Context) {
TargetID string `json:"target_id" binding:"required"`
}
if err := c.ShouldBindJSON(&payload); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}

View File

@ -145,7 +145,7 @@ func (h *MemoriesHandler) Commit(c *gin.Context) {
Namespace string `json:"namespace,omitempty"` // optional; defaults to "general"
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}

View File

@ -116,7 +116,7 @@ func (h *MemoryHandler) Set(c *gin.Context) {
IfMatchVersion *int64 `json:"if_match_version"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}

View File

@ -250,7 +250,7 @@ func (h *OrgHandler) Import(c *gin.Context) {
Template OrgTemplate `json:"template"` // or inline template
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -264,7 +264,7 @@ func (h *OrgHandler) Import(c *gin.Context) {
// letting an unauthenticated caller probe arbitrary filesystem paths.
resolved, err := resolveInsideRoot(h.orgDir, body.Dir)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("invalid dir: %v", err)})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid org directory"})
return
}
orgBaseDir = resolved
@ -279,11 +279,11 @@ func (h *OrgHandler) Import(c *gin.Context) {
// refactor. Fails loudly on missing / cyclic / escaping includes.
expanded, err := resolveYAMLIncludes(data, orgBaseDir)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("!include expansion failed: %v", err)})
c.JSON(http.StatusBadRequest, gin.H{"error": "org template expansion failed"})
return
}
if err := yaml.Unmarshal(expanded, &tmpl); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("invalid YAML: %v", err)})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid org template"})
return
}
} else if body.Template.Name != "" {

View File

@ -237,7 +237,7 @@ func (h *OrgPluginAllowlistHandler) PutAllowlist(c *gin.Context) {
var req putAllowlistRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
if req.EnabledBy == "" {

View File

@ -45,7 +45,7 @@ func (h *PluginsHandler) Install(c *gin.Context) {
var req installRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -56,11 +56,9 @@ func (h *PluginsHandler) Install(c *gin.Context) {
c.JSON(he.Status, he.Body)
return
}
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
c.JSON(http.StatusInternalServerError, gin.H{"error": "plugin install failed"})
return
}
// On success, we own stagedDir cleanup. On error, resolveAndStage
// has already cleaned it up (and its returned result is nil).
defer os.RemoveAll(result.StagedDir)
// Org plugin allowlist gate (#591).
@ -77,7 +75,7 @@ func (h *PluginsHandler) Install(c *gin.Context) {
c.JSON(he.Status, he.Body)
return
}
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
c.JSON(http.StatusInternalServerError, gin.H{"error": "plugin deliver failed"})
return
}
@ -96,7 +94,7 @@ func (h *PluginsHandler) Uninstall(c *gin.Context) {
ctx := c.Request.Context()
if err := validatePluginName(pluginName); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid plugin name"})
return
}
@ -179,7 +177,7 @@ func (h *PluginsHandler) Download(c *gin.Context) {
ctx := c.Request.Context()
if err := validatePluginName(pluginName); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid plugin name"})
return
}
@ -223,7 +221,7 @@ func (h *PluginsHandler) Download(c *gin.Context) {
c.JSON(he.Status, he.Body)
return
}
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
c.JSON(http.StatusInternalServerError, gin.H{"error": "plugin download failed"})
return
}
defer os.RemoveAll(result.StagedDir)

View File

@ -126,13 +126,13 @@ func validateAgentURL(rawURL string) error {
func (h *RegistryHandler) Register(c *gin.Context) {
var payload models.RegisterPayload
if err := c.ShouldBindJSON(&payload); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
// C6: reject SSRF-capable URLs before persisting or caching them.
if err := validateAgentURL(payload.URL); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -251,7 +251,7 @@ func (h *RegistryHandler) Register(c *gin.Context) {
func (h *RegistryHandler) Heartbeat(c *gin.Context) {
var payload models.HeartbeatPayload
if err := c.ShouldBindJSON(&payload); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -390,7 +390,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea
func (h *RegistryHandler) UpdateCard(c *gin.Context) {
var payload models.UpdateCardPayload
if err := c.ShouldBindJSON(&payload); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}

View File

@ -114,7 +114,7 @@ func (h *ScheduleHandler) Create(c *gin.Context) {
// Validate and compute next run
nextRun, err := scheduler.ComputeNextRun(body.CronExpr, body.Timezone, time.Now())
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -198,7 +198,7 @@ func (h *ScheduleHandler) Update(c *gin.Context) {
}
nextRun, err := scheduler.ComputeNextRun(cronExpr, tz, time.Now())
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
nextRunAt = &nextRun

View File

@ -222,7 +222,7 @@ func (h *SecretsHandler) Set(c *gin.Context) {
Value string `json:"value" binding:"required"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -335,7 +335,7 @@ func (h *SecretsHandler) SetGlobal(c *gin.Context) {
Value string `json:"value" binding:"required"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}

View File

@ -122,7 +122,7 @@ func (h *TemplatesHandler) Import(c *gin.Context) {
Files map[string]string `json:"files" binding:"required"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -140,7 +140,7 @@ func (h *TemplatesHandler) Import(c *gin.Context) {
}
if err := writeFiles(destDir, body.Files); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -164,7 +164,7 @@ func (h *TemplatesHandler) ReplaceFiles(c *gin.Context) {
Files map[string]string `json:"files" binding:"required"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -183,7 +183,7 @@ func (h *TemplatesHandler) ReplaceFiles(c *gin.Context) {
// Validate all paths first
for relPath := range body.Files {
if err := validateRelPath(relPath); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
}
@ -227,7 +227,7 @@ func (h *TemplatesHandler) ReplaceFiles(c *gin.Context) {
}
os.MkdirAll(destDir, 0o755)
if err := writeFiles(destDir, body.Files); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
c.JSON(http.StatusOK, gin.H{"status": "replaced", "workspace": workspaceID, "files": len(body.Files), "source": "template"})

View File

@ -134,7 +134,7 @@ func (h *TemplatesHandler) ListFiles(c *gin.Context) {
subPath := c.DefaultQuery("path", "")
if subPath != "" {
if err := validateRelPath(subPath); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid path"})
return
}
}
@ -258,7 +258,7 @@ func (h *TemplatesHandler) ReadFile(c *gin.Context) {
}
if err := validateRelPath(filePath); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid path"})
return
}
@ -318,7 +318,7 @@ func (h *TemplatesHandler) WriteFile(c *gin.Context) {
}
if err := validateRelPath(filePath); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid path"})
return
}
@ -326,7 +326,7 @@ func (h *TemplatesHandler) WriteFile(c *gin.Context) {
Content string `json:"content"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -367,7 +367,7 @@ func (h *TemplatesHandler) DeleteFile(c *gin.Context) {
}
if err := validateRelPath(filePath); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid path"})
return
}

View File

@ -39,7 +39,7 @@ func (h *ViewportHandler) Save(c *gin.Context) {
Zoom float64 `json:"zoom"`
}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid viewport data"})
return
}

View File

@ -76,14 +76,14 @@ func (h *WorkspaceHandler) TokenRegistry() *provisionhook.Registry {
func (h *WorkspaceHandler) Create(c *gin.Context) {
var payload models.CreateWorkspacePayload
if err := c.ShouldBindJSON(&payload); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid workspace payload"})
return
}
// #685/#688: validate field lengths and reject injection characters before
// any DB or provisioner interaction.
if err := validateWorkspaceFields(payload.Name, payload.Role, payload.Model, payload.Runtime); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid workspace fields"})
return
}
@ -133,7 +133,7 @@ func (h *WorkspaceHandler) Create(c *gin.Context) {
var workspaceDir interface{}
if payload.WorkspaceDir != "" {
if err := validateWorkspaceDir(payload.WorkspaceDir); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid workspace directory"})
return
}
workspaceDir = payload.WorkspaceDir
@ -145,7 +145,7 @@ func (h *WorkspaceHandler) Create(c *gin.Context) {
workspaceAccess = provisioner.WorkspaceAccessNone
}
if err := provisioner.ValidateWorkspaceAccess(workspaceAccess, payload.WorkspaceDir); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid workspace access"})
return
}
@ -411,7 +411,7 @@ func (h *WorkspaceHandler) Get(c *gin.Context) {
// #687: reject non-UUID IDs before hitting the DB.
if err := validateWorkspaceID(id); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid workspace ID"})
return
}
@ -569,13 +569,13 @@ func (h *WorkspaceHandler) Update(c *gin.Context) {
// #687: reject non-UUID IDs before hitting the DB.
if err := validateWorkspaceID(id); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid workspace ID"})
return
}
var body map[string]interface{}
if err := c.ShouldBindJSON(&body); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
return
}
@ -591,7 +591,7 @@ func (h *WorkspaceHandler) Update(c *gin.Context) {
if err := validateWorkspaceFields(
strField("name"), strField("role"), "" /*model not patchable*/, strField("runtime"),
); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid workspace fields"})
return
}
@ -644,7 +644,7 @@ func (h *WorkspaceHandler) Update(c *gin.Context) {
if wsDir != nil {
if dirStr, isStr := wsDir.(string); isStr && dirStr != "" {
if err := validateWorkspaceDir(dirStr); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid workspace directory"})
return
}
}
@ -707,7 +707,7 @@ func (h *WorkspaceHandler) Delete(c *gin.Context) {
// #687: reject non-UUID IDs before hitting the DB.
if err := validateWorkspaceID(id); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid workspace ID"})
return
}
@ -807,9 +807,11 @@ func (h *WorkspaceHandler) Delete(c *gin.Context) {
pq.Array(allIDs)); err != nil {
log.Printf("Delete token revocation error for %s: %v", id, err)
}
// Disable schedules for removed workspaces (#1027)
// #1027: cascade-disable all schedules for the deleted workspaces so
// the scheduler never fires a cron into a removed container.
if _, err := db.DB.ExecContext(ctx,
`UPDATE workspace_schedules SET enabled = false WHERE workspace_id = ANY($1::uuid[])`,
`UPDATE workspace_schedules SET enabled = false, updated_at = now()
WHERE workspace_id = ANY($1::uuid[]) AND enabled = true`,
pq.Array(allIDs)); err != nil {
log.Printf("Delete schedule disable error for %s: %v", id, err)
}
@ -864,7 +866,7 @@ func (h *WorkspaceHandler) Delete(c *gin.Context) {
// Hard delete the workspace row
if _, err := db.DB.ExecContext(ctx, "DELETE FROM workspaces WHERE id = ANY($1::uuid[])", purgeIDs); err != nil {
log.Printf("Purge workspace row error for %v: %v", allIDs, err)
c.JSON(http.StatusInternalServerError, gin.H{"error": "purge failed: " + err.Error()})
c.JSON(http.StatusInternalServerError, gin.H{"error": "purge failed"})
return
}
c.JSON(http.StatusOK, gin.H{"status": "purged", "cascade_deleted": len(descendantIDs)})

View File

@ -768,3 +768,51 @@ func containsStr(s, substr string) bool {
}
return false
}
// ── seedInitialMemories content length limit (#1066, CWE-400) ─────────────────
// TestSeedInitialMemories_ContentTruncated verifies that memory content exceeding
// maxMemoryContentLength is truncated before INSERT rather than stored in full.
// This prevents storage exhaustion from crafted memory payloads in org templates.
func TestSeedInitialMemories_ContentTruncated(t *testing.T) {
	mock := setupTestDB(t)

	// Build content just over the limit: 100_001 "X" bytes.
	//
	// BUG FIX: the previous version did
	//     largeContent := string(make([]byte, 100_001))
	//     copy([]byte(largeContent), "X")
	// Go strings are immutable, so []byte(largeContent) allocates a
	// throwaway copy — the copy never touched largeContent (and "X" is a
	// single byte, so at most one byte would have been filled). The content
	// stayed 100_001 NUL bytes and could never match the "X"-filled
	// expectation below. strings.Repeat builds the intended value directly.
	largeContent := strings.Repeat("X", 100_001)
	memories := []models.MemorySeed{
		{Content: largeContent, Scope: "LOCAL"},
	}

	mock.ExpectExec(`INSERT INTO agent_memories`).
		// content arg is $2; it must be exactly 100_000 bytes.
		WithArgs(sqlmock.AnyArg(), strings.Repeat("X", 100_000), "LOCAL", sqlmock.AnyArg()).
		WillReturnResult(sqlmock.NewResult(0, 1))

	seedInitialMemories(context.Background(), "ws-1066-test", memories, "test-ns")

	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("DB expectations not met: %v\n"+
			"INSERT should fire with truncated 100_000-byte content, not 100_001-byte", err)
	}
}
// TestSeedInitialMemories_ContentUnderLimit passes through unchanged.
// TestSeedInitialMemories_ContentUnderLimit confirms that content below the
// length limit is passed through to the INSERT verbatim — no truncation.
func TestSeedInitialMemories_ContentUnderLimit(t *testing.T) {
	mock := setupTestDB(t)

	seed := models.MemorySeed{Content: "short content", Scope: "TEAM"}

	// The INSERT must carry the seed's content and scope through unchanged.
	mock.ExpectExec(`INSERT INTO agent_memories`).
		WithArgs(sqlmock.AnyArg(), "short content", "TEAM", sqlmock.AnyArg()).
		WillReturnResult(sqlmock.NewResult(0, 1))

	seedInitialMemories(context.Background(), "ws-1066-under", []models.MemorySeed{seed}, "test-ns")

	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("DB expectations not met: %v", err)
	}
}

View File

@ -242,7 +242,7 @@ func (s *Scheduler) fireSchedule(ctx context.Context, sched scheduleRow) {
defer func() {
if r := recover(); r != nil {
log.Printf("Scheduler: panic recovered in fireSchedule for '%s' (%s): %v",
sched.Name, sched.ID, r)
sched.Name, sched.ID, r)
// Always advance next_run_at even on panic so the schedule doesn't get
// stuck re-firing the same panicking schedule indefinitely (#1029).
if nextTime, err := ComputeNextRun(sched.CronExpr, sched.Timezone, time.Now()); err == nil {

View File

@ -276,6 +276,12 @@ func TestFireSchedule_ComputeNextRunError(t *testing.T) {
mock.ExpectQuery(`SELECT COALESCE`).
WillReturnRows(sqlmock.NewRows([]string{"coalesce"}).AddRow(0))
// #795 consecutive_empty_runs reset — successProxy returns {"ok":true}
// which is non-empty, so the counter is reset to 0.
mock.ExpectExec(`UPDATE workspace_schedules`).
WithArgs(sched.ID).
WillReturnResult(sqlmock.NewResult(0, 1))
// UPDATE must fire — COALESCE($2, next_run_at) keeps existing value when $2 is nil.
// AnyArg for $2 because it will be nil (ComputeNextRun failed).
mock.ExpectExec(`UPDATE workspace_schedules`).
@ -453,14 +459,19 @@ func TestRecordSkipped_shortWorkspaceIDNoPanic(t *testing.T) {
// ── Panic-recovery next_run_at advancement (#1029) ──────────────────────────
//
// Issue #1029: when fireSchedule panics, the deferred recover must advance
// next_run_at to the next cron window. Without this fix the schedule's
// next_run_at stays in the past and fires on every 30-second tick.
// Issue #1029: when fireSchedule panics (e.g. a nil-pointer in the A2A proxy
// or a bad JSON marshal), the deferred recover must advance next_run_at to the
// next cron window. Without this fix the schedule's next_run_at stays in the
// past and fires on every 30-second tick — a tight retry loop that amplifies
// the original failure.
const testCronPanic = "0 * * * *"
// TestPanicRecovery_AdvancesNextRunAt verifies that recover issues an UPDATE
// to advance next_run_at when the proxy panics. panic -> recover -> advance.
// TestPanicRecovery_AdvancesNextRunAt verifies that the recover block in
// fireSchedule issues an UPDATE to advance next_run_at when the proxy panics.
//
// This is the core invariant of the #1029 fix: panic → recover → advance.
// The test calls fireSchedule directly (not via tick) so the sqlmock
// expectations are precise — we know exactly which queries fire and in what
// order.
func TestPanicRecovery_AdvancesNextRunAt(t *testing.T) {
mock := setupTestDB(t)
@ -468,33 +479,42 @@ func TestPanicRecovery_AdvancesNextRunAt(t *testing.T) {
ID: "aaa11111-1111-1111-1111-111111111111",
WorkspaceID: "bbb22222-2222-2222-2222-222222222222",
Name: "panic-advance-test",
CronExpr: testCronPanic,
CronExpr: "0 * * * *", // every hour — valid expr so ComputeNextRun succeeds
Timezone: "UTC",
Prompt: "trigger panic",
}
// fireSchedule checks active_tasks first.
// 1. fireSchedule first checks active_tasks on the workspace.
// Return 0 so the fire proceeds (not skipped).
mock.ExpectQuery(`SELECT COALESCE`).
WillReturnRows(sqlmock.NewRows([]string{"coalesce"}).AddRow(0))
// panicProxy causes ProxyA2ARequest to panic.
// Deferred recover calls ComputeNextRun, then ExecContext for next_run_at.
// 2. ProxyA2ARequest panics (panicProxy).
// The deferred recover catches it and calls:
// ComputeNextRun(cronExpr, tz, time.Now())
// db.DB.ExecContext(ctx, `UPDATE workspace_schedules SET next_run_at = $1 ... WHERE id = $2`, nextTime, sched.ID)
//
// We expect this UPDATE with the schedule ID as arg 2.
mock.ExpectExec(`UPDATE workspace_schedules SET next_run_at`).
WithArgs(sqlmock.AnyArg(), sched.ID).
WillReturnResult(sqlmock.NewResult(0, 1))
s := New(&panicProxy{}, nil)
// fireSchedule must not propagate the panic — the recover catches it.
s.fireSchedule(context.Background(), sched)
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("unmet DB expectations: %v
"+
"Panic-recovery defer must advance next_run_at via UPDATE (#1029)", err)
t.Errorf("unmet DB expectations: %v\n"+
"The panic-recovery defer must advance next_run_at via UPDATE (#1029)", err)
}
}
// TestFireSchedule_NormalSuccess_AdvancesNextRunAt regression guard: normal
// fireSchedule completion must still advance next_run_at via the post-fire UPDATE.
// TestFireSchedule_NormalSuccess_AdvancesNextRunAt is a regression guard for
// the happy path: when fireSchedule completes without error, next_run_at must
// be advanced as part of the normal UPDATE (not via the panic path).
//
// This ensures the #1029 panic-recovery change didn't accidentally break the
// normal flow where both the proxy call and the post-fire UPDATE succeed.
func TestFireSchedule_NormalSuccess_AdvancesNextRunAt(t *testing.T) {
mock := setupTestDB(t)
@ -502,26 +522,28 @@ func TestFireSchedule_NormalSuccess_AdvancesNextRunAt(t *testing.T) {
ID: "ccc33333-3333-3333-3333-333333333333",
WorkspaceID: "ddd44444-4444-4444-4444-444444444444",
Name: "normal-advance-test",
CronExpr: "30 * * * *",
CronExpr: "30 * * * *", // every hour at :30
Timezone: "UTC",
Prompt: "do work",
}
// active_tasks check -> workspace idle
// 1. active_tasks check → workspace idle
mock.ExpectQuery(`SELECT COALESCE`).
WillReturnRows(sqlmock.NewRows([]string{"coalesce"}).AddRow(0))
// #795 consecutive_empty_runs reset
// 2. #795 consecutive_empty_runs reset — successProxy returns {"ok":true}
// which is non-empty, so the counter is reset to 0.
mock.ExpectExec(`UPDATE workspace_schedules`).
WithArgs(sched.ID).
WillReturnResult(sqlmock.NewResult(0, 1))
// Normal post-fire UPDATE
// 3. Normal UPDATE after successful proxy call.
// Args: $1=sched.ID, $2=nextRunPtr (computed time), $3=lastStatus, $4=lastError
mock.ExpectExec(`UPDATE workspace_schedules`).
WithArgs(sched.ID, sqlmock.AnyArg(), "ok", "").
WillReturnResult(sqlmock.NewResult(0, 1))
// activity_logs INSERT
// 4. activity_logs INSERT
mock.ExpectExec(`INSERT INTO activity_logs`).
WithArgs(sched.WorkspaceID, sqlmock.AnyArg(), sqlmock.AnyArg(), "ok", "").
WillReturnResult(sqlmock.NewResult(0, 1))
@ -530,14 +552,21 @@ func TestFireSchedule_NormalSuccess_AdvancesNextRunAt(t *testing.T) {
s.fireSchedule(context.Background(), sched)
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("unmet DB expectations: %v
"+
t.Errorf("unmet DB expectations: %v\n"+
"Normal fire must still advance next_run_at via the post-fire UPDATE", err)
}
}
// TestRecordSkipped_AdvancesNextRunAt verifies recordSkipped advances
// next_run_at so the schedule doesn't re-fire on the very next tick.
// TestRecordSkipped_AdvancesNextRunAt verifies that when a workspace is busy
// and the cron fire is skipped, recordSkipped advances next_run_at so the
// schedule doesn't re-fire on the very next tick.
//
// This is the third leg of the #1029 invariant: fire, panic, AND skip must
// all advance next_run_at.
//
// We call recordSkipped directly rather than going through fireSchedule
// because #969 added a deferral loop (poll every 10s for up to 2 min) that
// makes end-to-end testing via fireSchedule impractical with sqlmock.
func TestRecordSkipped_AdvancesNextRunAt(t *testing.T) {
mock := setupTestDB(t)
@ -545,27 +574,28 @@ func TestRecordSkipped_AdvancesNextRunAt(t *testing.T) {
ID: "eee55555-5555-5555-5555-555555555555",
WorkspaceID: "fff66666-6666-6666-6666-666666666666",
Name: "skipped-advance-test",
CronExpr: "15 * * * *",
CronExpr: "15 * * * *", // every hour at :15
Timezone: "UTC",
Prompt: "skipped work",
}
// recordSkipped UPDATE
// 1. recordSkipped UPDATE — must include next_run_at ($2) and reason ($3).
mock.ExpectExec(`UPDATE workspace_schedules`).
WithArgs(sched.ID, sqlmock.AnyArg(), sqlmock.AnyArg()).
WillReturnResult(sqlmock.NewResult(0, 1))
// activity_logs INSERT
// 2. activity_logs INSERT for the skip event
mock.ExpectExec(`INSERT INTO activity_logs`).
WithArgs(sched.WorkspaceID, sqlmock.AnyArg(), sqlmock.AnyArg(), sqlmock.AnyArg()).
WillReturnResult(sqlmock.NewResult(0, 1))
s := New(&successProxy{}, nil)
// Call recordSkipped directly — simulates the skip path when workspace is busy.
s.recordSkipped(context.Background(), sched, 2)
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("unmet DB expectations: %v
"+
t.Errorf("unmet DB expectations: %v\n"+
"recordSkipped must advance next_run_at when workspace is busy (#1029)", err)
}
}
// trigger CI