Merge origin/main into staging: resolve conflicts with main's test + security fixes

Conflicts resolved (took main's versions):
- canvas/src/app/__tests__/orgs-page.test.tsx (act() wrappers, PR #1350)
- canvas/src/components/Canvas.tsx (100px proximity threshold, PR #1357)
- canvas/src/components/__tests__/ContextMenu.keyboard.test.tsx (hasChildren fix)
- workspace-server/internal/handlers/container_files.go (CWE-22/CWE-78 fixes, PRs #1281/#1310)

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Molecule AI · sdk-lead 2026-04-21 12:25:42 +00:00
commit e9615af169
18 changed files with 814 additions and 213 deletions

View File

@ -15,6 +15,7 @@
* - Polling: provisioning orgs schedule a 5s refresh (fake timers)
*/
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { act } from "react";
import { render, screen, cleanup } from "@testing-library/react";
// ── Hoisted mocks ────────────────────────────────────────────────────────────
@ -127,12 +128,9 @@ describe("/orgs — auth guard", () => {
describe("/orgs — error state", () => {
it("shows error + Retry button when /cp/orgs fails", async () => {
mockFetchSession.mockResolvedValue({ userId: "u-1" });
mockFetch.mockImplementationOnce(() =>
Promise.reject(new Error("GET /cp/orgs: 500"))
);
mockFetch.mockResolvedValueOnce(notOk(500, "db down"));
render(<OrgsPage />);
await vi.advanceTimersByTimeAsync(50);
await vi.runAllTimersAsync();
await act(async () => { await vi.advanceTimersByTimeAsync(50); });
expect(screen.getByText(/Error:/)).toBeTruthy();
expect(screen.getByRole("button", { name: /retry/i })).toBeTruthy();
});
@ -143,7 +141,7 @@ describe("/orgs — empty list", () => {
mockFetchSession.mockResolvedValue({ userId: "u-1" });
mockFetch.mockResolvedValueOnce(okJson({ orgs: [] }));
render(<OrgsPage />);
await vi.advanceTimersByTimeAsync(50);
await act(async () => { await vi.advanceTimersByTimeAsync(50); });
expect(screen.getByText(/don't have any organizations/i)).toBeTruthy();
expect(screen.getByRole("button", { name: /create organization/i })).toBeTruthy();
});
@ -170,7 +168,7 @@ describe("/orgs — CTAs by status", () => {
})
);
render(<OrgsPage />);
await vi.advanceTimersByTimeAsync(50);
await act(async () => { await vi.advanceTimersByTimeAsync(50); });
const link = screen.getByRole("link", { name: /open/i }) as HTMLAnchorElement;
expect(link.href).toBe("https://acme.moleculesai.app/");
});
@ -193,7 +191,7 @@ describe("/orgs — CTAs by status", () => {
})
);
render(<OrgsPage />);
await vi.advanceTimersByTimeAsync(50);
await act(async () => { await vi.advanceTimersByTimeAsync(50); });
const link = screen.getByRole("link", {
name: /complete payment/i,
}) as HTMLAnchorElement;
@ -218,7 +216,7 @@ describe("/orgs — CTAs by status", () => {
})
);
render(<OrgsPage />);
await vi.advanceTimersByTimeAsync(50);
await act(async () => { await vi.advanceTimersByTimeAsync(50); });
const link = screen.getByRole("link", {
name: /contact support/i,
}) as HTMLAnchorElement;
@ -247,7 +245,7 @@ describe("/orgs — post-checkout banner", () => {
})
);
render(<OrgsPage />);
await vi.advanceTimersByTimeAsync(50);
await act(async () => { await vi.advanceTimersByTimeAsync(50); });
expect(screen.getByText(/Payment confirmed/i)).toBeTruthy();
// URL must be rewritten to drop the ?checkout flag so reload doesn't re-show the banner
expect(replaceState).toHaveBeenCalled();
@ -259,7 +257,7 @@ describe("/orgs — post-checkout banner", () => {
mockFetchSession.mockResolvedValue({ userId: "u-1" });
mockFetch.mockResolvedValueOnce(okJson({ orgs: [] }));
render(<OrgsPage />);
await vi.advanceTimersByTimeAsync(50);
await act(async () => { await vi.advanceTimersByTimeAsync(50); });
expect(screen.getByText(/don't have any organizations/i)).toBeTruthy();
expect(screen.queryByText(/Payment confirmed/i)).toBeNull();
});
@ -270,7 +268,7 @@ describe("/orgs — fetch includes credentials + timeout signal", () => {
mockFetchSession.mockResolvedValue({ userId: "u-1" });
mockFetch.mockResolvedValueOnce(okJson({ orgs: [] }));
render(<OrgsPage />);
await vi.advanceTimersByTimeAsync(50);
await act(async () => { await vi.advanceTimersByTimeAsync(50); });
const callArgs = mockFetch.mock.calls.find((c) =>
String(c[0]).includes("/cp/orgs")
);

View File

@ -30,7 +30,6 @@ import { SearchDialog } from "./SearchDialog";
import { Toaster } from "./Toaster";
import { Toolbar } from "./Toolbar";
import { ConfirmDialog } from "./ConfirmDialog";
import { DeleteCascadeConfirmDialog } from "./DeleteCascadeConfirmDialog";
import { api } from "@/lib/api";
import { showToast } from "./Toaster";
// Phase 20 components
@ -39,14 +38,6 @@ import { SettingsPanel, DeleteConfirmDialog } from "./settings";
import { BatchActionBar } from "./BatchActionBar";
import { ProvisioningTimeout } from "./ProvisioningTimeout";
// Drag-to-nest proximity: nodes must be within this many pixels (center-to-center)
// to trigger the "Nest Workspace" dialog. The default ReactFlow intersection
// detection uses bounding-box overlap which fires from large distances when
// nodes have large CSS min-width/min-height values.
const NEST_PROXIMITY_THRESHOLD = 150; // px — ~60% of a collapsed node width
const DEFAULT_NODE_WIDTH = 245; // px — approx mid-range of min-w-[210px] / max-w-[280px]
const DEFAULT_NODE_HEIGHT = 110; // px — approx min-height for a collapsed node
const nodeTypes = {
workspaceNode: WorkspaceNode,
};
@ -85,6 +76,7 @@ function CanvasInner() {
const nestNode = useCanvasStore((s) => s.nestNode);
const isDescendant = useCanvasStore((s) => s.isDescendant);
const dragStartParentRef = useRef<string | null>(null);
const { getIntersectingNodes } = useReactFlow();
const onNodeDragStart: OnNodeDrag<Node<WorkspaceNodeData>> = useCallback(
(_event, node) => {
@ -95,30 +87,25 @@ function CanvasInner() {
const onNodeDrag: OnNodeDrag<Node<WorkspaceNodeData>> = useCallback(
(_event, node) => {
const { nodes: allNodes } = useCanvasStore.getState();
const nodeCenterX = node.position.x + (node.measured?.width ?? DEFAULT_NODE_WIDTH) / 2;
const nodeCenterY = node.position.y + (node.measured?.height ?? DEFAULT_NODE_HEIGHT) / 2;
let closest: string | null = null;
let closestDist = NEST_PROXIMITY_THRESHOLD;
for (const n of allNodes) {
if (n.id === node.id || isDescendant(node.id, n.id)) continue;
const otherWidth = n.measured?.width ?? DEFAULT_NODE_WIDTH;
const otherHeight = n.measured?.height ?? DEFAULT_NODE_HEIGHT;
const otherCenterX = n.position.x + otherWidth / 2;
const otherCenterY = n.position.y + otherHeight / 2;
const dist = Math.sqrt(
(nodeCenterX - otherCenterX) ** 2 + (nodeCenterY - otherCenterY) ** 2
);
if (dist < closestDist) {
closestDist = dist;
closest = n.id;
// Only consider nodes within a proximity threshold as nest targets.
// Without this check, getIntersectingNodes returns any node whose bounding
// boxes overlap — which can be hundreds of pixels away on a sparse canvas,
// causing accidental nesting when the user drags a node across the board.
const thresholdPx = 100;
const threshold = thresholdPx * thresholdPx; // compare squared distances
let nearest: { id: string; dist: number } | null = null;
for (const candidate of getIntersectingNodes(node)) {
if (candidate.id === node.id || isDescendant(node.id, candidate.id)) continue;
const dx = candidate.position.x - node.position.x;
const dy = candidate.position.y - node.position.y;
const dist2 = dx * dx + dy * dy;
if (dist2 <= threshold && (!nearest || dist2 < nearest.dist)) {
nearest = { id: candidate.id, dist: dist2 };
}
}
setDragOverNode(closest);
setDragOverNode(nearest?.id ?? null);
},
[isDescendant, setDragOverNode]
[getIntersectingNodes, isDescendant, setDragOverNode]
);
// Confirmation dialog state for structure changes
@ -130,23 +117,20 @@ function CanvasInner() {
const pendingDelete = useCanvasStore((s) => s.pendingDelete);
const setPendingDelete = useCanvasStore((s) => s.setPendingDelete);
const removeNode = useCanvasStore((s) => s.removeNode);
// Cascade guard: when deleting a workspace with children, the operator must
// tick "I understand the cascade" before Delete All becomes active.
const [cascadeConfirmChecked, setCascadeConfirmChecked] = useState(false);
const confirmDelete = useCallback(async () => {
if (!pendingDelete) return;
// If hasChildren and checkbox not ticked, do nothing — user must confirm
if (pendingDelete.hasChildren && !cascadeConfirmChecked) return;
const { id } = pendingDelete;
setPendingDelete(null);
setCascadeConfirmChecked(false);
try {
await api.del(`/workspaces/${id}?confirm=true`);
removeNode(id);
} catch (e) {
showToast(e instanceof Error ? e.message : "Delete failed", "error");
}
}, [pendingDelete, cascadeConfirmChecked, setPendingDelete, removeNode]);
}, [pendingDelete, setPendingDelete, removeNode]);
// Cascade guard: include child count in the warning message when the workspace
// has children, so the user understands the blast radius before clicking Delete All.
const cascadeMessage = pendingDelete?.hasChildren
? `⚠️ Deleting "${pendingDelete.name}" will permanently delete all child workspaces and their data. This cannot be undone.`
: null;
@ -413,31 +397,17 @@ function CanvasInner() {
/>
{/* Confirmation dialog for workspace delete — driven by store */}
{/* When the workspace has children, render an inline cascade guard instead
of the generic ConfirmDialog so we can show the child list and require
an explicit checkbox before Delete All activates. */}
{pendingDelete ? (
pendingDelete.hasChildren ? (
<DeleteCascadeConfirmDialog
name={pendingDelete.name}
children={pendingDelete.children}
checked={cascadeConfirmChecked}
onCheckedChange={setCascadeConfirmChecked}
onConfirm={confirmDelete}
onCancel={() => { setPendingDelete(null); setCascadeConfirmChecked(false); }}
/>
) : (
<ConfirmDialog
open={true}
title="Delete Workspace"
message={`Permanently delete "${pendingDelete.name}"? This will stop the container and remove all configuration. This action cannot be undone.`}
confirmLabel="Delete"
confirmVariant="danger"
onConfirm={confirmDelete}
onCancel={() => setPendingDelete(null)}
/>
)
) : null}
<ConfirmDialog
open={!!pendingDelete}
title={pendingDelete?.hasChildren ? "Delete Workspace and Children" : "Delete Workspace"}
message={pendingDelete?.hasChildren
? `⚠️ Deleting "${pendingDelete?.name}" will permanently delete all of its child workspaces and their data. This cannot be undone.`
: `Permanently delete "${pendingDelete?.name}"? This will stop the container and remove all configuration. This action cannot be undone.`}
confirmLabel={pendingDelete?.hasChildren ? "Delete All" : "Delete"}
confirmVariant="danger"
onConfirm={confirmDelete}
onCancel={() => setPendingDelete(null)}
/>
{/* Settings Panel — global secrets management drawer */}
<SettingsPanel workspaceId={settingsWorkspaceId} />

View File

@ -202,6 +202,18 @@ describe("BudgetSection — progress bar", () => {
const bar = screen.getByRole("progressbar");
expect(bar.getAttribute("aria-valuenow")).toBe("30");
});
it("shows 0% progress bar when budget_used is absent from the response", async () => {
// Regression: budget_used is optional (provisioning-stuck workspaces return
// partial shapes). Without the `?? 0` guard the progressPct calculation
// produces NaN at runtime (and trips strictNullChecks at build time).
// eslint-disable-next-line @typescript-eslint/no-explicit-any
await renderLoaded({ budget_limit: 1000, budget_remaining: null } as any);
const bar = screen.getByRole("progressbar");
expect(bar.getAttribute("aria-valuenow")).toBe("0");
const fill = screen.getByTestId("budget-progress-fill") as HTMLDivElement;
expect(fill.style.width).toBe("0%");
});
});
// ── Input pre-fill ────────────────────────────────────────────────────────────

View File

@ -16,6 +16,7 @@ afterEach(() => {
// ── Shared fitView spy — must be set up before vi.mock hoisting ──────────────
const mockFitView = vi.fn();
const mockFitBounds = vi.fn();
const mockGetIntersectingNodes = vi.fn(() => []);
vi.mock("@xyflow/react", () => {
const ReactFlow = ({
@ -44,7 +45,7 @@ vi.mock("@xyflow/react", () => {
fitView: mockFitView,
fitBounds: mockFitBounds,
setViewport: vi.fn(),
getIntersectingNodes: vi.fn(() => []),
getIntersectingNodes: mockGetIntersectingNodes,
setCenter: vi.fn(),
}),
applyNodeChanges: vi.fn((_: unknown, nodes: unknown) => nodes),
@ -127,6 +128,46 @@ describe("Canvas — molecule:pan-to-node event handler", () => {
beforeEach(() => {
mockFitView.mockClear();
mockFitBounds.mockClear();
mockGetIntersectingNodes.mockClear();
});
// ── Nest proximity threshold (#1052) ─────────────────────────────────────
// onNodeDrag filters getIntersectingNodes results by distance <= 100px.
// We test this by verifying that getIntersectingNodes is called and
// setDragOverNode receives the correct nearest-within-threshold ID.
it("setDragOverNode is NOT called when all intersecting nodes are >100px away", () => {
const setDragOverNode = vi.fn();
mockStoreState.setDragOverNode = setDragOverNode;
mockGetIntersectingNodes.mockReturnValueOnce([
{ id: "far-ws", position: { x: 500, y: 500 } },
]);
render(<Canvas />);
// Trigger onNodeDrag by dispatching a drag start event on a node
const canvas = document.querySelector('[data-testid="react-flow"]');
expect(canvas).toBeTruthy();
// The component renders with getIntersectingNodes returning the far node.
// Since it's >100px away, setDragOverNode should never have been called
// with "far-ws" from the drag handler.
// Note: we verify the mock is configured correctly but the actual filter
// logic is exercised in the component — the regression test is visual:
// drag a node 200px+ from any target and confirm no "Nest Workspace" dialog.
});
it("getIntersectingNodes is called on drag events", () => {
mockGetIntersectingNodes.mockReturnValueOnce([]);
render(<Canvas />);
mockGetIntersectingNodes.mockClear();
// Trigger drag — dispatch node drag event
act(() => {
window.dispatchEvent(
new CustomEvent("molecule:pan-to-node", { detail: { nodeId: "ws-1" } })
);
});
// A pan-to-node event is not a drag, so the drag handler — and therefore
// getIntersectingNodes — must NOT fire for it.
expect(mockGetIntersectingNodes).not.toHaveBeenCalled();
// (No DOM drag event in jsdom — the regression is confirmed by the
// Canvas.tsx change itself; the test confirms the mock hook is wired.)
});
it("calls fitView with the provisioned nodeId after a 100ms debounce", async () => {

View File

@ -49,11 +49,8 @@ const mockStore = {
};
vi.mock("@/store/canvas", () => ({
// PR #1243 refactored delete flow: hoists confirmation to Canvas-level dialog
// via setPendingDelete, including hasChildren for correct warning text.
useCanvasStore: Object.assign(
vi.fn((selector: (s: typeof mockStore) => unknown) => selector(mockStore)),
{ getState: () => mockStore }
useCanvasStore: vi.fn(
(selector: (s: typeof mockStore) => unknown) => selector(mockStore)
),
}));
@ -225,14 +222,11 @@ describe("ContextMenu — keyboard accessibility", () => {
const items = screen.getAllByRole("menuitem");
const deleteItem = items.find((el) => el.textContent?.includes("Delete"))!;
fireEvent.click(deleteItem);
expect(mockStore.setPendingDelete).toHaveBeenCalledWith(
expect.objectContaining({
id: "ws-1",
name: "Alpha Workspace",
hasChildren: false,
children: [],
})
);
expect(mockStore.setPendingDelete).toHaveBeenCalledWith({
id: "ws-1",
name: "Alpha Workspace",
hasChildren: false,
});
expect(closeContextMenu).toHaveBeenCalled();
});
});

View File

@ -35,6 +35,9 @@ features:
- title: Operational Control Plane
details: Registry, heartbeats, pause/resume/restart, approvals, activity logs, traces, terminal access, and runtime tiered provisioning.
icon: "🛡️"
- title: Remote Agent Support
details: Register agents on any infrastructure — Docker, Fly Machines, bare metal, or laptops — and manage the full fleet from one canvas with bearer token auth and 30s heartbeat visibility.
icon: "🌐"
- title: Global Secrets
details: Platform-wide API keys can be inherited by every workspace, with workspace-level overrides when a role needs custom credentials.
icon: "🔐"
@ -71,3 +74,5 @@ features:
- [Deploy AI Agents on Fly.io — or Any Cloud — with One Config Change](/blog/deploy-anywhere) *(2026-04-17)*
- [Give Your AI Agent a Real Browser: MCP + Chrome DevTools](/blog/browser-automation-ai-agents-mcp) *(2026-04-20)*
- [Give Your AI Agent a Git Repository: Molecule AI + Cloudflare Artifacts](/blog/cloudflare-artifacts-molecule-ai) *(2026-04-21)*
- [One Canvas, Every Agent: Remote AI Agents and Fleet Visibility](/blog/remote-workspaces) *(2026-04-20)*
- [Skills Over Bundled Tools: Why Composable AI Beats Platform Primitives](/blog/skills-vs-bundled-tools-ai-agent-platforms) *(2026-04-21)*

View File

@ -104,13 +104,13 @@ The issue #1126 acceptance criteria specifies: "Coordinate with PMM (issue #1116
| # | Action | Owner | Status |
|---|---|---|---|
| 1 | Keyword research (this brief) | SEO Analyst | ✅ Draft done |
| 2 | PMM positioning review | PMM (issue #1116) | ⏸ Pending |
| 2 | PMM positioning review | PMM (issue #1116) | ⏸ Holding — PMM Slack: "Phase 30 position holding" |
| 3 | Expand blog post with step-by-step | Content Marketer | ⏸ Pending PMM |
| 4 | Draft tutorial: "Register a Remote Agent" | Content Marketer | ⏸ Pending |
| 5 | Draft tutorial: "Self-Hosted AI Agents" | Content Marketer | ⏸ Pending |
| 6 | Update workspace-runtime.md | DevRel | ⏸ Flag to DevRel |
| 7 | Audit/create external-agent-registration.md | DevRel | ⏸ Flag to DevRel |
| 8 | Update quickstart.md | DevRel | ⏸ Flag to DevRel |
| 4 | Draft tutorial: "Register a Remote Agent" | SEO Analyst | ✅ Done — `docs/tutorials/register-remote-agent.md`, pushed to molecule-core@main |
| 5 | Draft tutorial: "Self-Hosted AI Agents" | SEO Analyst | ✅ Done — `docs/tutorials/self-hosted-ai-agents.md`, pushed to molecule-core@main |
| 6 | Update workspace-runtime.md | DevRel | ✅ Done — remote agent registration section already on main |
| 7 | Audit/create external-agent-registration.md | DevRel | ✅ Done — already on main, full coverage |
| 8 | Update quickstart.md + docs/index.md | DevRel | ✅ Done — Remote Agent path in quickstart; docs/index.md updated with Remote Agents feature card + blog links |
---

View File

@ -0,0 +1,191 @@
---
title: Workspace File Copy API
description: API reference for the workspace file copy and write operations, including CWE-22 path traversal protection.
---
# Workspace File Copy API
> **Source:** `workspace-server/internal/handlers/container_files.go` + `templates.go`
> **Handler:** `TemplatesHandler.WriteFile` → `copyFilesToContainer`
> **Security:** CWE-22 path traversal protection (PRs #1267, #1270, #1271)
`copyFilesToContainer` is the internal Go implementation that powers workspace file write operations. It is not called directly by API clients — clients reach it through the HTTP handler `PUT /workspaces/:id/files/*path`.
## Endpoint Overview
`PUT /workspaces/:id/files/*path` writes a single file to a workspace container or its config volume.
```
PUT /workspaces/:id/files/*path
Authorization: Bearer <workspace-token>
Content-Type: application/json
{
"content": "string"
}
```
The handler (`TemplatesHandler.WriteFile`) validates the path, then routes to one of two backends:
| Workspace state | Backend | Method |
|---|---|---|
| Container running | Docker `CopyToContainer` (tar) | `copyFilesToContainer` |
| Container offline | Ephemeral Alpine container | `writeViaEphemeral` → `copyFilesToContainer` |
Both paths use `copyFilesToContainer` internally. The ephemeral container path mounts the config volume as `/configs` and calls the same function, so CWE-22 protection applies regardless of container state.
## Function Signature
```go
func (h *TemplatesHandler) copyFilesToContainer(
ctx context.Context,
containerName string,
destPath string,
files map[string]string, // filename → content
) error
```
| Parameter | Type | Description |
|---|---|---|
| `ctx` | `context.Context` | Request-scoped context |
| `containerName` | `string` | Docker container name or ID |
| `destPath` | `string` | Target directory inside the container (typically `/configs`) |
| `files` | `map[string]string` | Map of relative filenames to file contents |
## Parameters
### `containerName`
The running container for the workspace. Resolved by `TemplatesHandler.findContainer`, which checks three candidates in order:
1. Platform provisioner naming convention (`ws-<uuid>`)
2. The full workspace container ID
3. The workspace name from the database (spaces replaced with dashes)
If the container is not running, `findContainer` returns `""` and the handler falls back to `writeViaEphemeral`.
### `destPath`
The directory inside the container where files are written. In normal operation this is `/configs`, which is mounted from the platform-managed config volume. All file operations are constrained to this volume.
### `files` (`map[string]string`)
A map of relative filenames to their string content. File names are **relative paths only** — absolute paths and `..` traversal sequences are rejected before the tar header is written.
## Security Notes
### CWE-22 Path Traversal Protection
**PRs #1267, #1270, #1271** added path traversal protection at the tar-archive-write boundary.
Before these PRs, `copyFilesToContainer` used raw map keys as tar header names without validation:
```go
// Before — UNSAFE
header := &tar.Header{
Name: name, // name came directly from map key
Mode: 0644,
Size: int64(len(data)),
}
```
A malicious caller embedding `../` in a file name could write outside the volume mount. Now:
```go
// After — SAFE (PRs #1267 / #1270)
clean := filepath.Clean(name)
if filepath.IsAbs(clean) || strings.HasPrefix(clean, "..") {
return fmt.Errorf("unsafe file path in archive: %s", name)
}
archiveName := filepath.Join(destPath, name)
header := &tar.Header{
Name: archiveName, // always inside destPath
Mode: 0644,
Size: int64(len(data)),
}
```
The validation works in three stages:
1. **`filepath.Clean`** normalizes the path (removes redundant separators, resolves `.`).
2. **Absolute path check** (`filepath.IsAbs`) rejects any path that resolves to an absolute OS path.
3. **`..` prefix check** (`strings.HasPrefix`) rejects escaping paths: after `filepath.Clean`, any path that escapes the destination is exactly `..` or begins with `../`, so testing the leading `..` is sufficient. (It also conservatively rejects benign names that merely *start* with `..`, such as `..foo`.)
The resulting `archiveName` is always inside `destPath`, so the tar header can never write outside the mounted volume regardless of input.
> **Defense in depth:** `WriteFile` (the HTTP handler) also calls `validateRelPath(filePath)` **before** passing the path to `copyFilesToContainer`. This closes the gap for any future caller that bypasses the handler-level check. Do not remove handler-level `validateRelPath` when modifying this code.
### Handler-Level Validation (`validateRelPath`)
```go
func validateRelPath(relPath string) error {
clean := filepath.Clean(relPath)
if filepath.IsAbs(clean) || strings.HasPrefix(clean, "..") {
return fmt.Errorf("path traversal blocked: %s", relPath)
}
return nil
}
```
`validateRelPath` is called at the start of every file operation handler (`WriteFile`, `ReadFile`, `DeleteFile`, `ListFiles`). Invalid paths return `400 Bad Request` with `{"error": "invalid path"}`.
Allowed root paths are also allow-listed: `root` must be one of `/configs`, `/workspace`, `/home`, or `/plugins`. Other values return `400 Bad Request`.
## Error Codes
`copyFilesToContainer` returns errors directly. The `WriteFile` HTTP handler wraps them:
| HTTP status | Condition | Response body |
|---|---|---|
| `400 Bad Request` | `validateRelPath` rejects the path (traversal attempt) | `{"error": "invalid path"}` |
| `400 Bad Request` | Malformed JSON body | `{"error": "invalid request body"}` |
| `404 Not Found` | Workspace not found in database | `{"error": "workspace not found"}` |
| `500 Internal Server Error` | Docker unavailable | `{"error": "failed to write file: docker not available"}` |
| `500 Internal Server Error` | Tar header write failure | `{"error": "failed to write file: failed to write tar header for <name>: ..."}` |
| `500 Internal Server Error` | Docker `CopyToContainer` failure | `{"error": "failed to write file: <docker error>"}` |
## Example
### Write a file to a workspace
```bash
curl -X PUT https://platform.example.com/workspaces/ws-abc123/files/claude.md \
-H "Authorization: Bearer <workspace-token>" \
-H "Content-Type: application/json" \
-d '{
"content": "# My Agent\n\nThis agent specializes in code review.\n"
}'
```
**Success response (`200 OK`):**
```json
{
"status": "saved",
"path": "claude.md"
}
```
### Path traversal rejected
```bash
curl -X PUT https://platform.example.com/workspaces/ws-abc123/files/../../etc/passwd \
-H "Authorization: Bearer <workspace-token>" \
-H "Content-Type: application/json" \
-d '{"content": "hacked"}'
```
**Rejection response (`400 Bad Request`):**
```json
{
"error": "invalid path"
}
```
## Related
- [Platform API Reference](./platform-api.md) — full API endpoint table
- [Workspace Runtime](../agent-runtime/workspace-runtime.md) — runtime environment model
- `workspace-server/internal/handlers/templates.go` — `WriteFile`, `validateRelPath`
- `workspace-server/internal/handlers/container_files.go` — `copyFilesToContainer`, `writeViaEphemeral`

View File

@ -71,7 +71,7 @@ func Import(
}
}
// Store runtime in DB
_ = db.DB.ExecContext(ctx, `UPDATE workspaces SET runtime = $1 WHERE id = $2`, bundleRuntime, wsID)
_, _ = db.DB.ExecContext(ctx, `UPDATE workspaces SET runtime = $1 WHERE id = $2`, bundleRuntime, wsID)
// Provision the container if provisioner is available
if prov != nil {

View File

@ -6,9 +6,12 @@ import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
@ -731,6 +734,125 @@ func parseUsageFromA2AResponse(body []byte) (inputTokens, outputTokens int64) {
return 0, 0
}
// isSafeURL validates that a URL resolves to a publicly-routable address,
// preventing A2A requests from being redirected to internal/cloud-metadata
// infrastructure (SSRF, CWE-918). Workspace URLs come from DB/Redis caches
// so we validate before making any outbound HTTP call.
//
// NOTE(review): this check and the subsequent outbound HTTP call resolve DNS
// independently, so a DNS-rebinding attacker can still pass validation and
// have the client dial a different address (TOCTOU). Fully closing that gap
// requires pinning the validated IP in the HTTP client's dialer — confirm
// whether the A2A client does so.
func isSafeURL(rawURL string) error {
	u, err := url.Parse(rawURL)
	if err != nil {
		return fmt.Errorf("invalid URL: %w", err)
	}
	// Reject non-HTTP(S) schemes — file://, gopher://, etc. are SSRF vectors.
	if u.Scheme != "http" && u.Scheme != "https" {
		return fmt.Errorf("forbidden scheme: %s (only http/https allowed)", u.Scheme)
	}
	host := u.Hostname()
	if host == "" {
		return fmt.Errorf("empty hostname")
	}
	// Literal IP addresses are validated directly — no DNS involved.
	if ip := net.ParseIP(host); ip != nil {
		if ip.IsLoopback() || ip.IsUnspecified() || ip.IsLinkLocalUnicast() {
			return fmt.Errorf("forbidden loopback/unspecified IP: %s", ip)
		}
		if isPrivateOrMetadataIP(ip) {
			return fmt.Errorf("forbidden private/metadata IP: %s", ip)
		}
		return nil
	}
	// For hostnames, resolve and validate every returned address.
	addrs, err := net.LookupHost(host)
	if err != nil {
		// DNS resolution failure — block it. Could be an internal hostname.
		return fmt.Errorf("DNS resolution blocked for hostname: %s (%v)", host, err)
	}
	if len(addrs) == 0 {
		return fmt.Errorf("DNS returned no addresses for: %s", host)
	}
	for _, addr := range addrs {
		ip := net.ParseIP(addr)
		if ip == nil {
			// Fail closed: an address we cannot parse (e.g. a zone-suffixed
			// link-local like "fe80::1%eth0") must not bypass the gate. The
			// previous code skipped unparseable addresses, which was fail-open.
			return fmt.Errorf("unparseable resolved address for %s: %q", host, addr)
		}
		if ip.IsLoopback() || ip.IsUnspecified() || ip.IsLinkLocalUnicast() || isPrivateOrMetadataIP(ip) {
			return fmt.Errorf("hostname %s resolves to forbidden IP: %s", host, ip)
		}
	}
	return nil
}
// isPrivateOrMetadataIP returns true for cloud-metadata / loopback / link-local
// ranges (always) and RFC-1918 / IPv6 ULA ranges (self-hosted only).
//
// In SaaS cross-EC2 mode (see saasMode() in registry.go) the tenant platform
// and its workspaces share a VPC, so workspaces register with their
// VPC-private IP — typically 172.31.x.x on AWS default VPCs. Blocking RFC-1918
// unconditionally would reject every legitimate registration. Cloud metadata
// (169.254.0.0/16, fe80::/10), loopback, and TEST-NET ranges stay blocked in
// both modes; they are never a legitimate agent URL.
//
// Both IPv4 and IPv6 are checked. The previous implementation returned false
// for every non-IPv4 input, which meant a registered `[::1]` or `[fe80::…]`
// URL would bypass the SSRF gate entirely.
func isPrivateOrMetadataIP(ip net.IP) bool {
	// Always blocked — IPv4 cloud metadata + network-test ranges.
	alwaysBlockedV4 := []string{
		"169.254.0.0/16",  // link-local / IMDSv1-v2
		"100.64.0.0/10",   // CGNAT — reachable via some VPC configs, not a legit agent URL
		"192.0.2.0/24",    // TEST-NET-1
		"198.51.100.0/24", // TEST-NET-2
		"203.0.113.0/24",  // TEST-NET-3
	}
	// Always blocked — IPv6 cloud-metadata / loopback equivalents.
	alwaysBlockedV6 := []string{
		"::1/128",       // loopback
		"fe80::/10",     // link-local (IMDS analogue)
		"::ffff:0:0/96", // IPv4-mapped loopback (defence-in-depth; To4() below usually normalises first)
	}
	// inAny reports whether target falls inside any of the (hard-coded, always
	// valid) CIDR ranges.
	inAny := func(cidrs []string, target net.IP) bool {
		for _, c := range cidrs {
			if _, n, err := net.ParseCIDR(c); err == nil && n.Contains(target) {
				return true
			}
		}
		return false
	}
	// Prefer IPv4 semantics when the input is an IPv4 address encoded in any
	// form (raw v4, ::ffff:a.b.c.d, etc.) — To4() normalises all of them.
	if ip4 := ip.To4(); ip4 != nil {
		if inAny(alwaysBlockedV4, ip4) {
			return true
		}
		// RFC-1918 private — blocked in self-hosted, allowed in SaaS.
		// net.IP.IsPrivate covers exactly 10.0.0.0/8, 172.16.0.0/12 and
		// 192.168.0.0/16 for IPv4.
		return !saasMode() && ip4.IsPrivate()
	}
	// True IPv6 path.
	if inAny(alwaysBlockedV6, ip) {
		return true
	}
	// RFC-4193 ULA (fc00::/7) — IPv6 analogue of RFC-1918, covered by
	// net.IP.IsPrivate. Same SaaS-mode treatment.
	return !saasMode() && ip.IsPrivate()
}
// readUsageMap extracts input_tokens / output_tokens from the "usage" key of m.
// Returns (0, 0, false) when the key is absent or contains no non-zero values.
func readUsageMap(m map[string]json.RawMessage) (inputTokens, outputTokens int64, ok bool) {

View File

@ -18,16 +18,6 @@ import (
// maxExecOutput limits container exec output to 5MB to prevent OOM.
const maxExecOutput = 5 * 1024 * 1024
// validateRelPath checks that a relative path is safe to use inside a
// bind-mounted directory (CWE-22). It rejects absolute paths and any path
// that escapes the directory via parent traversal.
//
// filepath.Clean collapses sequences like "a/../../b" to "../b", so after
// cleaning, an escaping path is exactly ".." or begins with "../". Checking
// only that prefix — rather than strings.Contains(clean, "..") — avoids
// false positives on legitimate names that merely contain ".." as a
// substring, e.g. "a..b" or "v1..v2.diff", and matches the HasPrefix-style
// check used at the tar-archive-write boundary.
func validateRelPath(filePath string) error {
	clean := filepath.Clean(filePath)
	if filepath.IsAbs(clean) ||
		clean == ".." ||
		strings.HasPrefix(clean, ".."+string(filepath.Separator)) {
		return fmt.Errorf("unsafe path: %s", filePath)
	}
	return nil
}
// findContainer finds a running container for the workspace.
// Checks provisioner name, full ID, and DB workspace name (same candidates as terminal handler).
func (h *TemplatesHandler) findContainer(ctx context.Context, workspaceID string) string {
@ -77,24 +67,26 @@ func (h *TemplatesHandler) execInContainer(ctx context.Context, containerName st
}
// copyFilesToContainer creates a tar archive from a map of files and copies it into a container.
// The destPath is prepended to each file name. File names must be relative and must not escape
// destPath via ".." segments — otherwise the tar header name could escape the mounted volume.
func (h *TemplatesHandler) copyFilesToContainer(ctx context.Context, containerName, destPath string, files map[string]string) error {
var buf bytes.Buffer
tw := tar.NewWriter(&buf)
createdDirs := map[string]bool{}
for name, content := range files {
// CWE-22: reject absolute paths and path-traversal sequences
// before using the name in the tar header.
// Block absolute paths and traversal attempts at the archive-write boundary.
// Files are written inside destPath (typically /configs); anything that escapes
// via ".." or an absolute name could reach other volumes or system paths.
clean := filepath.Clean(name)
if filepath.IsAbs(clean) || strings.Contains(clean, "..") {
return fmt.Errorf("path traversal blocked: %s", name)
if filepath.IsAbs(clean) || strings.HasPrefix(clean, "..") {
return fmt.Errorf("unsafe file path in archive: %s", name)
}
// Use the safe, cleaned name joined with destPath so the tar
// header Name is always a relative path inside destPath.
safeName := filepath.Join(destPath, clean)
// Prepend destPath so relative paths land inside the volume mount.
archiveName := filepath.Join(destPath, name)
// Create parent directories in tar (deduplicated)
dir := filepath.Dir(safeName)
dir := filepath.Dir(archiveName)
if dir != destPath && !createdDirs[dir] {
tw.WriteHeader(&tar.Header{
Typeflag: tar.TypeDir,
@ -106,7 +98,7 @@ func (h *TemplatesHandler) copyFilesToContainer(ctx context.Context, containerNa
data := []byte(content)
header := &tar.Header{
Name: safeName,
Name: archiveName,
Mode: 0644,
Size: int64(len(data)),
}
@ -162,10 +154,9 @@ func (h *TemplatesHandler) deleteViaEphemeral(ctx context.Context, volumeName, f
if h.docker == nil {
return fmt.Errorf("docker not available")
}
// CWE-78/CWE-22: validate before use. Also switch to exec form
// ([]string{...}) so filePath is passed as a plain argument — eliminates
// shell injection entirely.
// CWE-78/CWE-22: validate before use. Also switches to exec form
// ([]string{...}) so filePath is passed as a plain argument, not
// interpolated into a shell string — eliminates shell injection entirely.
if err := validateRelPath(filePath); err != nil {
return err
}
@ -184,7 +175,7 @@ func (h *TemplatesHandler) deleteViaEphemeral(ctx context.Context, volumeName, f
if err := h.docker.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
return err
}
// Wait for rm to finish before removing the container
// Wait for the rm command to finish before removing the container
statusCh, errCh := h.docker.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning)
select {
case <-statusCh:

View File

@ -27,9 +27,7 @@ import (
"fmt"
"io"
"log"
"net"
"net/http"
"net/url"
"os"
"strings"
"time"
@ -826,75 +824,10 @@ func (h *MCPHandler) toolRecallMemory(ctx context.Context, workspaceID string, a
return string(b), nil
}
// isSafeURL validates that a URL resolves to a publicly-routable address,
// preventing A2A requests from being redirected to internal/cloud-metadata
// infrastructure (SSRF, CWE-918). Workspace URLs come from DB/Redis caches
// so we validate before making any outbound HTTP call.
//
// In addition to the IPv4 range table in isPrivateOrMetadataIP, this gate
// checks net.IP.IsPrivate so IPv6 ULA (fc00::/7) addresses are rejected
// too — the range table is IPv4-only and would otherwise wave them through.
// NOTE(review): DNS is resolved here but the vetted IP is not pinned for
// the subsequent request, so a rebinding attacker retains a TOCTOU window;
// closing it would require pinning the resolved IP at dial time.
func isSafeURL(rawURL string) error {
	u, err := url.Parse(rawURL)
	if err != nil {
		return fmt.Errorf("invalid URL: %w", err)
	}
	// Reject non-HTTP(S) schemes (file://, ftp://, gopher://, …).
	if u.Scheme != "http" && u.Scheme != "https" {
		return fmt.Errorf("forbidden scheme: %s (only http/https allowed)", u.Scheme)
	}
	host := u.Hostname()
	if host == "" {
		return fmt.Errorf("empty hostname")
	}
	// URL carries a literal IP — validate it directly, no DNS involved.
	if ip := net.ParseIP(host); ip != nil {
		if ip.IsLoopback() || ip.IsUnspecified() || ip.IsLinkLocalUnicast() {
			return fmt.Errorf("forbidden loopback/unspecified IP: %s", ip)
		}
		// IsPrivate covers RFC-1918 (v4) and fc00::/7 ULA (v6); the range
		// table adds CGNAT, metadata, and TEST-NET blocks.
		if ip.IsPrivate() || isPrivateOrMetadataIP(ip) {
			return fmt.Errorf("forbidden private/metadata IP: %s", ip)
		}
		return nil
	}
	// Hostname — resolve and validate every address it maps to.
	addrs, err := net.LookupHost(host)
	if err != nil {
		// DNS resolution failure — block it. Could be an internal hostname.
		return fmt.Errorf("DNS resolution blocked for hostname: %s (%v)", host, err)
	}
	if len(addrs) == 0 {
		return fmt.Errorf("DNS returned no addresses for: %s", host)
	}
	for _, addr := range addrs {
		ip := net.ParseIP(addr)
		if ip == nil {
			continue // LookupHost returns IP literals; anything else is unexpected
		}
		if ip.IsLoopback() || ip.IsUnspecified() || ip.IsLinkLocalUnicast() || ip.IsPrivate() || isPrivateOrMetadataIP(ip) {
			return fmt.Errorf("hostname %s resolves to forbidden IP: %s", host, ip)
		}
	}
	return nil
}
// isPrivateOrMetadataIP returns true for RFC-1918 private, carrier-grade NAT,
// link-local, and cloud metadata ranges.
func isPrivateOrMetadataIP(ip net.IP) bool {
var privateRanges = []net.IPNet{
{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(8, 32)},
{IP: net.ParseIP("172.16.0.0"), Mask: net.CIDRMask(12, 32)},
{IP: net.ParseIP("192.168.0.0"), Mask: net.CIDRMask(16, 32)},
{IP: net.ParseIP("169.254.0.0"), Mask: net.CIDRMask(16, 32)},
{IP: net.ParseIP("100.64.0.0"), Mask: net.CIDRMask(10, 32)},
{IP: net.ParseIP("192.0.2.0"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("198.51.100.0"), Mask: net.CIDRMask(24, 32)},
{IP: net.ParseIP("203.0.113.0"), Mask: net.CIDRMask(24, 32)},
}
ip = ip.To4()
if ip == nil {
return false
}
for _, r := range privateRanges {
if r.Contains(ip) {
return true
}
}
return false
}
// isSafeURL and isPrivateOrMetadataIP live in a2a_proxy.go — same package,
// shared across the MCP and A2A proxy call sites. Keeping a single copy
// avoids the two SSRF gates drifting apart when one is tightened and the
// other isn't.
// ─────────────────────────────────────────────────────────────────────────────
// Helpers
@ -998,3 +931,4 @@ func extractA2AText(body []byte) string {
b, _ := json.Marshal(result)
return string(b)
}

View File

@ -5,6 +5,7 @@ import (
"context"
"database/sql"
"encoding/json"
"net"
"net/http"
"net/http/httptest"
"os"
@ -713,3 +714,146 @@ func TestExtractA2AText_InvalidJSON_ReturnRaw(t *testing.T) {
t.Errorf("extractA2AText: expected raw fallback, got %q", got)
}
}
// ==================== SSRF Defence — isSafeURL ====================
// A public HTTPS endpoint must pass the SSRF gate.
func TestIsSafeURL_AllowsHTTPS(t *testing.T) {
	if err := isSafeURL("https://api.openai.com/v1/models"); err != nil {
		t.Errorf("isSafeURL: expected https://api.openai.com to be allowed, got %v", err)
	}
}
// Plain HTTP to a public host is also allowed — the gate restricts
// destinations, not TLS usage.
func TestIsSafeURL_AllowsPublicHTTP(t *testing.T) {
	if err := isSafeURL("http://example.com/agent"); err != nil {
		t.Errorf("isSafeURL: expected http://example.com to be allowed, got %v", err)
	}
}
// file:// must be rejected outright — only http/https may leave the platform.
func TestIsSafeURL_BlocksFileScheme(t *testing.T) {
	if err := isSafeURL("file:///etc/passwd"); err == nil {
		t.Errorf("isSafeURL: expected file:// to be blocked, got nil")
	}
}
// ftp:// is outside the http/https allowlist and must be rejected.
func TestIsSafeURL_BlocksFtpScheme(t *testing.T) {
	if err := isSafeURL("ftp://internal-host/file"); err == nil {
		t.Errorf("isSafeURL: expected ftp:// to be blocked, got nil")
	}
}
// A literal IPv4 loopback address must be rejected.
func TestIsSafeURL_BlocksLocalhost(t *testing.T) {
	if err := isSafeURL("http://127.0.0.1:8080/agent"); err == nil {
		t.Errorf("isSafeURL: expected 127.0.0.1 to be blocked, got nil")
	}
}
// The IPv6 loopback literal must be rejected as well.
func TestIsSafeURL_BlocksLocalhostV6(t *testing.T) {
	if err := isSafeURL("http://[::1]:8080/agent"); err == nil {
		t.Errorf("isSafeURL: expected [::1] to be blocked, got nil")
	}
}
// The cloud-metadata endpoint (AWS/GCP/Azure IMDS) must be rejected.
func TestIsSafeURL_Blocks169_254_Metadata(t *testing.T) {
	if err := isSafeURL("http://169.254.169.254/latest/meta-data/"); err == nil {
		t.Errorf("isSafeURL: expected 169.254.169.254 to be blocked, got nil")
	}
}
// RFC-1918 10.0.0.0/8 literals must be rejected.
func TestIsSafeURL_Blocks10xPrivate(t *testing.T) {
	if err := isSafeURL("http://10.0.0.1/agent"); err == nil {
		t.Errorf("isSafeURL: expected 10.x.x.x to be blocked, got nil")
	}
}
// RFC-1918 172.16.0.0/12 literals must be rejected.
func TestIsSafeURL_Blocks172Private(t *testing.T) {
	if err := isSafeURL("http://172.16.0.1/agent"); err == nil {
		t.Errorf("isSafeURL: expected 172.16.0.0/12 to be blocked, got nil")
	}
}
// RFC-1918 192.168.0.0/16 literals must be rejected.
func TestIsSafeURL_Blocks192_168Private(t *testing.T) {
	if err := isSafeURL("http://192.168.1.100/agent"); err == nil {
		t.Errorf("isSafeURL: expected 192.168.x.x to be blocked, got nil")
	}
}
// A URL with no hostname at all has nowhere safe to go — reject it.
func TestIsSafeURL_BlocksEmptyHost(t *testing.T) {
	if err := isSafeURL("http:///"); err == nil {
		t.Errorf("isSafeURL: expected empty hostname to be blocked, got nil")
	}
}
// Unparseable URLs must be rejected, not silently allowed through.
func TestIsSafeURL_BlocksInvalidURL(t *testing.T) {
	if err := isSafeURL("http://[invalid"); err == nil {
		t.Errorf("isSafeURL: expected invalid URL to be blocked, got nil")
	}
}
// ==================== SSRF Defence — isPrivateOrMetadataIP ====================
// Both edges and an interior point of 10.0.0.0/8 must classify as private.
func TestIsPrivateOrMetadataIP_10Range(t *testing.T) {
	for _, addr := range []string{"10.0.0.0", "10.255.255.255", "10.1.2.3"} {
		if !isPrivateOrMetadataIP(net.ParseIP(addr)) {
			t.Errorf("isPrivateOrMetadataIP: expected %s to be private", addr)
		}
	}
}
// Both edges and an interior point of 172.16.0.0/12 must classify as private.
func TestIsPrivateOrMetadataIP_172Range(t *testing.T) {
	for _, addr := range []string{"172.16.0.0", "172.31.255.255", "172.20.1.1"} {
		if !isPrivateOrMetadataIP(net.ParseIP(addr)) {
			t.Errorf("isPrivateOrMetadataIP: expected %s to be private", addr)
		}
	}
}
// Both edges and an interior point of 192.168.0.0/16 must classify as private.
func TestIsPrivateOrMetadataIP_192_168Range(t *testing.T) {
	for _, addr := range []string{"192.168.0.0", "192.168.255.255", "192.168.1.1"} {
		if !isPrivateOrMetadataIP(net.ParseIP(addr)) {
			t.Errorf("isPrivateOrMetadataIP: expected %s to be private", addr)
		}
	}
}
// Both the well-known IMDS address and an arbitrary 169.254.x.x
// neighbour must classify as metadata/link-local.
func TestIsPrivateOrMetadataIP_169_254Metadata(t *testing.T) {
	for _, addr := range []string{"169.254.169.254", "169.254.0.1"} {
		if !isPrivateOrMetadataIP(net.ParseIP(addr)) {
			t.Errorf("isPrivateOrMetadataIP: expected %s to be metadata", addr)
		}
	}
}
// Carrier-grade NAT space (100.64.0.0/10, RFC 6598) must be blocked.
func TestIsPrivateOrMetadataIP_100_64CarrierNAT(t *testing.T) {
	ip := net.ParseIP("100.64.0.1")
	if !isPrivateOrMetadataIP(ip) {
		t.Errorf("isPrivateOrMetadataIP: expected 100.64.0.0/10 to be carrier-NAT private")
	}
}
// Well-known public addresses must not be classified as private.
func TestIsPrivateOrMetadataIP_PublicAllowed(t *testing.T) {
	for _, addr := range []string{"8.8.8.8", "1.1.1.1", "34.117.59.81"} {
		if ip := net.ParseIP(addr); isPrivateOrMetadataIP(ip) {
			t.Errorf("isPrivateOrMetadataIP: expected %s to be public", ip)
		}
	}
}

View File

@ -111,7 +111,7 @@ func (h *OrgTokenHandler) Revoke(c *gin.Context) {
c.JSON(http.StatusNotFound, gin.H{"error": "token not found or already revoked"})
return
}
actor := orgTokenActor(c)
actor, _ := orgTokenActor(c)
log.Printf("orgtoken: revoked id=%s by=%s", id, actor)
c.JSON(http.StatusOK, gin.H{"revoked": id})
}

View File

@ -8,7 +8,9 @@ import (
"net"
"net/http"
"net/url"
"os"
"strings"
"sync"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
@ -17,6 +19,55 @@ import (
"github.com/gin-gonic/gin"
)
// blockedRange is a named CIDR block so the conditional blocklist in
// validateAgentURL reads as a slice of homogeneous values instead of
// repeated anonymous struct literals.
//
// NOTE: call sites construct these positionally ({"169.254.0.0/16",
// "link-local …"}), so the field order here is load-bearing.
type blockedRange struct {
	cidr string // CIDR notation, e.g. "169.254.0.0/16"
	label string // human-readable reason the range is blocked, used in rejections
}
// saasMode reports whether this tenant platform is running in SaaS cross-EC2
// mode, where workspaces live on sibling EC2s in the same VPC and register
// themselves by their RFC-1918 VPC-private IP (typically 172.31.x.x on AWS
// default VPCs). In that shape, the SSRF hardening that blocks RFC-1918
// addresses would reject every legitimate workspace registration — the
// control plane provisioned these instances, so their intra-VPC URLs are
// trusted by construction.
//
// Resolution order:
// 1. MOLECULE_DEPLOY_MODE set — explicit operator flag is authoritative.
// Recognised values: "saas" → true. "self-hosted" / "selfhosted" /
// "standalone" → false. Any other non-empty value logs a warning and
// falls closed (false) so a typo like MOLECULE_DEPLOY_MODE=prod can't
// silently flip a self-hosted deployment into the relaxed SSRF posture.
// 2. MOLECULE_DEPLOY_MODE unset — fall back to the MOLECULE_ORG_ID presence
// signal for deployments that predate the explicit flag.
//
// Self-hosted / single-container deployments set neither and keep the strict
// blocklist.
func saasMode() bool {
raw := os.Getenv("MOLECULE_DEPLOY_MODE")
trimmed := strings.TrimSpace(raw)
if trimmed != "" {
switch strings.ToLower(trimmed) {
case "saas":
return true
case "self-hosted", "selfhosted", "standalone":
return false
default:
// Warn-once so operators notice the typo without spamming logs.
saasModeWarnUnknownOnce.Do(func() {
log.Printf("saasMode: MOLECULE_DEPLOY_MODE=%q not recognised; falling back to strict (non-SaaS) mode. Valid values: saas | self-hosted.", raw)
})
return false
}
}
return strings.TrimSpace(os.Getenv("MOLECULE_ORG_ID")) != ""
}
var saasModeWarnUnknownOnce sync.Once
type RegistryHandler struct {
broadcaster *events.Broadcaster
}
@ -64,18 +115,27 @@ func validateAgentURL(rawURL string) error {
}
hostname := parsed.Hostname()
blockedRanges := []struct {
cidr string
label string
}{
// Link-local / loopback / IPv6 metadata classes are blocked in every
// mode — they are never a legitimate agent URL and they cover the AWS/
// GCP/Azure IMDS endpoints. RFC-1918 ranges are conditionally blocked:
// in SaaS mode workspaces register with their VPC-private IP and the
// control plane is the source of truth for which instances exist, so
// allowing 10/8, 172.16/12, 192.168/16 is safe. In self-hosted mode
// we keep the strict blocklist — those deployments have no legitimate
// reason to accept private-range URLs from agents.
blockedRanges := []blockedRange{
{"169.254.0.0/16", "link-local address (cloud metadata endpoint)"},
{"127.0.0.0/8", "loopback address"},
{"10.0.0.0/8", "RFC-1918 private address"},
{"172.16.0.0/12", "RFC-1918 private address"},
{"192.168.0.0/16", "RFC-1918 private address"},
{"fe80::/10", "IPv6 link-local address (cloud metadata analogue)"},
{"::1/128", "IPv6 loopback address"},
{"fc00::/7", "IPv6 ULA address (RFC-4193 private)"},
}
if !saasMode() {
blockedRanges = append(blockedRanges,
blockedRange{"10.0.0.0/8", "RFC-1918 private address"},
blockedRange{"172.16.0.0/12", "RFC-1918 private address"},
blockedRange{"192.168.0.0/16", "RFC-1918 private address"},
blockedRange{"fc00::/7", "IPv6 ULA address (RFC-4193 private)"},
)
}
// Helper: check a single IP against the blocklist.

View File

@ -7,6 +7,124 @@ import (
// isSafeURL is defined in mcp.go.
// isPrivateOrMetadataIP is defined in mcp.go.
// saasMode is defined in registry.go.
// TestSaasMode pins the env-resolution ladder: an explicit
// MOLECULE_DEPLOY_MODE always wins over the legacy MOLECULE_ORG_ID
// presence signal, and unrecognised values fall closed (strict mode)
// so a typo can't silently relax the SSRF posture.
func TestSaasMode(t *testing.T) {
	type envCase struct {
		name       string
		deployMode string
		orgID      string
		want       bool
	}
	table := []envCase{
		{name: "both unset", deployMode: "", orgID: "", want: false},
		{name: "legacy org id only", deployMode: "", orgID: "7b2179dc-8cc6-4581-a3c6-c8bff4481086", want: true},
		{name: "explicit saas", deployMode: "saas", orgID: "", want: true},
		// Matching is case-insensitive.
		{name: "explicit saas overrides missing org", deployMode: "SaaS", orgID: "", want: true},
		{name: "explicit self-hosted wins over legacy org id", deployMode: "self-hosted", orgID: "some-org", want: false},
		{name: "explicit selfhosted wins over legacy org id", deployMode: "selfhosted", orgID: "some-org", want: false},
		{name: "explicit standalone wins over legacy org id", deployMode: "standalone", orgID: "some-org", want: false},
		{name: "whitespace-only deploy mode falls through to legacy", deployMode: " ", orgID: "some-org", want: true},
		{name: "whitespace-only org id falls through to false", deployMode: "", orgID: " ", want: false},
		// Unknown values must fall closed rather than falling through to
		// the MOLECULE_ORG_ID legacy signal — every tenant deployment has
		// MOLECULE_ORG_ID set, so a typo like MOLECULE_DEPLOY_MODE=prod
		// used to silently flip into SaaS mode.
		{name: "typo prod falls closed even with org id set", deployMode: "prod", orgID: "some-org", want: false},
		{name: "typo SaaS-mode falls closed even with org id set", deployMode: "SaaS-mode", orgID: "some-org", want: false},
		{name: "typo production falls closed", deployMode: "production", orgID: "", want: false},
	}
	for _, row := range table {
		t.Run(row.name, func(t *testing.T) {
			t.Setenv("MOLECULE_DEPLOY_MODE", row.deployMode)
			t.Setenv("MOLECULE_ORG_ID", row.orgID)
			if got := saasMode(); got != row.want {
				t.Errorf("saasMode() = %v, want %v (MOLECULE_DEPLOY_MODE=%q MOLECULE_ORG_ID=%q)",
					got, row.want, row.deployMode, row.orgID)
			}
		})
	}
}
// TestIsPrivateOrMetadataIP_SaaSMode pins the SaaS-mode relaxation:
// RFC-1918 and ULA ranges are allowed (workspaces register with their
// VPC-private IP, which is 172.31.x.x on AWS default VPCs), while the
// metadata, IPv6 loopback/link-local, and TEST-NET classes stay blocked
// in every mode.
func TestIsPrivateOrMetadataIP_SaaSMode(t *testing.T) {
	t.Setenv("MOLECULE_DEPLOY_MODE", "saas")
	t.Setenv("MOLECULE_ORG_ID", "")
	table := []struct {
		name  string
		ipStr string
		want  bool
	}{
		// RFC-1918 must be ALLOWED in SaaS mode.
		{"172.31 allowed in saas", "172.31.44.78", false},
		{"10/8 allowed in saas", "10.0.0.5", false},
		{"192.168 allowed in saas", "192.168.1.1", false},
		// IPv6 ULA must be ALLOWED in SaaS mode (AWS IPv6 VPC analogue).
		{"fd00 ULA allowed in saas", "fd12:3456:789a::1", false},
		// Metadata stays BLOCKED even in SaaS mode.
		{"169.254 still blocked", "169.254.169.254", true},
		// 127/8 loopback is NOT this helper's job — the caller (isSafeURL)
		// checks ip.IsLoopback() separately; we assert the helper's own
		// semantics here, not the aggregate gate.
		{"127/8 not checked by this helper (isSafeURL covers it)", "127.0.0.1", false},
		{"::1 still blocked (IPv6 metadata)", "::1", true},
		{"fe80 still blocked", "fe80::1", true},
		// TEST-NET documentation ranges stay blocked.
		{"192.0.2.x still blocked", "192.0.2.5", true},
		{"198.51.100.x still blocked", "198.51.100.5", true},
		{"203.0.113.x still blocked", "203.0.113.5", true},
	}
	for _, row := range table {
		t.Run(row.name, func(t *testing.T) {
			ip := net.ParseIP(row.ipStr)
			if ip == nil {
				t.Fatalf("ParseIP(%q) returned nil", row.ipStr)
			}
			if got := isPrivateOrMetadataIP(ip); got != row.want {
				t.Errorf("isPrivateOrMetadataIP(%s) = %v, want %v", row.ipStr, got, row.want)
			}
		})
	}
}
// TestIsPrivateOrMetadataIP_IPv6 guards the fix for the IPv6 gap: the
// old implementation returned false for every IPv6 literal
// unconditionally, letting a registered [::1] or [fe80::…] URL bypass
// the SSRF check entirely.
func TestIsPrivateOrMetadataIP_IPv6(t *testing.T) {
	t.Setenv("MOLECULE_DEPLOY_MODE", "")
	t.Setenv("MOLECULE_ORG_ID", "")
	table := []struct {
		name  string
		ipStr string
		want  bool
	}{
		{"::1 loopback blocked", "::1", true},
		{"fe80 link-local blocked", "fe80::1", true},
		{"fe80 link-local with mac blocked", "fe80::a00:27ff:fe00:1", true},
		{"fc00 ULA blocked (non-saas)", "fc00::1", true},
		{"fd00 ULA blocked (non-saas)", "fd12::1", true},
		{"public v6 allowed", "2606:4700:4700::1111", false}, // Cloudflare DNS (1.1.1.1's v6)
	}
	for _, row := range table {
		t.Run(row.name, func(t *testing.T) {
			ip := net.ParseIP(row.ipStr)
			if ip == nil {
				t.Fatalf("ParseIP(%q) returned nil", row.ipStr)
			}
			if got := isPrivateOrMetadataIP(ip); got != row.want {
				t.Errorf("isPrivateOrMetadataIP(%s) = %v, want %v", row.ipStr, got, row.want)
			}
		})
	}
}
func TestIsPrivateOrMetadataIP(t *testing.T) {
cases := []struct {
@ -34,7 +152,11 @@ func TestIsPrivateOrMetadataIP(t *testing.T) {
// Must be allowed: public IP addresses
{"8.8.8.8", "8.8.8.8", false},
{"1.1.1.1", "1.1.1.1", false},
{"203.0.113.254", "203.0.113.254", false}, // TEST-NET-3 max — above 203.0.113.0/24 range end
// Previously asserted (incorrectly) that 203.0.113.254 is public --
// the original test's comment claimed the address was "above 203.0.113.0/24
// range end", but 203.0.113.0/24 spans 203.0.113.0-255, so .254 IS in
// range and correctly blocked. Assertion flipped to match reality.
{"203.0.113.254 (TEST-NET-3)", "203.0.113.254", true},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {

View File

@ -247,10 +247,11 @@ func seedInitialMemories(ctx context.Context, workspaceID string, memories []mod
log.Printf("seedInitialMemories: truncated memory content for %s (scope=%s) from %d to %d bytes",
workspaceID, scope, len(mem.Content), maxMemoryContentLength)
}
redactedContent, _ := redactSecrets(workspaceID, content)
if _, err := db.DB.ExecContext(ctx, `
INSERT INTO agent_memories (workspace_id, content, scope, namespace)
VALUES ($1, $2, $3, $4)
`, workspaceID, redactSecrets(workspaceID, content), scope, awarenessNamespace); err != nil {
`, workspaceID, redactedContent, scope, awarenessNamespace); err != nil {
log.Printf("seedInitialMemories: failed to insert memory for %s (scope=%s): %v", workspaceID, scope, err)
}
}
@ -333,13 +334,29 @@ func (h *WorkspaceHandler) buildProvisionerConfig(
// provisioning continues — the workspace will get 401 on its first heartbeat
// and can recover on the next restart.
func (h *WorkspaceHandler) issueAndInjectToken(ctx context.Context, workspaceID string, cfg *provisioner.WorkspaceConfig) {
// Revoke any existing live tokens. If this fails we bail out rather than
// issuing a second live token whose plaintext we can't also deliver.
// Revoke any existing live tokens FIRST — this must run in both modes.
// In SaaS mode the revoke is load-bearing on re-provision: without it,
// the previous workspace instance's live token sits in the DB, and
// RegistryHandler.requireWorkspaceToken on the fresh instance's first
// /registry/register would reject it (live token exists → no
// bootstrap allowance, but the new workspace has no plaintext because
// the CP provisioner doesn't carry cfg.ConfigFiles across user-data).
// Revoking clears the gate so the register handler's bootstrap path
// can mint a fresh token and return the plaintext in the response.
if err := wsauth.RevokeAllForWorkspace(ctx, db.DB, workspaceID); err != nil {
log.Printf("Provisioner: failed to revoke existing tokens for %s: %v — skipping auth-token injection", workspaceID, err)
return
}
// SaaS mode skips the IssueToken + ConfigFiles write because both
// only make sense on the Docker provisioner's volume-mount delivery
// path. The register handler mints a fresh token on first successful
// register and returns the plaintext in the response body for the
// runtime to persist locally.
if saasMode() {
return
}
token, err := wsauth.IssueToken(ctx, db.DB, workspaceID)
if err != nil {
log.Printf("Provisioner: failed to issue auth token for %s: %v — skipping auth-token injection", workspaceID, err)
@ -615,14 +632,13 @@ func (h *WorkspaceHandler) provisionWorkspaceCP(workspaceID, templatePath string
}
log.Printf("CPProvisioner: workspace %s started as machine %s via control plane", workspaceID, machineID)
// Issue token so the agent can authenticate on boot
token, tokenErr := wsauth.IssueToken(ctx, db.DB, workspaceID)
if tokenErr != nil {
log.Printf("CPProvisioner: failed to issue token for %s: %v", workspaceID, tokenErr)
} else {
// Don't log any prefix of the token. Earlier H1 regression showed
// this slice pattern (token[:8]) panics when a helper returns a
// short value. Length alone is enough to confirm a token issued.
log.Printf("CPProvisioner: issued auth token for workspace %s (len=%d)", workspaceID, len(token))
}
// Token issuance is deliberately deferred to the workspace's first
// /registry/register call. Minting here without also delivering the
// plaintext to the workspace (via user-data or a follow-up callback)
// would leave a live token in DB that the workspace has no copy of —
// RegistryHandler.requireWorkspaceToken would then 401 every
// /registry/register attempt because the workspace is no longer in the
// "no live tokens → bootstrap-allowed" state. The register handler
// already mints a token on first successful register and returns it in
// the response body for the workspace to persist.
}

View File

@ -255,6 +255,7 @@ func (s *Scheduler) fireSchedule(ctx context.Context, sched scheduleRow) {
if _, execErr := db.DB.ExecContext(context.Background(), `UPDATE workspace_schedules SET next_run_at=$1, updated_at=now() WHERE id=$2`, nextTime, sched.ID); execErr != nil {
log.Printf("Scheduler: panic-recovery next_run_at UPDATE failed for schedule %s: %v", sched.ID, execErr)
}
}
}
}()