Merge staging into rfc-2991-pr-1 to clear BEHIND (post PR-2993 + PR-3005)
commit e87df906bd
@@ -81,9 +81,33 @@ function PlatformOwnedFilesTab({ workspaceId }: { workspaceId: string }) {
downloadFileByPath,
downloadAllFiles,
uploadFiles,
uploadDataTransferItems,
deleteAllFiles,
} = useFilesApi(workspaceId, root);

// PR-D: track whether the user is currently dragging files OVER
// the root area (not over a specific subdir row). Used to show
// the "Drop to upload to root" highlight on the tree column.
const [rootDragHover, setRootDragHover] = useState(false);

const handleDropToTarget = (
targetDir: string,
items: DataTransferItemList,
) => {
// canDelete is the gate proxy — same constraint as the toolbar
// Upload button (today only /configs is writable from the canvas
// surface). Without this check, dropping on /home would post
// through /workspaces/<id>/files/<path>, which the backend would
// reject only after an HTTP round-trip. Fail fast.
if (root !== "/configs") {
setError(
`Upload only allowed in /configs (current root: ${root}). Switch root or use Upload button.`,
);
return;
}
void uploadDataTransferItems(items, targetDir);
};

const tree = useMemo(() => buildTree(files), [files]);

const openFile = async (path: string) => {
@@ -224,8 +248,46 @@ function PlatformOwnedFilesTab({ workspaceId }: { workspaceId: string }) {
)}

<div className="flex flex-1 min-h-0">
{/* File tree */}
<div className="w-[180px] border-r border-line/40 overflow-y-auto shrink-0">
{/* File tree column. PR-D: outer div is the drop zone for
"drop on root" — when the user drags into the column area
(not over a specific subdir row), the drop targets the
current root directory. Subdirectory rows in <FileTree>
stop propagation on their own drop event so a drop on
/configs/skills doesn't ALSO fire root-area drop. */}
<div
className={`w-[180px] border-r border-line/40 overflow-y-auto shrink-0 transition-colors ${
rootDragHover ? "bg-accent/10 outline outline-1 outline-accent/40 -outline-offset-2" : ""
}`}
onDragOver={(e) => {
// Only highlight + accept the drop when uploads are
// actually allowed for the current root. Without this
// check the user gets a misleading drag affordance,
// drops, then sees the toolbar's "switch root" toast —
// bad UX.
if (root !== "/configs") return;
e.preventDefault();
e.dataTransfer.dropEffect = "copy";
}}
onDragEnter={(e) => {
if (root !== "/configs") return;
e.preventDefault();
setRootDragHover(true);
}}
onDragLeave={(e) => {
const next = e.relatedTarget as Node | null;
if (!next || !(e.currentTarget as HTMLElement).contains(next)) {
setRootDragHover(false);
}
}}
onDrop={(e) => {
if (root !== "/configs") return;
e.preventDefault();
setRootDragHover(false);
if (e.dataTransfer.items?.length) {
handleDropToTarget("", e.dataTransfer.items);
}
}}
>
{/* New file input */}
{showNewFile && (
<div className="px-2 py-1 border-b border-line/40">
@@ -243,7 +305,11 @@ function PlatformOwnedFilesTab({ workspaceId }: { workspaceId: string }) {

{files.length === 0 ? (
<div className="px-3 py-4 text-[10px] text-ink-soft text-center">
No config files yet
{rootDragHover
? "Drop to upload to root"
: root === "/configs"
? "No config files yet — drag files here to upload"
: "No config files yet"}
</div>
) : (
<FileTree
@@ -259,6 +325,7 @@ function PlatformOwnedFilesTab({ workspaceId }: { workspaceId: string }) {
onDelete={root === "/configs" ? setConfirmDelete : () => {}}
onDownload={downloadFileByPath}
canDelete={root === "/configs"}
onDropToTarget={handleDropToTarget}
expandedDirs={expandedDirs}
onToggleDir={toggleDir}
loadingDir={loadingDir}

@@ -14,16 +14,21 @@ interface TreeCallbacks {
* context-menu item's `disabled` flag so the user gets the same
* affordance as the toolbar (which gates Clear/New on /configs). */
canDelete: boolean;
/** PR-D: drop files/folders from the OS onto this row. targetDir
* is the directory path (relative to the active root) under which
* the dropped contents should land; "" means root. */
onDropToTarget?: (targetDir: string, items: DataTransferItemList) => void;
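// Example with hypothetical paths: a drop on the "skills/web-search" row
// calls onDropToTarget("skills/web-search", e.dataTransfer.items); the
// root drop zone passes "" so the upload lands directly under the root.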
expandedDirs: Set<string>;
onToggleDir: (path: string) => void;
loadingDir: string | null;
}

/**
* FileTree renders the workspace tree + owns the right-click
* context-menu state. Lifting the menu state to the tree (vs each
* row) means only one menu is open at a time — opening a new row's
* menu auto-closes the prior one. Same UX as VSCode / Theia.
* FileTree renders the workspace tree + owns the right-click context
* menu (PR-C) and the drop-target hover state (PR-D). Lifting the
* menu state here (vs each row) means only one menu open at a time —
* opening a new row's menu auto-closes the prior one. Same UX as
* VSCode / Theia.
*/
export function FileTree({
nodes,
@@ -32,6 +37,7 @@ export function FileTree({
onDelete,
onDownload,
canDelete,
onDropToTarget,
expandedDirs,
onToggleDir,
loadingDir,
@@ -42,12 +48,17 @@ export function FileTree({
y: number;
items: MenuItem[];
} | null>(null);
// PR-D: hover-target highlight state for drag-drop. Lifted next to
// the menu state so both shared-across-rows interactions live in
// one place.
const [hoverDir, setHoverDir] = useState<string | null>(null);

const openContextMenu = (e: React.MouseEvent, node: TreeNode) => {
e.preventDefault();
// Items composed per-row so the available actions reflect the
// node type (files get Download; directories don't have a
// useful per-tree download — the Export toolbar covers bulk).
// node type (files get Open + Download; directories get Delete
// only since "open a directory in the editor" doesn't apply
// and "Export folder" is the toolbar's job).
const items: MenuItem[] = [];
if (!node.isDir) {
items.push({
@@ -76,12 +87,20 @@ export function FileTree({

// Single state lifted to the top-level tree; nested <FileTree>s
// (rendered for expanded directories below) do NOT instantiate
// their own menus — they call the SAME openContextMenu via prop
// drilling. This keeps "only one menu open" the structural
// invariant rather than a render-order coincidence.
// their own menus or drop-targets — they call back via prop
// drilling. This keeps "only one menu open" + "only one drop
// target highlighted" as structural invariants rather than
// render-order coincidences.
const childCallbacks: TreeCallbacks = {
selectedPath, onSelect, onDelete, onDownload, canDelete,
expandedDirs, onToggleDir, loadingDir,
selectedPath,
onSelect,
onDelete,
onDownload,
canDelete,
onDropToTarget,
expandedDirs,
onToggleDir,
loadingDir,
};

return (
@@ -91,6 +110,8 @@ export function FileTree({
key={`${node.path}:${node.isDir ? "dir" : "file"}`}
node={node}
openContextMenu={openContextMenu}
hoverDir={hoverDir}
setHoverDir={setHoverDir}
depth={depth}
{...childCallbacks}
/>
@@ -114,28 +135,79 @@ function TreeItem({
onDelete,
onDownload,
canDelete,
onDropToTarget,
expandedDirs,
onToggleDir,
loadingDir,
depth,
openContextMenu,
hoverDir,
setHoverDir,
}: TreeCallbacks & {
node: TreeNode;
depth: number;
openContextMenu: (e: React.MouseEvent, node: TreeNode) => void;
hoverDir: string | null;
setHoverDir: (p: string | null) => void;
}) {
const isSelected = selectedPath === node.path;
const expanded = expandedDirs.has(node.path);
const isLoading = loadingDir === node.path;
const isDropTarget = node.isDir && hoverDir === node.path;

// PR-D drag handlers — only directory rows are valid drop targets
// (dropping a file ON another file is ambiguous; treat it as
// dropping in the parent dir, which the root area handles). When a
// drag enters a directory row, mark it the hover target. When the
// cursor leaves to a non-child element, clear it. drop fires the
// upload callback with the row's path.
const dragProps = node.isDir && onDropToTarget
? {
onDragOver: (e: React.DragEvent) => {
// preventDefault is REQUIRED to opt this element into the
// drop target list — without it, browsers refuse to fire
// the drop event regardless of the drop handler.
e.preventDefault();
e.dataTransfer.dropEffect = "copy";
},
onDragEnter: (e: React.DragEvent) => {
e.preventDefault();
setHoverDir(node.path);
},
onDragLeave: (e: React.DragEvent) => {
// Only clear hover when leaving to an element OUTSIDE this
// row — bare leave-events fire for every child crossed
// (the icon, the label, the ✕ button). Without the
// contains() check the highlight flickers.
const next = e.relatedTarget as Node | null;
if (!next || !(e.currentTarget as HTMLElement).contains(next)) {
setHoverDir(null);
}
},
onDrop: (e: React.DragEvent) => {
e.preventDefault();
e.stopPropagation();
setHoverDir(null);
if (e.dataTransfer.items?.length) {
onDropToTarget(node.path, e.dataTransfer.items);
}
},
}
: {};

if (node.isDir) {
return (
<div>
<div
className="group w-full flex items-center gap-1 px-2 py-0.5 text-left hover:bg-surface-card/40 transition-colors cursor-pointer"
className={`group w-full flex items-center gap-1 px-2 py-0.5 text-left transition-colors cursor-pointer ${
isDropTarget
? "bg-accent/20 outline outline-1 outline-accent/60"
: "hover:bg-surface-card/40"
}`}
style={{ paddingLeft: `${depth * 12 + 8}px` }}
onClick={() => onToggleDir(node.path)}
onContextMenu={(e) => openContextMenu(e, node)}
{...dragProps}
>
<span className="text-[9px] text-ink-soft w-3">{isLoading ? "…" : expanded ? "▼" : "▶"}</span>
<span className="text-[10px]">📁</span>
@@ -159,6 +231,7 @@ function TreeItem({
onDelete={onDelete}
onDownload={onDownload}
canDelete={canDelete}
onDropToTarget={onDropToTarget}
expandedDirs={expandedDirs}
onToggleDir={onToggleDir}
loadingDir={loadingDir}

@@ -28,12 +28,13 @@ import type { TreeNode } from "../tree";

afterEach(cleanup);

const file: TreeNode = { name: "config.yaml", path: "config.yaml", isDir: false, children: [] };
const file: TreeNode = { name: "config.yaml", path: "config.yaml", isDir: false, children: [], size: 0 };
const dir: TreeNode = {
name: "skills",
path: "skills",
isDir: true,
children: [],
size: 0,
};

function renderTree(props: Partial<React.ComponentProps<typeof FileTree>> = {}) {

@@ -0,0 +1,212 @@
// @vitest-environment jsdom
//
// Pins the drag-drop upload added in PR-D of issue #2999.
// Two layers of coverage:
//
// 1. The pure walker (collectFileEntries / walkEntry) — pins the
//    recursion shape against silent folder truncation. Browsers
//    return up to ~100 entries per readEntries() call; if the loop
//    stops early, large folder uploads silently drop files. We
//    simulate a multi-batch reader to discriminate.
//
// 2. FileTree directory-row drop handlers — pins that dragover/drop
//    events fire onDropToTarget with the directory's path + the
//    drop's DataTransferItemList.

import { describe, it, expect, vi, afterEach } from "vitest";
import { render, screen, cleanup, fireEvent } from "@testing-library/react";
import React from "react";
import { FileTree } from "../FileTree";
import type { TreeNode } from "../tree";
import { __testables } from "../useFilesApi";

afterEach(cleanup);

// ---- Walker tests ----

/**
* Build a fake FileSystemEntry tree we can hand to walkEntry. The
* shape mimics what webkitGetAsEntry returns from a real OS drag —
* directory entries expose createReader, file entries expose file().
*/
function fakeFileEntry(name: string, content = "x"): {
isFile: true;
isDirectory: false;
name: string;
fullPath: string;
file: (cb: (f: File) => void) => void;
} {
return {
isFile: true,
isDirectory: false,
name,
fullPath: "/" + name,
file: (cb) => cb(new File([content], name, { type: "text/plain" })),
};
}

function fakeDirEntry(
name: string,
childBatches: ReturnType<typeof fakeFileEntry>[][],
): {
isFile: false;
isDirectory: true;
name: string;
fullPath: string;
createReader: () => { readEntries: (cb: (entries: unknown[]) => void) => void };
} {
let i = 0;
return {
isFile: false,
isDirectory: true,
name,
fullPath: "/" + name,
createReader: () => ({
readEntries: (cb) => {
// Mimic browser semantics: emit one batch per call, then
// an empty array to signal end-of-stream. A walker that
// calls readEntries only once would silently truncate at
// the first batch.
if (i < childBatches.length) {
cb(childBatches[i++]);
} else {
cb([]);
}
},
}),
};
}

describe("walkEntry — folder-recursion drop walker", () => {
it("collects a single dropped file", async () => {
const out: { file: File; relativePath: string }[] = [];
await __testables.walkEntry(fakeFileEntry("README.md") as never, "", out);
expect(out.length).toBe(1);
expect(out[0].relativePath).toBe("README.md");
expect(out[0].file.name).toBe("README.md");
});

it("walks a folder and preserves the relative path under the folder name", async () => {
const out: { file: File; relativePath: string }[] = [];
const folder = fakeDirEntry("skills", [
[fakeFileEntry("a.md"), fakeFileEntry("b.md")],
]);
await __testables.walkEntry(folder as never, "", out);
expect(out.map((e) => e.relativePath).sort()).toEqual([
"skills/a.md",
"skills/b.md",
]);
});

it("loops readEntries until empty so a multi-batch folder isn't truncated", async () => {
// Browsers limit each readEntries() call to ~100 entries. Our
// walker MUST call it again until an empty batch is returned.
// Fake reader emits two batches of 2 + an implicit empty → 4
// total. A buggy walker that only takes the first batch would
// see only 2.
const out: { file: File; relativePath: string }[] = [];
const folder = fakeDirEntry("big", [
[fakeFileEntry("1.txt"), fakeFileEntry("2.txt")],
[fakeFileEntry("3.txt"), fakeFileEntry("4.txt")],
]);
await __testables.walkEntry(folder as never, "", out);
expect(out.length).toBe(4);
});

it("walks nested directories and accumulates the full path", async () => {
const out: { file: File; relativePath: string }[] = [];
const inner = fakeDirEntry("web-search", [[fakeFileEntry("SKILL.md")]]);
// Outer dir whose first batch contains a sub-dir entry.
const outer = {
isFile: false,
isDirectory: true,
name: "skills",
fullPath: "/skills",
createReader: () => {
let i = 0;
return {
readEntries: (cb: (entries: unknown[]) => void) => {
if (i++ === 0) cb([inner]);
else cb([]);
},
};
},
};
await __testables.walkEntry(outer as never, "", out);
expect(out.length).toBe(1);
expect(out[0].relativePath).toBe("skills/web-search/SKILL.md");
});
});

// ---- FileTree drag-drop wiring ----

const file: TreeNode = { name: "config.yaml", path: "config.yaml", isDir: false, children: [], size: 0 };
const skillsDir: TreeNode = { name: "skills", path: "skills", isDir: true, children: [], size: 0 };

function renderTree(props: Partial<React.ComponentProps<typeof FileTree>> = {}) {
// PR-D test defaults must include PR-C's onDownload + canDelete now
// that they're required on the TreeCallbacks shape (the rebase
// surfaced this — the merged tree depends on both feature sets).
const defaults: React.ComponentProps<typeof FileTree> = {
nodes: [file, skillsDir],
selectedPath: null,
onSelect: vi.fn(),
onDelete: vi.fn(),
onDownload: vi.fn(),
canDelete: true,
onDropToTarget: vi.fn(),
expandedDirs: new Set<string>(),
onToggleDir: vi.fn(),
loadingDir: null,
};
const merged = { ...defaults, ...props };
return { ...render(<FileTree {...merged} />), props: merged };
}

describe("FileTree directory-row drag-drop", () => {
it("dragover on a directory row preventDefault's so the drop will fire", () => {
renderTree();
const row = screen.getByText("skills");
const dragOver = new Event("dragover", { bubbles: true, cancelable: true });
Object.defineProperty(dragOver, "dataTransfer", {
value: { dropEffect: "" },
});
row.parentElement!.dispatchEvent(dragOver);
// preventDefault registers via the React handler — without it
// the drop event would never fire, so this assertion is the
// load-bearing one.
expect(dragOver.defaultPrevented).toBe(true);
});

it("drop on a directory row fires onDropToTarget with that path + the items list", () => {
const { props } = renderTree();
const row = screen.getByText("skills").parentElement!;
const fakeItems = { length: 1, 0: { kind: "file" } } as unknown as DataTransferItemList;
fireEvent.drop(row, { dataTransfer: { items: fakeItems } });
expect(props.onDropToTarget).toHaveBeenCalledWith("skills", fakeItems);
});

it("drop on a FILE row does NOT fire onDropToTarget (only directories are valid targets)", () => {
const { props } = renderTree();
const fileRow = screen.getByText("config.yaml").parentElement!;
const fakeItems = { length: 1, 0: { kind: "file" } } as unknown as DataTransferItemList;
fireEvent.drop(fileRow, { dataTransfer: { items: fakeItems } });
expect(props.onDropToTarget).not.toHaveBeenCalled();
});

it("drop with no DataTransferItems does NOT fire onDropToTarget", () => {
const { props } = renderTree();
const row = screen.getByText("skills").parentElement!;
fireEvent.drop(row, { dataTransfer: { items: { length: 0 } } });
expect(props.onDropToTarget).not.toHaveBeenCalled();
});

it("dragenter sets the drop-target highlight on the directory row", () => {
renderTree();
const row = screen.getByText("skills").parentElement!;
fireEvent.dragEnter(row, { dataTransfer: {} });
// Highlight class is the discriminator — without dragenter
// wiring the row stays in its hover-only style.
expect(row.className).toMatch(/bg-accent|outline-accent/);
});
});
@@ -151,16 +151,20 @@ export function useFilesApi(workspaceId: string, root: string) {
}, [files, workspaceId]);

const uploadFiles = useCallback(
async (fileList: FileList) => {
async (fileList: FileList, targetDir = "") => {
let uploaded = 0;
for (const file of Array.from(fileList)) {
const path = file.webkitRelativePath || file.name;
const parts = path.split("/");
// For folder picker: webkitRelativePath is "<picked-folder>/a/b.txt"
// — strip the picked-folder prefix so files land flat under the
// workspace's target dir, not under a redundant outer folder.
const relPath = parts.length > 1 ? parts.slice(1).join("/") : parts[0];
const finalPath = targetDir ? `${targetDir}/${relPath}` : relPath;
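// Worked example of the path math above (illustrative values only):
// webkitRelativePath "skills/web/a.md" → parts ["skills", "web", "a.md"],
// relPath "web/a.md"; with targetDir "skills" the PUT below targets
// "skills/web/a.md", and with no targetDir it targets "web/a.md".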
if (file.size > 1_000_000) continue;
try {
const content = await file.text();
await api.put(`/workspaces/${workspaceId}/files/${relPath}`, { content });
await api.put(`/workspaces/${workspaceId}/files/${finalPath}`, { content });
uploaded++;
} catch {
/* skip binary */
@@ -168,7 +172,7 @@ export function useFilesApi(workspaceId: string, root: string) {
}
if (uploaded > 0) {
useCanvasStore.getState().updateNodeData(workspaceId, { needsRestart: true });
showToast(`Uploaded ${uploaded} files`, "success");
showToast(`Uploaded ${uploaded} files${targetDir ? ` to ${targetDir}` : ""}`, "success");
loadFiles();
}
return uploaded;
@@ -176,6 +180,58 @@ export function useFilesApi(workspaceId: string, root: string) {
[workspaceId, loadFiles]
);

/**
* Upload files dragged from the OS via the HTML5 DataTransferItemList
* API. Unlike the folder-picker path (uploadFiles), this preserves
* the dropped folder structure under `targetDir` — drag a "skills/"
* folder onto the /configs/skills row and you get
* /configs/skills/skills/* (the OUTER folder name is preserved
* because the user explicitly chose to drop a NAMED folder, unlike
* the folder-picker which always wraps the picked dir).
*
* Walks FileSystemDirectoryEntry recursively via webkitGetAsEntry.
* VSCode/JupyterLab use the same primitive — there's no other
* portable browser API for "drag a folder from OS". `webkit*`
* naming is a Chromium relic; Firefox + Safari implement the same
* surface.
*
* Returns the number of files uploaded so the caller can show a
* tally / fail toast.
*/
const uploadDataTransferItems = useCallback(
async (items: DataTransferItemList, targetDir = "") => {
const fileEntries = collectFileEntries(items);
let uploaded = 0;
for (const { file, relativePath } of await fileEntries) {
if (file.size > 1_000_000) continue;
const finalPath = targetDir
? `${targetDir}/${relativePath}`
: relativePath;
try {
const content = await file.text();
await api.put(`/workspaces/${workspaceId}/files/${finalPath}`, {
content,
});
uploaded++;
} catch {
/* skip binary */
}
}
if (uploaded > 0) {
useCanvasStore
.getState()
.updateNodeData(workspaceId, { needsRestart: true });
showToast(
`Uploaded ${uploaded} file${uploaded === 1 ? "" : "s"}${targetDir ? ` to ${targetDir}` : ""}`,
"success",
);
loadFiles();
}
return uploaded;
},
[workspaceId, loadFiles],
);

const deleteAllFiles = useCallback(async () => {
let deleted = 0;
for (const f of files) {
@@ -205,6 +261,95 @@ export function useFilesApi(workspaceId: string, root: string) {
downloadFileByPath,
downloadAllFiles,
uploadFiles,
uploadDataTransferItems,
deleteAllFiles,
};
}

// ----- DataTransfer entry walker (PR-D) ---------------------------------

/**
* Minimal subset of the FileSystem Entry API surface we use. The DOM
* lib types this as FileSystemEntry / FileSystemFileEntry /
* FileSystemDirectoryEntry but the relevant methods are callback-
* based. Keep the shape narrow + explicit so the recursion below
* type-checks without pulling in the full DOM lib types.
*/
interface FSEntry {
isFile: boolean;
isDirectory: boolean;
name: string;
fullPath: string;
file?(success: (f: File) => void, fail?: (e: unknown) => void): void;
createReader?(): { readEntries(success: (entries: FSEntry[]) => void): void };
}

interface CollectedEntry {
file: File;
/** Path relative to the dropped root (e.g. "skills/web-search/SKILL.md"
* for a dropped "skills/" folder containing web-search/SKILL.md). */
relativePath: string;
}

/**
* Walk a DataTransferItemList, returning every file entry as a flat
* array keyed by the path relative to the originally-dropped item.
* Folders dropped from the OS expand recursively; loose files
* passthrough with name as the relative path.
*
* Skips items where webkitGetAsEntry() returns null — that's how
* the browser signals a non-file payload (e.g. a dragged URL or
* text snippet).
*/
async function collectFileEntries(
items: DataTransferItemList,
): Promise<CollectedEntry[]> {
const out: CollectedEntry[] = [];
for (let i = 0; i < items.length; i++) {
const item = items[i];
if (item.kind !== "file") continue;
// webkitGetAsEntry is the standardised name; older Firefox used
// getAsEntry. Both Chromium + Firefox + Safari ship the webkit-
// prefixed variant today. There's no non-prefixed alternative.
const entry = (item as DataTransferItem & {
webkitGetAsEntry?: () => FSEntry | null;
}).webkitGetAsEntry?.();
if (!entry) continue;
await walkEntry(entry, "", out);
}
return out;
}

async function walkEntry(
entry: FSEntry,
prefix: string,
out: CollectedEntry[],
): Promise<void> {
const name = entry.name;
const relPath = prefix ? `${prefix}/${name}` : name;
if (entry.isFile && entry.file) {
const file = await new Promise<File>((resolve, reject) => {
entry.file!(resolve, reject);
});
out.push({ file, relativePath: relPath });
return;
}
if (entry.isDirectory && entry.createReader) {
const reader = entry.createReader();
// readEntries returns up to ~100 at a time on Chromium; loop
// until empty so large folders aren't truncated.
let batch: FSEntry[] = [];
do {
batch = await new Promise<FSEntry[]>((resolve) =>
reader.readEntries(resolve),
);
for (const child of batch) {
await walkEntry(child, relPath, out);
}
} while (batch.length > 0);
}
}
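// Illustrative batching arithmetic (hypothetical numbers, assuming the
// ~100-entry cap per readEntries call): a directory of 250 files comes
// back as batches of 100, 100, 50, then an empty batch, so the do/while
// above runs four times and out gains 250 entries.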

// Exported for direct testing — the recursion + readEntries batching
// is the part most likely to silently truncate a real folder upload.
export const __testables = { collectFileEntries, walkEntry };

@@ -51,7 +51,7 @@ func Import(
return result
}

_ = broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", wsID, map[string]interface{}{
_ = broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisioning), wsID, map[string]interface{}{
"name": b.Name,
"tier": b.Tier,
"source_bundle_id": b.ID,
@@ -142,7 +142,7 @@ func markFailed(ctx context.Context, wsID string, broadcaster *events.Broadcaste
db.DB.ExecContext(ctx,
`UPDATE workspaces SET status = $1, last_sample_error = $2, updated_at = now() WHERE id = $3`,
models.StatusFailed, msg, wsID)
broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISION_FAILED", wsID, map[string]interface{}{
broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisionFailed), wsID, map[string]interface{}{
"error": msg,
})
}
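
A hedged sketch of the events constants this commit switches to (their declarations are not part of this diff; the type name and the exact set below are assumptions inferred from the call sites):

// internal/events: assumed shape, not shown in this commit
type EventType string

const (
	EventWorkspaceProvisioning    EventType = "WORKSPACE_PROVISIONING"
	EventWorkspaceProvisionFailed EventType = "WORKSPACE_PROVISION_FAILED"
	EventWorkspaceOnline          EventType = "WORKSPACE_ONLINE"
)

The string(events.EventX) conversions at each call site suggest RecordAndBroadcast still takes a plain string, so the typed constants need an explicit cast.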

@@ -10,6 +10,7 @@ import (
"time"

"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
)

const (
@@ -304,14 +305,14 @@ func (m *Manager) HandleInbound(ctx context.Context, ch ChannelRow, msg *Inbound
"parts": []map[string]interface{}{{"kind": "text", "text": msg.Text}},
},
"metadata": map[string]interface{}{
"source": ch.ChannelType,
"channel_id": ch.ID,
"chat_id": msg.ChatID,
"user_id": msg.UserID,
"username": msg.Username,
"message_id": msg.MessageID,
"history": history,
"extra": msg.Metadata,
"source": ch.ChannelType,
"channel_id": ch.ID,
"chat_id": msg.ChatID,
"user_id": msg.UserID,
"username": msg.Username,
"message_id": msg.MessageID,
"history": history,
"extra": msg.Metadata,
},
},
})
@@ -383,7 +384,7 @@ func (m *Manager) HandleInbound(ctx context.Context, ch ChannelRow, msg *Inbound

// Broadcast event
if m.broadcaster != nil {
m.broadcaster.RecordAndBroadcast(ctx, "CHANNEL_MESSAGE", ch.WorkspaceID, map[string]interface{}{
m.broadcaster.RecordAndBroadcast(ctx, string(events.EventChannelMessage), ch.WorkspaceID, map[string]interface{}{
"channel_id": ch.ID,
"channel_type": ch.ChannelType,
"username": msg.Username,
@@ -427,7 +428,7 @@ func (m *Manager) SendOutbound(ctx context.Context, channelID string, text strin
}

if m.broadcaster != nil {
m.broadcaster.RecordAndBroadcast(ctx, "CHANNEL_MESSAGE", ch.WorkspaceID, map[string]interface{}{
m.broadcaster.RecordAndBroadcast(ctx, string(events.EventChannelMessage), ch.WorkspaceID, map[string]interface{}{
"channel_id": ch.ID,
"channel_type": ch.ChannelType,
"direction": "outbound",

@@ -14,10 +14,12 @@ import (
"time"

"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/wsauth"
"github.com/gin-gonic/gin"
)

// proxyDispatchBuildError is a sentinel wrapper for failures inside
// http.NewRequestWithContext. handleA2ADispatchError unwraps it to emit the
// "failed to create proxy request" 500 instead of the standard 502/503 paths.
@@ -90,10 +92,10 @@ func (h *WorkspaceHandler) handleA2ADispatchError(ctx context.Context, workspace
Status: http.StatusServiceUnavailable,
Headers: map[string]string{"Retry-After": strconv.Itoa(busyRetryAfterSeconds)},
Response: gin.H{
"error": "workspace agent busy — adapter handles retry (native_session)",
"busy": true,
"retry_after": busyRetryAfterSeconds,
"native_session": true,
"error": "workspace agent busy — adapter handles retry (native_session)",
"busy": true,
"retry_after": busyRetryAfterSeconds,
"native_session": true,
},
}
}
@@ -149,7 +151,7 @@ func (h *WorkspaceHandler) handleA2ADispatchError(ctx context.Context, workspace
// Provisioner selection (mutually exclusive in production):
// - h.provisioner != nil → local Docker deployment; IsRunning does docker inspect.
// - h.cpProv != nil → SaaS / EC2 deployment; IsRunning calls CP's
//   /cp/workspaces/:id/status to read the EC2 state.
//   /cp/workspaces/:id/status to read the EC2 state.
//
// Pre-fix this function ONLY consulted h.provisioner — for SaaS tenants
// (h.provisioner=nil, h.cpProv=set) it short-circuited to false on every
@@ -191,7 +193,7 @@ func (h *WorkspaceHandler) maybeMarkContainerDead(ctx context.Context, workspace
log.Printf("ProxyA2A: failed to mark workspace %s offline: %v", workspaceID, err)
}
db.ClearWorkspaceKeys(ctx, workspaceID)
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_OFFLINE", workspaceID, map[string]interface{}{})
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOffline), workspaceID, map[string]interface{}{})
go h.RestartByID(workspaceID)
return true
}
@@ -272,7 +274,7 @@ func (h *WorkspaceHandler) logA2ASuccess(ctx context.Context, workspaceID, calle
}(ctx)

if callerID == "" && statusCode < 400 {
h.broadcaster.BroadcastOnly(workspaceID, "A2A_RESPONSE", map[string]interface{}{
h.broadcaster.BroadcastOnly(workspaceID, string(events.EventA2AResponse), map[string]interface{}{
"response_body": json.RawMessage(respBody),
"method": a2aMethod,
"duration_ms": durationMs,

@@ -21,6 +21,7 @@ import (
"time"

"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
)

// extractIdempotencyKey pulls params.message.messageId out of an A2A JSON-RPC
@@ -435,7 +436,7 @@ func (h *WorkspaceHandler) stitchDrainResponseToDelegation(ctx context.Context,
// "⏸ queued" line to "✓ completed" in real time. Without this the
// transition only surfaces after the user reloads or polls activity.
if h.broadcaster != nil {
h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_COMPLETE", sourceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationComplete), sourceID, map[string]interface{}{
"delegation_id": delegationID,
"target_id": targetID,
"response_preview": truncate(responseText, 200),

@@ -55,7 +55,7 @@ func NewActivityHandler(b *events.Broadcaster) *ActivityHandler {
func (h *ActivityHandler) List(c *gin.Context) {
workspaceID := c.Param("id")
activityType := c.Query("type")
source := c.Query("source") // "canvas" = source_id IS NULL, "agent" = source_id IS NOT NULL
source := c.Query("source")   // "canvas" = source_id IS NULL, "agent" = source_id IS NOT NULL
peerID := c.Query("peer_id") // optional UUID — restrict to rows where this peer is sender OR target
limitStr := c.DefaultQuery("limit", "100")
sinceSecsStr := c.Query("since_secs")
@@ -650,7 +650,7 @@ func LogActivity(ctx context.Context, broadcaster events.EventEmitter, params Ac
if respStr != nil {
payload["response_body"] = json.RawMessage(respJSON)
}
broadcaster.BroadcastOnly(params.WorkspaceID, "ACTIVITY_LOGGED", payload)
broadcaster.BroadcastOnly(params.WorkspaceID, string(events.EventActivityLogged), payload)
}
}

@@ -69,7 +69,7 @@ func (h *AgentHandler) Assign(c *gin.Context) {
return
}

h.broadcaster.RecordAndBroadcast(ctx, "AGENT_ASSIGNED", workspaceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventAgentAssigned), workspaceID, map[string]interface{}{
"agent_id": agentID,
"model": body.Model,
})
@@ -118,7 +118,7 @@ func (h *AgentHandler) Replace(c *gin.Context) {
return
}

h.broadcaster.RecordAndBroadcast(ctx, "AGENT_REPLACED", workspaceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventAgentReplaced), workspaceID, map[string]interface{}{
"agent_id": agentID,
"model": body.Model,
"old_model": oldModel,
@@ -148,7 +148,7 @@ func (h *AgentHandler) Remove(c *gin.Context) {
return
}

h.broadcaster.RecordAndBroadcast(ctx, "AGENT_REMOVED", workspaceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventAgentRemoved), workspaceID, map[string]interface{}{
"agent_id": agentID,
"model": model,
})
@@ -215,21 +215,21 @@ func (h *AgentHandler) Move(c *gin.Context) {
}

// Broadcast on both workspaces
h.broadcaster.RecordAndBroadcast(ctx, "AGENT_MOVED", sourceID, map[string]interface{}{
"agent_id": agentID,
"model": model,
"target_workspace_id": body.TargetWorkspaceID,
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventAgentMoved), sourceID, map[string]interface{}{
"agent_id": agentID,
"model": model,
"target_workspace_id": body.TargetWorkspaceID,
})
h.broadcaster.RecordAndBroadcast(ctx, "AGENT_MOVED", body.TargetWorkspaceID, map[string]interface{}{
"agent_id": agentID,
"model": model,
"source_workspace_id": sourceID,
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventAgentMoved), body.TargetWorkspaceID, map[string]interface{}{
"agent_id": agentID,
"model": model,
"source_workspace_id": sourceID,
})

c.JSON(http.StatusOK, gin.H{
"agent_id": agentID,
"model": model,
"from_workspace": sourceID,
"to_workspace": body.TargetWorkspaceID,
"agent_id": agentID,
"model": model,
"from_workspace": sourceID,
"to_workspace": body.TargetWorkspaceID,
})
}

@@ -51,7 +51,7 @@ func (h *ApprovalsHandler) Create(c *gin.Context) {
return
}

h.broadcaster.RecordAndBroadcast(ctx, "APPROVAL_REQUESTED", workspaceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventApprovalRequested), workspaceID, map[string]interface{}{
"approval_id": approvalID,
"action": body.Action,
"reason": body.Reason,
@@ -62,7 +62,7 @@ func (h *ApprovalsHandler) Create(c *gin.Context) {
var parentID *string
db.DB.QueryRowContext(ctx, `SELECT parent_id FROM workspaces WHERE id = $1`, workspaceID).Scan(&parentID)
if parentID != nil {
h.broadcaster.RecordAndBroadcast(ctx, "APPROVAL_ESCALATED", *parentID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventApprovalEscalated), *parentID, map[string]interface{}{
"approval_id": approvalID,
"from_workspace_id": workspaceID,
"action": body.Action,

@@ -164,7 +164,7 @@ func (h *DelegationHandler) Delegate(c *gin.Context) {
go h.executeDelegation(sourceID, body.TargetID, delegationID, a2aBody)

// Broadcast event so canvas shows delegation in real-time
h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_SENT", sourceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationSent), sourceID, map[string]interface{}{
"delegation_id": delegationID,
"target_id": body.TargetID,
"task_preview": truncate(body.Task, 100),
@@ -317,7 +317,7 @@ func (h *DelegationHandler) executeDelegation(sourceID, targetID, delegationID s

// Update status: pending → dispatched
h.updateDelegationStatus(sourceID, delegationID, "dispatched", "")
h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_STATUS", sourceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationStatus), sourceID, map[string]interface{}{
"delegation_id": delegationID, "target_id": targetID, "status": "dispatched",
})

@@ -352,7 +352,7 @@ func (h *DelegationHandler) executeDelegation(sourceID, targetID, delegationID s
log.Printf("Delegation %s: failed to insert error log: %v", delegationID, err)
}

h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_FAILED", sourceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationFailed), sourceID, map[string]interface{}{
"delegation_id": delegationID, "target_id": targetID, "error": proxyErr.Error(),
})
// RFC #2829 PR-2 result-push (see UpdateStatus for rationale).
@@ -388,7 +388,7 @@ func (h *DelegationHandler) executeDelegation(sourceID, targetID, delegationID s
`, sourceID, sourceID, targetID, "Delegation queued — target at capacity", string(queuedJSON)); err != nil {
log.Printf("Delegation %s: failed to insert queued log: %v", delegationID, err)
}
h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_STATUS", sourceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationStatus), sourceID, map[string]interface{}{
"delegation_id": delegationID, "target_id": targetID, "status": "queued",
})
return
@@ -420,7 +420,7 @@ func (h *DelegationHandler) executeDelegation(sourceID, targetID, delegationID s
// delegation_ledger_integration_test.go.
recordLedgerStatus(ctx, delegationID, "completed", "", responseText)
h.updateDelegationStatus(sourceID, delegationID, "completed", "")
h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_COMPLETE", sourceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationComplete), sourceID, map[string]interface{}{
"delegation_id": delegationID,
"target_id": targetID,
"response_preview": truncate(responseText, 200),
@@ -503,7 +503,7 @@ func (h *DelegationHandler) Record(c *gin.Context) {
recordLedgerInsert(ctx, sourceID, body.TargetID, body.DelegationID, body.Task, "")
recordLedgerStatus(ctx, body.DelegationID, "dispatched", "", "")

h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_SENT", sourceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationSent), sourceID, map[string]interface{}{
"delegation_id": body.DelegationID,
"target_id": body.TargetID,
"task_preview": truncate(body.Task, 100),
@@ -558,7 +558,7 @@ func (h *DelegationHandler) UpdateStatus(c *gin.Context) {
`, sourceID, sourceID, "Delegation completed ("+truncate(body.ResponsePreview, 80)+")", string(respJSON)); err != nil {
log.Printf("Delegation UpdateStatus: result insert failed for %s: %v", delegationID, err)
}
h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_COMPLETE", sourceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationComplete), sourceID, map[string]interface{}{
"delegation_id": delegationID,
"response_preview": truncate(body.ResponsePreview, 200),
})
@@ -570,7 +570,7 @@ func (h *DelegationHandler) UpdateStatus(c *gin.Context) {
// the result instead of holding open an HTTP connection.
pushDelegationResultToInbox(ctx, sourceID, delegationID, "completed", body.ResponsePreview, "")
} else {
h.broadcaster.RecordAndBroadcast(ctx, "DELEGATION_FAILED", sourceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventDelegationFailed), sourceID, map[string]interface{}{
"delegation_id": delegationID,
"error": body.Error,
})

@@ -8,6 +8,7 @@ import (
"net/http"

"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/wsauth"
"github.com/gin-gonic/gin"
)
@@ -100,7 +101,7 @@ func (h *WorkspaceHandler) RotateExternalCredentials(c *gin.Context) {
// see when credentials were rotated. No PII; the token plaintext
// is NOT logged.
if h.broadcaster != nil {
h.broadcaster.RecordAndBroadcast(ctx, "EXTERNAL_CREDENTIALS_ROTATED", id, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventExternalCredentialsRotated), id, map[string]interface{}{
"workspace_id": id,
})
}

@@ -20,12 +20,14 @@ import (
"github.com/Molecule-AI/molecule-monorepo/platform/internal/channels"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/crypto"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/provisioner"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/provlog"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/scheduler"
"github.com/google/uuid"
)

// createWorkspaceTree recursively materialises an OrgWorkspace (and its
// descendants) into the workspaces + canvas_layouts tables and kicks off
// Docker provisioning. absX/absY are THIS workspace's absolute canvas
@@ -227,7 +229,7 @@ func (h *OrgHandler) createWorkspaceTree(ws OrgWorkspace, parentID *string, absX
if parentID != nil {
payload["parent_id"] = *parentID
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", id, payload)
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisioning), id, payload)

// Seed initial memories from workspace config or defaults (issue #1050).
// Per-workspace initial_memories override defaults; if workspace has none,
@@ -243,7 +245,7 @@ func (h *OrgHandler) createWorkspaceTree(ws OrgWorkspace, parentID *string, absX
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, url = $2 WHERE id = $3`, models.StatusOnline, ws.URL, id); err != nil {
log.Printf("Org import: external workspace status update failed for %s: %v", ws.Name, err)
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", id, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOnline), id, map[string]interface{}{
"name": ws.Name, "external": true,
})
} else if h.workspace.HasProvisioner() {

@@ -414,7 +414,7 @@ func (h *RegistryHandler) Register(c *gin.Context) {
}

// Broadcast WORKSPACE_ONLINE
if err := h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", payload.ID, map[string]interface{}{
if err := h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOnline), payload.ID, map[string]interface{}{
"url": cachedURL,
"agent_card": payload.AgentCard,
"delivery_mode": effectiveMode,
@@ -572,7 +572,7 @@ func (h *RegistryHandler) Heartbeat(c *gin.Context) {

// Broadcast current task update only when it changed (avoid spamming on every heartbeat)
if payload.CurrentTask != prevTask {
h.broadcaster.BroadcastOnly(payload.WorkspaceID, "TASK_UPDATED", map[string]interface{}{
h.broadcaster.BroadcastOnly(payload.WorkspaceID, string(events.EventTaskUpdated), map[string]interface{}{
"current_task": payload.CurrentTask,
"active_tasks": payload.ActiveTasks,
})
@@ -593,7 +593,7 @@ func (h *RegistryHandler) Heartbeat(c *gin.Context) {
// so per-heartbeat cost is one in-memory channel send per active
// SSE subscriber and one WS hub fan-out. At 30s heartbeat cadence
// this is far below any noise floor on either path.
h.broadcaster.BroadcastOnly(payload.WorkspaceID, "WORKSPACE_HEARTBEAT", map[string]interface{}{
h.broadcaster.BroadcastOnly(payload.WorkspaceID, string(events.EventWorkspaceHeartbeat), map[string]interface{}{
"active_tasks": payload.ActiveTasks,
"uptime_seconds": payload.UptimeSeconds,
})
@@ -678,7 +678,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea
if err != nil {
log.Printf("Heartbeat: failed to mark %s degraded (wedged): %v", payload.WorkspaceID, err)
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_DEGRADED", payload.WorkspaceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceDegraded), payload.WorkspaceID, map[string]interface{}{
"runtime_state": "wedged",
"sample_error": payload.SampleError,
})
@@ -699,7 +699,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2`, models.StatusDegraded, payload.WorkspaceID); err != nil {
log.Printf("Heartbeat: failed to mark %s degraded: %v", payload.WorkspaceID, err)
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_DEGRADED", payload.WorkspaceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceDegraded), payload.WorkspaceID, map[string]interface{}{
"error_rate": payload.ErrorRate,
"sample_error": payload.SampleError,
})
@@ -718,7 +718,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2`, models.StatusOnline, payload.WorkspaceID); err != nil {
log.Printf("Heartbeat: failed to recover %s to online: %v", payload.WorkspaceID, err)
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", payload.WorkspaceID, map[string]interface{}{})
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOnline), payload.WorkspaceID, map[string]interface{}{})
}

// Recovery: if workspace was offline but is now sending heartbeats, bring it back online.
@@ -728,7 +728,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea
if _, err := db.DB.ExecContext(ctx, `UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2 AND status = 'offline'`, models.StatusOnline, payload.WorkspaceID); err != nil {
log.Printf("Heartbeat: failed to recover %s from offline: %v", payload.WorkspaceID, err)
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", payload.WorkspaceID, map[string]interface{}{})
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOnline), payload.WorkspaceID, map[string]interface{}{})
}

// Auto-recovery: if a workspace is marked "provisioning" but is actively sending
@@ -743,7 +743,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea
} else {
log.Printf("Heartbeat: transitioned %s from provisioning to online (heartbeat received)", payload.WorkspaceID)
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", payload.WorkspaceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOnline), payload.WorkspaceID, map[string]interface{}{
"recovered_from": currentStatus,
})
}
@@ -771,7 +771,7 @@ func (h *RegistryHandler) evaluateStatus(c *gin.Context, payload models.Heartbea
} else {
log.Printf("Heartbeat: transitioned %s from awaiting_agent to online (heartbeat received)", payload.WorkspaceID)
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", payload.WorkspaceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOnline), payload.WorkspaceID, map[string]interface{}{
"recovered_from": currentStatus,
})
}
@@ -820,7 +820,7 @@ func (h *RegistryHandler) UpdateCard(c *gin.Context) {
return
}

h.broadcaster.RecordAndBroadcast(c.Request.Context(), "AGENT_CARD_UPDATED", payload.WorkspaceID, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(c.Request.Context(), string(events.EventAgentCardUpdated), payload.WorkspaceID, map[string]interface{}{
"agent_card": payload.AgentCard,
})

@@ -112,7 +112,6 @@ func (h *WorkspaceHandler) SetCPProvisioner(cp provisioner.CPProvisionerAPI) {
h.cpProv = cp
}

// SetEnvMutators wires a provisionhook.Registry into the handler. Plugins
// living in separate repos register on the same Registry instance during
// boot (see cmd/server/main.go) and main.go calls this setter once before
@@ -361,7 +360,7 @@ func (h *WorkspaceHandler) Create(c *gin.Context) {
// populate the Runtime pill on the side panel immediately — without it
// the node lives as "runtime: unknown" until something refetches the
// workspace row (which nothing does during provisioning).
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", id, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisioning), id, map[string]interface{}{
"name": payload.Name,
"tier": payload.Tier,
"runtime": payload.Runtime,
@@ -388,7 +387,7 @@ func (h *WorkspaceHandler) Create(c *gin.Context) {
if err := db.CacheURL(ctx, id, payload.URL); err != nil {
log.Printf("External workspace: failed to cache URL for %s: %v", id, err)
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_ONLINE", id, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOnline), id, map[string]interface{}{
"name": payload.Name, "external": true,
})
} else {
@@ -407,7 +406,7 @@ func (h *WorkspaceHandler) Create(c *gin.Context) {
} else {
connectionToken = tok
}
h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_AWAITING_AGENT", id, map[string]interface{}{
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceAwaitingAgent), id, map[string]interface{}{
"name": payload.Name, "external": true,
})
}
@@ -539,24 +538,24 @@ func scanWorkspaceRow(rows interface {
}

ws := map[string]interface{}{
"id": id,
"name": name,
"tier": tier,
"status": status,
"url": url,
"parent_id": parentID,
"active_tasks": activeTasks,
"max_concurrent_tasks": maxConcurrentTasks,
"last_error_rate": errorRate,
"last_sample_error": sampleError,
"uptime_seconds": uptimeSeconds,
"current_task": currentTask,
"runtime": runtime,
"workspace_dir": nilIfEmpty(workspaceDir),
"monthly_spend": monthlySpend,
"x": x,
"y": y,
"collapsed": collapsed,
"id": id,
"name": name,
"tier": tier,
"status": status,
"url": url,
"parent_id": parentID,
"active_tasks": activeTasks,
"max_concurrent_tasks": maxConcurrentTasks,
"last_error_rate": errorRate,
"last_sample_error": sampleError,
"uptime_seconds": uptimeSeconds,
"current_task": currentTask,
"runtime": runtime,
"workspace_dir": nilIfEmpty(workspaceDir),
"monthly_spend": monthlySpend,
"x": x,
"y": y,
"collapsed": collapsed,
}

// budget_limit: nil when no limit set, int64 otherwise

@ -6,6 +6,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
|
||||
"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
|
||||
"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
@ -85,7 +86,7 @@ func (h *WorkspaceHandler) BootstrapFailed(c *gin.Context) {
		return
	}

	h.broadcaster.RecordAndBroadcast(c.Request.Context(), "WORKSPACE_PROVISION_FAILED", id, map[string]interface{}{
	h.broadcaster.RecordAndBroadcast(c.Request.Context(), string(events.EventWorkspaceProvisionFailed), id, map[string]interface{}{
		"error": errMsg,
		"log_tail": tail,
		"source": "bootstrap_watcher",
@ -16,12 +16,14 @@ import (
	"time"

	"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/wsauth"
	"github.com/gin-gonic/gin"
	"github.com/google/uuid"
	"github.com/lib/pq"
)

// State handles GET /workspaces/:id/state — minimal status payload for
// remote-agent polling (Phase 30.4). Returns `{status, paused, deleted,
// workspace_id}` so a remote agent can detect pause/resume/delete
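// Illustrative sketch (an assumption, not part of this diff) of the handler
// described above. Only the response shape {status, paused, deleted,
// workspace_id} comes from the comment; the lookup and field sources are
// guesses, and the snippet assumes the file's existing gin and net/http
// imports.
func (h *WorkspaceHandler) State(c *gin.Context) {
	id := c.Param("id")
	var status string
	var paused, deleted bool
	// ... fetch status/paused from the workspaces row; a missing row would
	// presumably be reported as deleted ...
	c.JSON(http.StatusOK, gin.H{
		"status":       status,
		"paused":       paused,
		"deleted":      deleted,
		"workspace_id": id,
	})
}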
@ -380,7 +382,7 @@ func (h *WorkspaceHandler) Delete(c *gin.Context) {
		pq.Array(allIDs)); err != nil {
		log.Printf("Delete token revocation error for %s: %v", id, err)
	}
	// #1027: cascade-disable all schedules for the deleted workspaces so
	// #1027: cascade-disable all schedules for the deleted workspaces so
	// the scheduler never fires a cron into a removed container.
	if _, err := db.DB.ExecContext(ctx,
		`UPDATE workspace_schedules SET enabled = false, updated_at = now()
@ -466,14 +468,14 @@ func (h *WorkspaceHandler) Delete(c *gin.Context) {
			// leaving other WS clients ignorant of the cascade. The DB
			// row is already 'removed' so it's recoverable, but the
			// inconsistency is avoidable.
			h.broadcaster.RecordAndBroadcast(cleanupCtx, "WORKSPACE_REMOVED", descID, map[string]interface{}{})
			h.broadcaster.RecordAndBroadcast(cleanupCtx, string(events.EventWorkspaceRemoved), descID, map[string]interface{}{})
		}

		stopAndRemove(id)
		db.ClearWorkspaceKeys(cleanupCtx, id)
		restartStates.Delete(id) // #2269: same as descendants above

		h.broadcaster.RecordAndBroadcast(cleanupCtx, "WORKSPACE_REMOVED", id, map[string]interface{}{
		h.broadcaster.RecordAndBroadcast(cleanupCtx, string(events.EventWorkspaceRemoved), id, map[string]interface{}{
			"cascade_deleted": len(descendantIDs),
		})
@ -41,6 +41,7 @@ import (
	"path/filepath"

	"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/provisioner"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/wsauth"
@ -212,7 +213,7 @@ func (h *WorkspaceHandler) markProvisionFailed(ctx context.Context, workspaceID,
	} else if _, hasErr := extra["error"]; !hasErr {
		extra["error"] = msg
	}
	h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISION_FAILED", workspaceID, extra)
	h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisionFailed), workspaceID, extra)
	if _, dbErr := db.DB.ExecContext(ctx,
		`UPDATE workspaces SET status = $3, last_sample_error = $2, updated_at = now() WHERE id = $1`,
		workspaceID, msg, models.StatusFailed); dbErr != nil {
@ -11,6 +11,7 @@ import (
	"time"

	"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/provlog"
	"github.com/gin-gonic/gin"
@ -147,7 +148,7 @@ func (h *WorkspaceHandler) Restart(c *gin.Context) {
	// Reset to provisioning
	db.DB.ExecContext(ctx,
		`UPDATE workspaces SET status = $1, url = '', updated_at = now() WHERE id = $2`, models.StatusProvisioning, id)
	h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", id, map[string]interface{}{
	h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisioning), id, map[string]interface{}{
		"name": wsName,
		"tier": tier,
		"runtime": containerRuntime,
@ -341,7 +342,7 @@ func (h *WorkspaceHandler) HibernateWorkspace(ctx context.Context, workspaceID s
	}

	db.ClearWorkspaceKeys(ctx, workspaceID)
	h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_HIBERNATED", workspaceID, map[string]interface{}{
	h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceHibernated), workspaceID, map[string]interface{}{
		"name": wsName,
		"tier": tier,
	})
@ -552,7 +553,7 @@ func (h *WorkspaceHandler) runRestartCycle(workspaceID string) {

	db.DB.ExecContext(ctx,
		`UPDATE workspaces SET status = $1, url = '', updated_at = now() WHERE id = $2`, models.StatusProvisioning, workspaceID)
	h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", workspaceID, map[string]interface{}{
	h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisioning), workspaceID, map[string]interface{}{
		"name": wsName, "tier": tier, "runtime": dbRuntime,
	})
@ -640,7 +641,7 @@ func (h *WorkspaceHandler) Pause(c *gin.Context) {
		db.DB.ExecContext(ctx,
			`UPDATE workspaces SET status = $1, url = '', updated_at = now() WHERE id = $2`, models.StatusPaused, ws.id)
		db.ClearWorkspaceKeys(ctx, ws.id)
		h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PAUSED", ws.id, map[string]interface{}{
		h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspacePaused), ws.id, map[string]interface{}{
			"name": ws.name,
		})
	}
@ -709,7 +710,7 @@ func (h *WorkspaceHandler) Resume(c *gin.Context) {
	for _, ws := range toResume {
		db.DB.ExecContext(ctx,
			`UPDATE workspaces SET status = $1, updated_at = now() WHERE id = $2`, models.StatusProvisioning, ws.id)
		h.broadcaster.RecordAndBroadcast(ctx, "WORKSPACE_PROVISIONING", ws.id, map[string]interface{}{
		h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisioning), ws.id, map[string]interface{}{
			"name": ws.name, "tier": ws.tier, "runtime": ws.runtime,
		})
		payload := models.CreateWorkspacePayload{Name: ws.name, Tier: ws.tier, Runtime: ws.runtime}
@ -8,6 +8,7 @@ import (
	"time"

	"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
)
@ -197,7 +198,7 @@ func sweepStuckProvisioning(ctx context.Context, emitter ProvisionTimeoutEmitter
	// A separate event type was considered but the UI reaction is
	// identical either way — operators who need to distinguish can
	// tell from the `source` payload field.
	if emitErr := emitter.RecordAndBroadcast(ctx, "WORKSPACE_PROVISION_FAILED", c.id, map[string]interface{}{
	if emitErr := emitter.RecordAndBroadcast(ctx, string(events.EventWorkspaceProvisionFailed), c.id, map[string]interface{}{
		"error": msg,
		"timeout_secs": timeoutSec,
		"runtime": c.runtime,
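// Illustrative sketch (an inference, not shown in this diff): judging from the
// call above, ProvisionTimeoutEmitter presumably narrows the broadcaster to
// the single method the sweeper needs, with an error return so emit failures
// can be logged by the caller.
type ProvisionTimeoutEmitter interface {
	RecordAndBroadcast(ctx context.Context, eventType, workspaceID string, payload map[string]interface{}) error
}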
@ -14,6 +14,7 @@ import (
	cronlib "github.com/robfig/cron/v3"

	"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/metrics"
	"github.com/Molecule-AI/molecule-monorepo/platform/internal/supervised"
)
@ -541,7 +542,7 @@ func (s *Scheduler) fireSchedule(ctx context.Context, sched scheduleRow) {
	insertCancel()

	if s.broadcaster != nil {
		s.broadcaster.RecordAndBroadcast(ctx, "CRON_EXECUTED", sched.WorkspaceID, map[string]interface{}{
		s.broadcaster.RecordAndBroadcast(ctx, string(events.EventCronExecuted), sched.WorkspaceID, map[string]interface{}{
			"schedule_id": sched.ID,
			"schedule_name": sched.Name,
			"status": lastStatus,
@ -618,7 +619,7 @@ func (s *Scheduler) recordSkipped(ctx context.Context, sched scheduleRow, active
	skipInsCancel()

	if s.broadcaster != nil {
		_ = s.broadcaster.RecordAndBroadcast(ctx, "CRON_SKIPPED", sched.WorkspaceID, map[string]interface{}{
		_ = s.broadcaster.RecordAndBroadcast(ctx, string(events.EventCronSkipped), sched.WorkspaceID, map[string]interface{}{
			"schedule_id": sched.ID,
			"schedule_name": sched.Name,
			"reason": reason,