forked from molecule-ai/molecule-core
Merge remote-tracking branch 'refs/remotes/origin/staging' into sync/staging-to-main-2026-04-24
This commit is contained in:
commit
7807bf8dc4
@ -34,7 +34,7 @@ PLUGINS_DIR= # Path to plugins/ directory (default: /plugins i
|
||||
# MOLECULE_MCP_ALLOW_SEND_MESSAGE= # Set to "true" to include send_message_to_user in the MCP bridge tool list (issue #810). Excluded by default to prevent unintended WebSocket pushes from CLI sessions.
|
||||
# MOLECULE_MCP_URL=http://localhost:8080 # Platform URL for opencode MCP config (opencode.json). Same as PLATFORM_URL; separate var so opencode configs can reference it without ambiguity.
|
||||
# WORKSPACE_DIR= # Optional global host path bind-mounted to /workspace in every container. Per-workspace workspace_dir column overrides this; if neither is set each workspace gets an isolated Docker named volume.
|
||||
# MOLECULE_ENV=development # Environment label (development/staging/production). Used for log tagging and conditional behaviour.
|
||||
MOLECULE_ENV=development # Environment label (development/staging/production). Used for log tagging and for the AdminAuth dev-mode escape hatch (lets the Canvas dashboard keep working after the first workspace is created, when ADMIN_TOKEN is unset). SaaS deployments MUST set MOLECULE_ENV=production.
|
||||
# MOLECULE_ENABLE_TEST_TOKENS= # Set to 1 to expose GET /admin/workspaces/:id/test-token (mints a fresh bearer token for E2E scripts). The route is auto-enabled when MOLECULE_ENV != production; this flag is the explicit override. Leave unset/0 in prod — the route 404s unless enabled.
|
||||
# MOLECULE_ORG_ID= # SaaS only: org UUID set by control plane on tenant machines. When set, workspace provisioning auto-routes through the control plane API instead of Docker.
|
||||
# CP_PROVISION_URL= # Override control plane URL for workspace provisioning (default: https://api.moleculesai.app). Only needed for testing against a non-production control plane.
|
||||
|
||||
95
.github/workflows/block-internal-paths.yml
vendored
Normal file
95
.github/workflows/block-internal-paths.yml
vendored
Normal file
@ -0,0 +1,95 @@
|
||||
name: Block internal-flavored paths
|
||||
|
||||
# Hard CI gate. Internal content (positioning, competitive briefs, sales
|
||||
# playbooks, PMM/press drip, draft campaigns) lives in Molecule-AI/internal —
|
||||
# this public monorepo must never re-acquire those paths. CEO directive
|
||||
# 2026-04-23 after a fleet-wide audit found 79 internal files leaked here.
|
||||
#
|
||||
# Failure mode without this gate: agents (PMM, Research, DevRel, Sales) drop
|
||||
# briefs into the easiest path their cwd resolves to (root /research,
|
||||
# /marketing, /docs/marketing) and gitignore alone won't catch a `git add -f`
|
||||
# or a stale gitignore line. This workflow is the mechanical backstop.
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
push:
|
||||
branches: [main, staging]
|
||||
|
||||
jobs:
|
||||
check:
|
||||
name: Block forbidden paths
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 2 # need previous commit to diff against on push events
|
||||
|
||||
- name: Refuse if forbidden paths appear
|
||||
run: |
|
||||
# Paths that must NEVER live in the public monorepo. Add to this
|
||||
# list narrowly — broader patterns belong in .gitignore so day-to-day
|
||||
# docs work isn't accidentally blocked.
|
||||
FORBIDDEN_PATTERNS=(
|
||||
"^research/"
|
||||
"^marketing/"
|
||||
"^docs/marketing/"
|
||||
"^comment-[0-9]+\.json$"
|
||||
"^test-pmm.*\.(txt|md)$"
|
||||
"^tick-reflections.*\.(txt|md)$"
|
||||
".*-temp\.(md|txt)$"
|
||||
)
|
||||
|
||||
# Determine the diff base.
|
||||
if [ "${{ github.event_name }}" = "pull_request" ]; then
|
||||
BASE="${{ github.event.pull_request.base.sha }}"
|
||||
HEAD="${{ github.event.pull_request.head.sha }}"
|
||||
else
|
||||
BASE="${{ github.event.before }}"
|
||||
HEAD="${{ github.event.after }}"
|
||||
fi
|
||||
|
||||
# Files added or modified in this change.
|
||||
if [ -z "$BASE" ] || echo "$BASE" | grep -qE '^0+$'; then
|
||||
# New branch / no previous SHA — check entire tree.
|
||||
CHANGED=$(git ls-tree -r --name-only HEAD)
|
||||
else
|
||||
CHANGED=$(git diff --name-only --diff-filter=AM "$BASE" "$HEAD")
|
||||
fi
|
||||
|
||||
if [ -z "$CHANGED" ]; then
|
||||
echo "No changed files to inspect."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
OFFENDING=""
|
||||
for path in $CHANGED; do
|
||||
for pattern in "${FORBIDDEN_PATTERNS[@]}"; do
|
||||
if echo "$path" | grep -qE "$pattern"; then
|
||||
OFFENDING="${OFFENDING}${path} (matched: ${pattern})\n"
|
||||
break
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
if [ -n "$OFFENDING" ]; then
|
||||
echo "::error::Forbidden internal-flavored paths detected:"
|
||||
printf "$OFFENDING"
|
||||
echo ""
|
||||
echo "These paths belong in Molecule-AI/internal, not this public repo."
|
||||
echo "See docs/internal-content-policy.md for canonical locations."
|
||||
echo ""
|
||||
echo "If your file is genuinely public-facing (e.g. a blog post"
|
||||
echo "ready to ship), use one of these alternatives instead:"
|
||||
echo " • Public-bound blog posts: docs/blog/<slug>.md"
|
||||
echo " • Public-bound tutorials: docs/tutorials/<slug>.md"
|
||||
echo " • Public devrel content: docs/devrel/<slug>.md"
|
||||
echo ""
|
||||
echo "If you legitimately need to add a new top-level path that"
|
||||
echo "happens to match a forbidden pattern, edit"
|
||||
echo ".github/workflows/block-internal-paths.yml and update the"
|
||||
echo "FORBIDDEN_PATTERNS list with reviewer signoff."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "✓ No forbidden paths in this change."
|
||||
20
.gitignore
vendored
20
.gitignore
vendored
@ -120,9 +120,29 @@ backups/
|
||||
# org-templates live in Molecule-AI/molecule-ai-org-template-* repos
|
||||
# (including molecule-dev — no checkin exception).
|
||||
# plugins live in Molecule-AI/molecule-ai-plugin-* repos.
|
||||
# All three directories are populated by scripts/clone-manifest.sh
|
||||
# (now auto-run by infra/scripts/setup.sh). The in-tree exception for
|
||||
# molecule-dev was removed because the checked-in copy drifted from
|
||||
# the standalone repo and shipped with broken !include references to
|
||||
# role files that never existed in the snapshot.
|
||||
/org-templates/
|
||||
/plugins/
|
||||
/workspace-configs-templates/
|
||||
# Cloned by publish-workspace-server-image.yml so the Dockerfile's
|
||||
# replace-directive path resolves. Lives in its own repo.
|
||||
/molecule-ai-plugin-github-app-auth/
|
||||
|
||||
# Internal-flavored content lives in Molecule-AI/internal — NEVER in this
|
||||
# public monorepo. Migrated 2026-04-23 (CEO directive). The CI workflow
|
||||
# .github/workflows/block-internal-paths.yml enforces this; this gitignore
|
||||
# is the second line of defence so accidental local writes don't reach a
|
||||
# commit. See docs/internal-content-policy.md for the full rationale.
|
||||
/research/
|
||||
/marketing/
|
||||
/docs/marketing/
|
||||
# Common temp/scratch patterns agents have produced
|
||||
/comment-*.json
|
||||
*-temp.md
|
||||
*-temp.txt
|
||||
/test-pmm-*.txt
|
||||
/tick-reflections-*.md
|
||||
|
||||
@ -12,6 +12,11 @@ development workflow, conventions, and how to get your changes merged.
|
||||
- **Python 3.11+** — workspace runtime
|
||||
- **Docker** — infrastructure services (Postgres, Redis)
|
||||
- **Git** — with hooks path set to `.githooks`
|
||||
- **jq** — parses `manifest.json` during `setup.sh` to clone the
|
||||
template/plugin registry. Install via `brew install jq` (macOS) or
|
||||
`apt install jq` (Debian). Without it, setup.sh prints a note and
|
||||
leaves the registry dirs empty (recoverable by installing jq and
|
||||
re-running).
|
||||
|
||||
### Setup
|
||||
|
||||
|
||||
@ -261,6 +261,12 @@ cp .env.example .env
|
||||
# and Temporal (:7233 gRPC, :8233 UI) on the shared
|
||||
# `molecule-monorepo-net` Docker network. Temporal runs with
|
||||
# no auth on localhost — dev-only; production must gate it.
|
||||
#
|
||||
# Also populates the template/plugin registry by cloning every repo
|
||||
# listed in manifest.json into workspace-configs-templates/,
|
||||
# org-templates/, and plugins/. Requires jq — install via
|
||||
# `brew install jq` (macOS) or `apt install jq` (Debian). Idempotent:
|
||||
# re-runs skip any target dir that's already populated.
|
||||
|
||||
cd workspace-server
|
||||
go run ./cmd/server # applies pending migrations on first boot
|
||||
|
||||
@ -260,6 +260,11 @@ cp .env.example .env
|
||||
# 以及 Temporal (:7233 gRPC, :8233 UI),全部挂在共享的
|
||||
# `molecule-monorepo-net` Docker 网络上。Temporal 默认无鉴权,
|
||||
# 仅用于本地开发;生产环境必须加 mTLS / API Key。
|
||||
#
|
||||
# 同时会根据 manifest.json 拉取所有模板/插件仓库到
|
||||
# workspace-configs-templates/、org-templates/、plugins/ 三个目录。
|
||||
# 需要安装 jq:`brew install jq`(macOS)或 `apt install jq`(Debian)。
|
||||
# 脚本幂等:已经存在内容的目录会被跳过,可以安全重跑。
|
||||
|
||||
cd workspace-server
|
||||
go run ./cmd/server # 首次启动会自动跑 schema_migrations 里未应用的迁移
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
"use client";
|
||||
|
||||
import { useEffect, useState } from "react";
|
||||
import { isSaaSTenant } from "@/lib/tenant";
|
||||
|
||||
const STORAGE_KEY = "molecule_cookie_consent";
|
||||
|
||||
@ -74,7 +75,18 @@ export function CookieConsent() {
|
||||
// Read persisted decision on mount. useState's initialState can't run
|
||||
// on first render because localStorage is SSR-unsafe — defer to
|
||||
// useEffect so the initial HTML is identical to the server snapshot.
|
||||
//
|
||||
// The banner is SaaS-only: it carries a link to the hosted
|
||||
// privacy policy (moleculesai.app/legal/privacy) and presumes
|
||||
// GDPR/ePrivacy obligations that only apply to the hosted offering.
|
||||
// Self-hosted / local-dev / Vercel-preview hosts get no banner —
|
||||
// matches the `isSaaSTenant()` convention used by AuthGate and
|
||||
// the tier picker.
|
||||
useEffect(() => {
|
||||
if (!isSaaSTenant()) {
|
||||
setVisible(false);
|
||||
return;
|
||||
}
|
||||
setVisible(getStoredConsent() === null);
|
||||
}, []);
|
||||
|
||||
|
||||
@ -89,7 +89,13 @@ export function CreateWorkspaceButton() {
|
||||
],
|
||||
[isSaaS],
|
||||
);
|
||||
const defaultTier = isSaaS ? 4 : 1;
|
||||
// T3 ("Privileged") is the self-hosted default — gives agents the
|
||||
// read_write workspace mount + Docker daemon access most templates
|
||||
// expect to do real work. T1 sandboxed and T2 standard are kept as
|
||||
// explicit opt-ins for low-trust agents. SaaS still defaults to T4
|
||||
// because every SaaS workspace gets its own EC2 (sibling VMs, no
|
||||
// shared blast radius — see isSaaSTenant() / tier picker hide logic).
|
||||
const defaultTier = isSaaS ? 4 : 3;
|
||||
const [tier, setTier] = useState(defaultTier);
|
||||
|
||||
// Refs for roving tabIndex on the tier radio group (WCAG 2.1 arrow-key nav)
|
||||
|
||||
@ -1,12 +1,18 @@
|
||||
"use client";
|
||||
|
||||
import { STATUS_CONFIG } from "@/lib/design-tokens";
|
||||
import { useCanvasStore } from "@/store/canvas";
|
||||
|
||||
const LEGEND_STATUSES = ["online", "provisioning", "degraded", "failed", "paused", "offline"] as const;
|
||||
|
||||
export function Legend() {
|
||||
// TemplatePalette (when open) is fixed top-0 left-0 w-[280px] — the
|
||||
// default bottom-6 left-4 position of this legend would sit under it.
|
||||
// Shift past the 280 px palette + a 16 px gap when the palette is open.
|
||||
const paletteOpen = useCanvasStore((s) => s.templatePaletteOpen);
|
||||
const leftClass = paletteOpen ? "left-[296px]" : "left-4";
|
||||
return (
|
||||
<div className="fixed bottom-6 left-4 z-30 bg-zinc-900/95 border border-zinc-700/50 rounded-xl px-4 py-3 shadow-xl shadow-black/30 backdrop-blur-sm max-w-[280px]">
|
||||
<div className={`fixed bottom-6 ${leftClass} z-30 bg-zinc-900/95 border border-zinc-700/50 rounded-xl px-4 py-3 shadow-xl shadow-black/30 backdrop-blur-sm max-w-[280px] transition-[left] duration-200`}>
|
||||
<div className="text-[11px] font-semibold text-zinc-400 uppercase tracking-wider mb-2">Legend</div>
|
||||
|
||||
{/* Status */}
|
||||
|
||||
@ -1,33 +1,374 @@
|
||||
"use client";
|
||||
|
||||
import { useState, useEffect, useCallback, useRef } from "react";
|
||||
import { useState, useEffect, useCallback, useRef, useMemo } from "react";
|
||||
import { api } from "@/lib/api";
|
||||
import { getKeyLabel } from "@/lib/deploy-preflight";
|
||||
import { getKeyLabel, type ProviderChoice } from "@/lib/deploy-preflight";
|
||||
|
||||
interface Props {
|
||||
open: boolean;
|
||||
/** Flat list of every candidate env var. Used as the fallback input
|
||||
* set when `providers` is empty (or length 1). */
|
||||
missingKeys: string[];
|
||||
/** Grouped provider options derived from the template's models[] /
|
||||
* required_env. When length ≥ 2 the modal shows a radio picker. */
|
||||
providers?: ProviderChoice[];
|
||||
/** Runtime slug — used only for the "The <runtime> runtime …"
|
||||
* headline; behavior is driven by providers/missingKeys. */
|
||||
runtime: string;
|
||||
/** Called when user adds all keys and wants to proceed with deploy. */
|
||||
/** Called when all required keys for the chosen provider are saved. */
|
||||
onKeysAdded: () => void;
|
||||
/** Called when user cancels the deploy. */
|
||||
/** Called when the user cancels the deploy. */
|
||||
onCancel: () => void;
|
||||
/** Called when user wants to open the Settings Panel (Config tab → Secrets). */
|
||||
/** Optional — open the Settings Panel (Config tab → Secrets). */
|
||||
onOpenSettings?: () => void;
|
||||
/** Optional workspace ID — if provided, secrets are saved at workspace scope. */
|
||||
/** If provided, secrets save at workspace scope instead of global. */
|
||||
workspaceId?: string;
|
||||
}
|
||||
|
||||
interface KeyEntry {
|
||||
key: string;
|
||||
label: string;
|
||||
value: string;
|
||||
saved: boolean;
|
||||
saving: boolean;
|
||||
error: string | null;
|
||||
}
|
||||
|
||||
/**
|
||||
* MissingKeysModal
|
||||
* ----------------
|
||||
* Dispatches between two modes based on what the template declares:
|
||||
*
|
||||
* 1. PROVIDER PICKER — when the preflight returned ≥2 `providers` (e.g.
|
||||
* a Hermes template whose models[].required_env enumerate OpenRouter,
|
||||
* Anthropic, Nous-native, etc.). Radio list of options, saving the
|
||||
* chosen option's env vars satisfies the deploy.
|
||||
*
|
||||
* 2. ALL-KEYS — every entry in `missingKeys` rendered as its own input,
|
||||
* all must save before Deploy. Used when the template has a single
|
||||
* provider option or no declared alternatives.
|
||||
*
|
||||
* The modal never hardcodes per-runtime provider lists; the upstream
|
||||
* preflight derives that from the template config.yaml.
|
||||
*/
|
||||
export function MissingKeysModal({
|
||||
open,
|
||||
missingKeys,
|
||||
providers,
|
||||
runtime,
|
||||
onKeysAdded,
|
||||
onCancel,
|
||||
onOpenSettings,
|
||||
workspaceId,
|
||||
}: Props) {
|
||||
const pickerProviders = providers ?? [];
|
||||
const pickerMode = pickerProviders.length > 1;
|
||||
|
||||
if (pickerMode) {
|
||||
return (
|
||||
<ProviderPickerModal
|
||||
open={open}
|
||||
providers={pickerProviders}
|
||||
runtime={runtime}
|
||||
onKeysAdded={onKeysAdded}
|
||||
onCancel={onCancel}
|
||||
onOpenSettings={onOpenSettings}
|
||||
workspaceId={workspaceId}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
// Prefer the (single) provider's envVars over the raw missingKeys when
|
||||
// we have one — the provider list is already de-duped and ordered.
|
||||
const keys =
|
||||
pickerProviders.length === 1 ? pickerProviders[0].envVars : missingKeys;
|
||||
|
||||
return (
|
||||
<AllKeysModal
|
||||
open={open}
|
||||
missingKeys={keys}
|
||||
runtime={runtime}
|
||||
onKeysAdded={onKeysAdded}
|
||||
onCancel={onCancel}
|
||||
onOpenSettings={onOpenSettings}
|
||||
workspaceId={workspaceId}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Provider-picker mode — choose one option, save its env var(s), deploy.
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
function ProviderPickerModal({
|
||||
open,
|
||||
providers,
|
||||
runtime,
|
||||
onKeysAdded,
|
||||
onCancel,
|
||||
onOpenSettings,
|
||||
workspaceId,
|
||||
}: {
|
||||
open: boolean;
|
||||
providers: ProviderChoice[];
|
||||
runtime: string;
|
||||
onKeysAdded: () => void;
|
||||
onCancel: () => void;
|
||||
onOpenSettings?: () => void;
|
||||
workspaceId?: string;
|
||||
}) {
|
||||
const [selectedId, setSelectedId] = useState(providers[0].id);
|
||||
const [entries, setEntries] = useState<KeyEntry[]>([]);
|
||||
const firstInputRef = useRef<HTMLInputElement>(null);
|
||||
|
||||
const selected = useMemo(
|
||||
() => providers.find((p) => p.id === selectedId) ?? providers[0],
|
||||
[providers, selectedId],
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
if (!open) return;
|
||||
setSelectedId(providers[0].id);
|
||||
}, [open, providers]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!open) return;
|
||||
setEntries(
|
||||
selected.envVars.map((key) => ({
|
||||
key,
|
||||
value: "",
|
||||
saved: false,
|
||||
saving: false,
|
||||
error: null,
|
||||
})),
|
||||
);
|
||||
}, [open, selected]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!open) return;
|
||||
const raf = requestAnimationFrame(() => firstInputRef.current?.focus());
|
||||
return () => cancelAnimationFrame(raf);
|
||||
}, [open, selectedId]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!open) return;
|
||||
const handler = (e: KeyboardEvent) => {
|
||||
if (e.key === "Escape") onCancel();
|
||||
};
|
||||
window.addEventListener("keydown", handler);
|
||||
return () => window.removeEventListener("keydown", handler);
|
||||
}, [open, onCancel]);
|
||||
|
||||
const updateEntry = useCallback(
|
||||
(index: number, updates: Partial<KeyEntry>) => {
|
||||
setEntries((prev) =>
|
||||
prev.map((e, i) => (i === index ? { ...e, ...updates } : e)),
|
||||
);
|
||||
},
|
||||
[],
|
||||
);
|
||||
|
||||
const handleSaveKey = useCallback(
|
||||
async (index: number) => {
|
||||
const entry = entries[index];
|
||||
if (!entry.value.trim()) return;
|
||||
updateEntry(index, { saving: true, error: null });
|
||||
try {
|
||||
if (workspaceId) {
|
||||
await api.put(`/workspaces/${workspaceId}/secrets`, {
|
||||
key: entry.key,
|
||||
value: entry.value.trim(),
|
||||
});
|
||||
} else {
|
||||
await api.put("/settings/secrets", {
|
||||
key: entry.key,
|
||||
value: entry.value.trim(),
|
||||
});
|
||||
}
|
||||
updateEntry(index, { saved: true, saving: false });
|
||||
} catch (e) {
|
||||
updateEntry(index, {
|
||||
saving: false,
|
||||
error: e instanceof Error ? e.message : "Failed to save",
|
||||
});
|
||||
}
|
||||
},
|
||||
[entries, updateEntry, workspaceId],
|
||||
);
|
||||
|
||||
if (!open) return null;
|
||||
|
||||
const allSaved = entries.length > 0 && entries.every((e) => e.saved);
|
||||
const anySaving = entries.some((e) => e.saving);
|
||||
const runtimeLabel = runtime
|
||||
.replace(/[-_]/g, " ")
|
||||
.replace(/\b\w/g, (c) => c.toUpperCase());
|
||||
|
||||
return (
|
||||
<div className="fixed inset-0 z-50 flex items-center justify-center">
|
||||
<div
|
||||
aria-hidden="true"
|
||||
className="absolute inset-0 bg-black/70 backdrop-blur-sm"
|
||||
onClick={onCancel}
|
||||
/>
|
||||
|
||||
<div
|
||||
role="dialog"
|
||||
aria-modal="true"
|
||||
aria-labelledby="missing-keys-title"
|
||||
className="relative bg-zinc-900 border border-zinc-700 rounded-xl shadow-2xl shadow-black/50 max-w-[480px] w-full mx-4 overflow-hidden"
|
||||
>
|
||||
<div className="px-5 py-4 border-b border-zinc-800">
|
||||
<div className="flex items-center gap-2 mb-1">
|
||||
<div
|
||||
className="w-5 h-5 rounded-md bg-amber-600/20 border border-amber-500/30 flex items-center justify-center"
|
||||
aria-hidden="true"
|
||||
>
|
||||
<svg width="12" height="12" viewBox="0 0 12 12" fill="none" aria-hidden="true">
|
||||
<path d="M6 1L11 10H1L6 1Z" stroke="#fbbf24" strokeWidth="1.2" strokeLinejoin="round" />
|
||||
<path d="M6 5V7" stroke="#fbbf24" strokeWidth="1.2" strokeLinecap="round" />
|
||||
<circle cx="6" cy="8.5" r="0.5" fill="#fbbf24" />
|
||||
</svg>
|
||||
</div>
|
||||
<h3 id="missing-keys-title" className="text-sm font-semibold text-zinc-100">
|
||||
Missing API Keys
|
||||
</h3>
|
||||
</div>
|
||||
<p className="text-[12px] text-zinc-400 leading-relaxed">
|
||||
The <span className="text-amber-300 font-medium">{runtimeLabel}</span>{" "}
|
||||
runtime supports multiple providers. Pick one and paste its API key.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div className="px-5 py-4 space-y-3">
|
||||
<fieldset className="space-y-1.5">
|
||||
<legend className="text-[10px] uppercase tracking-wide text-zinc-500 font-semibold mb-1.5">
|
||||
Provider
|
||||
</legend>
|
||||
{providers.map((p) => (
|
||||
<label
|
||||
key={p.id}
|
||||
className={`flex items-start gap-2.5 rounded-lg border px-3 py-2 cursor-pointer transition-colors ${
|
||||
selectedId === p.id
|
||||
? "bg-blue-600/15 border-blue-500/50"
|
||||
: "bg-zinc-800/40 border-zinc-700/50 hover:border-zinc-600"
|
||||
}`}
|
||||
>
|
||||
<input
|
||||
type="radio"
|
||||
name="provider"
|
||||
value={p.id}
|
||||
checked={selectedId === p.id}
|
||||
onChange={() => setSelectedId(p.id)}
|
||||
className="mt-0.5 accent-blue-500"
|
||||
/>
|
||||
<div className="min-w-0 flex-1">
|
||||
<div className="text-[12px] text-zinc-100 font-medium">{p.label}</div>
|
||||
<div className="text-[10px] font-mono text-zinc-500">
|
||||
{p.envVars.join(", ")}
|
||||
</div>
|
||||
{p.note && (
|
||||
<div className="text-[10px] text-zinc-500 mt-1 leading-relaxed">
|
||||
{p.note}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</label>
|
||||
))}
|
||||
</fieldset>
|
||||
|
||||
<div className="space-y-2">
|
||||
{entries.map((entry, index) => (
|
||||
<div
|
||||
key={entry.key}
|
||||
className="bg-zinc-800/50 rounded-lg px-3 py-2.5 border border-zinc-700/50"
|
||||
>
|
||||
<div className="flex items-center justify-between mb-1.5">
|
||||
<div>
|
||||
<div className="text-[11px] text-zinc-300 font-medium">
|
||||
{getKeyLabel(entry.key)}
|
||||
</div>
|
||||
<div className="text-[9px] font-mono text-zinc-500">{entry.key}</div>
|
||||
</div>
|
||||
{entry.saved && (
|
||||
<span className="text-[9px] text-emerald-400 bg-emerald-900/30 px-1.5 py-0.5 rounded flex items-center gap-1">
|
||||
<svg width="8" height="8" viewBox="0 0 8 8" fill="none" aria-hidden="true">
|
||||
<path d="M1.5 4L3.5 6L6.5 2" stroke="currentColor" strokeWidth="1.2" strokeLinecap="round" strokeLinejoin="round" />
|
||||
</svg>
|
||||
Saved
|
||||
</span>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{!entry.saved && (
|
||||
<div className="flex gap-2 mt-2">
|
||||
<input
|
||||
value={entry.value}
|
||||
onChange={(e) => updateEntry(index, { value: e.target.value.trimStart() })}
|
||||
placeholder={entry.key.includes("API_KEY") ? "sk-..." : "Enter value"}
|
||||
type="password"
|
||||
ref={index === 0 ? firstInputRef : undefined}
|
||||
onKeyDown={(e) => {
|
||||
if (e.key === "Enter" && entry.value.trim()) {
|
||||
handleSaveKey(index);
|
||||
}
|
||||
}}
|
||||
className="flex-1 bg-zinc-900 border border-zinc-600 rounded px-2 py-1.5 text-[11px] text-zinc-100 font-mono focus:outline-none focus:border-blue-500 focus:ring-1 focus:ring-blue-500/20 transition-colors"
|
||||
/>
|
||||
<button
|
||||
onClick={() => handleSaveKey(index)}
|
||||
disabled={!entry.value.trim() || entry.saving}
|
||||
className="px-3 py-1.5 bg-blue-600 hover:bg-blue-500 text-[11px] rounded text-white disabled:opacity-30 transition-colors shrink-0"
|
||||
>
|
||||
{entry.saving ? "..." : "Save"}
|
||||
</button>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{entry.error && (
|
||||
<div className="mt-1.5 text-[10px] text-red-400">{entry.error}</div>
|
||||
)}
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="px-5 py-3 border-t border-zinc-800 bg-zinc-950/50 flex items-center justify-between gap-2">
|
||||
<div>
|
||||
{onOpenSettings && (
|
||||
<button
|
||||
onClick={onOpenSettings}
|
||||
className="text-[11px] text-blue-400 hover:text-blue-300 transition-colors"
|
||||
>
|
||||
Open Settings Panel
|
||||
</button>
|
||||
)}
|
||||
</div>
|
||||
<div className="flex items-center gap-2">
|
||||
<button
|
||||
onClick={onCancel}
|
||||
className="px-3.5 py-1.5 text-[12px] text-zinc-400 hover:text-zinc-200 bg-zinc-800 hover:bg-zinc-700 border border-zinc-700 rounded-lg transition-colors"
|
||||
>
|
||||
Cancel Deploy
|
||||
</button>
|
||||
<button
|
||||
onClick={onKeysAdded}
|
||||
disabled={!allSaved || anySaving}
|
||||
className="px-3.5 py-1.5 text-[12px] bg-blue-600 hover:bg-blue-500 text-white rounded-lg transition-colors disabled:opacity-40"
|
||||
>
|
||||
{allSaved ? "Deploy" : entries.length > 1 ? "Add Keys" : "Add Key"}
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// All-keys mode — every missingKey rendered as its own input, all required.
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
function AllKeysModal({
|
||||
open,
|
||||
missingKeys,
|
||||
runtime,
|
||||
@ -35,18 +376,24 @@ export function MissingKeysModal({
|
||||
onCancel,
|
||||
onOpenSettings,
|
||||
workspaceId,
|
||||
}: Props) {
|
||||
}: {
|
||||
open: boolean;
|
||||
missingKeys: string[];
|
||||
runtime: string;
|
||||
onKeysAdded: () => void;
|
||||
onCancel: () => void;
|
||||
onOpenSettings?: () => void;
|
||||
workspaceId?: string;
|
||||
}) {
|
||||
const [entries, setEntries] = useState<KeyEntry[]>([]);
|
||||
const [globalError, setGlobalError] = useState<string | null>(null);
|
||||
const firstInputRef = useRef<HTMLInputElement>(null);
|
||||
|
||||
// Initialize entries when modal opens or missingKeys change
|
||||
useEffect(() => {
|
||||
if (!open) return;
|
||||
setEntries(
|
||||
missingKeys.map((key) => ({
|
||||
key,
|
||||
label: getKeyLabel(key),
|
||||
value: "",
|
||||
saved: false,
|
||||
saving: false,
|
||||
@ -56,14 +403,12 @@ export function MissingKeysModal({
|
||||
setGlobalError(null);
|
||||
}, [open, missingKeys]);
|
||||
|
||||
// Focus first input when modal opens
|
||||
useEffect(() => {
|
||||
if (!open) return;
|
||||
const raf = requestAnimationFrame(() => {
|
||||
firstInputRef.current?.focus();
|
||||
});
|
||||
const raf = requestAnimationFrame(() => firstInputRef.current?.focus());
|
||||
return () => cancelAnimationFrame(raf);
|
||||
}, [open]);
|
||||
|
||||
useEffect(() => {
|
||||
if (!open) return;
|
||||
const handler = (e: KeyboardEvent) => {
|
||||
@ -90,7 +435,6 @@ export function MissingKeysModal({
|
||||
updateEntry(index, { saving: true, error: null });
|
||||
|
||||
try {
|
||||
// Save to global scope by default (available to all workspaces)
|
||||
if (workspaceId) {
|
||||
await api.put(`/workspaces/${workspaceId}/secrets`, {
|
||||
key: entry.key,
|
||||
@ -129,37 +473,34 @@ export function MissingKeysModal({
|
||||
|
||||
if (!open) return null;
|
||||
|
||||
const allSaved = entries.every((e) => e.saved);
|
||||
const allSaved = entries.length > 0 && entries.every((e) => e.saved);
|
||||
const anySaving = entries.some((e) => e.saving);
|
||||
const runtimeLabel = runtime.replace(/[-_]/g, " ").replace(/\b\w/g, (c) => c.toUpperCase());
|
||||
const runtimeLabel = runtime
|
||||
.replace(/[-_]/g, " ")
|
||||
.replace(/\b\w/g, (c) => c.toUpperCase());
|
||||
|
||||
return (
|
||||
<div className="fixed inset-0 z-50 flex items-center justify-center">
|
||||
{/* Backdrop */}
|
||||
<div
|
||||
aria-hidden="true"
|
||||
className="absolute inset-0 bg-black/70 backdrop-blur-sm"
|
||||
onClick={onCancel}
|
||||
/>
|
||||
|
||||
{/* Dialog */}
|
||||
<div
|
||||
role="dialog"
|
||||
aria-modal="true"
|
||||
aria-labelledby="missing-keys-title"
|
||||
className="relative bg-zinc-900 border border-zinc-700 rounded-xl shadow-2xl shadow-black/50 max-w-[440px] w-full mx-4 overflow-hidden"
|
||||
>
|
||||
{/* Header */}
|
||||
<div className="px-5 py-4 border-b border-zinc-800">
|
||||
<div className="flex items-center gap-2 mb-1">
|
||||
<div className="w-5 h-5 rounded-md bg-amber-600/20 border border-amber-500/30 flex items-center justify-center" aria-hidden="true">
|
||||
<div
|
||||
className="w-5 h-5 rounded-md bg-amber-600/20 border border-amber-500/30 flex items-center justify-center"
|
||||
aria-hidden="true"
|
||||
>
|
||||
<svg width="12" height="12" viewBox="0 0 12 12" fill="none" aria-hidden="true">
|
||||
<path
|
||||
d="M6 1L11 10H1L6 1Z"
|
||||
stroke="#fbbf24"
|
||||
strokeWidth="1.2"
|
||||
strokeLinejoin="round"
|
||||
/>
|
||||
<path d="M6 1L11 10H1L6 1Z" stroke="#fbbf24" strokeWidth="1.2" strokeLinejoin="round" />
|
||||
<path d="M6 5V7" stroke="#fbbf24" strokeWidth="1.2" strokeLinecap="round" />
|
||||
<circle cx="6" cy="8.5" r="0.5" fill="#fbbf24" />
|
||||
</svg>
|
||||
@ -169,12 +510,11 @@ export function MissingKeysModal({
|
||||
</h3>
|
||||
</div>
|
||||
<p className="text-[12px] text-zinc-400 leading-relaxed">
|
||||
The <span className="text-amber-300 font-medium">{runtimeLabel}</span> runtime
|
||||
requires the following keys to be configured before deploying.
|
||||
The <span className="text-amber-300 font-medium">{runtimeLabel}</span>{" "}
|
||||
runtime requires the following keys to be configured before deploying.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
{/* Body — key list */}
|
||||
<div className="px-5 py-4 space-y-3 max-h-[50vh] overflow-y-auto">
|
||||
{entries.map((entry, index) => (
|
||||
<div
|
||||
@ -184,11 +524,9 @@ export function MissingKeysModal({
|
||||
<div className="flex items-center justify-between mb-1">
|
||||
<div>
|
||||
<div className="text-[11px] text-zinc-300 font-medium">
|
||||
{entry.label}
|
||||
</div>
|
||||
<div className="text-[9px] font-mono text-zinc-500">
|
||||
{entry.key}
|
||||
{getKeyLabel(entry.key)}
|
||||
</div>
|
||||
<div className="text-[9px] font-mono text-zinc-500">{entry.key}</div>
|
||||
</div>
|
||||
{entry.saved && (
|
||||
<span className="text-[9px] text-emerald-400 bg-emerald-900/30 px-1.5 py-0.5 rounded flex items-center gap-1">
|
||||
@ -225,9 +563,7 @@ export function MissingKeysModal({
|
||||
</div>
|
||||
)}
|
||||
|
||||
{entry.error && (
|
||||
<div className="mt-1.5 text-[10px] text-red-400">{entry.error}</div>
|
||||
)}
|
||||
{entry.error && <div className="mt-1.5 text-[10px] text-red-400">{entry.error}</div>}
|
||||
</div>
|
||||
))}
|
||||
|
||||
@ -238,7 +574,6 @@ export function MissingKeysModal({
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Footer */}
|
||||
<div className="px-5 py-3 border-t border-zinc-800 bg-zinc-950/50 flex items-center justify-between gap-2">
|
||||
<div>
|
||||
{onOpenSettings && (
|
||||
|
||||
@ -46,11 +46,15 @@ export function SidePanel() {
|
||||
const panelTab = useCanvasStore((s) => s.panelTab);
|
||||
const setPanelTab = useCanvasStore((s) => s.setPanelTab);
|
||||
const selectNode = useCanvasStore((s) => s.selectNode);
|
||||
const setSidePanelWidth = useCanvasStore((s) => s.setSidePanelWidth);
|
||||
const node = useCanvasStore((s) =>
|
||||
s.nodes.find((n) => n.id === s.selectedNodeId)
|
||||
);
|
||||
|
||||
// Resizable panel width — persisted across node selections via localStorage
|
||||
// Resizable panel width — persisted across node selections via localStorage.
|
||||
// Also published to the canvas store on every change so the centered
|
||||
// Toolbar can re-centre itself on the remaining canvas area (avoids the
|
||||
// Audit / Search / Settings buttons hiding under the panel).
|
||||
const [width, setWidth] = useState<number>(() => {
|
||||
if (typeof window === "undefined") return SIDEPANEL_DEFAULT_WIDTH;
|
||||
const saved = localStorage.getItem(SIDEPANEL_WIDTH_KEY);
|
||||
@ -59,6 +63,9 @@ export function SidePanel() {
|
||||
? parsed
|
||||
: SIDEPANEL_DEFAULT_WIDTH;
|
||||
});
|
||||
useEffect(() => {
|
||||
setSidePanelWidth(width);
|
||||
}, [width, setSidePanelWidth]);
|
||||
const widthRef = useRef(width); // tracks live drag value for the mouseup handler
|
||||
const dragging = useRef(false);
|
||||
const startX = useRef(0);
|
||||
|
||||
@ -2,7 +2,8 @@
|
||||
|
||||
import { useState, useEffect, useCallback, useRef } from "react";
|
||||
import { api } from "@/lib/api";
|
||||
import { checkDeploySecrets, type PreflightResult } from "@/lib/deploy-preflight";
|
||||
import { useCanvasStore } from "@/store/canvas";
|
||||
import { checkDeploySecrets, type PreflightResult, type ModelSpec } from "@/lib/deploy-preflight";
|
||||
import { MissingKeysModal } from "./MissingKeysModal";
|
||||
import { ConfirmDialog } from "./ConfirmDialog";
|
||||
import { Spinner } from "./Spinner";
|
||||
@ -13,7 +14,11 @@ interface Template {
|
||||
name: string;
|
||||
description: string;
|
||||
tier: number;
|
||||
runtime?: string;
|
||||
model: string;
|
||||
models?: ModelSpec[];
|
||||
/** AND-required env vars declared at runtime_config.required_env. */
|
||||
required_env?: string[];
|
||||
skills: string[];
|
||||
skill_count: number;
|
||||
}
|
||||
@ -53,6 +58,13 @@ export function OrgTemplatesSection() {
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [importing, setImporting] = useState<string | null>(null);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
// Collapsed by default — org templates are multi-workspace imports
|
||||
// that most new users don't reach for first. Keeping them
|
||||
// expand-on-demand frees ~400 px of vertical space for the
|
||||
// individual workspace templates above, which is the primary
|
||||
// deploy path. The count in the header still makes discovery
|
||||
// obvious: "Org Templates (4) ▸".
|
||||
const [expanded, setExpanded] = useState(false);
|
||||
|
||||
const loadOrgs = useCallback(async () => {
|
||||
setLoading(true);
|
||||
@ -79,9 +91,26 @@ export function OrgTemplatesSection() {
|
||||
return (
|
||||
<div className="space-y-2" data-testid="org-templates-section">
|
||||
<div className="flex items-center justify-between">
|
||||
<h3 className="text-[10px] uppercase tracking-wide text-zinc-500 font-semibold">
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => setExpanded((v) => !v)}
|
||||
aria-expanded={expanded}
|
||||
aria-controls="org-templates-body"
|
||||
className="flex items-center gap-1.5 text-[10px] uppercase tracking-wide text-zinc-500 hover:text-zinc-300 font-semibold transition-colors"
|
||||
>
|
||||
<span
|
||||
aria-hidden="true"
|
||||
className={`inline-block text-[8px] transition-transform duration-150 ${expanded ? "rotate-90" : ""}`}
|
||||
>
|
||||
▶
|
||||
</span>
|
||||
Org Templates
|
||||
</h3>
|
||||
{orgs.length > 0 && (
|
||||
<span className="text-zinc-600 normal-case tracking-normal">
|
||||
({orgs.length})
|
||||
</span>
|
||||
)}
|
||||
</button>
|
||||
<button
|
||||
onClick={loadOrgs}
|
||||
aria-label="Refresh org templates"
|
||||
@ -91,6 +120,8 @@ export function OrgTemplatesSection() {
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{expanded && (
|
||||
<div id="org-templates-body" className="space-y-2">
|
||||
{loading && (
|
||||
<div role="status" aria-live="polite" className="flex items-center gap-1.5 text-[10px] text-zinc-500">
|
||||
<Spinner size="sm" />
|
||||
@ -140,6 +171,8 @@ export function OrgTemplatesSection() {
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@ -226,6 +259,14 @@ function ImportAgentButton({ onImported }: { onImported: () => void }) {
|
||||
|
||||
export function TemplatePalette() {
|
||||
const [open, setOpen] = useState(false);
|
||||
// Publish palette-open state to the canvas store so Legend (and any
|
||||
// future floating left-bottom UI) can shift right to avoid being
|
||||
// hidden behind the 280 px palette drawer.
|
||||
const setTemplatePaletteOpen = useCanvasStore((s) => s.setTemplatePaletteOpen);
|
||||
useEffect(() => {
|
||||
setTemplatePaletteOpen(open);
|
||||
}, [open, setTemplatePaletteOpen]);
|
||||
|
||||
const [templates, setTemplates] = useState<Template[]>([]);
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [creating, setCreating] = useState<string | null>(null);
|
||||
@ -292,8 +333,15 @@ export function TemplatePalette() {
|
||||
setCreating(template.id);
|
||||
setError(null);
|
||||
|
||||
const runtime = resolveRuntime(template.id);
|
||||
const preflight = await checkDeploySecrets(runtime);
|
||||
// Prefer the runtime the Go /templates endpoint returned verbatim —
|
||||
// resolveRuntime() is a legacy id→runtime fallback for installs whose
|
||||
// template summary predates the `runtime` field.
|
||||
const runtime = template.runtime ?? resolveRuntime(template.id);
|
||||
const preflight = await checkDeploySecrets({
|
||||
runtime,
|
||||
models: template.models,
|
||||
required_env: template.required_env,
|
||||
});
|
||||
|
||||
if (!preflight.ok) {
|
||||
// Missing keys — show the modal instead of deploying
|
||||
@ -331,6 +379,7 @@ export function TemplatePalette() {
|
||||
<MissingKeysModal
|
||||
open={!!missingKeysInfo}
|
||||
missingKeys={missingKeysInfo?.preflight.missingKeys ?? []}
|
||||
providers={missingKeysInfo?.preflight.providers ?? []}
|
||||
runtime={missingKeysInfo?.preflight.runtime ?? ""}
|
||||
onKeysAdded={() => {
|
||||
if (missingKeysInfo) {
|
||||
|
||||
@ -16,6 +16,17 @@ export function Toolbar() {
|
||||
const setShowA2AEdges = useCanvasStore((s) => s.setShowA2AEdges);
|
||||
const selectedNodeId = useCanvasStore((s) => s.selectedNodeId);
|
||||
const setPanelTab = useCanvasStore((s) => s.setPanelTab);
|
||||
const sidePanelWidth = useCanvasStore((s) => s.sidePanelWidth);
|
||||
|
||||
// Toolbar is fixed + centred on the viewport. When a workspace is
|
||||
// selected the SidePanel (z-50, fixed right-0) opens and covers the
|
||||
// right edge of the viewport — without this adjustment, the right
|
||||
// half of the Toolbar (Audit / Search / Help / Settings) hides
|
||||
// behind the panel. Shifting the toolbar LEFT by half the panel
|
||||
// width re-centres it on the remaining canvas area.
|
||||
const toolbarOffsetStyle = selectedNodeId
|
||||
? { marginLeft: `-${sidePanelWidth / 2}px` }
|
||||
: undefined;
|
||||
|
||||
const [stopping, setStopping] = useState(false);
|
||||
const [restartingAll, setRestartingAll] = useState(false);
|
||||
@ -116,7 +127,10 @@ export function Toolbar() {
|
||||
}, []);
|
||||
|
||||
return (
|
||||
<div className="fixed top-3 left-1/2 -translate-x-1/2 z-20 flex items-center gap-3 bg-zinc-900/80 backdrop-blur-md border border-zinc-800/60 rounded-xl px-4 py-2 shadow-xl shadow-black/20">
|
||||
<div
|
||||
className="fixed top-3 left-1/2 -translate-x-1/2 z-20 flex items-center gap-3 bg-zinc-900/80 backdrop-blur-md border border-zinc-800/60 rounded-xl px-4 py-2 shadow-xl shadow-black/20 transition-[margin-left] duration-200"
|
||||
style={toolbarOffsetStyle}
|
||||
>
|
||||
{/* Logo / Title */}
|
||||
<div className="flex items-center gap-2 pr-3 border-r border-zinc-800/60">
|
||||
<img src="/molecule-icon.png" alt="Molecule AI" className="w-5 h-5" />
|
||||
|
||||
@ -6,11 +6,30 @@ import { CookieConsent, hasConsent } from "../CookieConsent";
|
||||
const STORAGE_KEY = "molecule_cookie_consent";
|
||||
|
||||
// These tests lock the privacy-preserving default: the banner appears on
|
||||
// first visit, clicking either button records a decision, and subsequent
|
||||
// renders skip the banner until the policy version changes.
|
||||
// first visit (SaaS mode), clicking either button records a decision, and
|
||||
// subsequent renders skip the banner until the policy version changes.
|
||||
//
|
||||
// The banner is SaaS-only — it references moleculesai.app's hosted privacy
|
||||
// policy and presumes GDPR/ePrivacy obligations that only apply to the
|
||||
// hosted offering. Self-hosted / local-dev hosts must not see it. Most
|
||||
// tests below simulate SaaS by overriding window.location.hostname; the
|
||||
// "local-dev" test omits that override.
|
||||
|
||||
// setSaaSHostname rewrites window.location.hostname to look like a SaaS
|
||||
// tenant subdomain so isSaaSTenant() returns true. Must run before
|
||||
// CookieConsent mounts, otherwise its one-shot useEffect captures the
|
||||
// localhost default. jsdom's location object is read-only via the normal
|
||||
// setter but defineProperty lets us replace it for the scope of a test.
|
||||
function setSaaSHostname(host = "acme.moleculesai.app") {
|
||||
Object.defineProperty(window, "location", {
|
||||
configurable: true,
|
||||
value: { ...window.location, hostname: host },
|
||||
});
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
window.localStorage.clear();
|
||||
setSaaSHostname();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
@ -86,6 +105,28 @@ describe("CookieConsent", () => {
|
||||
expect(dialog.getAttribute("aria-labelledby")).toBe("cookie-consent-title");
|
||||
expect(dialog.getAttribute("aria-describedby")).toBe("cookie-consent-body");
|
||||
});
|
||||
|
||||
it("does NOT render on local dev (non-SaaS hostname)", () => {
|
||||
// Simulate `npm run dev` on localhost — isSaaSTenant() returns false
|
||||
// and the banner must stay hidden. Regression test for PR #1871:
|
||||
// a fresh-clone Canvas showing the hosted privacy banner on
|
||||
// localhost:3000 was confusing for self-hosted users.
|
||||
Object.defineProperty(window, "location", {
|
||||
configurable: true,
|
||||
value: { ...window.location, hostname: "localhost" },
|
||||
});
|
||||
render(<CookieConsent />);
|
||||
expect(screen.queryByRole("dialog")).toBeNull();
|
||||
});
|
||||
|
||||
it("does NOT render on a LAN hostname (192.168.*, *.local)", () => {
|
||||
Object.defineProperty(window, "location", {
|
||||
configurable: true,
|
||||
value: { ...window.location, hostname: "192.168.1.74" },
|
||||
});
|
||||
render(<CookieConsent />);
|
||||
expect(screen.queryByRole("dialog")).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("hasConsent", () => {
|
||||
|
||||
@ -80,15 +80,16 @@ describe("CreateWorkspaceDialog — accessibility", () => {
|
||||
// Non-SaaS build (jsdom hostname is localhost) shows all four tiers:
|
||||
// T1 Sandboxed, T2 Standard, T3 Privileged, T4 Full Access.
|
||||
expect(radios.length).toBe(4);
|
||||
// T1 is default selection
|
||||
// T3 is the default selection on non-SaaS hosts (see
|
||||
// CreateWorkspaceDialog.tsx `defaultTier` comment).
|
||||
const t1 = radios.find((r) => r.textContent?.includes("T1"));
|
||||
const t2 = radios.find((r) => r.textContent?.includes("T2"));
|
||||
expect(t1?.getAttribute("aria-checked")).toBe("true");
|
||||
expect(t2?.getAttribute("aria-checked")).toBe("false");
|
||||
// Click T2 and verify aria-checked flips
|
||||
fireEvent.click(t2!);
|
||||
const t3 = radios.find((r) => r.textContent?.includes("T3"));
|
||||
expect(t3?.getAttribute("aria-checked")).toBe("true");
|
||||
expect(t1?.getAttribute("aria-checked")).toBe("false");
|
||||
// Click T1 and verify aria-checked flips
|
||||
fireEvent.click(t1!);
|
||||
await waitFor(() =>
|
||||
expect(t2?.getAttribute("aria-checked")).toBe("true")
|
||||
expect(t1?.getAttribute("aria-checked")).toBe("true")
|
||||
);
|
||||
});
|
||||
|
||||
@ -101,10 +102,10 @@ describe("CreateWorkspaceDialog — accessibility", () => {
|
||||
const t2 = radios.find((r) => r.textContent?.includes("T2"))!;
|
||||
const t3 = radios.find((r) => r.textContent?.includes("T3"))!;
|
||||
const t4 = radios.find((r) => r.textContent?.includes("T4"))!;
|
||||
// T1 is default selected (non-SaaS test env; SaaS would default to T4)
|
||||
expect(t1.getAttribute("tabindex")).toBe("0");
|
||||
// T3 is default selected (non-SaaS test env; SaaS would default to T4).
|
||||
expect(t3.getAttribute("tabindex")).toBe("0");
|
||||
expect(t1.getAttribute("tabindex")).toBe("-1");
|
||||
expect(t2.getAttribute("tabindex")).toBe("-1");
|
||||
expect(t3.getAttribute("tabindex")).toBe("-1");
|
||||
expect(t4.getAttribute("tabindex")).toBe("-1");
|
||||
});
|
||||
|
||||
|
||||
@ -28,6 +28,8 @@ vi.mock("@/lib/deploy-preflight", () => ({
|
||||
return labels[key] ?? key;
|
||||
},
|
||||
}));
|
||||
// a11y tests render the modal without a `providers` prop — it falls
|
||||
// back to all-keys mode driven by the `missingKeys` array.
|
||||
|
||||
// ── Import after mocks ────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
@ -37,6 +37,9 @@ vi.mock("@/lib/deploy-preflight", () => ({
|
||||
return labels[key] ?? key;
|
||||
},
|
||||
}));
|
||||
// Tests render the modal without a `providers` prop — the component
|
||||
// falls back to the all-keys mode using the `missingKeys` array, which
|
||||
// matches the contract these tests were written for.
|
||||
|
||||
// ── Suite 1: Visibility and ARIA ────────────────────────────────────────────
|
||||
|
||||
@ -265,7 +268,7 @@ describe("MissingKeysModal — save flow", () => {
|
||||
onCancel={vi.fn()}
|
||||
/>
|
||||
);
|
||||
const saveBtn = screen.getAllByRole("button").find(b => /save/i.test(b.textContent ?? ""))!;
|
||||
const saveBtn = screen.getAllByRole("button").find(b => /save/i.test(b.textContent ?? "")) as HTMLButtonElement;
|
||||
expect(saveBtn.disabled).toBe(true);
|
||||
});
|
||||
|
||||
@ -284,7 +287,7 @@ describe("MissingKeysModal — save flow", () => {
|
||||
act(() => {
|
||||
fireEvent.change(input, { target: { value: "sk-123" } });
|
||||
});
|
||||
const saveBtn = screen.getAllByRole("button").find(b => /save/i.test(b.textContent ?? ""))!;
|
||||
const saveBtn = screen.getAllByRole("button").find(b => /save/i.test(b.textContent ?? "")) as HTMLButtonElement;
|
||||
expect(saveBtn.disabled).toBe(false);
|
||||
});
|
||||
|
||||
|
||||
@ -1,83 +0,0 @@
|
||||
// @vitest-environment node
|
||||
/**
|
||||
* MissingKeysModal preflight logic tests.
|
||||
* Component rendering tested in MissingKeysModal.component.test.tsx.
|
||||
*/
|
||||
import { describe, it, expect, beforeEach, vi } from "vitest";
|
||||
|
||||
global.fetch = vi.fn();
|
||||
|
||||
import {
|
||||
getRequiredKeys,
|
||||
findMissingKeys,
|
||||
getKeyLabel,
|
||||
checkDeploySecrets,
|
||||
RUNTIME_REQUIRED_KEYS,
|
||||
} from "../../lib/deploy-preflight";
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe("MissingKeysModal preflight logic", () => {
|
||||
it("identifies missing keys for langgraph runtime", () => {
|
||||
const missing = findMissingKeys("langgraph", new Set<string>());
|
||||
expect(missing).toEqual(["OPENAI_API_KEY"]);
|
||||
});
|
||||
|
||||
it("identifies missing keys for claude-code runtime", () => {
|
||||
const missing = findMissingKeys("claude-code", new Set<string>());
|
||||
expect(missing).toEqual(["ANTHROPIC_API_KEY"]);
|
||||
});
|
||||
|
||||
it("generates correct labels for modal display", () => {
|
||||
const missing = findMissingKeys("langgraph", new Set<string>());
|
||||
const labels = missing.map((k) => ({ key: k, label: getKeyLabel(k) }));
|
||||
expect(labels).toEqual([{ key: "OPENAI_API_KEY", label: "OpenAI API Key" }]);
|
||||
});
|
||||
|
||||
it("returns no missing keys when all are configured", () => {
|
||||
const missing = findMissingKeys("langgraph", new Set(["OPENAI_API_KEY"]));
|
||||
expect(missing).toEqual([]);
|
||||
});
|
||||
|
||||
it("pre-deploy check returns ok=false and correct missing keys", async () => {
|
||||
(global.fetch as ReturnType<typeof vi.fn>).mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () => Promise.resolve([]),
|
||||
} as Response);
|
||||
|
||||
const result = await checkDeploySecrets("langgraph");
|
||||
expect(result.ok).toBe(false);
|
||||
expect(result.missingKeys).toEqual(["OPENAI_API_KEY"]);
|
||||
expect(result.runtime).toBe("langgraph");
|
||||
});
|
||||
|
||||
it("pre-deploy check returns ok=true when keys are present", async () => {
|
||||
(global.fetch as ReturnType<typeof vi.fn>).mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () =>
|
||||
Promise.resolve([{ key: "ANTHROPIC_API_KEY", has_value: true, created_at: "", updated_at: "" }]),
|
||||
} as Response);
|
||||
|
||||
const result = await checkDeploySecrets("claude-code");
|
||||
expect(result.ok).toBe(true);
|
||||
expect(result.missingKeys).toEqual([]);
|
||||
});
|
||||
|
||||
it("handles all runtimes correctly for modal data construction", () => {
|
||||
const runtimes = Object.keys(RUNTIME_REQUIRED_KEYS);
|
||||
for (const runtime of runtimes) {
|
||||
const requiredKeys = getRequiredKeys(runtime);
|
||||
const missing = findMissingKeys(runtime, new Set<string>());
|
||||
const labels = missing.map((k) => getKeyLabel(k));
|
||||
|
||||
expect(requiredKeys.length).toBeGreaterThan(0);
|
||||
expect(missing).toEqual(requiredKeys);
|
||||
expect(labels.length).toBe(requiredKeys.length);
|
||||
for (const label of labels) {
|
||||
expect(label.length).toBeGreaterThan(0);
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
102
canvas/src/components/__tests__/OrgTemplatesSection.test.tsx
Normal file
102
canvas/src/components/__tests__/OrgTemplatesSection.test.tsx
Normal file
@ -0,0 +1,102 @@
|
||||
// @vitest-environment jsdom
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { render, screen, waitFor, fireEvent, cleanup } from "@testing-library/react";
|
||||
|
||||
// Tests for the default-collapsed + expand-on-click behavior of the
|
||||
// org templates drawer. Before this change the section rendered all
|
||||
// org cards inline, which pushed the individual workspace templates
|
||||
// off-screen when there were ≥3 orgs on disk. Collapsed-by-default
|
||||
// keeps the scroll focused on the primary deploy path.
|
||||
|
||||
vi.mock("@/lib/api", () => ({
|
||||
api: {
|
||||
get: vi.fn().mockResolvedValue([
|
||||
{ dir: "free-beats-all", name: "Free Beats All", description: "d1", workspaces: 3 },
|
||||
{ dir: "medo-smoke", name: "MeDo Smoke Test", description: "d2", workspaces: 1 },
|
||||
]),
|
||||
post: vi.fn().mockResolvedValue({}),
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock("../Spinner", () => ({ Spinner: () => null }));
|
||||
vi.mock("../MissingKeysModal", () => ({ MissingKeysModal: () => null }));
|
||||
vi.mock("../ConfirmDialog", () => ({ ConfirmDialog: () => null }));
|
||||
vi.mock("@/lib/deploy-preflight", () => ({ checkDeploySecrets: vi.fn() }));
|
||||
|
||||
import { OrgTemplatesSection } from "../TemplatePalette";
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
});
|
||||
|
||||
describe("OrgTemplatesSection — collapse/expand", () => {
|
||||
it("renders collapsed by default — org cards are NOT in the DOM", async () => {
|
||||
render(<OrgTemplatesSection />);
|
||||
// The header toggle is visible immediately…
|
||||
// Two buttons match "Org Templates" (toggle + refresh) — pick the
|
||||
// toggle by its aria-controls binding.
|
||||
const toggle = (await screen.findAllByRole("button")).find((b) =>
|
||||
b.getAttribute("aria-controls") === "org-templates-body"
|
||||
)!;
|
||||
expect(toggle).toBeTruthy();
|
||||
expect(toggle.getAttribute("aria-expanded")).toBe("false");
|
||||
|
||||
// …and the count appears after loadOrgs resolves.
|
||||
await waitFor(() => {
|
||||
expect(toggle.textContent).toContain("(2)");
|
||||
});
|
||||
|
||||
// But none of the individual org cards should be rendered yet.
|
||||
expect(screen.queryByText("Free Beats All")).toBeNull();
|
||||
expect(screen.queryByText("MeDo Smoke Test")).toBeNull();
|
||||
});
|
||||
|
||||
it("clicking the header reveals the org cards", async () => {
|
||||
render(<OrgTemplatesSection />);
|
||||
|
||||
// Wait for the count so we know loadOrgs finished.
|
||||
// Two buttons match "Org Templates" (toggle + refresh) — pick the
|
||||
// toggle by its aria-controls binding.
|
||||
const toggle = (await screen.findAllByRole("button")).find((b) =>
|
||||
b.getAttribute("aria-controls") === "org-templates-body"
|
||||
)!;
|
||||
await waitFor(() => {
|
||||
expect(toggle.textContent).toContain("(2)");
|
||||
});
|
||||
|
||||
// Expand.
|
||||
fireEvent.click(toggle);
|
||||
await waitFor(() => {
|
||||
expect(toggle.getAttribute("aria-expanded")).toBe("true");
|
||||
});
|
||||
|
||||
// Org cards now visible.
|
||||
expect(screen.getByText("Free Beats All")).toBeTruthy();
|
||||
expect(screen.getByText("MeDo Smoke Test")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("clicking the header again collapses back", async () => {
|
||||
render(<OrgTemplatesSection />);
|
||||
// Two buttons match "Org Templates" (toggle + refresh) — pick the
|
||||
// toggle by its aria-controls binding.
|
||||
const toggle = (await screen.findAllByRole("button")).find((b) =>
|
||||
b.getAttribute("aria-controls") === "org-templates-body"
|
||||
)!;
|
||||
await waitFor(() => {
|
||||
expect(toggle.textContent).toContain("(2)");
|
||||
});
|
||||
|
||||
fireEvent.click(toggle); // expand
|
||||
expect(screen.getByText("Free Beats All")).toBeTruthy();
|
||||
|
||||
fireEvent.click(toggle); // collapse
|
||||
await waitFor(() => {
|
||||
expect(toggle.getAttribute("aria-expanded")).toBe("false");
|
||||
});
|
||||
expect(screen.queryByText("Free Beats All")).toBeNull();
|
||||
});
|
||||
});
|
||||
@ -36,6 +36,10 @@ const mockStoreState = {
|
||||
panelTab: "chat",
|
||||
setPanelTab: mockSetPanelTab,
|
||||
selectNode: vi.fn(),
|
||||
// Consumed by SidePanel's useEffect — publishes the drag-resized
|
||||
// width to the store so Toolbar can re-centre itself on the
|
||||
// remaining canvas area when the panel is open.
|
||||
setSidePanelWidth: vi.fn(),
|
||||
nodes: [
|
||||
{
|
||||
id: "ws-1",
|
||||
|
||||
@ -6,7 +6,8 @@ import remarkGfm from "remark-gfm";
|
||||
import { api } from "@/lib/api";
|
||||
import { useCanvasStore, type WorkspaceNodeData } from "@/store/canvas";
|
||||
import { WS_URL } from "@/store/socket";
|
||||
import { type ChatMessage, createMessage } from "./chat/types";
|
||||
import { closeWebSocketGracefully } from "@/lib/ws-close";
|
||||
import { type ChatMessage, createMessage, appendMessageDeduped } from "./chat/types";
|
||||
import { extractResponseText, extractRequestText } from "./chat/message-parser";
|
||||
import { AgentCommsPanel } from "./chat/AgentCommsPanel";
|
||||
import { runtimeDisplayName } from "@/lib/runtime-names";
|
||||
@ -206,7 +207,11 @@ function MyChatPanel({ workspaceId, data }: Props) {
|
||||
const consume = useCanvasStore.getState().consumeAgentMessages;
|
||||
const msgs = consume(workspaceId);
|
||||
for (const m of msgs) {
|
||||
setMessages((prev) => [...prev, createMessage("agent", m.content)]);
|
||||
// Dedupe in case the agent proactively pushed the same text the
|
||||
// HTTP /a2a response already delivered (observed with the Hermes
|
||||
// runtime, which emits both a reply body and a send_message_to_user
|
||||
// push for the same content).
|
||||
setMessages((prev) => appendMessageDeduped(prev, createMessage("agent", m.content)));
|
||||
}
|
||||
}, [pendingAgentMsgs, workspaceId]);
|
||||
|
||||
@ -220,7 +225,7 @@ function MyChatPanel({ workspaceId, data }: Props) {
|
||||
const msgs = consume(`a2a:${workspaceId}`);
|
||||
if (!sendingFromAPIRef.current) return; // HTTP .then() already handled this response
|
||||
for (const m of msgs) {
|
||||
setMessages((prev) => [...prev, createMessage("agent", m.content)]);
|
||||
setMessages((prev) => appendMessageDeduped(prev, createMessage("agent", m.content)));
|
||||
}
|
||||
setSending(false);
|
||||
sendingFromAPIRef.current = false;
|
||||
@ -300,7 +305,9 @@ function MyChatPanel({ workspaceId, data }: Props) {
|
||||
} catch { /* ignore */ }
|
||||
};
|
||||
|
||||
return () => ws.close();
|
||||
return () => {
|
||||
closeWebSocketGracefully(ws);
|
||||
};
|
||||
}, [sending, workspaceId, resolveWorkspaceName]);
|
||||
|
||||
const sendMessage = async () => {
|
||||
@ -340,7 +347,7 @@ function MyChatPanel({ workspaceId, data }: Props) {
|
||||
if (!sendingFromAPIRef.current) return;
|
||||
const replyText = extractReplyText(resp);
|
||||
if (replyText) {
|
||||
setMessages((prev) => [...prev, createMessage("agent", replyText)]);
|
||||
setMessages((prev) => appendMessageDeduped(prev, createMessage("agent", replyText)));
|
||||
}
|
||||
setSending(false);
|
||||
sendingFromAPIRef.current = false;
|
||||
|
||||
@ -432,13 +432,19 @@ export function ConfigTab({ workspaceId }: Props) {
|
||||
label={
|
||||
currentModelSpec?.required_env?.length &&
|
||||
arraysEqual(config.runtime_config?.required_env ?? [], currentModelSpec.required_env)
|
||||
? "Required Env Vars (from template)"
|
||||
: "Required Env Vars"
|
||||
? "Required Env Var Names (from template)"
|
||||
: "Required Env Var Names"
|
||||
}
|
||||
values={config.runtime_config?.required_env ?? []}
|
||||
onChange={(v) => updateNested("runtime_config" as keyof ConfigData, "required_env", v)}
|
||||
placeholder="e.g. CLAUDE_CODE_OAUTH_TOKEN"
|
||||
placeholder="variable NAME (e.g. ANTHROPIC_API_KEY) — not the value"
|
||||
/>
|
||||
<p className="text-[10px] text-zinc-500 mt-1">
|
||||
This declares which env var <em>names</em> the workspace needs.
|
||||
Set the actual values in the <strong>Secrets</strong> section
|
||||
below — those are encrypted and mounted into the container at
|
||||
runtime.
|
||||
</p>
|
||||
{currentModelSpec?.required_env?.length &&
|
||||
!arraysEqual(config.runtime_config?.required_env ?? [], currentModelSpec.required_env) && (
|
||||
<div className="text-[10px] text-zinc-500 mt-1 flex items-center gap-2">
|
||||
@ -545,7 +551,10 @@ export function ConfigTab({ workspaceId }: Props) {
|
||||
</div>
|
||||
</Section>
|
||||
|
||||
<SecretsSection workspaceId={workspaceId} />
|
||||
<SecretsSection
|
||||
workspaceId={workspaceId}
|
||||
requiredEnv={config.runtime_config?.required_env}
|
||||
/>
|
||||
|
||||
<AgentCardSection workspaceId={workspaceId} />
|
||||
</div>
|
||||
|
||||
@ -4,6 +4,7 @@ import { useState, useEffect, useRef } from "react";
|
||||
import { api } from "@/lib/api";
|
||||
import { useCanvasStore, type WorkspaceNodeData } from "@/store/canvas";
|
||||
import { WS_URL } from "@/store/socket";
|
||||
import { closeWebSocketGracefully } from "@/lib/ws-close";
|
||||
import { extractResponseText, extractRequestText } from "./message-parser";
|
||||
|
||||
interface ActivityEntry {
|
||||
@ -122,7 +123,9 @@ export function AgentCommsPanel({ workspaceId }: { workspaceId: string }) {
|
||||
}
|
||||
} catch { /* ignore */ }
|
||||
};
|
||||
return () => ws.close();
|
||||
return () => {
|
||||
closeWebSocketGracefully(ws);
|
||||
};
|
||||
}, [workspaceId]);
|
||||
|
||||
useEffect(() => {
|
||||
|
||||
100
canvas/src/components/tabs/chat/__tests__/types.test.ts
Normal file
100
canvas/src/components/tabs/chat/__tests__/types.test.ts
Normal file
@ -0,0 +1,100 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { appendMessageDeduped, createMessage, type ChatMessage } from "../types";
|
||||
|
||||
// Unit tests for appendMessageDeduped — the helper that collapses the
|
||||
// race between the HTTP /a2a .then() handler, the A2A_RESPONSE WS event,
|
||||
// and the send_message_to_user push. All three paths can deliver the
|
||||
// same agent reply; without dedupe the user sees 2-3 identical bubbles
|
||||
// with identical timestamps.
|
||||
|
||||
describe("appendMessageDeduped", () => {
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
// Pin Date.now so "recently added" windows are deterministic across
|
||||
// the dedupe + Date.parse calls inside the helper.
|
||||
vi.setSystemTime(new Date("2026-04-23T12:00:00.000Z"));
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
it("appends a new message when the history is empty", () => {
|
||||
const msg = createMessage("agent", "hello");
|
||||
const next = appendMessageDeduped([], msg);
|
||||
expect(next).toHaveLength(1);
|
||||
expect(next[0]).toBe(msg);
|
||||
});
|
||||
|
||||
it("appends when content differs from the recent tail", () => {
|
||||
const first = createMessage("agent", "hello");
|
||||
vi.advanceTimersByTime(100);
|
||||
const second = createMessage("agent", "world");
|
||||
const next = appendMessageDeduped([first], second);
|
||||
expect(next).toHaveLength(2);
|
||||
});
|
||||
|
||||
it("skips a duplicate (same role+content) within the window", () => {
|
||||
const first = createMessage("agent", "Hey! How can I help you today?");
|
||||
vi.advanceTimersByTime(500); // well inside the 3s window
|
||||
const dup = createMessage("agent", "Hey! How can I help you today?");
|
||||
const next = appendMessageDeduped([first], dup);
|
||||
expect(next).toHaveLength(1);
|
||||
// The array is returned unchanged — not a new reference.
|
||||
expect(next[0]).toBe(first);
|
||||
});
|
||||
|
||||
it("does NOT dedupe across different roles even if content matches", () => {
|
||||
// Agent echoing the user's "hi" is a legitimate two-bubble case.
|
||||
const user = createMessage("user", "hi");
|
||||
vi.advanceTimersByTime(100);
|
||||
const agent = createMessage("agent", "hi");
|
||||
const next = appendMessageDeduped([user], agent);
|
||||
expect(next).toHaveLength(2);
|
||||
});
|
||||
|
||||
it("does NOT dedupe once the window has elapsed", () => {
|
||||
// A user legitimately sending "hi" a few seconds apart must render
|
||||
// both bubbles. Default window is 3000 ms.
|
||||
const first = createMessage("user", "hi");
|
||||
vi.advanceTimersByTime(4000);
|
||||
const repeat = createMessage("user", "hi");
|
||||
const next = appendMessageDeduped([first], repeat);
|
||||
expect(next).toHaveLength(2);
|
||||
});
|
||||
|
||||
it("only checks the tail's content, not the entire history", () => {
|
||||
// Same (role, content) appearing earlier in the conversation but
|
||||
// outside the dedupe window is not a duplicate.
|
||||
const old = createMessage("agent", "hi");
|
||||
vi.advanceTimersByTime(10_000);
|
||||
const newer = createMessage("agent", "hi");
|
||||
const next = appendMessageDeduped([old], newer);
|
||||
expect(next).toHaveLength(2);
|
||||
});
|
||||
|
||||
it("handles malformed timestamps without throwing", () => {
|
||||
// Defense: a history entry with a bogus timestamp shouldn't nuke
|
||||
// the append path. The helper should just treat that entry as
|
||||
// "too old to dedupe against" and append the new message.
|
||||
const garbled: ChatMessage = {
|
||||
id: "x",
|
||||
role: "agent",
|
||||
content: "hi",
|
||||
timestamp: "not-a-real-timestamp",
|
||||
};
|
||||
const fresh = createMessage("agent", "hi");
|
||||
expect(() => appendMessageDeduped([garbled], fresh)).not.toThrow();
|
||||
const next = appendMessageDeduped([garbled], fresh);
|
||||
expect(next).toHaveLength(2);
|
||||
});
|
||||
|
||||
it("accepts a custom dedupe window", () => {
|
||||
const first = createMessage("agent", "hello");
|
||||
vi.advanceTimersByTime(500);
|
||||
// Tight 100 ms window — the 500 ms-old first message falls outside.
|
||||
const dup = createMessage("agent", "hello");
|
||||
const next = appendMessageDeduped([first], dup, 100);
|
||||
expect(next).toHaveLength(2);
|
||||
});
|
||||
});
|
||||
@ -1,2 +1,2 @@
|
||||
export { type ChatMessage, createMessage } from "./types";
|
||||
export { type ChatMessage, createMessage, appendMessageDeduped } from "./types";
|
||||
export { extractAgentText, extractTextsFromParts, extractResponseText } from "./message-parser";
|
||||
|
||||
@ -8,3 +8,28 @@ export interface ChatMessage {
|
||||
export function createMessage(role: ChatMessage["role"], content: string): ChatMessage {
|
||||
return { id: crypto.randomUUID(), role, content, timestamp: new Date().toISOString() };
|
||||
}
|
||||
|
||||
// appendMessageDeduped adds a ChatMessage to `prev` unless the tail
|
||||
// already contains the same (role, content) from within
|
||||
// dedupeWindowMs. Collapses the case where two delivery paths race to
|
||||
// render the same agent reply — e.g. the HTTP .then() handler for
|
||||
// POST /a2a AND a `send_message_to_user` WebSocket push from the
|
||||
// runtime, both carrying the same text. Without this guard the user
|
||||
// sees two or three identical bubbles with identical timestamps.
|
||||
//
|
||||
// Why a time-windowed check instead of dedupe-by-id: the three delivery
|
||||
// paths (HTTP response, WS A2A_RESPONSE, WS send_message_to_user) each
|
||||
// mint a fresh `createMessage` with a random UUID client-side — there's
|
||||
// no stable end-to-end message id yet. Content+role+time is the
|
||||
// pragmatic identity. The window is short (3s) so genuine repeat
|
||||
// messages ("hi", "hi") from a real user/agent still render.
|
||||
export function appendMessageDeduped(prev: ChatMessage[], msg: ChatMessage, dedupeWindowMs = 3000): ChatMessage[] {
|
||||
const cutoff = Date.now() - dedupeWindowMs;
|
||||
const alreadyThere = prev.some((m) => {
|
||||
if (m.role !== msg.role || m.content !== msg.content) return false;
|
||||
const t = Date.parse(m.timestamp);
|
||||
return !Number.isNaN(t) && t >= cutoff;
|
||||
});
|
||||
if (alreadyThere) return prev;
|
||||
return [...prev, msg];
|
||||
}
|
||||
|
||||
@ -0,0 +1,139 @@
|
||||
// @vitest-environment jsdom
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { render, screen, waitFor, cleanup } from "@testing-library/react";
|
||||
import { SecretsSection } from "../secrets-section";
|
||||
|
||||
// Tests for SecretsSection — locks in the fix that the secret-slot
|
||||
// list is driven by the workspace's `runtime_config.required_env`
|
||||
// instead of a hardcoded COMMON_KEYS list.
|
||||
//
|
||||
// Before the fix the component always rendered Anthropic / OpenAI /
|
||||
// Google / SERP / Model Override slots regardless of template. For a
|
||||
// Hermes workspace that declares MINIMAX_API_KEY that meant the user
|
||||
// saw five irrelevant slots and no slot for the key they actually
|
||||
// needed.
|
||||
|
||||
vi.mock("@/lib/api", () => ({
|
||||
api: {
|
||||
get: vi.fn().mockResolvedValue([]),
|
||||
put: vi.fn().mockResolvedValue({}),
|
||||
post: vi.fn().mockResolvedValue({}),
|
||||
del: vi.fn().mockResolvedValue({}),
|
||||
patch: vi.fn().mockResolvedValue({}),
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock("@/lib/canvas-actions", () => ({
|
||||
markAllWorkspacesNeedRestart: vi.fn(),
|
||||
}));
|
||||
|
||||
// The Section wrapper is collapsible with `defaultOpen={false}`. For
|
||||
// tests we want the content visible without a click — replace the
|
||||
// wrapper with a passthrough that always renders children.
|
||||
vi.mock("../form-inputs", async () => {
|
||||
const actual = await vi.importActual<typeof import("../form-inputs")>("../form-inputs");
|
||||
return {
|
||||
...actual,
|
||||
Section: ({ children }: { children: React.ReactNode }) => <div>{children}</div>,
|
||||
};
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
cleanup();
|
||||
});
|
||||
|
||||
describe("SecretsSection — template-driven slots", () => {
|
||||
it("renders exactly the slots the template declares in required_env", async () => {
|
||||
render(
|
||||
<SecretsSection workspaceId="ws-1" requiredEnv={["MINIMAX_API_KEY"]} />,
|
||||
);
|
||||
await waitFor(() => {
|
||||
expect(screen.getByText("MINIMAX_API_KEY")).toBeTruthy();
|
||||
});
|
||||
// Hardcoded slots that were there before this fix must NOT appear
|
||||
// when the template doesn't ask for them.
|
||||
expect(screen.queryByText("ANTHROPIC_API_KEY")).toBeNull();
|
||||
expect(screen.queryByText("OPENAI_API_KEY")).toBeNull();
|
||||
expect(screen.queryByText("GOOGLE_API_KEY")).toBeNull();
|
||||
expect(screen.queryByText("SERP_API_KEY")).toBeNull();
|
||||
});
|
||||
|
||||
it("uses the friendly label from KNOWN_LABELS for a well-known name", async () => {
|
||||
render(
|
||||
<SecretsSection workspaceId="ws-1" requiredEnv={["ANTHROPIC_API_KEY"]} />,
|
||||
);
|
||||
await waitFor(() => {
|
||||
expect(screen.getByText("Anthropic API Key")).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
it("humanises an unknown env var name into a readable label", async () => {
|
||||
render(
|
||||
<SecretsSection workspaceId="ws-1" requiredEnv={["MINIMAX_API_KEY"]} />,
|
||||
);
|
||||
await waitFor(() => {
|
||||
// "Minimax API Key" — "API" acronym preserved, "Minimax" title-cased.
|
||||
expect(screen.getByText("Minimax API Key")).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
it("preserves API / URL acronyms when humanising", async () => {
|
||||
render(
|
||||
<SecretsSection
|
||||
workspaceId="ws-1"
|
||||
requiredEnv={["ZHIPU_API_KEY", "CUSTOM_MODEL_URL"]}
|
||||
/>,
|
||||
);
|
||||
await waitFor(() => {
|
||||
expect(screen.getByText("Zhipu API Key")).toBeTruthy();
|
||||
expect(screen.getByText("Custom Model URL")).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
it("deduplicates repeated entries in required_env", async () => {
|
||||
render(
|
||||
<SecretsSection
|
||||
workspaceId="ws-1"
|
||||
requiredEnv={["MINIMAX_API_KEY", "MINIMAX_API_KEY", "OPENAI_API_KEY"]}
|
||||
/>,
|
||||
);
|
||||
await waitFor(() => {
|
||||
// Only one row for the repeated name.
|
||||
const matches = screen.getAllByText("MINIMAX_API_KEY");
|
||||
expect(matches).toHaveLength(1);
|
||||
expect(screen.getByText("OpenAI API Key")).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
it("falls back to the legacy common-keys list when required_env is missing", async () => {
|
||||
// Backward compat: old workspaces without a template-set
|
||||
// required_env still see Anthropic/OpenAI/Google/SERP slots.
|
||||
render(<SecretsSection workspaceId="ws-1" />);
|
||||
await waitFor(() => {
|
||||
expect(screen.getByText("Anthropic API Key")).toBeTruthy();
|
||||
});
|
||||
expect(screen.getByText("OpenAI API Key")).toBeTruthy();
|
||||
expect(screen.getByText("Google AI API Key")).toBeTruthy();
|
||||
});
|
||||
|
||||
it("falls back to the legacy common-keys list when required_env is empty", async () => {
|
||||
render(<SecretsSection workspaceId="ws-1" requiredEnv={[]} />);
|
||||
await waitFor(() => {
|
||||
expect(screen.getByText("Anthropic API Key")).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
it("does not fall back when required_env has at least one entry", async () => {
|
||||
// Single-entry required_env must NOT spill legacy slots into the UI.
|
||||
render(<SecretsSection workspaceId="ws-1" requiredEnv={["MINIMAX_API_KEY"]} />);
|
||||
await waitFor(() => {
|
||||
expect(screen.getByText("MINIMAX_API_KEY")).toBeTruthy();
|
||||
});
|
||||
expect(screen.queryByText("Anthropic API Key")).toBeNull();
|
||||
expect(screen.queryByText("OpenAI API Key")).toBeNull();
|
||||
});
|
||||
});
|
||||
@ -13,14 +13,59 @@ interface SecretEntry {
|
||||
scope?: "global" | "workspace";
|
||||
}
|
||||
|
||||
const COMMON_KEYS = [
|
||||
{ key: "ANTHROPIC_API_KEY", label: "Anthropic API Key" },
|
||||
{ key: "OPENAI_API_KEY", label: "OpenAI API Key" },
|
||||
{ key: "GOOGLE_API_KEY", label: "Google AI API Key" },
|
||||
{ key: "SERP_API_KEY", label: "SERP API Key" },
|
||||
{ key: "MODEL_PROVIDER", label: "Model Override (e.g. anthropic:claude-sonnet-4-6)" },
|
||||
// Human-friendly labels for well-known env-var names. Used to render
// familiar copy ("Anthropic API Key") instead of the raw variable name
// when the template declares one of these. Unknown names (e.g.
// MINIMAX_API_KEY, ZHIPU_API_KEY) fall through to humanizeKeyName below
// — a generic "Minimax API Key" label is better than no label at all.
//
// SECRETS_WHEN_NO_TEMPLATE is the fallback set shown only when a
// workspace's template doesn't declare any required_env (legacy /
// bare-runtime case). In the normal flow the list is driven by
// runtime_config.required_env passed in from the Config tab.
const KNOWN_LABELS: Record<string, string> = {
  ANTHROPIC_API_KEY: "Anthropic API Key",
  OPENAI_API_KEY: "OpenAI API Key",
  GOOGLE_API_KEY: "Google AI API Key",
  SERP_API_KEY: "SERP API Key",
  OPENROUTER_API_KEY: "OpenRouter API Key",
  HERMES_API_KEY: "Hermes API Key (Nous Research)",
  GROQ_API_KEY: "Groq API Key",
  CEREBRAS_API_KEY: "Cerebras API Key",
  MINIMAX_API_KEY: "Minimax API Key",
  MODEL_PROVIDER: "Model Override (e.g. anthropic:claude-sonnet-4-6)",
};

// Legacy slot list — rendered only when requiredEnv is missing/empty so
// the Secrets section is never blank for pre-template workspaces.
const SECRETS_WHEN_NO_TEMPLATE = [
  "ANTHROPIC_API_KEY",
  "OPENAI_API_KEY",
  "GOOGLE_API_KEY",
  "SERP_API_KEY",
  "MODEL_PROVIDER",
];
|
||||
|
||||
// humanizeKeyName converts SCREAMING_SNAKE_CASE into "Title Case Words"
|
||||
// so templates that declare uncommon env var names still get a readable
|
||||
// label. "MINIMAX_API_KEY" → "Minimax API Key". Preserves "API" / "URL"
|
||||
// acronyms via the normalize step.
|
||||
function humanizeKeyName(key: string): string {
|
||||
const words = key.toLowerCase().split("_").filter(Boolean);
|
||||
return words
|
||||
.map((w) => {
|
||||
const upper = w.toUpperCase();
|
||||
// Keep common acronyms upper-case.
|
||||
if (["API", "URL", "URI", "ID", "SDK", "MCP", "LLM", "AI"].includes(upper)) {
|
||||
return upper;
|
||||
}
|
||||
return w.charAt(0).toUpperCase() + w.slice(1);
|
||||
})
|
||||
.join(" ");
|
||||
}
|
||||
|
||||
function labelForKey(key: string): string {
|
||||
return KNOWN_LABELS[key] ?? humanizeKeyName(key);
|
||||
}
|
||||
|
||||
function ScopeBadge({ scope }: { scope: "global" | "workspace" | "override" }) {
|
||||
if (scope === "global") {
|
||||
return <span className="text-[8px] text-amber-400 bg-amber-900/30 px-1.5 py-0.5 rounded" title="Inherited from global secrets">Global</span>;
|
||||
@ -147,7 +192,7 @@ function CustomSecretRow({ secretKey, scope, globalMode, onSave, onDelete }: {
|
||||
);
|
||||
}
|
||||
|
||||
export function SecretsSection({ workspaceId }: { workspaceId: string }) {
|
||||
export function SecretsSection({ workspaceId, requiredEnv }: { workspaceId: string; requiredEnv?: string[] }) {
|
||||
const [mergedSecrets, setMergedSecrets] = useState<SecretEntry[]>([]);
|
||||
const [globalSecrets, setGlobalSecrets] = useState<SecretEntry[]>([]);
|
||||
const [loading, setLoading] = useState(true);
|
||||
@ -218,9 +263,27 @@ export function SecretsSection({ workspaceId }: { workspaceId: string }) {
|
||||
// For global view: use global secrets only
|
||||
const activeSecrets = globalMode ? globalSecrets : mergedSecrets;
|
||||
|
||||
// Split into common keys and custom keys
|
||||
const commonKeySet = new Set(COMMON_KEYS.map((c) => c.key));
|
||||
const customSecrets = activeSecrets.filter((s) => !commonKeySet.has(s.key));
|
||||
// Template-driven slots: render one labelled row per env var the
|
||||
// template declares. Falls back to a legacy common-keys list when
|
||||
// the template has nothing (older workspaces / bare runtimes) so
|
||||
// the Secrets section is never empty.
|
||||
const templateKeys = (requiredEnv && requiredEnv.length > 0)
|
||||
? requiredEnv
|
||||
: SECRETS_WHEN_NO_TEMPLATE;
|
||||
|
||||
// Deduplicate while preserving order — a template that lists the
|
||||
// same key twice shouldn't render two rows.
|
||||
const seen = new Set<string>();
|
||||
const slotKeys = templateKeys.filter((k) => {
|
||||
if (seen.has(k)) return false;
|
||||
seen.add(k);
|
||||
return true;
|
||||
});
|
||||
|
||||
// Split into template-slot keys and user-added custom keys so the
|
||||
// latter still surface even when not declared by the template.
|
||||
const slotKeySet = new Set(slotKeys);
|
||||
const customSecrets = activeSecrets.filter((s) => !slotKeySet.has(s.key));
|
||||
|
||||
return (
|
||||
<Section title="Secrets & API Keys" defaultOpen={false}>
|
||||
@ -256,15 +319,16 @@ export function SecretsSection({ workspaceId }: { workspaceId: string }) {
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Common keys */}
|
||||
{COMMON_KEYS.map(({ key, label }) => {
|
||||
{/* Template-declared slots — one labelled row per env var
|
||||
the workspace actually needs. Driven by runtime_config.required_env. */}
|
||||
{slotKeys.map((key) => {
|
||||
const entry = globalMode
|
||||
? globalSecrets.find((s) => s.key === key)
|
||||
: mergedByKey.get(key);
|
||||
const isSet = !!entry?.has_value;
|
||||
const scope = globalMode ? undefined : (entry ? getScope(entry) : undefined);
|
||||
return (
|
||||
<SecretRow key={key} label={label} secretKey={key}
|
||||
<SecretRow key={key} label={labelForKey(key)} secretKey={key}
|
||||
isSet={isSet}
|
||||
scope={scope}
|
||||
globalMode={globalMode}
|
||||
|
||||
100
canvas/src/lib/__tests__/api-401.test.ts
Normal file
100
canvas/src/lib/__tests__/api-401.test.ts
Normal file
@ -0,0 +1,100 @@
|
||||
// @vitest-environment jsdom
|
||||
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
|
||||
|
||||
// Dedicated file for the 401 → login-redirect tests because they need
|
||||
// `window.location.hostname` (jsdom), while the rest of api.test.ts
|
||||
// runs happily in node. Splitting keeps the node tests fast.
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// 401 handling — gated on SaaS-tenant hostname
|
||||
// ---------------------------------------------------------------------------
|
||||
//
|
||||
// Before fix/quickstart-bugless, any 401 from any endpoint triggered
|
||||
// `redirectToLogin()`, navigating to `/cp/auth/login`. That route
|
||||
// exists only on SaaS (mounted by cp_proxy when CP_UPSTREAM_URL is
|
||||
// set). On localhost / self-hosted / Vercel preview it 404s, so the
|
||||
// user lands on a broken login page instead of seeing the actual error.
|
||||
//
|
||||
// These tests lock in:
|
||||
// - SaaS tenant hostname (*.moleculesai.app) → 401 still redirects.
|
||||
// - non-SaaS hostname (localhost, LAN IP, apex) → 401 throws, no
|
||||
// redirect, so the caller renders a real error affordance.
|
||||
|
||||
const mockFetch = vi.fn();
|
||||
globalThis.fetch = mockFetch;
|
||||
|
||||
function mockFailure(status: number, text: string) {
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: false,
|
||||
status,
|
||||
json: () => Promise.reject(new Error("no json")),
|
||||
text: () => Promise.resolve(text),
|
||||
} as unknown as Response);
|
||||
}
|
||||
|
||||
function setHostname(host: string) {
|
||||
Object.defineProperty(window, "location", {
|
||||
configurable: true,
|
||||
value: { ...window.location, hostname: host },
|
||||
});
|
||||
}
|
||||
|
||||
describe("api 401 handling", () => {
|
||||
let redirectSpy: ReturnType<typeof vi.fn>;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
vi.resetModules();
|
||||
redirectSpy = vi.fn();
|
||||
vi.doMock("../auth", () => ({
|
||||
redirectToLogin: redirectSpy,
|
||||
// Stub siblings so any other import of ../auth in the chain
|
||||
// (AuthGate, TermsGate, etc.) still resolves.
|
||||
fetchSession: vi.fn().mockResolvedValue(null),
|
||||
}));
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.doUnmock("../auth");
|
||||
vi.resetModules();
|
||||
});
|
||||
|
||||
it("redirects to login on SaaS tenant hostname", async () => {
|
||||
setHostname("acme.moleculesai.app");
|
||||
mockFailure(401, '{"error":"admin auth required"}');
|
||||
|
||||
const { api } = await import("../api");
|
||||
await expect(api.get("/workspaces")).rejects.toThrow(/Session expired/);
|
||||
expect(redirectSpy).toHaveBeenCalledWith("sign-in");
|
||||
});
|
||||
|
||||
it("does NOT redirect on localhost — throws a real error instead", async () => {
|
||||
setHostname("localhost");
|
||||
mockFailure(401, '{"error":"admin auth required"}');
|
||||
|
||||
const { api } = await import("../api");
|
||||
await expect(api.get("/workspaces")).rejects.toThrow(/401/);
|
||||
expect(redirectSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("does NOT redirect on a LAN hostname", async () => {
|
||||
setHostname("192.168.1.74");
|
||||
mockFailure(401, '{"error":"missing workspace auth token"}');
|
||||
|
||||
const { api } = await import("../api");
|
||||
await expect(api.get("/workspaces/abc/activity")).rejects.toThrow(/401/);
|
||||
expect(redirectSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("does NOT redirect on reserved subdomains (app.moleculesai.app)", async () => {
|
||||
// `app` is in reservedSubdomains — getTenantSlug returns "" there.
|
||||
// Users landing on app.moleculesai.app (pre-tenant-selection) must
|
||||
// see the real 401 error rather than loop on login.
|
||||
setHostname("app.moleculesai.app");
|
||||
mockFailure(401, '{"error":"admin auth required"}');
|
||||
|
||||
const { api } = await import("../api");
|
||||
await expect(api.get("/workspaces")).rejects.toThrow(/401/);
|
||||
expect(redirectSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
@ -1,121 +1,148 @@
|
||||
import { describe, it, expect, beforeEach, vi } from "vitest";
|
||||
|
||||
// Mock fetch globally before importing the module
|
||||
global.fetch = vi.fn();
|
||||
|
||||
import {
|
||||
getRequiredKeys,
|
||||
findMissingKeys,
|
||||
getKeyLabel,
|
||||
checkDeploySecrets,
|
||||
RUNTIME_REQUIRED_KEYS,
|
||||
KEY_LABELS,
|
||||
providersFromTemplate,
|
||||
findSatisfiedProvider,
|
||||
getKeyLabel,
|
||||
getProviderLabel,
|
||||
type TemplateLike,
|
||||
type ModelSpec,
|
||||
} from "../deploy-preflight";
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
/* ---------- getRequiredKeys ---------- */
|
||||
// -----------------------------------------------------------------------------
|
||||
// Fixtures mirroring what the Go /templates endpoint returns from each
|
||||
// template repo's config.yaml. Keep these minimal — we only need the
|
||||
// fields the preflight reads.
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
describe("getRequiredKeys", () => {
|
||||
it("returns OPENAI_API_KEY for langgraph", () => {
|
||||
expect(getRequiredKeys("langgraph")).toEqual(["OPENAI_API_KEY"]);
|
||||
const hermesModels: ModelSpec[] = [
|
||||
{ id: "nousresearch/hermes-4-70b", name: "Hermes 4 70B", required_env: ["HERMES_API_KEY"] },
|
||||
{ id: "nousresearch/hermes-3-405b", name: "Hermes 3 405B", required_env: ["OPENROUTER_API_KEY"] },
|
||||
{ id: "anthropic/claude-opus", name: "Claude Opus", required_env: ["ANTHROPIC_API_KEY"] },
|
||||
{ id: "openai/gpt-5", name: "GPT-5 via OpenRouter", required_env: ["OPENROUTER_API_KEY"] },
|
||||
{ id: "custom/local", name: "Local endpoint", required_env: [] },
|
||||
];
|
||||
|
||||
const HERMES: TemplateLike = { runtime: "hermes", models: hermesModels };
|
||||
|
||||
const LANGGRAPH: TemplateLike = {
|
||||
runtime: "langgraph",
|
||||
required_env: ["OPENAI_API_KEY"],
|
||||
};
|
||||
|
||||
const UNKNOWN: TemplateLike = { runtime: "nothing-declared" };
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// providersFromTemplate
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
describe("providersFromTemplate", () => {
|
||||
it("groups hermes models by unique required_env tuples", () => {
|
||||
const providers = providersFromTemplate(HERMES);
|
||||
// Three distinct tuples: HERMES_API_KEY, OPENROUTER_API_KEY, ANTHROPIC_API_KEY.
|
||||
// The `custom/local` entry has required_env: [] and must be skipped.
|
||||
expect(providers.map((p) => p.id)).toEqual([
|
||||
"HERMES_API_KEY",
|
||||
"OPENROUTER_API_KEY",
|
||||
"ANTHROPIC_API_KEY",
|
||||
]);
|
||||
});
|
||||
|
||||
it("returns ANTHROPIC_API_KEY for claude-code", () => {
|
||||
expect(getRequiredKeys("claude-code")).toEqual(["ANTHROPIC_API_KEY"]);
|
||||
it("decorates labels with model counts when a provider serves multiple models", () => {
|
||||
const providers = providersFromTemplate(HERMES);
|
||||
const openrouter = providers.find((p) => p.id === "OPENROUTER_API_KEY");
|
||||
expect(openrouter?.label).toMatch(/\(2 models\)/);
|
||||
const hermes = providers.find((p) => p.id === "HERMES_API_KEY");
|
||||
expect(hermes?.label).not.toMatch(/\(\d+ models\)/);
|
||||
});
|
||||
|
||||
it("returns OPENAI_API_KEY for crewai", () => {
|
||||
expect(getRequiredKeys("crewai")).toEqual(["OPENAI_API_KEY"]);
|
||||
it("preserves insertion order so the template author controls defaults", () => {
|
||||
const providers = providersFromTemplate(HERMES);
|
||||
expect(providers[0].id).toBe("HERMES_API_KEY");
|
||||
});
|
||||
|
||||
it("returns OPENAI_API_KEY for autogen", () => {
|
||||
expect(getRequiredKeys("autogen")).toEqual(["OPENAI_API_KEY"]);
|
||||
it("falls back to top-level required_env when no models[] are declared", () => {
|
||||
const providers = providersFromTemplate(LANGGRAPH);
|
||||
expect(providers).toHaveLength(1);
|
||||
expect(providers[0].envVars).toEqual(["OPENAI_API_KEY"]);
|
||||
});
|
||||
|
||||
it("returns OPENAI_API_KEY for openclaw", () => {
|
||||
expect(getRequiredKeys("openclaw")).toEqual(["OPENAI_API_KEY"]);
|
||||
it("returns [] for templates declaring no env requirements", () => {
|
||||
expect(providersFromTemplate(UNKNOWN)).toEqual([]);
|
||||
});
|
||||
|
||||
it("returns OPENAI_API_KEY for deepagents", () => {
|
||||
expect(getRequiredKeys("deepagents")).toEqual(["OPENAI_API_KEY"]);
|
||||
});
|
||||
|
||||
it("returns empty array for unknown runtimes", () => {
|
||||
expect(getRequiredKeys("unknown-runtime")).toEqual([]);
|
||||
expect(getRequiredKeys("")).toEqual([]);
|
||||
it("supports multi-env providers (AND-semantics inside one option)", () => {
|
||||
const tmpl: TemplateLike = {
|
||||
runtime: "agent",
|
||||
models: [
|
||||
{ id: "m", required_env: ["OPENAI_API_KEY", "SERPER_API_KEY"] },
|
||||
],
|
||||
};
|
||||
const providers = providersFromTemplate(tmpl);
|
||||
expect(providers).toHaveLength(1);
|
||||
expect(providers[0].envVars).toEqual(["OPENAI_API_KEY", "SERPER_API_KEY"]);
|
||||
});
|
||||
});
|
||||
|
||||
/* ---------- findMissingKeys ---------- */
|
||||
// -----------------------------------------------------------------------------
|
||||
// findSatisfiedProvider
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
describe("findMissingKeys", () => {
|
||||
it("returns empty array when all keys are configured", () => {
|
||||
const configured = new Set(["OPENAI_API_KEY", "OTHER_KEY"]);
|
||||
expect(findMissingKeys("langgraph", configured)).toEqual([]);
|
||||
describe("findSatisfiedProvider", () => {
|
||||
it("returns the first provider whose envVars are all configured", () => {
|
||||
const providers = providersFromTemplate(HERMES);
|
||||
const satisfied = findSatisfiedProvider(
|
||||
providers,
|
||||
new Set(["ANTHROPIC_API_KEY"]),
|
||||
);
|
||||
expect(satisfied?.id).toBe("ANTHROPIC_API_KEY");
|
||||
});
|
||||
|
||||
it("returns missing keys when not configured", () => {
|
||||
const configured = new Set(["OTHER_KEY"]);
|
||||
expect(findMissingKeys("langgraph", configured)).toEqual(["OPENAI_API_KEY"]);
|
||||
it("returns null when no provider is fully configured", () => {
|
||||
const providers = providersFromTemplate(HERMES);
|
||||
expect(findSatisfiedProvider(providers, new Set())).toBeNull();
|
||||
});
|
||||
|
||||
it("returns empty array for runtime with no required keys", () => {
|
||||
const configured = new Set<string>();
|
||||
expect(findMissingKeys("unknown-runtime", configured)).toEqual([]);
|
||||
});
|
||||
|
||||
it("returns all required keys when nothing is configured", () => {
|
||||
const configured = new Set<string>();
|
||||
expect(findMissingKeys("claude-code", configured)).toEqual(["ANTHROPIC_API_KEY"]);
|
||||
});
|
||||
|
||||
it("handles empty configured set for multi-key runtimes", () => {
|
||||
const configured = new Set<string>();
|
||||
const result = findMissingKeys("langgraph", configured);
|
||||
expect(result).toEqual(["OPENAI_API_KEY"]);
|
||||
it("requires ALL envVars in a multi-env provider", () => {
|
||||
const providers: ReturnType<typeof providersFromTemplate> =
|
||||
providersFromTemplate({
|
||||
runtime: "agent",
|
||||
models: [{ id: "m", required_env: ["A", "B"] }],
|
||||
});
|
||||
expect(findSatisfiedProvider(providers, new Set(["A"]))).toBeNull();
|
||||
expect(findSatisfiedProvider(providers, new Set(["A", "B"]))?.id).toBe("A|B");
|
||||
});
|
||||
});
|
||||
|
||||
/* ---------- getKeyLabel ---------- */
|
||||
// -----------------------------------------------------------------------------
|
||||
// Label helpers
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
describe("getKeyLabel", () => {
|
||||
it("returns label for known keys", () => {
|
||||
describe("getKeyLabel / getProviderLabel", () => {
|
||||
it("uses KEY_LABELS for well-known keys", () => {
|
||||
expect(getProviderLabel("OPENAI_API_KEY")).toBe("OpenAI");
|
||||
expect(getKeyLabel("OPENAI_API_KEY")).toBe("OpenAI API Key");
|
||||
expect(getKeyLabel("ANTHROPIC_API_KEY")).toBe("Anthropic API Key");
|
||||
});
|
||||
|
||||
it("returns the key itself for unknown keys", () => {
|
||||
expect(getKeyLabel("CUSTOM_SECRET")).toBe("CUSTOM_SECRET");
|
||||
it("humanizes unknown env vars", () => {
|
||||
expect(getProviderLabel("MY_CUSTOM_API_KEY")).toBe("My Custom");
|
||||
expect(getKeyLabel("MY_CUSTOM_TOKEN")).toBe("My Custom");
|
||||
});
|
||||
});
|
||||
|
||||
/* ---------- RUNTIME_REQUIRED_KEYS ---------- */
|
||||
|
||||
describe("RUNTIME_REQUIRED_KEYS", () => {
|
||||
it("covers all six standard runtimes", () => {
|
||||
const runtimes = Object.keys(RUNTIME_REQUIRED_KEYS);
|
||||
expect(runtimes).toContain("langgraph");
|
||||
expect(runtimes).toContain("claude-code");
|
||||
expect(runtimes).toContain("openclaw");
|
||||
expect(runtimes).toContain("deepagents");
|
||||
expect(runtimes).toContain("crewai");
|
||||
expect(runtimes).toContain("autogen");
|
||||
});
|
||||
|
||||
it("each runtime has at least one required key", () => {
|
||||
for (const [runtime, keys] of Object.entries(RUNTIME_REQUIRED_KEYS)) {
|
||||
expect(keys.length).toBeGreaterThan(0);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
/* ---------- checkDeploySecrets ---------- */
|
||||
// -----------------------------------------------------------------------------
|
||||
// checkDeploySecrets
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
describe("checkDeploySecrets", () => {
|
||||
it("returns ok=true when all required keys have values", async () => {
|
||||
it("returns ok=true when a single-provider template's key is configured", async () => {
|
||||
(global.fetch as ReturnType<typeof vi.fn>).mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () =>
|
||||
@ -124,49 +151,13 @@ describe("checkDeploySecrets", () => {
|
||||
]),
|
||||
} as Response);
|
||||
|
||||
const result = await checkDeploySecrets("langgraph");
|
||||
const result = await checkDeploySecrets(LANGGRAPH);
|
||||
expect(result.ok).toBe(true);
|
||||
expect(result.missingKeys).toEqual([]);
|
||||
expect(result.runtime).toBe("langgraph");
|
||||
});
|
||||
|
||||
it("returns ok=false when required keys are missing", async () => {
|
||||
(global.fetch as ReturnType<typeof vi.fn>).mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () =>
|
||||
Promise.resolve([
|
||||
{ key: "OTHER_KEY", has_value: true, created_at: "", updated_at: "" },
|
||||
]),
|
||||
} as Response);
|
||||
|
||||
const result = await checkDeploySecrets("langgraph");
|
||||
expect(result.ok).toBe(false);
|
||||
expect(result.missingKeys).toEqual(["OPENAI_API_KEY"]);
|
||||
});
|
||||
|
||||
it("returns ok=false when secret exists but has_value is false", async () => {
|
||||
(global.fetch as ReturnType<typeof vi.fn>).mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () =>
|
||||
Promise.resolve([
|
||||
{ key: "OPENAI_API_KEY", has_value: false, created_at: "", updated_at: "" },
|
||||
]),
|
||||
} as Response);
|
||||
|
||||
const result = await checkDeploySecrets("langgraph");
|
||||
expect(result.ok).toBe(false);
|
||||
expect(result.missingKeys).toEqual(["OPENAI_API_KEY"]);
|
||||
});
|
||||
|
||||
it("returns ok=true for runtimes with no required keys", async () => {
|
||||
const result = await checkDeploySecrets("unknown-runtime");
|
||||
expect(result.ok).toBe(true);
|
||||
expect(result.missingKeys).toEqual([]);
|
||||
// Should not have called fetch
|
||||
expect(global.fetch).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("uses workspace-specific endpoint when workspaceId is provided", async () => {
|
||||
it("returns ok=true on a multi-provider template when ANY provider is configured", async () => {
|
||||
(global.fetch as ReturnType<typeof vi.fn>).mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () =>
|
||||
@ -175,33 +166,82 @@ describe("checkDeploySecrets", () => {
|
||||
]),
|
||||
} as Response);
|
||||
|
||||
const result = await checkDeploySecrets("claude-code", "ws-123");
|
||||
const result = await checkDeploySecrets(HERMES);
|
||||
expect(result.ok).toBe(true);
|
||||
});
|
||||
|
||||
it("returns ok=false with every candidate env when nothing is configured", async () => {
|
||||
(global.fetch as ReturnType<typeof vi.fn>).mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () => Promise.resolve([]),
|
||||
} as Response);
|
||||
|
||||
const result = await checkDeploySecrets(HERMES);
|
||||
expect(result.ok).toBe(false);
|
||||
// De-duplicated flat list across providers.
|
||||
expect(new Set(result.missingKeys)).toEqual(
|
||||
new Set(["HERMES_API_KEY", "OPENROUTER_API_KEY", "ANTHROPIC_API_KEY"]),
|
||||
);
|
||||
// Grouped providers preserved for the picker.
|
||||
expect(result.providers).toHaveLength(3);
|
||||
});
|
||||
|
||||
it("treats has_value=false as not-configured", async () => {
|
||||
(global.fetch as ReturnType<typeof vi.fn>).mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () =>
|
||||
Promise.resolve([
|
||||
{ key: "OPENAI_API_KEY", has_value: false, created_at: "", updated_at: "" },
|
||||
]),
|
||||
} as Response);
|
||||
|
||||
const result = await checkDeploySecrets(LANGGRAPH);
|
||||
expect(result.ok).toBe(false);
|
||||
expect(result.missingKeys).toEqual(["OPENAI_API_KEY"]);
|
||||
});
|
||||
|
||||
it("skips the API call entirely when the template declares no env needs", async () => {
|
||||
const result = await checkDeploySecrets(UNKNOWN);
|
||||
expect(result.ok).toBe(true);
|
||||
expect(result.missingKeys).toEqual([]);
|
||||
expect(global.fetch).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("uses the workspace-scoped endpoint when workspaceId is provided", async () => {
|
||||
(global.fetch as ReturnType<typeof vi.fn>).mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () =>
|
||||
Promise.resolve([
|
||||
{ key: "OPENAI_API_KEY", has_value: true, created_at: "", updated_at: "" },
|
||||
]),
|
||||
} as Response);
|
||||
|
||||
await checkDeploySecrets(LANGGRAPH, "ws-123");
|
||||
expect(global.fetch).toHaveBeenCalledWith(
|
||||
expect.stringContaining("/workspaces/ws-123/secrets"),
|
||||
expect.any(Object),
|
||||
);
|
||||
});
|
||||
|
||||
it("uses global secrets endpoint when no workspaceId", async () => {
|
||||
it("uses the global secrets endpoint when no workspaceId", async () => {
|
||||
(global.fetch as ReturnType<typeof vi.fn>).mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () => Promise.resolve([]),
|
||||
} as Response);
|
||||
|
||||
await checkDeploySecrets("langgraph");
|
||||
await checkDeploySecrets(LANGGRAPH);
|
||||
expect(global.fetch).toHaveBeenCalledWith(
|
||||
expect.stringContaining("/settings/secrets"),
|
||||
expect.any(Object),
|
||||
);
|
||||
});
|
||||
|
||||
it("treats API failure as all keys missing (safe default)", async () => {
|
||||
it("treats fetch failure as all-missing (safe default prompts the user)", async () => {
|
||||
(global.fetch as ReturnType<typeof vi.fn>).mockRejectedValueOnce(
|
||||
new Error("Network error"),
|
||||
);
|
||||
|
||||
const result = await checkDeploySecrets("langgraph");
|
||||
const result = await checkDeploySecrets(LANGGRAPH);
|
||||
expect(result.ok).toBe(false);
|
||||
expect(result.missingKeys).toEqual(["OPENAI_API_KEY"]);
|
||||
});
|
||||
|
||||
85
canvas/src/lib/__tests__/ws-close.test.ts
Normal file
85
canvas/src/lib/__tests__/ws-close.test.ts
Normal file
@ -0,0 +1,85 @@
|
||||
// @vitest-environment jsdom
|
||||
import { describe, it, expect, vi } from "vitest";
|
||||
import { closeWebSocketGracefully } from "../ws-close";
|
||||
|
||||
// Minimal test-double for WebSocket. jsdom doesn't ship a
|
||||
// spec-compliant WebSocket, so we roll our own with just the bits the
|
||||
// helper touches: readyState, close(), addEventListener("open") /
|
||||
// ("error"). This lets us verify the graceful-close semantics without
|
||||
// a live server.
|
||||
function makeFakeWS(initialState: number) {
|
||||
const listeners: Record<string, Array<() => void>> = {};
|
||||
const ws = {
|
||||
readyState: initialState,
|
||||
close: vi.fn(),
|
||||
addEventListener: vi.fn(
|
||||
(type: string, handler: () => void, _opts?: { once?: boolean }) => {
|
||||
(listeners[type] ??= []).push(handler);
|
||||
},
|
||||
),
|
||||
removeEventListener: vi.fn(
|
||||
(type: string, handler: () => void) => {
|
||||
const arr = listeners[type];
|
||||
if (!arr) return;
|
||||
const idx = arr.indexOf(handler);
|
||||
if (idx >= 0) arr.splice(idx, 1);
|
||||
},
|
||||
),
|
||||
// Helpers for tests to fire the queued listeners.
|
||||
fire(type: string) {
|
||||
(listeners[type] ?? []).slice().forEach((h) => h());
|
||||
},
|
||||
};
|
||||
return ws as unknown as WebSocket & { fire(type: string): void };
|
||||
}
|
||||
|
||||
describe("closeWebSocketGracefully", () => {
|
||||
it("calls close() immediately when the socket is OPEN", () => {
|
||||
const ws = makeFakeWS(WebSocket.OPEN);
|
||||
closeWebSocketGracefully(ws);
|
||||
expect(ws.close).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it("calls close() immediately when the socket is CLOSING", () => {
|
||||
const ws = makeFakeWS(WebSocket.CLOSING);
|
||||
closeWebSocketGracefully(ws);
|
||||
expect(ws.close).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it("is a no-op when the socket is already CLOSED", () => {
|
||||
const ws = makeFakeWS(WebSocket.CLOSED);
|
||||
closeWebSocketGracefully(ws);
|
||||
expect(ws.close).not.toHaveBeenCalled();
|
||||
expect(ws.addEventListener).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("defers close until 'open' when the socket is CONNECTING", () => {
|
||||
const ws = makeFakeWS(WebSocket.CONNECTING);
|
||||
closeWebSocketGracefully(ws);
|
||||
|
||||
// close() NOT called yet — handshake hasn't completed.
|
||||
expect(ws.close).not.toHaveBeenCalled();
|
||||
// Two listeners queued: one for 'open' (close on connect), one
|
||||
// for 'error' (cancel the queued close if handshake fails).
|
||||
expect(ws.addEventListener).toHaveBeenCalledWith(
|
||||
"open", expect.any(Function), { once: true },
|
||||
);
|
||||
expect(ws.addEventListener).toHaveBeenCalledWith(
|
||||
"error", expect.any(Function), { once: true },
|
||||
);
|
||||
|
||||
// Simulate the handshake completing — close() should fire now.
|
||||
(ws as unknown as { fire: (t: string) => void }).fire("open");
|
||||
expect(ws.close).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it("does NOT call close() when the CONNECTING socket errors instead of opening", () => {
|
||||
const ws = makeFakeWS(WebSocket.CONNECTING);
|
||||
closeWebSocketGracefully(ws);
|
||||
|
||||
// Simulate handshake failure — the browser has already torn the
|
||||
// socket down, no explicit close() needed.
|
||||
(ws as unknown as { fire: (t: string) => void }).fire("error");
|
||||
expect(ws.close).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
@ -39,11 +39,20 @@ async function request<T>(
|
||||
signal: AbortSignal.timeout(DEFAULT_TIMEOUT_MS),
|
||||
});
|
||||
if (res.status === 401) {
|
||||
// Session expired or credentials lost — redirect to login once.
|
||||
// Import dynamically to avoid circular dependency with auth.ts.
|
||||
const { redirectToLogin } = await import("./auth");
|
||||
redirectToLogin("sign-in");
|
||||
throw new Error("Session expired — redirecting to login");
|
||||
// Session expired or credentials lost. On SaaS (tenant subdomain)
|
||||
// the login page lives at /cp/auth/login and is mounted by the
|
||||
// control-plane reverse proxy — redirect. On self-hosted / local
|
||||
// dev / Vercel preview there IS no /cp/* mount, so redirecting
|
||||
// would navigate to a 404 ("404 page not found") instead of the
|
||||
// real error the user should see. In that case, throw instead
|
||||
// and let the caller render a meaningful failure (retry button,
|
||||
// error banner, etc.).
|
||||
if (slug) {
|
||||
const { redirectToLogin } = await import("./auth");
|
||||
redirectToLogin("sign-in");
|
||||
throw new Error("Session expired — redirecting to login");
|
||||
}
|
||||
throw new Error(`API ${method} ${path}: 401 ${await res.text()}`);
|
||||
}
|
||||
if (!res.ok) {
|
||||
const text = await res.text();
|
||||
|
||||
@ -1,38 +1,37 @@
|
||||
/**
|
||||
* Pre-deploy secret check per runtime.
|
||||
* Pre-deploy secret check driven by the template's config.yaml.
|
||||
*
|
||||
* Before a workspace is deployed, validates that all required secrets/env vars
|
||||
* are configured for the target runtime. Each runtime defines its own set of
|
||||
* required keys (derived from each runtime's config.yaml `env.required` field).
|
||||
* The single source of truth for which env vars a workspace needs is
|
||||
* each template repo's config.yaml — the `runtime_config.models[].required_env`
|
||||
* array names the key(s) required per model, and `runtime_config.required_env`
|
||||
* names any AND-required keys at the runtime level. The Go `/templates`
|
||||
* handler parses these and exposes them as `models` and `required_env` on
|
||||
* each template summary.
|
||||
*
|
||||
* This module consumes that shape; it does NOT hardcode a per-runtime
|
||||
* provider table. When a template declares alternative models (e.g.
|
||||
* Hermes supports 35 models across 8 providers), the unique required_env
|
||||
* tuples become the provider options shown in the picker modal.
|
||||
*/
|
||||
|
||||
import { api } from "./api";
|
||||
|
||||
/* ---------- Required keys per runtime ---------- */
|
||||
/* ---------- Types matching the /templates response ---------- */
|
||||
|
||||
export const RUNTIME_REQUIRED_KEYS: Record<string, string[]> = {
|
||||
langgraph: ["OPENAI_API_KEY"],
|
||||
"claude-code": ["ANTHROPIC_API_KEY"],
|
||||
openclaw: ["OPENAI_API_KEY"],
|
||||
deepagents: ["OPENAI_API_KEY"],
|
||||
crewai: ["OPENAI_API_KEY"],
|
||||
autogen: ["OPENAI_API_KEY"],
|
||||
hermes: ["OPENROUTER_API_KEY"],
|
||||
"gemini-cli": ["GOOGLE_API_KEY"],
|
||||
};
|
||||
export interface ModelSpec {
|
||||
id: string;
|
||||
name?: string;
|
||||
required_env?: string[];
|
||||
}
|
||||
|
||||
/** Human-readable labels for common secret keys */
|
||||
export const KEY_LABELS: Record<string, string> = {
|
||||
OPENAI_API_KEY: "OpenAI API Key",
|
||||
ANTHROPIC_API_KEY: "Anthropic API Key",
|
||||
GOOGLE_API_KEY: "Google AI API Key",
|
||||
SERP_API_KEY: "SERP API Key",
|
||||
OPENROUTER_API_KEY: "OpenRouter API Key",
|
||||
HERMES_API_KEY: "Nous Research API Key",
|
||||
DEEPSEEK_API_KEY: "DeepSeek API Key",
|
||||
};
|
||||
|
||||
/* ---------- Types ---------- */
|
||||
/** Minimal template shape consumed by the preflight check. Any object
|
||||
* that matches this subset of the `/templates` response works. */
|
||||
export interface TemplateLike {
|
||||
runtime: string;
|
||||
models?: ModelSpec[];
|
||||
/** AND-required env vars declared at runtime_config level. */
|
||||
required_env?: string[];
|
||||
}
|
||||
|
||||
export interface SecretEntry {
|
||||
key: string;
|
||||
@ -44,63 +43,184 @@ export interface SecretEntry {
|
||||
|
||||
export interface PreflightResult {
|
||||
ok: boolean;
|
||||
/** Flat list of env var names needed — for the legacy modal path and
|
||||
* for callers that want a single display of "what's missing". */
|
||||
missingKeys: string[];
|
||||
/** Grouped provider options derived from the template. When length ≥ 2
|
||||
* the modal renders a picker; length 1 means exactly one provider is
|
||||
* required (AllKeysModal renders the N envVars inline). */
|
||||
providers: ProviderChoice[];
|
||||
runtime: string;
|
||||
}
|
||||
|
||||
/* ---------- Pure helpers (easily testable) ---------- */
|
||||
/* ---------- Provider options ---------- */
|
||||
|
||||
/** Get required env keys for a given runtime. Returns empty array for unknown runtimes. */
|
||||
export function getRequiredKeys(runtime: string): string[] {
|
||||
return RUNTIME_REQUIRED_KEYS[runtime] ?? [];
|
||||
/** One row in the provider picker. `envVars` is the set of keys required
|
||||
* TOGETHER to satisfy this option (usually length 1 — e.g. just
|
||||
* OPENROUTER_API_KEY). When length ≥ 2 all must be saved. */
|
||||
export interface ProviderChoice {
|
||||
/** Stable id for React keys + picker value — the sorted envVars joined. */
|
||||
id: string;
|
||||
/** Human label, e.g. "OpenRouter" or "OpenAI + Serper". */
|
||||
label: string;
|
||||
/** Env vars required for this provider option. */
|
||||
envVars: string[];
|
||||
/** Short rationale shown under the option, optional. */
|
||||
note?: string;
|
||||
}
|
||||
|
||||
/** Given a runtime and a set of configured key names, return which keys are missing. */
|
||||
export function findMissingKeys(
|
||||
runtime: string,
|
||||
configuredKeys: Set<string>,
|
||||
): string[] {
|
||||
return getRequiredKeys(runtime).filter((k) => !configuredKeys.has(k));
|
||||
}
|
||||
/** Human-readable labels for well-known secret keys. Anything not in
|
||||
* this table falls back to a humanized form of the env var. */
|
||||
export const KEY_LABELS: Record<string, string> = {
|
||||
OPENAI_API_KEY: "OpenAI",
|
||||
ANTHROPIC_API_KEY: "Anthropic",
|
||||
GOOGLE_API_KEY: "Google AI",
|
||||
GEMINI_API_KEY: "Google Gemini",
|
||||
SERP_API_KEY: "SERP",
|
||||
SERPER_API_KEY: "Serper",
|
||||
OPENROUTER_API_KEY: "OpenRouter",
|
||||
HERMES_API_KEY: "Nous Research (Hermes native)",
|
||||
DEEPSEEK_API_KEY: "DeepSeek",
|
||||
GLM_API_KEY: "z.ai GLM",
|
||||
KIMI_API_KEY: "Moonshot Kimi",
|
||||
MINIMAX_API_KEY: "MiniMax",
|
||||
KILOCODE_API_KEY: "Kilo Code",
|
||||
CLAUDE_CODE_OAUTH_TOKEN: "Claude Code subscription",
|
||||
};
|
||||
|
||||
/** Get human-readable label for a key, or fall back to the key itself. */
|
||||
/** Full "API Key" label used for input field headers. */
|
||||
export function getKeyLabel(key: string): string {
|
||||
return KEY_LABELS[key] ?? key;
|
||||
const base = KEY_LABELS[key];
|
||||
if (base) return `${base} API Key`;
|
||||
return humanizeEnvVar(key);
|
||||
}
|
||||
|
||||
/* ---------- API-calling preflight check ---------- */
|
||||
/** Short provider name used in the picker (no trailing "API Key"). */
|
||||
export function getProviderLabel(key: string): string {
|
||||
return KEY_LABELS[key] ?? humanizeEnvVar(key);
|
||||
}
|
||||
|
||||
function humanizeEnvVar(key: string): string {
|
||||
return key
|
||||
.replace(/_API_KEY$|_TOKEN$|_KEY$/i, "")
|
||||
.split(/[_-]/)
|
||||
.filter(Boolean)
|
||||
.map((w) => w.charAt(0).toUpperCase() + w.slice(1).toLowerCase())
|
||||
.join(" ");
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch configured secrets from the platform and check whether all required
|
||||
* keys for the target runtime are present.
|
||||
* Derive the provider options for a template from its declared shape.
|
||||
*
|
||||
* If `workspaceId` is provided, fetches the merged (global + workspace) secret
|
||||
* list for that workspace. Otherwise falls back to global secrets only.
|
||||
* 1. `models[].required_env` — each unique (sorted) tuple becomes a
|
||||
* provider option. E.g. Hermes exposes 8 options (Nous, OpenRouter,
|
||||
* Anthropic, Gemini, DeepSeek, GLM, Kimi, Kilocode) even though it
|
||||
* lists 35 models. Insertion order is preserved so the template's
|
||||
* author controls which provider is offered first.
|
||||
* 2. If `models` is empty or has no required_env, fall back to the
|
||||
* top-level `required_env` as a single all-required option.
|
||||
* 3. If neither is declared, return [] — no preflight needed.
|
||||
*
|
||||
* Models with `required_env: []` (local / self-hosted endpoints) are
|
||||
* skipped when computing options; they never block a deploy.
|
||||
*/
|
||||
export async function checkDeploySecrets(
|
||||
runtime: string,
|
||||
workspaceId?: string,
|
||||
): Promise<PreflightResult> {
|
||||
const requiredKeys = getRequiredKeys(runtime);
|
||||
if (requiredKeys.length === 0) {
|
||||
return { ok: true, missingKeys: [], runtime };
|
||||
export function providersFromTemplate(template: TemplateLike): ProviderChoice[] {
|
||||
const out: ProviderChoice[] = [];
|
||||
const seen = new Set<string>();
|
||||
const modelCount: Record<string, number> = {};
|
||||
|
||||
for (const m of template.models ?? []) {
|
||||
const envs = m.required_env ?? [];
|
||||
if (envs.length === 0) continue;
|
||||
const id = [...envs].sort().join("|");
|
||||
modelCount[id] = (modelCount[id] ?? 0) + 1;
|
||||
if (seen.has(id)) continue;
|
||||
seen.add(id);
|
||||
out.push({
|
||||
id,
|
||||
envVars: envs,
|
||||
label: envs.map(getProviderLabel).join(" + "),
|
||||
});
|
||||
}
|
||||
|
||||
// Decorate labels with model-count hints when multiple models share
|
||||
// the same provider. Gives the user context: "OpenRouter (14 models)".
|
||||
for (const p of out) {
|
||||
const n = modelCount[p.id];
|
||||
if (n && n > 1) p.label = `${p.label} (${n} models)`;
|
||||
}
|
||||
|
||||
if (out.length === 0 && template.required_env?.length) {
|
||||
const envs = template.required_env;
|
||||
out.push({
|
||||
id: [...envs].sort().join("|"),
|
||||
envVars: envs,
|
||||
label: envs.map(getProviderLabel).join(" + "),
|
||||
});
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
/** Helper: is any single provider option already satisfied by the set of
|
||||
* configured keys? A provider is satisfied when EVERY envVar it requires
|
||||
* is present. Returns the first such option or null. */
|
||||
export function findSatisfiedProvider(
|
||||
providers: ProviderChoice[],
|
||||
configured: Set<string>,
|
||||
): ProviderChoice | null {
|
||||
for (const p of providers) {
|
||||
if (p.envVars.every((k) => configured.has(k))) return p;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/* ---------- Preflight ---------- */
|
||||
|
||||
/**
|
||||
* Fetch configured secrets from the platform and decide whether the
|
||||
* workspace can deploy. When `workspaceId` is provided the merged
|
||||
* (global + workspace) secrets are checked; otherwise only globals.
|
||||
*
|
||||
* Returns `ok=true` immediately if any provider option's env vars are
|
||||
* already configured. Otherwise returns all candidate env vars flat in
|
||||
* `missingKeys` plus the grouped `providers` list for the picker.
|
||||
*/
|
||||
export async function checkDeploySecrets(
|
||||
template: TemplateLike,
|
||||
workspaceId?: string,
|
||||
): Promise<PreflightResult> {
|
||||
const providers = providersFromTemplate(template);
|
||||
const runtime = template.runtime;
|
||||
|
||||
if (providers.length === 0) {
|
||||
// Template declares no env requirements — nothing to preflight.
|
||||
return { ok: true, missingKeys: [], providers: [], runtime };
|
||||
}
|
||||
|
||||
let configured: Set<string>;
|
||||
try {
|
||||
const secrets = workspaceId
|
||||
? await api.get<SecretEntry[]>(`/workspaces/${workspaceId}/secrets`)
|
||||
: await api.get<SecretEntry[]>("/settings/secrets");
|
||||
|
||||
const configuredKeys = new Set(
|
||||
secrets.filter((s) => s.has_value).map((s) => s.key),
|
||||
);
|
||||
|
||||
const missingKeys = findMissingKeys(runtime, configuredKeys);
|
||||
return { ok: missingKeys.length === 0, missingKeys, runtime };
|
||||
configured = new Set(secrets.filter((s) => s.has_value).map((s) => s.key));
|
||||
} catch (error) {
|
||||
// Log the error before falling back — aids debugging when the API is down.
|
||||
console.error("[deploy-preflight] Failed to check secrets, assuming all missing:", error);
|
||||
// If we can't reach the secrets API, assume missing — safer to prompt the user.
|
||||
return { ok: false, missingKeys: requiredKeys, runtime };
|
||||
console.error(
|
||||
"[deploy-preflight] Failed to read secrets, assuming all missing:",
|
||||
error,
|
||||
);
|
||||
// Safer to prompt the user than to silently deploy.
|
||||
configured = new Set();
|
||||
}
|
||||
|
||||
if (findSatisfiedProvider(providers, configured)) {
|
||||
return { ok: true, missingKeys: [], providers, runtime };
|
||||
}
|
||||
|
||||
// Nothing configured — surface every candidate env var so the modal
|
||||
// can render the picker or the all-keys fallback.
|
||||
const missingKeys = Array.from(
|
||||
new Set(providers.flatMap((p) => p.envVars)),
|
||||
);
|
||||
return { ok: false, missingKeys, providers, runtime };
|
||||
}
|
||||
|
||||
38
canvas/src/lib/ws-close.ts
Normal file
38
canvas/src/lib/ws-close.ts
Normal file
@ -0,0 +1,38 @@
|
||||
/**
|
||||
* closeWebSocketGracefully closes a WebSocket without tripping the
|
||||
* browser console warning "WebSocket is closed before the connection is
|
||||
* established". That warning fires when `ws.close()` runs while
|
||||
* readyState is still CONNECTING (0) — most often triggered by React
|
||||
* StrictMode's double-invoked useEffect in dev, or any rapid
|
||||
* mount/unmount (tab switch, route change) during the WS handshake.
|
||||
*
|
||||
* Behaviour by state:
|
||||
* - OPEN / CLOSING: close immediately (the normal path).
|
||||
* - CONNECTING: defer the close until 'open' fires, so the
|
||||
* browser sees a full handshake before the shutdown.
|
||||
* - CLOSED: no-op.
|
||||
*
|
||||
* Returns the ws unchanged for chaining.
|
||||
*/
|
||||
export function closeWebSocketGracefully(ws: WebSocket): WebSocket {
|
||||
const state = ws.readyState;
|
||||
if (state === WebSocket.OPEN || state === WebSocket.CLOSING) {
|
||||
ws.close();
|
||||
return ws;
|
||||
}
|
||||
if (state === WebSocket.CONNECTING) {
|
||||
const onOpen = () => {
|
||||
ws.close();
|
||||
};
|
||||
ws.addEventListener("open", onOpen, { once: true });
|
||||
// Also wire an error listener — if the handshake fails we don't
|
||||
// need to close (the browser already tore it down) and we should
|
||||
// clear the queued onOpen handler.
|
||||
ws.addEventListener(
|
||||
"error",
|
||||
() => ws.removeEventListener("open", onOpen),
|
||||
{ once: true },
|
||||
);
|
||||
}
|
||||
return ws;
|
||||
}
|
||||
@ -269,7 +269,7 @@ describe("applyEvent", () => {
|
||||
makeMsg({
|
||||
event: "WORKSPACE_PROVISIONING",
|
||||
workspace_id: "ws-new",
|
||||
payload: { name: "Fresh", tier: 2 },
|
||||
payload: { name: "Fresh", tier: 2, runtime: "hermes" },
|
||||
})
|
||||
);
|
||||
|
||||
@ -281,6 +281,9 @@ describe("applyEvent", () => {
|
||||
expect(newNode.data.name).toBe("Fresh");
|
||||
expect(newNode.data.tier).toBe(2);
|
||||
expect(newNode.data.status).toBe("provisioning");
|
||||
// Runtime must flow through the provisioning event so the side-panel
|
||||
// pill renders the real runtime instead of "unknown" until a refetch.
|
||||
expect(newNode.data.runtime).toBe("hermes");
|
||||
// Position is offset by existing node count * 40
|
||||
expect(newNode.position.x).toBeGreaterThanOrEqual(0);
|
||||
expect(newNode.position.y).toBeGreaterThanOrEqual(0);
|
||||
|
||||
@ -145,6 +145,7 @@ export function handleCanvasEvent(
|
||||
url: "",
|
||||
parentId: null,
|
||||
currentTask: "",
|
||||
runtime: (msg.payload.runtime as string) ?? "",
|
||||
needsRestart: false,
|
||||
},
|
||||
},
|
||||
|
||||
@ -51,6 +51,18 @@ interface CanvasState {
|
||||
panelTab: PanelTab;
|
||||
dragOverNodeId: string | null;
|
||||
contextMenu: ContextMenuState | null;
|
||||
// Live width of the SidePanel in pixels. Only meaningful when
|
||||
// selectedNodeId is non-null (panel visible). The Toolbar reads this
|
||||
// to stay centred on the remaining canvas area instead of the full
|
||||
// viewport, so the "Audit" / "Search" / "Settings" buttons don't get
|
||||
// hidden behind the panel when a workspace is selected.
|
||||
sidePanelWidth: number;
|
||||
setSidePanelWidth: (w: number) => void;
|
||||
// Whether the TemplatePalette left-drawer is open. Consumed by the
|
||||
// Legend so it can shift right and avoid being hidden under the
|
||||
// palette. Set by TemplatePalette's toggle button.
|
||||
templatePaletteOpen: boolean;
|
||||
setTemplatePaletteOpen: (open: boolean) => void;
|
||||
hydrate: (workspaces: WorkspaceData[]) => void;
|
||||
applyEvent: (msg: WSMessage) => void;
|
||||
onNodesChange: (changes: NodeChange<Node<WorkspaceNodeData>>[]) => void;
|
||||
@ -115,6 +127,10 @@ export const useCanvasStore = create<CanvasState>((set, get) => ({
|
||||
panelTab: "chat",
|
||||
dragOverNodeId: null,
|
||||
contextMenu: null,
|
||||
sidePanelWidth: 480, // matches SIDEPANEL_DEFAULT_WIDTH in SidePanel.tsx
|
||||
setSidePanelWidth: (w) => set({ sidePanelWidth: w }),
|
||||
templatePaletteOpen: false,
|
||||
setTemplatePaletteOpen: (open) => set({ templatePaletteOpen: open }),
|
||||
// Batch selection
|
||||
selectedNodeIds: new Set<string>(),
|
||||
toggleNodeSelection: (id) => {
|
||||
|
||||
@ -1 +0,0 @@
|
||||
{"body": "## Demo Complete \u2014 #1172 AGENTS.md Auto-Generation\n\nAll acceptance criteria met \u2705\n\n### What was built\n\nA working demo + screencast spec for the AAIF / Linux Foundation AGENTS.md standard.\n\n**Demo files:**\n- `marketing/demos/agents-md-auto-generation/README.md` \u2014 full working demo with 4 walkthrough scenarios\n- `marketing/demos/agents-md-auto-generation/narration.mp3` \u2014 30s TTS narration (en-US-AriaNeural)\n\n**Screencast outline (1 min):**\n1. Canvas: pm-agent + researcher online\n2. Terminal: researcher reads PM's AGENTS.md via platform files API\n3. AGENTS.md output \u2014 role, A2A endpoint, tools\n4. Researcher dispatches A2A task to PM using discovered endpoint\n5. Canvas shows both active \u2014 close on \"agents that can read each other\"\n\n### Repo link\n\n`workspace/agents_md.py` on `molecule-core` main\nDirect: `workspace/agents_md.py`\n\n### TTS narration script (30s)\n\n> When a PM agent starts up in Molecule AI, it generates an AGENTS.md file automatically \u2014 not manually written, not kept in sync by hand. It reflects the workspace config in real time. Any other agent can read it to discover what the PM does, how to reach it, and what tools it has. No system prompts, no guessing. Just the facts. That's the AAIF standard in action: agents that can read each other without human intervention. AGENTS.md auto-generation, from Molecule AI workspace.\n\n### Note\n\nPush pending on GH_TOKEN refresh \u2014 all files are on the `content/blog/memory-backup-restore` branch and ready.\n"}
|
||||
@ -1 +0,0 @@
|
||||
{"body": "## Demo Complete \u2014 #1173 Cloudflare Artifacts Integration\n\nAll acceptance criteria met \u2705\n\n### What was built\n\nA working demo + screencast spec showing workspace snapshot storage and forking via Cloudflare Artifacts.\n\n**Demo files:**\n- `marketing/demos/cloudflare-artifacts/README.md` \u2014 full working demo with 5 walkthrough scenarios\n- `marketing/demos/cloudflare-artifacts/narration.mp3` \u2014 30s TTS narration (en-US-AriaNeural)\n\n**Screencast outline (1 min):**\n1. Canvas: workspace online\n2. Terminal: `POST /workspaces/:id/artifacts` \u2014 repo created, remote URL returned\n3. Mint git credential via `POST /workspaces/:id/artifacts/token` \u2014 `clone_url` shown\n4. `git clone` runs, agent writes snapshot, `git push` \u2014 push succeeds\n5. Fork call: `POST /workspaces/:id/artifacts/fork` \u2014 new repo created in CF Artifacts\n6. Close on \"versioned agent state, built into the platform\"\n\n### Repo link\n\n`workspace-server/internal/handlers/artifacts.go` on `molecule-core` main\nDirect: `workspace-server/internal/handlers/artifacts.go`\n\n### TTS narration script (30s)\n\n> Cloudflare Artifacts turns your Molecule AI workspace into a versioned git repository. Attach a repo, mint a short-lived credential, and the agent can push snapshots \u2014 memory dumps, task state, config \u2014 and other agents can fork the history to bootstrap from the same point. No external git service configuration. No separate dashboard. The platform manages the credential lifecycle and the repo link. Versioned agent state, built into the platform. That's the first-mover advantage: Git for agents, from Molecule AI.\n\n### Note\n\nPush pending on GH_TOKEN refresh \u2014 all files are on the `content/blog/memory-backup-restore` branch and ready.\n"}
|
||||
88
docs/internal-content-policy.md
Normal file
88
docs/internal-content-policy.md
Normal file
@ -0,0 +1,88 @@
|
||||
# Internal content policy
|
||||
|
||||
The `Molecule-AI/molecule-monorepo` repo is **public**. Anything internal
|
||||
(positioning, competitive briefs, sales playbooks, PMM/press drip, draft
|
||||
campaigns, raw research notes, ops runbooks, retrospectives) lives in
|
||||
**`Molecule-AI/internal`**.
|
||||
|
||||
This page is the canonical decision tree.
|
||||
|
||||
## Quick decision
|
||||
|
||||
> *"I'm an agent (or human) about to write a markdown file. Where does it go?"*
|
||||
|
||||
| If the artifact is… | Put it in… |
|
||||
|---|---|
|
||||
| Competitive brief, market analysis, raw research notes | `Molecule-AI/internal/research/` |
|
||||
| PMM positioning draft, sales playbook, press release pre-publish | `Molecule-AI/internal/marketing/` |
|
||||
| Draft campaign asset (still iterating, not yet customer-visible) | `Molecule-AI/internal/marketing/campaigns/` |
|
||||
| Roadmap discussion, planning doc, retrospective | `Molecule-AI/internal/PLAN.md` or `Molecule-AI/internal/retrospectives/` |
|
||||
| Runbook, ops procedure, incident postmortem | `Molecule-AI/internal/runbooks/` |
|
||||
| **Public-ready** blog post (final draft, ready to ship to docs site) | `Molecule-AI/molecule-monorepo/docs/blog/` |
|
||||
| **Public-ready** tutorial / quickstart | `Molecule-AI/molecule-monorepo/docs/tutorials/` |
|
||||
| Public DevRel content (code samples, demos for users) | `Molecule-AI/molecule-monorepo/docs/devrel/` |
|
||||
| API reference, architecture docs for external developers | `Molecule-AI/molecule-monorepo/docs/api/` |
|
||||
| Code, tests, infrastructure | wherever is appropriate inside this repo |
|
||||
|
||||
**Rule of thumb:** *"Would I be comfortable if a competitor / journalist / customer
|
||||
read this verbatim today?"* — yes → `monorepo/docs/`. No / not yet → `internal/`.
|
||||
|
||||
## Why
|
||||
|
||||
This repo is publicly indexable. Anything pushed here is permanently in git
|
||||
history, search-engine indexed, and accessible to anyone who clones. Past
|
||||
incidents (audit 2026-04-23) found:
|
||||
|
||||
- Competitive teardowns of CrewAI / Paperclip / VoltAgent at root `/research/`
|
||||
- 45 marketing artifacts at root `/marketing/` including `pmm/positioning.md`,
|
||||
`press/launch.md`, `sales/enablement.md`
|
||||
- 31 draft campaign files at `/docs/marketing/`
|
||||
- Junk temp files at root: `comment-1172.json`, `tick-reflections-temp.md`
|
||||
|
||||
All migrated to `internal/from-monorepo-2026-04-23/` for curator triage.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Three layers, all required:
|
||||
|
||||
1. **`.gitignore`** — blocks the directories at `git add` time. Quietest
|
||||
layer; doesn't fire if someone uses `git add -f`.
|
||||
2. **CI workflow `block-internal-paths.yml`** — fails any PR that adds a
|
||||
forbidden path. Mechanical backstop. Cannot be bypassed without editing
|
||||
the workflow + PR review.
|
||||
3. **Agent prompts** — `SHARED_RULES.md` rule (in
|
||||
`molecule-ai-org-template-molecule-dev`) tells every agent role to
|
||||
write internal content to `Molecule-AI/internal` directly via `gh repo
|
||||
clone` + commit + PR. This is the prevention-at-source layer.
|
||||
|
||||
If you're hitting the CI gate and your file genuinely belongs in this repo,
|
||||
edit `FORBIDDEN_PATTERNS` in the workflow with reviewer signoff. Don't
|
||||
work around the gate by renaming files.
|
||||
|
||||
## How to write to the internal repo (for agents)
|
||||
|
||||
```bash
|
||||
# One-time clone (idempotent — re-running is a no-op)
|
||||
mkdir -p ~/repos
|
||||
test -d ~/repos/internal || gh repo clone Molecule-AI/internal ~/repos/internal
|
||||
|
||||
cd ~/repos/internal
|
||||
git pull origin main
|
||||
mkdir -p research
|
||||
cat > research/<slug>.md <<EOF
|
||||
# <title>
|
||||
|
||||
…content…
|
||||
EOF
|
||||
|
||||
git checkout -b <agent-role>/research-<slug>
|
||||
git add research/<slug>.md
|
||||
git commit -m "research: add <slug>"
|
||||
git push -u origin HEAD
|
||||
gh pr create --base main --fill
|
||||
```
|
||||
|
||||
Yes, this is more steps than `cd molecule-monorepo && git add research/foo.md`.
|
||||
That cost is intentional: the friction is the point. Public space and
|
||||
internal space are different products with different audiences and
|
||||
different durability guarantees.
|
||||
@ -1,115 +0,0 @@
|
||||
# Phase 34 — Partner API Keys Competitive Battlecard
|
||||
**Feature:** `mol_pk_*` — partner-scoped org provisioning API key
|
||||
**Status:** PMM DRAFT | **Date:** 2026-04-22
|
||||
**Phase:** 34 | **Owner:** PMM
|
||||
**Blocking on:** PM input on partner tiers + marketplace billing (GA date now confirmed)
|
||||
|
||||
---
|
||||
## Competitive Context
|
||||
|
||||
No direct competitor has a published Partner API Key program at the agent orchestration layer. This is a first-mover opportunity. The battlecard row frames `mol_pk_*` as a structural differentiator — not a feature checkbox.
|
||||
|
||||
**Competitor landscape (updated 2026-04-22):**
|
||||
|
||||
| Competitor | Partner / API Program | Org Provisioning | CI/CD Org Lifecycle | Self-Hosted |
|
||||
|------------|----------------------|-----------------|---------------------|-------------|
|
||||
| LangGraph Cloud | Per-user SaaS licensing | ❌ | ❌ | ❌ (SaaS-only) |
|
||||
| CrewAI | Enterprise marketplace (live) | ❌ | ❌ | ✅ (open source) |
|
||||
| AutoGen (Microsoft) | None | ❌ | ❌ | ✅ (open source) |
|
||||
| AWS/GCP managed | OEM resale programs (separate) | N/A | N/A | N/A |
|
||||
| **Molecule AI Phase 34** | **Partner API Keys** | **✅ `POST /cp/admin/partner-keys`** | **✅ Ephemeral orgs per PR** | **✅** |
|
||||
|
||||
---
|
||||
|
||||
## Feature-by-Feature Battlecard
|
||||
|
||||
### 1. Partner Platform Integration
|
||||
|
||||
**Buyer question:** "Can I embed Molecule AI as the agent orchestration layer for my platform?"
|
||||
|
||||
| | Molecule AI Phase 34 | LangGraph Cloud | CrewAI |
|
||||
|---|---|---|---|
|
||||
| Programmatic org provision | ✅ `mol_pk_*` | ❌ per-user seat licensing only | ❌ marketplace listing only |
|
||||
| Org-scoped keys | ✅ — key cannot escape its org boundary | N/A | N/A |
|
||||
| Partner onboarding guide | ⏳ DevRel in progress | ❌ | ❌ |
|
||||
| White-label / branding | ✅ via partner-provisioned orgs | ❌ | ❌ |
|
||||
| API-first (no browser dependency) | ✅ | ❌ | ❌ |
|
||||
|
||||
**Molecule AI counter:** "LangGraph Cloud and CrewAI are end-user platforms. Molecule AI is infrastructure your platform builds on."
|
||||
|
||||
---
|
||||
|
||||
### 2. CI/CD / Automation
|
||||
|
||||
**Buyer question:** "Can my pipeline spin up test orgs per PR?"
|
||||
|
||||
| | Molecule AI Phase 34 | LangGraph Cloud | CrewAI |
|
||||
|---|---|---|---|
|
||||
| Ephemeral test orgs | ✅ via `POST` + `DELETE` partner key | ❌ | ❌ |
|
||||
| Per-PR isolation | ✅ — each run gets a fresh org | ❌ | ❌ |
|
||||
| Automated teardown | ✅ — `DELETE /cp/admin/partner-keys/:id` stops billing | ❌ | ❌ |
|
||||
| No shared-state contamination | ✅ | ❌ | ❌ |
|
||||
| CI/CD example in docs | ⏳ DevRel in progress | ❌ | ❌ |
|
||||
|
||||
**Molecule AI counter:** "CrewAI's marketplace is for consuming agents. Molecule AI's partner API is for provisioning infrastructure."
|
||||
|
||||
---
|
||||
|
||||
### 3. Marketplace / Reseller
|
||||
|
||||
**Buyer question:** "Can I resell Molecule AI through my marketplace?"
|
||||
|
||||
| | Molecule AI Phase 34 | AWS Marketplace (reseller) | GCP Marketplace |
|
||||
|---|---|---|---|
|
||||
| Automated provisioning | ✅ via Partner API | ✅ | ✅ |
|
||||
| Marketplace-native billing | ⏳ PM to confirm | ✅ | ✅ |
|
||||
| Partner API + marketplace billing | ⏳ PM to confirm | N/A | N/A |
|
||||
| Programmatic org lifecycle | ✅ | ✅ | ✅ |
|
||||
|
||||
**Note:** Phase 34 delivers the API side. Marketplace-native billing integration (AWS/GCP) is PM-to-confirm.
|
||||
|
||||
---
|
||||
|
||||
## Positioning Claims
|
||||
|
||||
**Lead claim:** ✅ VERIFIED (Research team audit, 2026-04-23) — "Molecule AI is the **first** agent platform with a first-class partner provisioning API — letting marketplaces, CI/CD pipelines, and automation platforms create and manage Molecule AI orgs via API, without a browser session."
|
||||
|
||||
> **Rationale:** Competitive Intel audited LangGraph Cloud, CrewAI, Azure AI Foundry, Dify, Flowise, and n8n. None have a documented programmatic partner org provisioning API equivalent to `mol_pk_*`. Use **"first-mover"** framing (not "only") for legal defensibility — a competitor could launch tomorrow.
|
||||
|
||||
**Supporting claims:**
|
||||
1. **Org-scoped by design** — `mol_pk_*` keys cannot escape their org boundary. Compromised keys neutralize with one API call.
|
||||
2. **CI/CD-native** — ephemeral test orgs per PR. No shared state. No manual cleanup.
|
||||
3. **Platform-first** — LangGraph charges per seat. CrewAI offers marketplace listing. Molecule AI offers an API to build either.
|
||||
|
||||
**Risks to monitor:**
|
||||
- AWS/GCP/Azure publish their own partner/OEM programs → Phase 34 becomes table stakes faster
|
||||
- CrewAI ships partner API → first-mover window closes; update claim to "pioneered" framing
|
||||
|
||||
---
|
||||
|
||||
## Language to Avoid
|
||||
|
||||
- ~~Do not claim "only platform with partner API" unless verified~~ — **RESOLVED:** Use "first-mover" / "first agent platform" language. Do NOT use "only" (legal risk if competitor ships).
|
||||
- Do not mention specific pricing tiers until PM confirms
|
||||
- Do not promise marketplace billing integration until PM confirms
|
||||
|
||||
---
|
||||
|
||||
## Update Triggers
|
||||
|
||||
| Event | Action |
|
||||
|-------|--------|
|
||||
| CrewAI launches partner API | Update lead claim → "pioneered the partner provisioning API" framing (first-mover window closed; see Risks) |
|
||||
| AWS/GCP publish agent OEM program | Add OEM row, frame Molecule AI as OEM alternative |
|
||||
| Phase 34 GA date confirmed | Open social copy brief, notify Social Media Brand |
|
||||
| DevRel ships partner onboarding guide | File social copy task for Content Marketer |
|
||||
|
||||
---
|
||||
|
||||
## Phase 30 Linkage
|
||||
|
||||
Phase 30 shipped `mol_ws_*` (per-workspace auth tokens). Phase 34 extends to `mol_pk_*` (partner/platform-level keys). Battlecard cross-sell: ✅ "Phase 30 workspace isolation + Phase 34 partner scoping — **the first agent platform with both layered token scoping and a first-class partner provisioning API.**" — verified 2026-04-23 via competitive audit. Use "first" / "pioneered" framing, not "only".
|
||||
|
||||
---
|
||||
|
||||
*PMM draft 2026-04-22 — Marketing Lead 2026-04-23 v2: (1) lead claim updated to verified "first-mover" language per Research team competitive audit (LangGraph Cloud, CrewAI, Azure AI Foundry, Dify, Flowise, n8n — no equivalent `mol_pk_*` found), (2) Phase 30 cross-sell updated to "first agent platform with both" framing, (3) Language to Avoid section resolved. GA DATE CONFIRMED: April 30, 2026. Still awaiting PM input on partner tiers and marketplace billing.*
|
||||
@ -1,96 +0,0 @@
|
||||
# Git for Agents: Cloudflare Artifacts Integration
|
||||
|
||||
**Source:** PR #641 (feat(platform): Cloudflare Artifacts demo integration #595), merged 2026-04-17
|
||||
**Issue:** #1174
|
||||
**Status:** Draft v1
|
||||
|
||||
---
|
||||
|
||||
Your AI agent has been working for three hours. It wrote tests, refactored a module, and left a summary in your workspace. Then your laptop died.
|
||||
|
||||
Without a shared version history, that work was in memory — gone. With Cloudflare Artifacts, it doesn't have to be.
|
||||
|
||||
Molecule AI's Cloudflare Artifacts integration treats every workspace snapshot as a first-class Git commit. Agents can branch, fork, push, and pull their own work — collaborating with peer agents or rolling back to a known-good state — without you touching a terminal.
|
||||
|
||||
---
|
||||
|
||||
## What Is Cloudflare Artifacts?
|
||||
|
||||
Cloudflare Artifacts is Cloudflare's "Git for agents" storage layer — a versioned, collaborative object store for AI agent workspaces. Each workspace gets a bare Git repository on CF's edge, and agents interact with it through a typed REST API.
|
||||
|
||||
Key properties:
|
||||
- **Versioned** — every snapshot is a Git commit, accessible and diffable
|
||||
- **Branching** — agents can fork an isolated copy before experimental changes
|
||||
- **Short-lived credentials** — Git tokens minted on demand, revoked automatically
|
||||
- **Edge-hosted** — CF's network means sub-50ms access from anywhere an agent runs
|
||||
|
||||
This is a first-mover integration. As of 2026-04-17, no other AI agent platform has shipped a Git-backed workspace snapshot feature. The [Cloudflare blog post](https://blog.cloudflare.com/artifacts-git-for-agents-beta/) has the full context.
|
||||
|
||||
---
|
||||
|
||||
## How It Works in Molecule AI
|
||||
|
||||
The integration adds four operations to the workspace API:
|
||||
|
||||
| Operation | What it does |
|
||||
|-----------|-------------|
|
||||
| `POST /artifacts/repos` | Create a Git repo for the workspace |
|
||||
| `POST /artifacts/repos/:name/fork` | Fork an isolated copy (branch-equivalent) |
|
||||
| `POST /artifacts/repos/:name/import` | Bootstrap from an external Git URL |
|
||||
| `POST /artifacts/tokens` | Mint a short-lived Git credential |
|
||||
|
||||
All tokens expire automatically. The Go client handles the credential lifecycle — tokens are never stored, never logged.
|
||||
|
||||
---
|
||||
|
||||
## Why It Matters for Agentic Workflows
|
||||
|
||||
Without versioned snapshots, AI agent work is ephemeral. Here's what that costs:
|
||||
|
||||
- **No rollback** — a bad agent decision means re-running from scratch
|
||||
- **No collaboration** — two agents can't share a working context without manual handoff
|
||||
- **No audit trail** — you can see what the agent did, but not what it changed
|
||||
|
||||
Cloudflare Artifacts changes all three. The workspace filesystem becomes a proper Git working tree. Every action is a commit. Branching is a first-class API call.
|
||||
|
||||
This is especially powerful for:
|
||||
|
||||
- **Multi-agent pipelines** — an agent writes to a feature branch, a reviewer agent pulls and approves, you merge to main
|
||||
- **Long-running tasks** — checkpoint snapshots so a crash doesn't mean starting over
|
||||
- **Experimentation** — fork before a risky refactor, delete the fork if it fails, keep the main clean
|
||||
|
||||
---
|
||||
|
||||
## Setup
|
||||
|
||||
```bash
|
||||
# Set Cloudflare credentials
|
||||
export CLOUDFLARE_API_TOKEN="your-cf-api-token"
|
||||
export CLOUDFLARE_ARTIFACTS_NAMESPACE="your-namespace"
|
||||
|
||||
# Create a repo for the workspace
|
||||
curl -X POST https://your-deployment.moleculesai.app/artifacts/repos \
|
||||
-H "Authorization: Bearer $ORG_API_KEY" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"name": "my-workspace", "description": "Dev agent workspace"}'
|
||||
|
||||
# Fork before an experimental change
|
||||
curl -X POST https://your-deployment.moleculesai.app/artifacts/repos/my-workspace/fork \
|
||||
-H "Authorization: Bearer $ORG_API_KEY" \
|
||||
  -H "Content-Type: application/json" \
  -d '{"name": "my-workspace/experiment"}'
|
||||
```
|
||||
|
||||
From the Molecule AI Canvas, navigate to **Workspaces → Your Workspace → Artifacts** to view repos, fork branches, and manage credentials visually.
|
||||
|
||||
---
|
||||
|
||||
## The Bigger Picture
|
||||
|
||||
Cloudflare Artifacts is part of the MCP governance layer. The combination of MCP tool-calling with versioned storage gives agents the primitives they need for production-grade workflows: capability discovery (via AGENTS.md), tool access (via MCP), and state persistence (via Cloudflare Artifacts).
|
||||
|
||||
Your agents stop being stateless. They become participants in a versioned, collaborative system — with the audit trail, rollback capability, and multi-agent coordination that production deployments require.
|
||||
|
||||
---
|
||||
|
||||
**Docs:** [Cloudflare Artifacts setup](/docs/guides/cloudflare-artifacts)
|
||||
**PR:** [PR #641 on GitHub](https://github.com/Molecule-AI/molecule-core/pull/641)
|
||||
@ -1,44 +0,0 @@
|
||||
# SaaS Workspaces Now Support Full File API — SSH-Backed Writes Land Today
|
||||
|
||||
**Status:** Live — merged 2026-04-23
|
||||
**PR:** [#1702](https://github.com/Molecule-AI/molecule-core/pull/1702)
|
||||
|
||||
---
|
||||
|
||||
One gap was blocking SaaS customers from doing something fundamental: writing files programmatically.
|
||||
|
||||
When you called `PUT /workspaces/:id/files/config.yaml` from a SaaS (EC2-backed) workspace, you got a 500. `failed to write file: docker not available`. The file API existed, but only for self-hosted Docker deployments. SaaS workspaces — the ones running on real EC2 VMs — had no path to write.
|
||||
|
||||
That changes today.
|
||||
|
||||
## What Was Wrong
|
||||
|
||||
Molecule AI supports two workspace compute models: self-hosted (Docker containers) and SaaS (EC2 VMs). The file write API was built for the Docker path — it used `docker cp` under the hood. SaaS workspaces don't have Docker. There was no fallback, so every API write failed silently.
|
||||
|
||||
This wasn't a permissions issue or a timeout. It was a missing code path that went undetected until a paying customer's workflow hit it directly.
|
||||
|
||||
## What's Fixed
|
||||
|
||||
The file write API now detects which compute model is in use and routes accordingly:
|
||||
|
||||
- **Self-hosted (Docker):** Unchanged — `docker cp` path still used
|
||||
- **SaaS (EC2):** Routes through EC2 Instance Connect (EIC) — the same ephemeral-keypair SSH flow that powers the Terminal tab in the Canvas
|
||||
|
||||
The remote write uses `install -D -m 0644 /dev/stdin <path>` for a single-command write that also creates missing parent directories (the `-D` flag is what creates parents — confirm against the merged PR). SaaS customers now get the same file API surface as self-hosted deployments.
|
||||
|
||||
## Why It Matters
|
||||
|
||||
Your file API workflow shouldn't break depending on where Molecule AI runs. Whether you're on self-hosted Docker or Molecule's SaaS, `WriteFile` and `ReplaceFiles` should work. They do now.
|
||||
|
||||
**Try it:**
|
||||
```bash
|
||||
curl -X PUT https://your-workspace.moleculesai.app/workspaces/:id/files/config.yaml \
|
||||
-H "Authorization: Bearer $ORG_API_KEY" \
|
||||
  -d $'model: claude-sonnet-4\ntemperature: 0.7'
|
||||
```
|
||||
|
||||
File API. Now everywhere Molecule AI runs.
|
||||
|
||||
---
|
||||
|
||||
*Found a bug or have a feature request? Open an issue at [github.com/Molecule-AI/molecule-core](https://github.com/Molecule-AI/molecule-core).*
|
||||
@ -1,141 +0,0 @@
|
||||
# A2A Enterprise Deep-Dive — SEO Keyword Brief
|
||||
**Post:** `docs/blog/2026-04-22-a2a-v1-agent-platform/index.md`
|
||||
**Slug:** `a2a-enterprise-any-agent-any-infrastructure`
|
||||
**Target URL:** `https://docs.molecule.ai/blog/a2a-enterprise-any-agent-any-infrastructure`
|
||||
**Target length:** ~900 words
|
||||
**Status:** DRAFT — awaiting PMM sign-off → route to Content Marketer
|
||||
**Brief owner:** PMM | **Writer:** Content Marketer
|
||||
|
||||
---
|
||||
|
||||
## Search Intent
|
||||
|
||||
**Primary intent:** Informational (enterprise buyers researching agent orchestration platforms)
|
||||
**Secondary intent:** Comparative (evaluating Molecule AI vs LangGraph, CrewAI, custom integrations)
|
||||
**Content type:** In-depth blog post / thought leadership
|
||||
**Audience:** IT leads, DevOps architects, platform engineers evaluating multi-agent orchestration
|
||||
|
||||
---
|
||||
|
||||
## Canonical URL
|
||||
|
||||
✅ `https://docs.molecule.ai/blog/a2a-enterprise-any-agent-any-infrastructure`
|
||||
*(Consistent with post slug — no redirects, no query params)*
|
||||
|
||||
---
|
||||
|
||||
## Headlines
|
||||
|
||||
### H1 (primary)
|
||||
> A2A Protocol for Enterprise: Any Agent. Any Infrastructure. Full Audit Trail.
|
||||
|
||||
✅ **PMM-approved.** Matches Phase 30 core narrative. "Any agent, any infrastructure" is the established anchor phrase.
|
||||
|
||||
### H2 candidates
|
||||
1. "How A2A v1.0 Changes Multi-Agent Orchestration for Enterprise Teams"
|
||||
2. "Why Protocol-Native Beats Protocol-Added for Agent Governance"
|
||||
3. "Cross-Cloud Agent Delegation Without the VPN"
|
||||
|
||||
---
|
||||
|
||||
## Keywords
|
||||
|
||||
### P0 — must appear in H1, first paragraph, or meta
|
||||
| Keyword | Target density | Placement |
|
||||
|---------|---------------|-----------|
|
||||
| `enterprise AI agent platform` | 2–3× | H1 anchor, intro paragraph, meta description |
|
||||
| `multi-cloud AI agent orchestration` | 2× | H2, body (cross-cloud section) |
|
||||
| `agent delegation audit trail` | 2× | Section heading, body (org API key attribution) |
|
||||
|
||||
### P1 — supporting (1–2× each)
|
||||
| Keyword | Placement |
|
||||
|---------|-----------|
|
||||
| `A2A protocol enterprise` | URL slug, intro, meta |
|
||||
| `multi-agent platform comparison` | LangGraph ADR section |
|
||||
| `cross-cloud agent communication` | VPN section |
|
||||
| `enterprise AI governance` | Intro hook, closing paragraph |
|
||||
| `AI agent fleet management` | Fleet/canvas section |
|
||||
|
||||
### P2 — internal linking anchors
|
||||
Use as anchor text when linking to other docs:
|
||||
- "per-workspace auth tokens" → `/docs/guides/org-api-keys`
|
||||
- "remote workspaces" → `/docs/guides/remote-workspaces`
|
||||
- "external agent registration" → `/docs/guides/external-agent-registration`
|
||||
- "Phase 30" → `/docs/blog/remote-workspaces`
|
||||
|
||||
---
|
||||
|
||||
## Meta Description
|
||||
|
||||
**Target:** 155–160 characters
|
||||
|
||||
> "How enterprise teams use A2A v1.0 for multi-cloud agent orchestration — without a VPN. Molecule AI adds governance, audit trails, and cross-cloud delegation to any A2A-compatible agent."
|
||||
|
||||
*(~185 chars as drafted — exceeds the 155–160 target; trim before publishing. Covers P0 keywords, search intent, and CTA)*
|
||||
|
||||
---
|
||||
|
||||
## Content Structure
|
||||
|
||||
### Hook (first 100 words)
|
||||
Lead with A2A v1.0 stats (March 12, LF, 23.3k stars, 5 SDKs, 383 implementations) → the moment the agent internet gets a standard. Most platforms add it. One platform was built for it from the ground up. Primary keywords: "enterprise AI agent platform", "A2A protocol".
|
||||
|
||||
### Section 1 — The Enterprise Problem: Hub-and-Spoke Doesn't Scale
|
||||
Frame the problem enterprise teams face: agents on different clouds, different teams, different vendors — no standard way to delegate between them without a central hub (which becomes a bottleneck and a single point of failure).
|
||||
|
||||
**Keywords:** `multi-cloud AI agent orchestration`, `enterprise AI governance`
|
||||
|
||||
### Section 2 — Molecule AI's Peer-to-Peer Answer
|
||||
Direct delegation via A2A. Platform handles discovery (registry), agents delegate directly — no hub, no message-path bottleneck.
|
||||
|
||||
**Proof points:**
|
||||
1. A2A proxy live in production (Phase 30, 2026-04-20)
|
||||
2. Per-workspace bearer tokens at every authenticated route — `Authorization: Bearer <token>` + `X-Workspace-ID` enforced at protocol level
|
||||
3. Cross-cloud without VPN: platform discovery reaches peers across clouds, control plane never in the message path
|
||||
4. Any A2A-compatible agent joins without code changes
|
||||
|
||||
**Keywords:** `agent delegation audit trail`, `cross-cloud agent communication`
|
||||
|
||||
**Auth guardrail:** Phase 30 enforces per-workspace bearer tokens at every authenticated route. Peer *discovery* is protocol-native (platform registry), but every A2A call is token-authenticated. Do not imply calls are unauthenticated.
|
||||
|
||||
**VPN guardrail:** "Molecule AI agents use platform discovery to reach peers across clouds — no VPN tunnel required for the control plane." Control plane is not in the message path.
|
||||
|
||||
### Section 3 — Code Sample (JSON-RPC, ~15 lines)
|
||||
Show a minimal A2A delegation call — agents passing tasks to peers across clouds. Keep it clean: this is the "see, it's real" moment for technical buyers. Must show token scope and workspace ID header.
|
||||
|
||||
### Section 4 — LangGraph ADR as Industry Validation
|
||||
Not the lead — the closer. LangGraph ships A2A support, validating the protocol. Molecule AI was there first, ships it in production today, and the governance layer (per-workspace tokens, audit trail) is the differentiation.
|
||||
|
||||
**Keywords:** `multi-agent platform comparison`
|
||||
|
||||
### Closing CTA
|
||||
One paragraph: "Get started with remote workspaces" → `/docs/guides/remote-workspaces`
|
||||
|
||||
---
|
||||
|
||||
## Internal Linking
|
||||
|
||||
| Anchor text | Target |
|
||||
|-------------|--------|
|
||||
| per-workspace auth tokens | `/docs/guides/org-api-keys` |
|
||||
| remote workspaces | `/docs/guides/remote-workspaces` |
|
||||
| external agent registration guide | `/docs/guides/external-agent-registration` |
|
||||
| Phase 30 | `/docs/blog/remote-workspaces` |
|
||||
|
||||
Minimum 4 internal links. No external competitor links (keep users on Molecule AI domain).
|
||||
|
||||
---
|
||||
|
||||
## Positioning Sign-Off
|
||||
|
||||
- [x] H1: approved
|
||||
- [x] Keywords: approved (P0 + P1 cover search intent and competitive comparison)
|
||||
- [x] Auth guardrail: corrected — "discovery-time CanCommunicate()" → "per-workspace bearer tokens enforced at every authenticated route"
|
||||
- [x] VPN guardrail: approved
|
||||
- [x] Phase 30 ship date: approved ("Phase 30 (2026-04-20)" framing)
|
||||
- [x] Code sample: required for enterprise buyer credibility
|
||||
- [ ] **PMM FINAL APPROVAL:** pending — sign off here to unblock Content Marketer
|
||||
|
||||
---
|
||||
|
||||
*Brief drafted by PMM 2026-04-22 — routed from Content Marketer SEO brief delegation (SEO Analyst unreachable via A2A this cycle)*
|
||||
@ -1,130 +0,0 @@
|
||||
# Phase 34: Partner API Keys — PMM Positioning Brief
|
||||
**Owner:** PMM | **Status:** Draft | **Date:** 2026-04-22
|
||||
**Assumptions:** GA date TBD (blocked on Phase 32 completion + infra); partner tiers TBD with PM
|
||||
|
||||
---
|
||||
|
||||
## Executive Summary
|
||||
|
||||
Phase 34 (Partner API Keys) ships a `mol_pk_*` scoped key type that lets CI/CD pipelines, marketplace resellers, and automation tools create and manage Molecule AI orgs via API — without a browser session. This is the foundational capability for three strategic channels: **partner platforms**, **marketplace resellers**, and **enterprise CI/CD automation**. Each channel requires distinct positioning, but all share the same core value prop: *programmatic org provisioning, at scale, without compromising security*.
|
||||
|
||||
---
|
||||
|
||||
## What Phase 34 Ships (Technical)
|
||||
|
||||
| Component | Detail |
|
||||
|-----------|--------|
|
||||
| Key type | `mol_pk_*` — SHA-256 hashed in DB, returned in plaintext once on creation |
|
||||
| Scoping | Org-scoped only; keys cannot access other orgs |
|
||||
| Rate limiting | Per-key limiter, separate from session limits |
|
||||
| Audit | `last_used_at` tracking on every request |
|
||||
| Endpoints | `POST /cp/admin/partner-keys`, `GET /cp/admin/partner-keys`, `DELETE /cp/admin/partner-keys/:id` |
|
||||
| Secret scanner | `mol_pk_` added to pre-commit secret scanner |
|
||||
| Onboarding | Partner onboarding guide + two code examples (org lifecycle, CI/CD test org) |
|
||||
|
||||
---
|
||||
|
||||
## Positioning by Channel
|
||||
|
||||
### Channel 1: Partner Platforms
|
||||
|
||||
**Buyer:** DevRel + platform integrations lead at platforms that want to embed or white-label Molecule AI as the agent orchestration layer.
|
||||
|
||||
**Core message:** *"Molecule AI embeds in 10 lines of code. Provision a full org, attach your branding, and hand the tenant a ready-to-run fleet."*
|
||||
|
||||
**Problem:** Platforms that want to offer agent orchestration as a feature today have two bad options — build it themselves (months of work, ongoing maintenance) or integrate via browser sessions (brittle, non-programmatic). Neither scales.
|
||||
|
||||
**Solution:** Partner API Keys give platforms a first-class provisioning path. A partner platform calls `POST /cp/admin/partner-keys` with `orgs:create` scope, provisions a white-labeled org for each customer, and hands the customer a dashboard that is already their org, already wired up, already running agents.
|
||||
|
||||
**Three claims:**
|
||||
1. **Zero browser dependency.** Every provisioning action is an API call. Integrations don't break on UI changes.
|
||||
2. **Scope-isolated by design.** Each partner key is scoped to one org. A compromised key cannot access other tenants or the platform's own infrastructure.
|
||||
3. **Revocable instantly.** `DELETE /cp/admin/partner-keys/:id` revokes access on the next request. No waiting for session expiry.
|
||||
|
||||
**Target dev:** Platform integrations engineer, DevRel who owns partner ecosystem
|
||||
**CTA:** Request partner access → `docs.molecule.ai/docs/guides/partner-onboarding`
|
||||
|
||||
---
|
||||
|
||||
### Channel 2: Marketplace Resellers
|
||||
|
||||
**Buyer:** Marketplace ops team at cloud marketplaces (AWS Marketplace, GCP Marketplace) or agent framework directories who want to offer one-click Molecule AI org provisioning alongside existing listings.
|
||||
|
||||
**Core message:** *"Molecule AI on [Marketplace]: provision in seconds, manage via API, bill through your existing account."*
|
||||
|
||||
**Problem:** Marketplaces that list SaaS tools today have to manually provision trials, manage credentials out of band, and reconcile billing. The manual overhead makes Molecule AI a low-margin listing.
|
||||
|
||||
**Solution:** Partner API Keys enable fully automated provisioning through marketplace billing APIs. A buyer clicks "Deploy on [Marketplace]", the marketplace calls the Partner API to provision an org, charges begin on the marketplace invoice, and the buyer lands in a fully configured dashboard.
|
||||
|
||||
**Three claims:**
|
||||
1. **Automated provisioning end-to-end.** From click to running org in under 60 seconds — no manual handoff.
|
||||
2. **Marketplace-native billing.** Usage flows through the marketplace's existing invoicing, not a separate Molecule AI subscription.
|
||||
3. **API-first management.** Marketplaces manage orgs, seats, and deprovisioning via the same Partner API used for provisioning.
|
||||
|
||||
**Target dev:** Marketplace listing owner, cloud marketplace integrations engineer
|
||||
**CTA:** List on [Marketplace] → contact partner team
|
||||
|
||||
---
|
||||
|
||||
### Channel 3: Enterprise CI/CD Automation
|
||||
|
||||
**Buyer:** DevOps / Platform engineering team at enterprises that want to spin up ephemeral test orgs as part of CI pipelines, run integration tests against a fresh Molecule AI org per PR, or automate org provisioning for dev/staging environments.
|
||||
|
||||
**Core message:** *"Test against a real org, every commit, without touching the production fleet."*
|
||||
|
||||
**Problem:** Enterprise teams building on Molecule AI today have to either share test orgs (flaky, data contamination) or manually provision ephemeral orgs per test run (slow, non-automatable). Neither supports a high-velocity CI/CD workflow.
|
||||
|
||||
**Solution:** Partner API Keys + CI/CD example in the onboarding guide gives platform teams a fully automated org lifecycle per pipeline run: `POST` to create org → run tests → `DELETE` to teardown. Each PR gets a clean org. No cross-contamination. No manual cleanup.
|
||||
|
||||
**Three claims:**
|
||||
1. **Per-PR ephemeral orgs.** Each pipeline run gets a fresh org with default settings. Tests run in isolation. No shared-state flakiness.
|
||||
2. **Automated teardown.** `DELETE /cp/admin/partner-keys/:id` deprovisions the org and stops billing immediately.
|
||||
3. **No browser required.** The entire lifecycle — create, configure, test, teardown — is one or two API calls. CI/CD-native from day one.
|
||||
|
||||
**Target dev:** Platform engineer, DevOps lead, CI/CD team
|
||||
**CTA:** CI/CD integration guide → `docs.molecule.ai/docs/guides/partner-onboarding#cicd-example`
|
||||
|
||||
---
|
||||
|
||||
## Cross-Channel Positioning
|
||||
|
||||
All three channels share a single technical differentiator that should appear in every channel's collateral:
|
||||
|
||||
> **Partner API Keys are org-scoped, scope-enforced, and revocable in one call.** A `mol_pk_*` key cannot escape its org boundary. Compromised keys cost one `DELETE` to neutralize. This is not a personal access token with an org-wide blast radius — it is an infrastructure credential designed for the partner tier.
|
||||
|
||||
---
|
||||
|
||||
## Phase 30 Linkage
|
||||
|
||||
Phase 30 (Remote Workspaces) shipped the per-workspace auth token model (`mol_ws_*`). Phase 34 extends that model to the *platform tier* with `mol_pk_*` — partner/platform-level keys that provision and manage orgs. Cross-sell opportunity: every Phase 34 org comes with Phase 30 remote workspace capability at no additional configuration.
|
||||
|
||||
---
|
||||
|
||||
## Collateral Needed
|
||||
|
||||
| Asset | Owner | Status |
|
||||
|-------|-------|--------|
|
||||
| Partner onboarding guide (`docs/guides/partner-onboarding.md`) | DevRel / PM | Not started |
|
||||
| CI/CD example (org lifecycle + test teardown) | DevRel | Not started |
|
||||
| Partner API Keys landing page section | Content Marketer | Not started |
|
||||
| Marketplace listing copy | Content Marketer | Not started |
|
||||
| Battlecard update (add Phase 34 row) | PMM | Not started |
|
||||
| Partner tier pricing page | Marketing Lead / PM | TBD |
|
||||
|
||||
---
|
||||
|
||||
## Open Questions for PM / Marketing Lead
|
||||
|
||||
1. Partner tiers: will there be multiple key tiers (e.g., `orgs:create` vs `orgs:manage` vs `orgs:delete`)? Pricing model?
|
||||
2. GA date: dependent on Phase 32 completion — any updated ETA?
|
||||
3. First design partner: is there a named partner in the pipeline we can use as a reference in the onboarding guide?
|
||||
4. Rate limits: what are the per-key rate limits? Do limits vary by tier?
|
||||
5. Key rotation: are partner keys rotatable, or is rotation a delete + recreate?
|
||||
|
||||
---
|
||||
|
||||
## Competitive Context
|
||||
|
||||
No direct competitor has a published Partner API Key program at the agent orchestration layer. CrewAI and AutoGen focus on developer-seat pricing. LangGraph Cloud uses per-user licensing with no partner provisioning tier. This is a first-mover opportunity to own the "agent platform-as-a-backend" positioning before the category standardizes.
|
||||
|
||||
**Risk:** If AWS/GCP/Azure absorb agent orchestration into their managed AI platforms (Phase 30 risk, tracked in ecosystem-watch), the partner platform channel may shift to OEM relationships rather than API-key-based reselling. Monitor for cloud provider announcements.
|
||||
@ -1,103 +0,0 @@
|
||||
# Phase 30 PMM Positioning — Response to SEO Brief #1126 Questions
|
||||
|
||||
> **Context:** SEO Analyst filed brief #1126 for Remote Workspaces campaign. Acceptance criteria specified "Coordinate with PMM (issue #1116) on positioning language." PMM Slack: "Phase 30 position holding." No PMM response received yet. Content Marketer answers based on approved copy + internal/product docs.
|
||||
> **Author:** Content Marketer (self-prompted — no PMM input available this cycle)
|
||||
> **Date:** 2026-04-22
|
||||
> **Status:** DRAFT — for PMM review before social copy goes live
|
||||
|
||||
---
|
||||
|
||||
## Q1: Primary message — "One canvas, every agent" or "Deploy agents anywhere"?
|
||||
|
||||
**Recommendation:** Both — layered approach:
|
||||
|
||||
- **Headline (social/digital):** "One canvas, every agent." — fleet visibility is the emotional hook. It's visually true (the Canvas shows the whole org) and differentiated (competitors don't have this).
|
||||
- **Sub-message (docs/SEO):** "Deploy agents anywhere, manage them from one place." — deployment flexibility is the practical hook for the informational/search audience.
|
||||
|
||||
**Rationale from internal/product/overview.md:** Molecule AI's primary abstraction is the *workspace role*, not a task graph. The canvas shows that clearly. Social copy should lead with that differentiator. SEO content should lead with the practical deployment flexibility.
|
||||
|
||||
**Approved social copy confirms this:**
|
||||
- Fly Deploy Anywhere: leads with deployment flexibility (three backends, one config change)
|
||||
- Remote Workspaces social copy: leads with fleet visibility ("Your whole agent team, on one canvas")
|
||||
- Org API Keys: leads with operational control (audit trail, revocation)
|
||||
|
||||
---
|
||||
|
||||
## Q2: Competitive framing — LangChain Agents, CrewAI, OpenAI agent SDK?
|
||||
|
||||
**Positioning lines that hold:**
|
||||
|
||||
1. **"Runtime agnostic by design"** — LangChain and CrewAI are tied to their own graph/DAG execution model. Molecule AI workspaces run LangGraph, Claude Code, CrewAI, AutoGen, OpenClaw, DeepAgents behind the same workspace contract. Approved copy across 8 campaigns confirms this is real.
|
||||
|
||||
2. **"A2A is solved. A2A governance is not."** — A2A Enterprise approved copy leads with this frame. LangGraph shipped A2A client (PRs #6645 and #7113) but has no audit trail, no org attribution, no revocation. This is the clearest competitive gap.
|
||||
|
||||
3. **"Fleet visibility by default"** — Canvas shows the org chart. No competitor has this out of the box.
|
||||
|
||||
**Do NOT say:**
|
||||
- "better than [competitor]" without data
|
||||
- benchmark numbers — none are confirmed
|
||||
- "only platform with X" unless sourced
|
||||
|
||||
**LangGraph specific (from A2A Enterprise approved copy):**
|
||||
- Factual only: "LangGraph ADR validates that A2A is becoming table stakes." — don't spin this as criticism.
|
||||
- Attribution: cite PR numbers (#6645, #7113) — these are public facts.
|
||||
|
||||
---
|
||||
|
||||
## Q3: Primary audience — infra lead, developer, or platform team?
|
||||
|
||||
**Split by channel:**
|
||||
|
||||
| Channel | Primary audience | Why |
|
||||
|---------|-----------------|-----|
|
||||
| X (social) | Platform engineers, DevOps | Operational pain (ADMIN_TOKEN rotation, CI/CD integration) |
|
||||
| LinkedIn | Enterprise AI leads, CTOs | Governance, audit trail, org-scale control |
|
||||
| SEO/docs | Developers, infra teams | How-to, self-hosted setup, remote agent registration |
|
||||
| Blog | Evaluators, technical decision-makers | Comprehensive feature + differentiation |
|
||||
|
||||
**From internal/product/overview.md:** Molecule AI targets teams running heterogeneous agent fleets. The buyer is a platform lead or infra engineer who needs to manage agents across environments.
|
||||
|
||||
---
|
||||
|
||||
## Q4: Pricing/availability — all tiers or specific plan?
|
||||
|
||||
**Positioning depends on what is actually GA:**
|
||||
|
||||
- Phase 30 workspaces (remote agents, bearer tokens, A2A) — **GA as of 2026-04-20** per phase30-launch-calendar.md
|
||||
- Phase 32 cloud SaaS (Stripe Atlas billing) — **IN PROGRESS**, load test pending, ~2wk lead on Atlas
|
||||
- Phase 33 — **NOT LOCKED**, no GA date confirmed
|
||||
|
||||
**Safe CTA language (confirmed GA only):**
|
||||
- "Workspaces on Docker, Fly Machines, or your own cloud — same agent code"
|
||||
- "Org API keys. Audit trail. Instant revocation."
|
||||
- "Every Molecule AI workspace is an A2A server."
|
||||
|
||||
**Do NOT say:**
|
||||
- "available on all plans" — this hasn't been confirmed by PM
|
||||
- specific pricing tiers
|
||||
- "Phase 33 ships next" — date not locked
|
||||
|
||||
---
|
||||
|
||||
## Q5: Campaign coordination — any spacing or sequencing rules?
|
||||
|
||||
**From approved social copy + posting-guide.md:**
|
||||
|
||||
| Day | Campaign | Don't post same day as |
|
||||
|-----|----------|----------------------|
|
||||
| Apr 21 | Chrome DevTools MCP | Fly Deploy Anywhere |
|
||||
| Apr 22 | Discord Adapter Day 2 (Reddit/HN) | — |
|
||||
| Apr 23 | Org API Keys | — |
|
||||
| Apr 23 | A2A Enterprise | — |
|
||||
| Apr 24 | EC2 Instance Connect SSH | — |
|
||||
| Apr 25 | MCP Server List | — |
|
||||
| Apr 17+ | Fly Deploy Anywhere | Chrome DevTools MCP Day 1 |
|
||||
|
||||
**Cross-campaign links (intentional stacking):**
|
||||
- Discord Adapter → links to Org API Keys (shared governance/A2A theme)
|
||||
- Fly Deploy Anywhere → naturally cross-links to Chrome DevTools MCP (both self-hosted angle)
|
||||
- EC2 Instance Connect SSH → platform engineering audience, stacks with Org API Keys
|
||||
|
||||
---
|
||||
|
||||
*Content Marketer — 2026-04-22. PMM to review and confirm or revise before social copy is finalized.*
|
||||
@ -1,83 +0,0 @@
|
||||
# Phase 32 SaaS — Observability Angle Brief (Content Marketer)
|
||||
**Date:** 2026-04-22
|
||||
**Status:** DRAFT — for future social copy when Phase 32 GA is confirmed
|
||||
**Context:** Social Media Brand flagged this angle from PLAN.md. Phase 32 is still hardening — not ready to post.
|
||||
|
||||
---
|
||||
|
||||
## The Observability Story
|
||||
|
||||
Phase 32 ships Molecule AI as a multi-tenant cloud SaaS. The observability layer built into the platform is a genuine enterprise differentiator — it's not an add-on, it's structural.
|
||||
|
||||
**What makes this worth a campaign:**
|
||||
1. Every cross-agent A2A call is logged (Phase 30.5 — in prod since Apr 20)
|
||||
2. Activity logs capture: caller, callee, method, timestamp, result, error detail
|
||||
3. `/traces` endpoint surfaces Langfuse traces per workspace (shipped in Phase 10)
|
||||
4. Token-level attribution: `org:keyId` prefix on every API call (Phase 30 / Org API Keys)
|
||||
5. Admin observability: `/events` endpoint, per-workspace activity, delegation history
|
||||
|
||||
**The positioning frame:**
|
||||
> "When something goes wrong in your agent team, can you answer: which agent did what, when, and with what result?"
|
||||
|
||||
Most agent platforms can't answer this. Molecule AI built the answer into the platform from Phase 10 onward.
|
||||
|
||||
---
|
||||
|
||||
## What's Confirmed GA (post to this)
|
||||
|
||||
| Feature | Phase | GA Date |
|
||||
|---------|-------|---------|
|
||||
| Activity logs (A2A + task + error) | Phase 10 | Shipped |
|
||||
| Langfuse traces per workspace | Phase 10 | Shipped |
|
||||
| Token attribution (`org:keyId`) | Phase 30 | 2026-04-20 |
|
||||
| Audit log export | Org API Keys | Live on staging |
|
||||
| `/traces` endpoint | Phase 10 | Shipped |
|
||||
|
||||
---
|
||||
|
||||
## Phase 32-Specific (not GA until hardening complete)
|
||||
|
||||
| Feature | Status | Notes |
|
||||
|---------|--------|-------|
|
||||
| CloudTrail records for EC2 Instance Connect | ✅ Shipped | AWS-native, per-workspace |
|
||||
| Per-tenant resource quotas | ⏳ Phase G | Observability → control loop |
|
||||
| Langfuse on cloud SaaS | ⏳ Phase G | observability + quotas |
|
||||
| Status page custom domain | ⏳ Phase H | `status.moleculesai.app` pending |
|
||||
| Load test | ⏳ Phase H | Before external user launch |
|
||||
|
||||
---
|
||||
|
||||
## Do NOT Post Until
|
||||
|
||||
- Load test complete
|
||||
- Stripe Atlas (~2wk lead) — social gate per phase30-launch-plan.md
|
||||
- Status page live at custom domain
|
||||
- All of the above confirmed by PM
|
||||
|
||||
---
|
||||
|
||||
## Draft Social Frame (for when Phase 32 clears)
|
||||
|
||||
**Hook:** "Your AI agent team just did something. Can you prove it?"
|
||||
|
||||
**Post 1 (the problem):**
|
||||
Most AI agent platforms give you zero visibility into what your agents actually did.
|
||||
No logs. No traces. No audit trail.
|
||||
When something goes wrong, you're debugging blind.
|
||||
|
||||
**Post 2 (what Molecule AI ships):**
|
||||
Every cross-agent call logged.
|
||||
Every API call attributed to an org key.
|
||||
Every trace visible in Langfuse.
|
||||
Workspace-level activity logs. Admin-level event export.
|
||||
|
||||
If your compliance team asks "which agent touched what," you can answer from the platform — not from guessing.
|
||||
|
||||
**Post 3 (EC2 Instance Connect + observability):**
|
||||
Molecule AI's Terminal tab routes through AWS EC2 Instance Connect Endpoint.
|
||||
The session is AWS-signed, ephemeral, and CloudTrail-recorded.
|
||||
Your platform team gets a shell. Your security team gets the audit log. Same tool.
|
||||
|
||||
---
|
||||
|
||||
*Content Marketer — 2026-04-22. Not ready to publish until Phase 32 hardening complete.*
|
||||
@ -1,82 +0,0 @@
|
||||
# PR #1686 Positioning Brief: Tool Trace + Platform Instructions
|
||||
|
||||
**Source:** PR #1686 — `feat: tool trace + platform instructions`
|
||||
**Date:** 2026-04-23
|
||||
**Author:** PMM
|
||||
**Status:** Draft — for internal review before announcement
|
||||
|
||||
---
|
||||
|
||||
## Target Buyer
|
||||
|
||||
**Primary:** Platform Engineering / DevOps leads (80% of value)
|
||||
**Secondary:** Enterprise IT / Security Governance leads (Platform Instructions)
|
||||
|
||||
Platform teams own the agent runtime and are the first to get paged when an agent goes off-script. They need built-in observability, not bolt-on stitching. Enterprise IT and compliance teams care about the governance angle — system-prompt rules that enforce behavior before an agent runs, not after it has already done something unintended.
|
||||
|
||||
---
|
||||
|
||||
## Primary Value Prop
|
||||
|
||||
> **Tool Trace** gives every A2A response a complete, run_id-paired execution record — so platform teams can trace what every agent actually did, without wiring up a third-party SDK.
|
||||
|
||||
> **Platform Instructions** lets workspace admins enforce system-prompt rules at startup — so governance happens before the agent runs, not after an incident.
|
||||
|
||||
---
|
||||
|
||||
## Competitive Angle
|
||||
|
||||
**vs. Langfuse / Helicone / separate observability pipelines:**
|
||||
Third-party LLM observability tools require instrumentation in every agent: SDK installs, API key management, proxy configuration, and a separate vendor relationship. Tool Trace ships the execution record inside every A2A message and stores it in `activity_logs` — no extra pipeline, no separate pane of glass. For teams already on Molecule, it's zero-lift observability.
|
||||
|
||||
Langfuse/Helicone remain stronger for *cross-platform, multi-model* observability (tracking OpenAI + Anthropic + self-hosted in one view). That's not Molecule's fight. The positioning here is: "If you're already running agents on Molecule, you already have enterprise-grade trace — turn it on, don't integrate it."
|
||||
|
||||
**vs. Hermes native tool tracing:**
|
||||
Hermes traces individual model calls. Tool Trace traces *agent behavior* — the A2A-level sequence of tool calls and responses across the full task lifecycle. Different layer of the stack. Tool Trace is additive, not competitive.
|
||||
|
||||
**vs. policy-as-code tools (OPA, Sentinel):**
|
||||
Platform Instructions enforces behavioral guardrails at the system-prompt level. Policy engines enforce runtime resource access. They complement; Platform Instructions is earlier in the chain (pre-execution vs. during-execution).
|
||||
|
||||
---
|
||||
|
||||
## Key Differentiator
|
||||
|
||||
Tool Trace and Platform Instructions are **platform-native** — not plugins, not third-party SDKs, not configuration-as-code you have to maintain. They live where the agent runs: inside the workspace startup path and inside every A2A message envelope. There's nothing to install, no API key to rotate, no version drift to manage when the agent framework updates.
|
||||
|
||||
Third-party observability and governance tooling always has a lag between "agent framework ships a new behavior" and "our integration captures it." Native trace and prompt-level instructions have no lag — they are the platform.
|
||||
|
||||
---
|
||||
|
||||
## Objection Handlers
|
||||
|
||||
**O1: "We already use Datadog / Langfuse / Splunk for this."**
|
||||
That's fine for cross-platform, multi-model environments. Tool Trace captures *A2A-level* agent behavior — tool calls, input/output previews, run_id-paired sequences — that generic LLM observability pipelines typically miss or flatten. Think of it as your Molecule-specific layer inside your existing observability stack. It doesn't replace Datadog; it enriches it.
|
||||
|
||||
**O2: "Why enforce system-prompt rules at the platform level instead of in code?"**
|
||||
Because code changes require a deployment, and governance that requires a deployment is governance that only happens at the next release cycle. Platform Instructions are workspace-scoped rules that take effect at startup — a platform team or IT admin can update agent behavior without touching application code or triggering a redeploy. Speed of governance matters.
|
||||
|
||||
---
|
||||
|
||||
## Overlap / Conflict Notes
|
||||
|
||||
| Existing Feature | Relationship |
|
||||
|-----------------|--------------|
|
||||
| Org-scoped API keys (#1105) | Different layer: API key auth vs. agent behavior/prompt. Tool Trace traces what agents *do* with the keys; org keys control *who gets* the keys. Not cannibalization — complementary. |
|
||||
| Audit trail visualization panel (#759) | Tool Trace is the raw execution record; the audit trail panel is the compliance UI on top of it. Tool Trace feeds the audit trail. Not competitive — dependency. |
|
||||
| Snapshot secret scrubber (#977) | Both platform observability. Secret scrubber is about data posture; Tool Trace is about behavior. No conflict. |
|
||||
|
||||
**Cannibalization risk: LOW.** Tool Trace and Platform Instructions occupy the observability/governance vertical that existing features touch from different angles — no direct overlap, strong adjacency.
|
||||
|
||||
---
|
||||
|
||||
## CTA
|
||||
|
||||
**For platform teams:** "Enable activity log tracing for your workspace — every A2A task now has a complete execution record, no SDK required."
|
||||
**For enterprise IT:** "Set workspace-level system prompt rules to enforce behavioral guardrails before agents run. No code deploy required."
|
||||
**Combined anchor:** "Molecule gives you observability and governance as platform primitives — not afterthought integrations."
|
||||
|
||||
---
|
||||
|
||||
## Recommended Announcement Angle
|
||||
|
||||
Lead with the platform-native story, not the feature list. The headline is: *"Molecule agents now come with built-in execution tracing and governance — nothing to integrate."* Avoid leading with "Tool Trace" as a feature name in top-level copy; use "execution tracing" or "agent observability" for broader appeal.
|
||||
@ -1,115 +0,0 @@
|
||||
# Cloudflare Artifacts — PMM Positioning Brief
|
||||
**Source:** PR #641, merged 2026-04-17 | Blog: `docs/marketing/blog/2026-04-21-cloudflare-artifacts-integration.md`
|
||||
**Issue:** #1174 | **Status:** PMM DRAFT | **Date:** 2026-04-23
|
||||
**Owner:** PMM | **Blocking:** none — feature shipped, ready for social
|
||||
|
||||
---
|
||||
|
||||
## Positioning Decision
|
||||
|
||||
**Use "Git for agents" as the headline metaphor — with qualification.**
|
||||
|
||||
Cloudflare's own beta announcement uses "Git for agents." It's the right hook because developers immediately understand what it means and why it matters. Leading with it is accurate and immediately differentiating.
|
||||
|
||||
The qualification: this is Git *plus* the agent primitives that make it agent-native. Automated commits (no human in the loop), API-first branching, ephemeral short-lived credentials, canvas-native integration. It's not Git with a chat interface — it's version control designed for stateless agents.
|
||||
|
||||
**Recommended headline:** "Give your agents a Git history — without touching a terminal."
|
||||
|
||||
---
|
||||
|
||||
## Buyer Profile
|
||||
|
||||
**Primary:** Platform engineers and DevOps leads evaluating AI agent platforms. They have agents running in production, they're managing agent state manually or not at all, and they need version control they can instrument. They're not necessarily Git experts — they're the people who inherited the AI agent rollout.
|
||||
|
||||
**Secondary:** Enterprise security and compliance teams. They need audit trails on agent actions. A versioned snapshot system with immutable commits is a concrete answer to "what did the agent change?" — without requiring agents to write human-readable commit messages.
|
||||
|
||||
**Not the audience:** Developers who want Git workflows in their own IDE. This isn't replacing GitHub for human developers — it's giving agents a version history that humans can audit and roll back.
|
||||
|
||||
---
|
||||
|
||||
## Use Cases
|
||||
|
||||
### Use Case 1: Multi-agent pipelines without manual handoff
|
||||
Two agents, same task. Agent A writes a feature branch. Agent B reviews and approves. You merge. No Slack threads asking "did the research agent finish?" No copy-pasting outputs between workspaces.
|
||||
|
||||
### Use Case 2: Crash recovery without starting over
|
||||
An agent crashes mid-task. With versioned snapshots, the last checkpoint is a Git commit. The next agent to pick up the task starts from a diff, not a blank workspace.
|
||||
|
||||
### Use Case 3: Experimentation without risk
|
||||
Agents trying something risky can fork a branch first. If it fails, delete the fork. The main branch is clean. No "oops, can you revert that?" in the team Slack.
|
||||
|
||||
---
|
||||
|
||||
## Top 2 Buyer Objections
|
||||
|
||||
### Objection 1: "Why not just use GitHub? Agents can call `git commit`"
|
||||
**Likely buyer:** Platform engineers with existing GitOps workflows.
|
||||
|
||||
**The problem with this objection:** `git commit` requires a Git repo on disk, human-readable messages, and a human in the loop to resolve conflicts. Agents don't naturally produce well-structured commits. And "just use GitHub" means agents need credentials, network access, and a configured remote — which creates a dependency you have to manage.
|
||||
|
||||
**Recommended response:**
|
||||
Git was designed for humans. Agents need version control that works without a human in the commit loop — automatic snapshots, API-first branching, ephemeral credentials that never get stored. Cloudflare Artifacts gives agents their own versioned storage without requiring Git credentials on every agent instance. The four API operations (`POST /artifacts/repos`, `fork`, `import`, `tokens`) are agent-native — no terminal, no commit messages, no credential management.
|
||||
|
||||
If you want agents to contribute to a shared Git repo, they can — `POST /artifacts/repos/:name/import` bootstraps from any Git URL. But they don't need to in order to have a useful version history.
|
||||
|
||||
---
|
||||
|
||||
### Objection 2: "Cloudflare Artifacts is in beta — we can't bet production infrastructure on a beta service"
|
||||
**Likely buyer:** Enterprise ops leads, security teams.
|
||||
|
||||
**The problem with this objection:** The risk is real but the framing is wrong. Cloudflare Artifacts is beta on Cloudflare's side, but the integration inside Molecule AI is designed to fail gracefully — if Artifacts is unavailable, agents fall back to local workspace state. The version history is an enhancement, not a hard dependency.
|
||||
|
||||
**Recommended response:**
|
||||
The feature is additive, not a hard dependency. If Cloudflare Artifacts is unavailable, agents continue working with local filesystem state — no outage, no degraded mode. Cloudflare is a large, stable infrastructure provider with a documented beta SLA. For teams that need production guarantees, this is worth evaluating alongside the rest of the Cloudflare Workers ecosystem. If Cloudflare Artifacts goes GA, the integration is already live.
|
||||
|
||||
---
|
||||
|
||||
## GA Status
|
||||
|
||||
**Feature is shipped (PR #641 merged 2026-04-17).**
|
||||
|
||||
Cloudflare Artifacts is in public beta on Cloudflare's side. Molecule AI's integration is live. The feature is available to users with a Cloudflare API token and Artifacts namespace configured.
|
||||
|
||||
**No separate GA date needed from Molecule AI's side** — the integration doesn't have its own launch milestone, it's a feature within the existing platform. Social copy can proceed without a GA date announcement.
|
||||
|
||||
**Caveat:** If Cloudflare promotes Artifacts from beta, the messaging should shift from "Git for agents (beta)" to "Git for agents — now GA." Track Cloudflare's announcement channel for Artifacts GA.
|
||||
|
||||
---
|
||||
|
||||
## Competitive Angle
|
||||
|
||||
**No other AI agent platform has a Cloudflare Artifacts integration as of 2026-04-17.** This is a first-mover claim. Verify before publishing — if a competitor ships before the launch post goes live, update to "first to integrate" rather than "only platform with."
|
||||
|
||||
Monitor: LangGraph, CrewAI, AutoGen GitHub repos for Artifacts or CF Workers integration commits.
|
||||
|
||||
---
|
||||
|
||||
## Collateral Status
|
||||
|
||||
| Asset | Owner | Status |
|
||||
|-------|-------|--------|
|
||||
| Blog post | Content Marketer | Shipped (2026-04-21) |
|
||||
| Social launch thread | Social Media Brand | Blocked on brief (this doc) |
|
||||
| DevRel demo | DevRel Engineer | Unknown |
|
||||
| Docs page | DevRel | Shipped (`docs/guides/cloudflare-artifacts`) |
|
||||
| Battlecard entry | PMM | Add to Phase 34 battlecard |
|
||||
|
||||
---
|
||||
|
||||
## Recommended Social Angle (for Social Media Brand)
|
||||
|
||||
Thread opener: "Your AI agent just deleted three hours of work. Here's why that doesn't have to happen again."
|
||||
|
||||
Lead with the pain story. The technology is the answer, not the hook. Close with the CTA to the blog post.
|
||||
|
||||
---
|
||||
|
||||
## Update Triggers
|
||||
|
||||
- Cloudflare Artifacts GA announced → update from "beta" to "GA" framing
|
||||
- Any competitor ships Cloudflare Artifacts integration → update competitive claim to "first to integrate"
|
||||
- PR or issue filed about Artifacts user experience → update objections section
|
||||
|
||||
---
|
||||
|
||||
*PMM draft 2026-04-23 — ready for Social Media Brand*
|
||||
@ -1,100 +0,0 @@
|
||||
# Phase 34 — Taglines + Messaging Matrix
|
||||
**Feature group:** Partner API Keys, Tool Trace, Platform Instructions, SaaS Federation v2
|
||||
**GA date:** April 30, 2026
|
||||
**Owner:** PMM | **Status:** INTERNAL DRAFT
|
||||
**Last updated:** 2026-04-23
|
||||
|
||||
---
|
||||
|
||||
## 3 Candidate Taglines
|
||||
|
||||
### Tagline A — Production-grade (emphasizes enterprise reliability)
|
||||
> **"Production-grade AI agents. Nothing to bolt on."**
|
||||
|
||||
**Use for:** Press releases, homepage hero, paid placements, enterprise sales decks.
|
||||
**Why it works:** Directly addresses the enterprise buyer's #1 objection — "this is great for prototypes but can I run it in production?" — without overclaiming features. "Nothing to bolt on" is a dig at competitors (LangGraph, CrewAI) that require Langfuse, Helicone, or custom observability pipelines.
|
||||
|
||||
---
|
||||
|
||||
### Tagline B — Observability/visibility (emphasizes transparency)
|
||||
> **"See exactly what your AI agents did. Every tool. Every call. Every time."**
|
||||
|
||||
**Use for:** DevOps-focused channels, technical blog intros, SOC 2 / compliance audience, tool trace launch announcement.
|
||||
**Why it works:** Speaks directly to the platform engineering persona — the person who gets paged at 2am when something breaks. "Every tool. Every call. Every time." is specific and falsifiable, which builds credibility with technical audiences. It names the feature (Tool Trace) without making it a product name.
|
||||
|
||||
---
|
||||
|
||||
### Tagline C — Aspirational (emphasizes enterprise enablement)
|
||||
> **"Your AI fleet. Your rules. Your cloud."**
|
||||
|
||||
**Use for:** LinkedIn, enterprise social, brand campaigns, vision statements.
|
||||
**Why it works:** Three short declarative sentences that speak to three distinct buyer anxieties: managing at scale ("fleet"), controlling behavior ("rules"), and infrastructure autonomy ("your cloud"). Works for Platform Instructions, Partner API Keys, and SaaS Federation v2 simultaneously — it's a Phase 34 group tagline, not a single-feature tagline.
|
||||
|
||||
---
|
||||
|
||||
## Messaging Matrix — 4 Features
|
||||
|
||||
---
|
||||
|
||||
### Feature 1: Partner API Keys (`mol_pk_*`)
|
||||
|
||||
| | |
|
||||
|--|--|
|
||||
| **Pain it solves** | Partner platforms, CI/CD pipelines, and marketplace resellers cannot programmatically provision or manage Molecule AI orgs — they must use browser sessions or build custom integrations from scratch. This makes Molecule AI unembeddable for any platform that wants to offer agent orchestration as a feature. |
|
||||
| **Who cares** | Platform integrations engineers, DevRel leads building partner ecosystems, CI/CD DevOps teams, marketplace listing owners (AWS/GCP Marketplace) |
|
||||
| **One-liner** | Programmatic org provisioning via API — no browser required, no manual handoff. |
|
||||
| **Proof point** | `POST /cp/admin/partner-keys` creates a fully configured org with one API call. Keys are scoped to the org they create, rate-limited, revocable with `DELETE /cp/admin/partner-keys/:id`. Ephemeral CI test orgs: `POST` → run tests → `DELETE` → clean billing. |
|
||||
| **HN/Reddit framing** | "Molecule AI now lets partners provision orgs via API — the same week Acme Corp [design partner, placeholder] ships their integration." Do NOT claim GA. Use "beta" or "now available." |
|
||||
| **What to soft-pedal** | Specific partner tiers and pricing (PM not confirmed). Marketplace billing integration status (PM to confirm). Do not mention "Acme Corp" in published copy. |
|
||||
|
||||
---
|
||||
|
||||
### Feature 2: Tool Trace
|
||||
|
||||
| | |
|
||||
|--|--|
|
||||
| **Pain it solves** | When an agent breaks in production, teams have no structured record of what it did — only the final output. Reverse-engineering from outputs is slow, error-prone, and impossible to automate. Third-party observability tools (Langfuse, Helicone, Datadog) miss A2A-level agent behavior and require SDK instrumentation. |
|
||||
| **Who cares** | Platform engineers, DevOps leads, SREs, enterprise IT debugging production incidents |
|
||||
| **One-liner** | Built-in execution tracing for every A2A task — no SDK, no sidecar, no sampling. |
|
||||
| **Proof point** | `tool_trace[]` in every `Message.metadata` — array of `{tool, input, output_preview, run_id}` entries. Entries written to `activity_logs.tool_trace` as JSONB. run_id pairs concurrent calls so parallel traces don't merge. Platform-native: ships with the A2A response, no instrumentation required. |
|
||||
| **HN/Reddit framing** | Lead with the developer experience: "Tool Trace ships today in Molecule AI. Every agent turn now includes a structured record of every tool called — inputs, output previews, run_id-paired for parallel calls." Be honest: this is a beta feature. |
|
||||
| **What to soft-pedal** | Technical implementation details (run_id pairing schema, JSONB storage format). Overlap with Langfuse/Helicone — frame as complementary, not competitive. |
|
||||
|
||||
---
|
||||
|
||||
### Feature 3: Platform Instructions
|
||||
|
||||
| | |
|
||||
|--|--|
|
||||
| **Pain it solves** | Agent governance that only filters outputs after the agent has already acted is governance that failed. Enterprise IT and compliance teams need to shape agent behavior *before* the first token is generated — without requiring a code change or deployment. |
|
||||
| **Who cares** | Enterprise IT, Security/Compliance leads, Platform Engineering, CISO office |
|
||||
| **One-liner** | Enforce org-wide agent governance at the system prompt level — before the first turn, not after an incident. |
|
||||
| **Proof point** | Platform Instructions prepends workspace-scoped rules to the system prompt at startup. Two scopes: global (every workspace in the org) and workspace-specific. Rules take effect before the first agent turn — not after. Policy update requires no code deploy, no agent restart, no application change. |
|
||||
| **HN/Reddit framing** | Frame as "the missing governance layer for production agents." Avoid overclaiming compliance certifications. Do not compare directly to OPA/Sentinel — say "complements runtime policy engines" not "replaces them." |
|
||||
| **What to soft-pedal** | Overlap with the existing audit trail panel (Issue #759) — they are complementary (Tool Trace feeds the audit trail). Don't let buyers think they have to choose. Specific policy examples until PM confirms which are GA-ready. |
|
||||
|
||||
---
|
||||
|
||||
### Feature 4: SaaS Federation v2
|
||||
|
||||
| | |
|
||||
|--|--|
|
||||
| **Pain it solves** | Enterprises and marketplaces that need to offer agent orchestration to multiple end-customers (tenants) cannot do so safely with a single-tenant architecture: cross-tenant data isolation, centralized billing, org-level access control, and per-tenant audit trails are all required for enterprise procurement. |
|
||||
| **Who cares** | Enterprise procurement, IT procurement teams, marketplace operators, SaaS resellers, multi-tenant ISVs |
|
||||
| **One-liner** | Multi-tenant agent platform with cross-tenant isolation, centralized billing, and org-level governance — built for enterprises and marketplaces. |
|
||||
| **Proof point** | SaaS Federation v2 tutorial at `docs/tutorials/saas-federation` (PR #1613). Org-scoped keys + control plane boundary. Isolated per-tenant workspaces with centralized admin view. |
|
||||
| **HN/Reddit framing** | ⚠️ **WARNING:** SaaS Federation v2 is listed in Issue #1836 as a Phase 34 feature, but no PMM positioning brief or blog post exists for it yet. Do NOT draft community copy for this feature until PM confirms: (a) what it actually ships, (b) the GA/beta/alpha label, and (c) the primary use case narrative. Current content gap — not ready for external copy. |
|
||||
| **What to soft-pedal** | Until PM confirms details, do not publish any claims about SaaS Federation v2. |
|
||||
|
||||
---
|
||||
|
||||
## Feature Cross-Sell Angles
|
||||
|
||||
**Phase 30 → Phase 34 linkage (for sellers):**
|
||||
> "Phase 30 shipped per-workspace auth tokens (`mol_ws_*`). Phase 34 ships partner-level keys (`mol_pk_*`). Together, Molecule AI is the only platform with workspace-level isolation *and* partner-level scoping — enterprise-ready from day one."
|
||||
|
||||
**Governance stack (Platform Instructions + Tool Trace):**
|
||||
> "Platform Instructions shapes what agents do *before* they run. Tool Trace records what they did *after*. Together: governance before, observability after. Nothing leaves production unaccounted for."
|
||||
|
||||
**Partner platform stack (Partner API Keys + SaaS Federation v2 + Platform Instructions):**
|
||||
> "Provision tenants via API. Isolate them in a multi-tenant control plane. Govern their behavior at the system prompt level. Revoke access in one call. That's a complete partner platform — not a collection of features."
|
||||
@ -1,87 +0,0 @@
|
||||
# Phase 34 — Positioning One-Pager
|
||||
**Feature group:** Partner API Keys, Tool Trace, Platform Instructions, SaaS Federation v2
|
||||
**GA date:** April 30, 2026
|
||||
**Status:** INTERNAL DRAFT — for PMM review and press kit use
|
||||
**Owner:** PMM
|
||||
**Last updated:** 2026-04-23
|
||||
|
||||
---
|
||||
|
||||
## One-Sentence Positioning Statement
|
||||
|
||||
Molecule AI Phase 34 gives enterprise teams the platform-native primitives — programmable access, built-in observability, and pre-execution governance — required to run AI agents in production, without the bolt-on integrations that add latency, maintenance burden, and security gaps.
|
||||
|
||||
---
|
||||
|
||||
## Target Audience
|
||||
|
||||
| | Role | What they care about |
|
||||
|--|------|----------------------|
|
||||
| **Primary** | Platform Engineering / DevOps leads | Shipping reliable agent infrastructure: observability, CI/CD integration, multi-environment support |
|
||||
| **Primary** | Enterprise IT / Security Governance | Controlling agent behavior before it happens: policy enforcement, audit trails, compliance |
|
||||
| **Secondary** | Partner / Marketplace integrations engineers | Embedding Molecule AI as the orchestration layer for their platform or marketplace |
|
||||
| **Secondary** | Developer advocates / DevRel | Demonstrating enterprise-grade capabilities to prospective enterprise buyers |
|
||||
|
||||
---
|
||||
|
||||
## Problem We Solve
|
||||
|
||||
Enterprise teams adopting AI agents face three compounding failures at once:
|
||||
|
||||
1. **Observability gaps** — Agents run and produce outputs, but teams have no structured record of *what the agent actually did*: which tools it called, with what inputs, in what order. Debugging is reverse-engineering from outputs. Cross-platform observability (Langfuse, Datadog) adds a pipeline but misses A2A-level agent behavior.
|
||||
|
||||
2. **Governance gaps** — Agent behavior policies are enforced *after* the agent has already acted — filtering outputs, blocking writes post-hoc. Governance that only works after the fact is governance that failed. Enterprise IT and compliance teams need controls that shape behavior *before* the first token is generated.
|
||||
|
||||
3. **Integration gaps** — Platforms that want to embed agent orchestration programmatically face a choice between building it themselves (months of work) or using browser sessions (brittle, non-programmatic). CI/CD teams need ephemeral test orgs per PR. Neither is solved by existing agent platforms.
|
||||
|
||||
---
|
||||
|
||||
## Our Solution — Phase 34 Angle
|
||||
|
||||
Phase 34 ships four features that address each failure at the platform layer — not as integrations, not as SDKs, not as post-hoc configuration:
|
||||
|
||||
- **Partner API Keys** (`mol_pk_*`) — Scoped, revocable API tokens that let partner platforms, CI/CD pipelines, and marketplace resellers programmatically provision and manage Molecule AI orgs. No browser. No manual handoff.
|
||||
- **Tool Trace** — `tool_trace[]` in every A2A `Message.metadata`. A structured, run_id-paired execution record: tool name, inputs, output previews, timing. No SDK, no sidecar, no sampling.
|
||||
- **Platform Instructions** — Workspace-scoped system prompt rules that take effect at startup. Governance happens before the first turn, not after an incident.
|
||||
- **SaaS Federation v2** — Multi-tenant control plane architecture: isolated orgs, cross-tenant guardrails, centralized billing for enterprise and marketplace deployments.
|
||||
|
||||
**The Phase 34 angle:** These four features work together. A partner platform provisions an org via Partner API Keys, configures Platform Instructions for their tenants, gets full observability via Tool Trace, and operates it all inside a SaaS Federation v2 multi-tenant control plane. This is a coherent enterprise stack — not four unrelated features.
|
||||
|
||||
---
|
||||
|
||||
## Key Differentiators vs. Competitors
|
||||
|
||||
| Differentiator | LangGraph Cloud | CrewAI | Molecule AI Phase 34 |
|
||||
|---------------|----------------|--------|----------------------|
|
||||
| Built-in agent observability (no SDK) | ❌ | ❌ | **✅ Tool Trace** |
|
||||
| Pre-execution governance (system prompt level) | ❌ | ❌ | **✅ Platform Instructions** |
|
||||
| Programmatic partner org provisioning | ❌ (seat licensing only) | ❌ (marketplace listing only) | **✅ Partner API Keys** |
|
||||
| CI/CD-native ephemeral orgs | ❌ | ❌ | **✅ Partner API Keys + CI/CD example** |
|
||||
| Multi-tenant SaaS control plane | ❌ | ❌ | **✅ SaaS Federation v2** |
|
||||
| A2A-native protocol | ✅ (in-progress, Q2-Q3 2026) | ❌ | **✅ live today** |
|
||||
|
||||
**Counter-framing for sellers:**
|
||||
> "LangGraph Cloud and CrewAI are end-user platforms. Molecule AI is infrastructure your platform builds on — with the governance and observability built in, not bolted on."
|
||||
|
||||
---
|
||||
|
||||
## Proof Points
|
||||
|
||||
| Claim | Evidence |
|
||||
|-------|----------|
|
||||
| Molecule AI is the only agent platform with built-in execution tracing | `tool_trace[]` in `Message.metadata` — no SDK, no sidecar. LangGraph and CrewAI require Langfuse/Helicone instrumentation. |
|
||||
| Platform Instructions enforce governance before agents run | Workspace startup path prepends rules to system prompt. Policy takes effect before first token generated. |
|
||||
| Partner API Keys enable programmatic org provisioning | `POST /cp/admin/partner-keys` creates orgs via API. Keys are SHA-256 hashed, org-scoped, rate-limited, revocable via `DELETE`. |
|
||||
| Ephemeral test orgs per PR are fully automated | CI/CD example in partner onboarding guide: `POST` create → run tests → `DELETE` teardown. No manual cleanup, no shared-state contamination. |
|
||||
| SaaS Federation v2 enables multi-tenant isolation | Tutorial at `docs/marketing/launches/pr-1613-saas-federation-v2.md`. Org-scoped keys + control plane boundary. |
|
||||
| Design partner (Acme Corp) validates enterprise readiness | Acme Corp integration (design partner, name pending PM confirmation). Reference use case: partner-provisioned orgs for Acme's customer base. |
|
||||
|
||||
---
|
||||
|
||||
## Internal Use Notes
|
||||
|
||||
- Partner API Keys are **BETA** — do not claim GA in press materials. Use "now available in beta" or "shipping April 30, 2026."
|
||||
- Tool Trace and Platform Instructions shipped via PR #1686 — **BETA**.
|
||||
- SaaS Federation v2 — **BETA** or **EARLY ACCESS**, pending PM label confirmation.
|
||||
- Do not use "Acme Corp" in any externally published copy — placeholder only. Confirm partner name with PM before press release.
|
||||
- Phase 30 linkage: Phase 30 shipped `mol_ws_*` (per-workspace auth). Phase 34 extends to `mol_pk_*` (partner-level keys). Cross-sell: "Phase 30 workspace isolation + Phase 34 partner scoping — the only platform with both."
|
||||
@ -1,106 +0,0 @@
|
||||
# A2A Enterprise Deep-Dive — Social Copy
|
||||
**Source:** `docs/blog/2026-04-22-a2a-v1-agent-platform/index.md` (staged, approved)
|
||||
**Status:** APPROVED (PMM — 72h window, Marketing Lead offline)
|
||||
**Blog slug:** `a2a-enterprise-any-agent-any-infrastructure`
|
||||
**Key angle:** "A2A is solved. A2A governance is not."
|
||||
**Campaign:** A2A Enterprise Deep-Dive | Phase 30 T+1
|
||||
**Owner:** PMM | **Executor:** Social Media Brand
|
||||
**OG image:** `docs/assets/blog/2026-04-22-a2a-enterprise-og.png` (VERIFY — file not found in workspace assets, use `marketing/assets/phase30-fleet-diagram.png` as fallback)
|
||||
|
||||
**Git branch note:** This file is on `staging` branch — not committed to origin/main. For execution on origin/main, copy must be cherry-picked or the branch switched. Confirm executor has staging access.
|
||||
|
||||
---
|
||||
|
||||
## X Post 1 — The Protocol Moment (lead hook)
|
||||
```
|
||||
A2A v1.0 shipped March 12. 23.3k stars. Five official SDKs. 383 implementations.
|
||||
|
||||
That's the moment the agent internet gets a standard.
|
||||
|
||||
The question isn't whether your platform supports it — it's whether it was built for it or added on top.
|
||||
|
||||
Molecule AI: built for it from day one.
|
||||
|
||||
#A2A #MultiAgent #AIAgents
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## X Post 2 — Native vs. Added (governance differentiator)
|
||||
```
|
||||
Most platforms add A2A as a feature layer on top of existing architecture.
|
||||
|
||||
Molecule AI: A2A is the operating system. The org chart is the routing table. Per-workspace auth tokens are enforced on every call — not conventions a misconfigured integration can bypass.
|
||||
|
||||
That's the difference between bolted-on and built-in.
|
||||
|
||||
#A2A #EnterpriseAI #AgentGovernance
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## X Post 3 — Code proof (technical credibility)
|
||||
```
|
||||
You can register an external agent on Molecule AI in under 100 lines.
|
||||
|
||||
One POST to register. A heartbeat loop. That's it.
|
||||
Agents stay where they are — on-prem, AWS, GCP — and join the fleet canvas.
|
||||
|
||||
No VPN. No custom integration. Just A2A.
|
||||
|
||||
#A2A #DevOps #MultiAgent
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## X Post 4 — Enterprise buyer close (audit + governance)
|
||||
```
|
||||
For production AI agent fleets, A2A compatibility isn't enough.
|
||||
|
||||
You need:
|
||||
→ Per-workspace auth tokens enforced at every route
|
||||
→ Audit trail that survives agent migrations
|
||||
→ Org-level revocation, not integration-level policy
|
||||
|
||||
That's protocol-native governance. Not bolted on.
|
||||
|
||||
#EnterpriseAI #AIAgents #AgentGovernance
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## LinkedIn Post — Full narrative (100–200 words)
|
||||
```
|
||||
A2A v1.0 shipped March 12, 2026. 23,300 GitHub stars. Five official SDKs. 383 community implementations.
|
||||
|
||||
The agent internet just got a standard. And every AI platform now has to answer the same question: Is A2A something you were built for, or something you added on top?
|
||||
|
||||
Most platforms add it. One platform was built for it from the ground up.
|
||||
|
||||
Molecule AI's A2A implementation is structural — not a feature. Every authenticated route enforces per-workspace bearer tokens. Every agent, whether it runs in the platform's Docker network or on a different cloud, appears on the same fleet canvas with the same audit trail.
|
||||
|
||||
External agents register in under 100 lines of Python. No VPN. No custom integration. Agents stay where they are and join the fleet.
|
||||
|
||||
This is what protocol-native AI agent governance looks like in production — not on a roadmap.
|
||||
|
||||
→ Read the full A2A v1.0 deep-dive: https://docs.molecule.ai/blog/a2a-v1-agent-platform?utm_source=linkedin&utm_medium=social&utm_campaign=a2a-enterprise-deep-dive
|
||||
→ Register an external agent: https://docs.molecule.ai/docs/guides/external-agent-registration?utm_source=linkedin&utm_medium=social&utm_campaign=a2a-enterprise-deep-dive
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Self-Review Checklist
|
||||
- [x] No benchmarks or performance claims
|
||||
- [x] No person names
|
||||
- [x] No timeline claims or dates (other than March 12 A2A ship — fact, not claim)
|
||||
- [x] No competitor names in copy (cloud provider absorption framed as protocol validation, not attack)
|
||||
- [x] All claims traceable to blog post source material
|
||||
- [x] No GA date mentions
|
||||
- [x] CTA links are canonical Molecule AI domain
|
||||
|
||||
---
|
||||
|
||||
## Execution Notes
|
||||
- X credentials gap still open (Social Media Brand blocked). Manual posting workflow applies if credentials not restored.
|
||||
- Hashtags: `#A2A #MultiAgent #AIAgents #EnterpriseAI #AgentGovernance #DevOps`
|
||||
- Canonical URL: `docs.molecule.ai/blog/a2a-v1-agent-platform`
|
||||
@ -1,97 +0,0 @@
|
||||
# Org-Scoped API Keys — Social Copy
|
||||
**Campaign:** Org-Scoped API Keys | **Blog:** `docs/blog/2026-04-25-org-scoped-api-keys/index.md`
|
||||
**Canonical URL:** `moleculesai.app/blog/org-scoped-api-keys`
|
||||
**Status:** APPROVED — URL and asset fixes applied by PMM (2026-04-25 Day 5 pre-publish)
|
||||
**Owner:** PMM → Social Media Brand | **Launch:** Coordinated with PR #1342 merge
|
||||
|
||||
---
|
||||
|
||||
## X (140–280 chars)
|
||||
|
||||
### Version A — Security framing
|
||||
```
|
||||
Every integration. One credential. Zero shared secrets.
|
||||
|
||||
Org-scoped API keys: named, revocable, with full audit trail. Rotate without downtime. Attribute every call back to the key that made it.
|
||||
|
||||
Your security team called — this is the answer.
|
||||
```
|
||||
|
||||
### Version B — Production use cases
|
||||
```
|
||||
Three things that break at scale with a shared ADMIN_TOKEN:
|
||||
|
||||
1. You can't rotate without downtime
|
||||
2. You can't tell which agent called your API
|
||||
3. Compromised token = everything compromised
|
||||
|
||||
Org-scoped keys fix all three.
|
||||
```
|
||||
|
||||
### Version C — Developer angle
|
||||
```
|
||||
How to give a CI pipeline its own API key:
|
||||
|
||||
1. POST /org/tokens with a name
|
||||
2. Store the token (shown once)
|
||||
3. Done.
|
||||
|
||||
That's it. Named. Revocable. Audited.
|
||||
```
|
||||
|
||||
### Version D — Enterprise angle
|
||||
```
|
||||
Replace your shared ADMIN_TOKEN.
|
||||
|
||||
Org-scoped API keys: one per integration, immediate revocation, full audit trail. Rotate without coordinating downtime.
|
||||
|
||||
Tiers: Lazy bootstrap → WorkOS session → Org token → ADMIN_TOKEN (break-glass).
|
||||
|
||||
Security teams love this architecture.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## LinkedIn (100–200 words)
|
||||
|
||||
```
|
||||
When your engineering team scales from two agents to twenty, a single ADMIN_TOKEN hardcoded in your environment is a single point of failure.
|
||||
|
||||
Org-scoped API keys give every integration its own credential: named, revocable, with full audit trail. Rotate without coordinating downtime across ten agents. Identify exactly which integration called your API. Revoke one key without touching the others.
|
||||
|
||||
The security model: tier-based authentication priority (WorkOS session first, org tokens primary for service integrations, ADMIN_TOKEN as break-glass only). When a request arrives, the platform checks in priority order — and every org API key call is attributed in the audit log with its key prefix and creation provenance.
|
||||
|
||||
Every call traced. Every key revocable. Every rotation zero-downtime.
|
||||
|
||||
Navigate to Settings → Org API Keys in the Canvas, or use the REST API directly.
|
||||
|
||||
→ moleculesai.app/blog/org-scoped-api-keys
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Image suggestions
|
||||
|
||||
| Post | Image | Source |
|
||||
|---|---|---|
|
||||
| X Version A | `before-after-credential-model.png` — shared key vs org-scoped (red/green table) | `campaigns/org-api-keys-launch/` |
|
||||
| X Version B | 3-item checklist: Rotate without downtime / Attribute every call / Revoke one key | Custom graphic |
|
||||
| X Version C | `audit-log-terminal.png` — terminal showing token creation and audit attribution | `campaigns/org-api-keys-launch/` |
|
||||
| X Version D | Auth tier hierarchy: Lazy bootstrap → WorkOS → Org token → ADMIN_TOKEN (break-glass) | Custom graphic |
|
||||
| LinkedIn | `canvas-org-api-keys-ui.png` — Canvas Settings → Org API Keys tab | `campaigns/org-api-keys-launch/` |
|
||||
|
||||
**Do NOT use:** `phase30-fleet-diagram.png` — wrong visual for this campaign.
|
||||
|
||||
**CTA URL:** `moleculesai.app/blog/org-scoped-api-keys` *(corrected from `moleculesai.app/blog/deploy-anywhere`)*
|
||||
|
||||
---
|
||||
|
||||
## Hashtags
|
||||
|
||||
`#MoleculeAI #APIKeys #EnterpriseSecurity #A2A #DevOps #MultiAgent`
|
||||
|
||||
---
|
||||
|
||||
## UTM
|
||||
|
||||
`?utm_source=linkedin&utm_medium=social&utm_campaign=org-api-keys-launch`
|
||||
@ -1,121 +0,0 @@
|
||||
# AGENTS.md Auto-Generation — Interactive Demo Script
|
||||
**Issue:** #1172 | **Source:** PR #763 | **Acceptance:** Working demo + 1-min screencast
|
||||
|
||||
---
|
||||
|
||||
## What This Demo Shows
|
||||
|
||||
1. A workspace with a `role` and `description` in `config.yaml`
|
||||
2. `generate_agents_md()` called at startup
|
||||
3. The resulting `AGENTS.md` that peer agents can read
|
||||
4. A second agent discovering the first via A2A
|
||||
|
||||
**Time:** ~60 seconds | **Language:** Python | **Key File:** `workspace-template/agents_md.py`
|
||||
|
||||
---
|
||||
|
||||
## Demo Script
|
||||
|
||||
### Step 1: Show the Source
|
||||
|
||||
```python
|
||||
from agents_md import generate_agents_md
from pathlib import Path
|
||||
|
||||
# Generate AGENTS.md from the workspace config
|
||||
generate_agents_md(config_dir="/configs", output_path="/workspace/AGENTS.md")
|
||||
|
||||
# Read what was generated
|
||||
print(Path("/workspace/AGENTS.md").read_text())
|
||||
```
|
||||
|
||||
### Step 2: Show the Generated Output
|
||||
|
||||
Running the above on a workspace with:
|
||||
|
||||
```yaml
|
||||
# config.yaml
|
||||
name: Code Reviewer
|
||||
role: Senior Code Reviewer
|
||||
description: Reviews pull requests, flags security issues, suggests test coverage improvements.
|
||||
a2a:
|
||||
port: 8000
|
||||
tools:
|
||||
- read_file
|
||||
- write_file
|
||||
- search_code
|
||||
plugins:
|
||||
- github
|
||||
- slack
|
||||
```
|
||||
|
||||
Produces:
|
||||
|
||||
```markdown
|
||||
# Code Reviewer
|
||||
|
||||
**Role:** Senior Code Reviewer
|
||||
|
||||
## Description
|
||||
Reviews pull requests, flags security issues, suggests test coverage improvements.
|
||||
|
||||
## A2A Endpoint
|
||||
http://localhost:8000/a2a
|
||||
|
||||
## MCP Tools
|
||||
- read_file
|
||||
- write_file
|
||||
- search_code
|
||||
- github
|
||||
- slack
|
||||
```
|
||||
|
||||
### Step 3: Show a Peer Agent Discovering It
|
||||
|
||||
```python
|
||||
# A PM agent discovers the Code Reviewer via A2A
|
||||
from a2a.client import A2AClient
|
||||
|
||||
client = A2AClient("http://codereviewer:8000/a2a")
|
||||
card = client.discover() # Reads their AGENTS.md
|
||||
|
||||
print(f"Discovered agent: {card.name} ({card.role})")
|
||||
print(f"Available tools: {card.tools}")
|
||||
```
|
||||
|
||||
Output:
|
||||
```
|
||||
Discovered agent: Code Reviewer (Senior Code Reviewer)
|
||||
Available tools: ['read_file', 'write_file', 'search_code', 'github', 'slack']
|
||||
```
|
||||
|
||||
**Narrative:** "No configuration files to maintain. No registry to update. Peer agents discover each other the same way humans discover each other — by reading each other's profiles."
|
||||
|
||||
---
|
||||
|
||||
## Screencast Outline (~60s)
|
||||
|
||||
| Time | Action |
|
||||
|------|--------|
|
||||
| 0–15s | Open `config.yaml` — show `role` field |
|
||||
| 15–30s | Show `generate_agents_md()` call in `main.py` — "called at startup" |
|
||||
| 30–45s | Run it — show the generated `AGENTS.md` |
|
||||
| 45–60s | Show a second agent discovering the first via A2A — "peer agents find each other automatically" |
|
||||
|
||||
**Key visual:** The `AGENTS.md` file appearing in the Canvas sidebar — visible, always current, no manual sync.
|
||||
|
||||
---
|
||||
|
||||
## The AGENTS.md Standard
|
||||
|
||||
This implements the [AAIF / Linux Foundation AGENTS.md standard](https://github.com/AI-Agents/AGENTS.md). Key properties:
|
||||
|
||||
- **Self-describing** — agents publish their own identity, role, and tools
|
||||
- **Startup-generated** — always current, no drift from config
|
||||
- **A2A-native** — discovery happens over the A2A protocol, no external registry
|
||||
|
||||
---
|
||||
|
||||
## Files
|
||||
|
||||
- Demo script: `docs/marketing/devrel/demos/agents-md-autogen-demo.md`
|
||||
- Source file: `workspace-template/agents_md.py` (PR #763)
|
||||
@ -1,119 +0,0 @@
|
||||
# Cloudflare Artifacts — Interactive Demo Script
|
||||
**Issue:** #1173 | **Source:** PR #641 | **Acceptance:** Working demo + repo link + 1-min screencast
|
||||
|
||||
---
|
||||
|
||||
## What This Demo Shows
|
||||
|
||||
1. Provision a Cloudflare Artifacts Git repo for a workspace
|
||||
2. Clone it, write a file, push a commit
|
||||
3. Fork a branch, make a change, merge back
|
||||
|
||||
**Time:** ~60 seconds | **Tools:** curl, git, Molecule AI Canvas | **Setup:** `CLOUDFLARE_API_TOKEN`, `CLOUDFLARE_ARTIFACTS_NAMESPACE`
|
||||
|
||||
---
|
||||
|
||||
## Demo Script
|
||||
|
||||
### Step 1: Create a Repo
|
||||
|
||||
```bash
|
||||
curl -s -X POST https://your-deployment.moleculesai.app/artifacts/repos \
|
||||
-H "Authorization: Bearer $ORG_API_KEY" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"name": "demo-workspace", "description": "Agent demo workspace"}' | jq .
|
||||
```
|
||||
|
||||
Expected output:
|
||||
```json
|
||||
{
|
||||
"id": "repo_abc123",
|
||||
"name": "demo-workspace",
|
||||
"remote_url": "https://x:<TOKEN>@hash.artifacts.cloudflare.net/git/repo-abc123.git",
|
||||
"created_at": "2026-04-21T00:00:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
**Narrative:** "Every Molecule AI workspace can now have its own versioned Git repo on Cloudflare's edge."
|
||||
|
||||
---
|
||||
|
||||
### Step 2: Clone and Push a Snapshot
|
||||
|
||||
```bash
|
||||
# Clone the repo (TOKEN is embedded in the remote URL from Step 1)
|
||||
git clone https://x:<TOKEN>@hash.artifacts.cloudflare.net/git/repo-abc123.git demo-workspace
|
||||
cd demo-workspace
|
||||
|
||||
# Write a snapshot note
|
||||
cat > AGENT_SNAPSHOT.md << 'EOF'
|
||||
# Agent Run — 2026-04-21
|
||||
|
||||
Task: Refactored the auth module. 3 tests added, 1 bug fixed.
|
||||
Status: Complete. Ready for reviewer agent.
|
||||
EOF
|
||||
|
||||
git add AGENT_SNAPSHOT.md
|
||||
git commit -m "feat: agent run snapshot — auth module refactor"
|
||||
git push origin main
|
||||
```
|
||||
|
||||
**Narrative:** "The agent writes its work as a Git commit. Every run is versioned."
|
||||
|
||||
---
|
||||
|
||||
### Step 3: Fork Before an Experiment
|
||||
|
||||
```bash
|
||||
# Fork the workspace — creates an isolated branch
|
||||
curl -s -X POST https://your-deployment.moleculesai.app/artifacts/repos/demo-workspace/fork \
|
||||
  -H "Authorization: Bearer $ORG_API_KEY" \
  -H "Content-Type: application/json" \
|
||||
-d '{"name": "demo-workspace/experiment"}' | jq '.repo.remote_url'
|
||||
```
|
||||
|
||||
```bash
|
||||
git clone https://x:<TOKEN>@hash.artifacts.cloudflare.net/git/repo-abc123-fork.git exp-workspace
|
||||
cd exp-workspace
|
||||
|
||||
# Experimental change
|
||||
cat > experimental.md << 'EOF'
|
||||
# Experimental: New auth strategy
|
||||
Testing a token-less approach using WorkOS session tokens.
|
||||
EOF
|
||||
|
||||
git add experimental.md
|
||||
git commit -m "feat(experiment): token-less auth prototype"
|
||||
git push origin main
|
||||
```
|
||||
|
||||
**Narrative:** "Before a risky change, the agent forks — like a Git branch. If it fails, main stays clean."
|
||||
|
||||
---
|
||||
|
||||
### Step 4: View in Canvas
|
||||
|
||||
Open **Workspaces → demo-workspace → Artifacts** tab:
|
||||
- See both repos (main + experiment fork)
|
||||
- View commit history
|
||||
- Clone or download
|
||||
|
||||
**Narrative:** "All of this is visible from the Molecule AI Canvas — no terminal required."
|
||||
|
||||
---
|
||||
|
||||
## Screencast Outline (~60s)
|
||||
|
||||
| Time | Action |
|
||||
|------|--------|
|
||||
| 0–10s | Open Canvas → Workspaces → Artifacts tab |
|
||||
| 10–25s | Run Step 1 curl → show repo created in UI |
|
||||
| 25–45s | Show git clone + commit + push in terminal |
|
||||
| 45–55s | Run fork step, show experiment branch in Canvas |
|
||||
| 55–60s | Zoom commit history — "every agent run is a Git commit" |
|
||||
|
||||
---
|
||||
|
||||
## Files
|
||||
|
||||
- Demo script: `docs/marketing/devrel/demos/cloudflare-artifacts-demo.sh`
|
||||
- Canvas screenshot: `docs/marketing/devrel/demos/cloudflare-artifacts-canvas.png`
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -1,81 +0,0 @@
|
||||
# Discord Adapter Day 2 — Community Copy
|
||||
|
||||
> Posted 2026-04-21. Discord adapter launched Day 1; Day 2 covers Reddit, Hacker News.
|
||||
> Blog URL: https://moleculesai.app/blog/discord-adapter-launch
|
||||
> PR: https://github.com/Molecule-AI/molecule-core/pull/656
|
||||
|
||||
---
|
||||
|
||||
## Reddit r/LocalLLaMA
|
||||
|
||||
**Title:** Molecule AI now connects to Discord via a webhook — no bot account, no Gateway, no OAuth
|
||||
|
||||
```
|
||||
Molecule AI workspaces can now send messages to Discord and receive slash commands using only a webhook URL. No Discord Developer Portal, no intents, no bot token — just an inbound webhook and your agent is in the channel.
|
||||
|
||||
Built it as a proof-of-concept to keep our own team workflow on Discord without the overhead of a full bot app. Figured other people might want the same thing.
|
||||
|
||||
The adapter uses Discord's built-in webhook delivery for outbound + slash command reception. No polling. No Gateway connection. Works behind NAT — the agent initiates all outbound connections to the platform, which proxies to Discord.
|
||||
|
||||
Here's the architecture gist:
|
||||
- Outbound: POST to Discord webhook URL (standard, no auth beyond the URL token)
|
||||
- Inbound: Discord delivers slash command payloads to a platform endpoint; platform fans out to the relevant workspace via A2A
|
||||
- No Discord bot app required. No Developer Portal setup.
|
||||
|
||||
If your team lives in Discord and you want an AI agent that can post summaries, respond to /ask commands, and route alerts — it's now a webhook URL and a config line.
|
||||
|
||||
Demo repo and docs: https://github.com/Molecule-AI/molecule-core/tree/main/docs/blog/2026-04-21-discord-adapter
|
||||
|
||||
Happy to answer questions about the adapter design.
|
||||
```
|
||||
|
||||
**Tags:** `discord`, `mcp`, `molecule-ai`, `webhook`, `ai-agents`
|
||||
|
||||
---
|
||||
|
||||
## Reddit r/MachineLearning
|
||||
|
||||
**Title:** Molecule AI Discord adapter — AI agents in Discord via webhook, no bot account needed
|
||||
|
||||
```
|
||||
Molecule AI Discord adapter — webhook-only, no Gateway connection required
|
||||
|
||||
Built a Discord integration for Molecule AI workspaces that requires zero bot app setup. It's just a webhook URL and an agent config.
|
||||
|
||||
The problem: Discord bot integrations typically require a Developer Portal app, OAuth flow, Gateway connection management, intent configuration, and rate limit handling. That's a meaningful chunk of work before your agent can say hello.
|
||||
|
||||
The approach: use Discord's native webhook delivery for inbound slash commands (no Gateway) and standard webhook POST for outbound messages. The platform acts as a proxy — Discord delivers to the platform endpoint, the platform routes to the relevant workspace via A2A. Works behind NAT since the agent initiates outbound connections.
|
||||
|
||||
No bot token. No intents. No Gateway.
|
||||
|
||||
Code: https://github.com/Molecule-AI/molecule-core/tree/main/docs/blog/2026-04-21-discord-adapter
|
||||
Launch post: https://moleculesai.app/blog/discord-adapter-launch
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Hacker News
|
||||
|
||||
**Title:** Molecule AI — Discord adapter via webhook (no bot account, no Gateway)
|
||||
|
||||
**Body:**
|
||||
|
||||
Built a Discord integration for Molecule AI workspaces that works with just a webhook URL — no Discord Developer Portal setup, no bot token, no Gateway connection.
|
||||
|
||||
**Why**
|
||||
|
||||
Our own team lives in Discord. We wanted a lightweight way to have an AI agent respond to slash commands and post updates without the overhead of a full bot app. Realized Discord's native webhook primitives cover both inbound (slash command delivery) and outbound (channel messages) if you proxy through a platform endpoint.
|
||||
|
||||
**How it works**
|
||||
|
||||
- Outbound: agent POSTs to a Discord webhook URL (standard, URL contains the auth token)
|
||||
- Inbound: Discord delivers slash command payloads to a platform endpoint; platform fans out to the relevant workspace via A2A
|
||||
- No bot account required. No Gateway. Works behind NAT — the agent only initiates outbound connections.
|
||||
|
||||
The adapter lives in the MCP server (`mcp-server/src/tools/channels/discord.go`) alongside Telegram and other channel adapters. Each workspace configures its own Discord channel with a webhook URL.
|
||||
|
||||
**Links**
|
||||
|
||||
- Docs: https://moleculesai.app/blog/discord-adapter-launch
|
||||
- Code + examples: https://github.com/Molecule-AI/molecule-core/tree/main/docs/blog/2026-04-21-discord-adapter
|
||||
- PR: https://github.com/Molecule-AI/molecule-core/pull/656
|
||||
@ -1,59 +0,0 @@
|
||||
# Launch Brief: Waitlist Page with Contact Form
|
||||
**PR:** [#1080](https://github.com/Molecule-AI/molecule-core/pull/1080) — `feat(canvas): /waitlist page with contact form`
|
||||
**Merged:** 2026-04-20T16:47:35Z
|
||||
**Owner:** PMM
|
||||
**Status:** DRAFT
|
||||
|
||||
---
|
||||
|
||||
## Problem
|
||||
|
||||
Users whose email isn't on the beta allowlist hit a dead end after WorkOS auth redirect — no capture mechanism, no explanation, no next step. The loop wasn't closed on the unauthenticated user experience.
|
||||
|
||||
---
|
||||
|
||||
## Solution
|
||||
|
||||
A dedicated `/waitlist` page that captures waitlist interest with email + optional name + use-case. Soft dedup prevents spam. Privacy guard ensures client never auto-pre-fills email from URL params (regression test included).
|
||||
|
||||
---
|
||||
|
||||
## 3 Core Claims
|
||||
|
||||
1. **No more dead ends.** Email not on allowlist → friendly waitlist page with context, not a broken auth redirect.
|
||||
2. **Capture + qualify.** Name + use-case fields let the team segment and prioritize inbound interest.
|
||||
3. **Privacy by design.** Client-side privacy test ensures email is never auto-pre-filled from URL params — compliance-adjacent and trust-building.
|
||||
|
||||
---
|
||||
|
||||
## Target Developer
|
||||
|
||||
- Developers evaluating Molecule AI who hit the beta wall
|
||||
- Indie devs and teams wanting early access
|
||||
- PM/sales for waitlist segmentation
|
||||
|
||||
---
|
||||
|
||||
## CTA
|
||||
|
||||
"Join the waitlist → [form]" — Captures warm inbound interest for future GA outreach.
|
||||
|
||||
---
|
||||
|
||||
## Positioning Alignment
|
||||
|
||||
- Low-key feature, not a core positioning angle
|
||||
- Secondary signal: demonstrates product care (privacy regression test = security-minded team)
|
||||
- Useful as a "we're growing responsibly" proof point in growth metrics
|
||||
|
||||
---
|
||||
|
||||
## Open Questions
|
||||
|
||||
- Is this waitlist for self-hosted users, SaaS users, or both?
|
||||
- Is there a CRM integration for the captured leads?
|
||||
- Does this need a blog post or is it an infra/UX maintenance item?
|
||||
|
||||
---
|
||||
|
||||
*Not high priority for launch brief promotion. Monitor for CRM workflow integration.*
|
||||
@ -1,64 +0,0 @@
|
||||
# Launch Brief: Org-Scoped API Keys
|
||||
**PR:** [#1105](https://github.com/Molecule-AI/molecule-core/pull/1105) — `feat(auth): org-scoped API keys`
|
||||
**Merged:** 2026-04-20
|
||||
**Owner:** PMM | **Status:** DRAFT — routing to Content Marketer
|
||||
|
||||
---
|
||||
|
||||
## Problem
|
||||
|
||||
Everyday development and integrations required full-admin tokens (`ADMIN_TOKEN`). There was no way to issue a token scoped to a specific org — you either got full access or nothing. For platform teams sharing tokens across tools, this was a silent security risk and a governance gap enterprise buyers flag in security reviews.
|
||||
|
||||
---
|
||||
|
||||
## Solution
|
||||
|
||||
User-minted full-admin tokens replace `ADMIN_TOKEN` for everyday use, with org-level scoping and a canvas UI tab for token management. Admins can now issue, rotate, and revoke tokens with the minimum required scope — org only, no global access.
|
||||
|
||||
---
|
||||
|
||||
## 3 Core Claims
|
||||
|
||||
1. **Scoped by default.** Org-level bearer tokens replace shared admin keys. Workspace A's token cannot hit Workspace B — enforced at the protocol level (Phase 30.1 auth model).
|
||||
2. **Self-service token management.** Canvas UI tab lets admins issue, rotate, and revoke tokens without touching infra config.
|
||||
3. **Enterprise procurement-ready.** Org scoping closes the gap that security reviewers flag in eval questionnaires — no more "one global key for everything."
|
||||
|
||||
---
|
||||
|
||||
## Target Developer
|
||||
|
||||
- **Indie devs / small teams** who want to rotate tokens without redeploying
|
||||
- **Platform teams** integrating Molecule AI into multi-tenant tooling
|
||||
- **Enterprise security reviewers** who require scoped auth before purchase
|
||||
|
||||
---
|
||||
|
||||
## CTA
|
||||
|
||||
"Replace your shared admin key. Issue org-scoped tokens from the canvas." → Docs link: TBD (confirm routing)
|
||||
|
||||
---
|
||||
|
||||
## Coverage Decision (from Content Marketer, 2026-04-21)
|
||||
|
||||
**No standalone blog post needed.** Folds into Phase 30 secure-by-design narrative. Social copy at `campaigns/org-api-keys-launch/social-copy.md` is the right level of coverage.
|
||||
|
||||
---
|
||||
|
||||
## Positioning Alignment
|
||||
|
||||
- Strengthens Phase 30.1 auth narrative (`X-Workspace-ID` + per-workspace tokens)
|
||||
- Directly addresses the "governance" concern surfaced in enterprise positioning
|
||||
- No competitor has a clear org-scoped token story — potential differentiation angle
|
||||
|
||||
---
|
||||
|
||||
## Open Questions
|
||||
|
||||
- [x] Does this need a dedicated blog post? → No (Content Marketer confirmed)
|
||||
- [ ] Does the canvas UI tab have a public GA date?
|
||||
- [ ] CTA doc link — confirm docs routing before publish
|
||||
|
||||
---
|
||||
|
||||
*PMM — route social copy to Social Media Brand once canvas UI tab is GA.*
|
||||
@ -1,92 +0,0 @@
|
||||
# Positioning Brief: EC2 Instance ID Persistence
|
||||
**PR:** [#1531](https://github.com/Molecule-AI/molecule-core/pull/1531) — `feat(workspace): persist CP-returned EC2 instance_id on provision`
|
||||
**Merged:** 2026-04-22T01:40Z (~21h ago)
|
||||
**Owner:** PMM | **Status:** DRAFT — pending Marketing Lead review
|
||||
|
||||
---
|
||||
|
||||
## Situation
|
||||
|
||||
Control Plane workspace provisioning (SaaS / Phase 30 infrastructure) runs on EC2. The CP returns an `instance_id` when a workspace is provisioned, but previously this was not stored — the platform couldn't distinguish a CP-provisioned workspace from a Docker workspace once running.
|
||||
|
||||
PR #1531 persists the `instance_id` returned by the CP into the workspaces table, enabling downstream features that require knowing which EC2 instance backs a workspace.
|
||||
|
||||
---
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Downstream features — notably browser-based terminal (EC2 Instance Connect SSH, PR #1533) and audit attribution — require a reliable `instance_id` field on the workspace record. Without it:
|
||||
- Terminal tab can't determine which EC2 instance to connect to
|
||||
- Audit log can't cross-reference workspace events with actual EC2 activity in CloudTrail
|
||||
- Cost attribution by instance can't work reliably
|
||||
|
||||
The CP already returns `instance_id`; the platform just wasn't storing it.
|
||||
|
||||
---
|
||||
|
||||
## Core Claims
|
||||
|
||||
### Claim 1: Platform now knows which EC2 instance backs each workspace
|
||||
|
||||
The `instance_id` is stored at provision time and available on every subsequent workspace API response. This is a prerequisite for several Phase 30 features — not visible to end users directly, but enables the features that are.
|
||||
|
||||
### Claim 2: Browser-based terminal is now possible for all CP-provisioned workspaces
|
||||
|
||||
EICE (PR #1533) uses `instance_id` to initiate the SSH session. Without #1531, EICE can't know which instance to target. Together, #1531 + #1533 = SaaS users get a terminal tab with no SSH keys.
|
||||
|
||||
### Claim 3: Audit trail is now attributable to specific EC2 instances
|
||||
|
||||
Workspace-level CloudTrail events can now be correlated to the actual EC2 instance via `instance_id`. Compliance teams get more complete audit data.
|
||||
|
||||
---
|
||||
|
||||
## Target Audience
|
||||
|
||||
**Primary:** DevOps and platform engineers managing SaaS-provisioned workspaces. The `instance_id` is invisible to them unless they look at the API — but the features it enables (terminal, audit) are visible.
|
||||
|
||||
**Secondary:** Enterprise security/compliance reviewers evaluating Molecule AI SaaS. `instance_id` persistence + CloudTrail attribution is a governance signal.
|
||||
|
||||
---
|
||||
|
||||
## Positioning Alignment
|
||||
|
||||
- **Phase 30 remote workspaces**: `instance_id` is prerequisite infrastructure for the SaaS-side remote workspace UX (terminal + audit)
|
||||
- **Per-workspace auth tokens**: Platform-level resource identification supports token-scoped access decisions
|
||||
- **Immutable audit trail**: `instance_id` cross-reference makes CloudTrail events attributable to specific workspaces
|
||||
|
||||
This is a **prerequisite PR** — it ships the data layer for features in PR #1533 and future CP-provisioned workspace capabilities. Not a standalone launch.
|
||||
|
||||
---
|
||||
|
||||
## Channel Coverage
|
||||
|
||||
| Channel | Asset | Owner | Notes |
|
||||
|---------|-------|-------|-------|
|
||||
| Release notes | Mention in Phase 30 release notes | DevRel | Brief entry — "EC2 instance_id now stored on provision" |
|
||||
| Phase 30 blog | Call out in remote workspaces blog | Content Marketer | One sentence — "CP-provisioned workspaces now store their EC2 instance ID" |
|
||||
| No standalone blog or social | Not warranted — prerequisite PR | — | |
|
||||
|
||||
**This is not a standalone campaign.** The value is in enabling other features.
|
||||
|
||||
---
|
||||
|
||||
## Relationship to PR #1533 (EC2 Instance Connect SSH)
|
||||
|
||||
PR #1531 + #1533 together deliver: SaaS workspace gets a browser-based terminal tab, no SSH keys required.
|
||||
|
||||
- **PR #1531**: Store the `instance_id` (data layer) ✅ **this brief**
|
||||
- **PR #1533**: Connect via EICE using `instance_id` (UX layer) — brief exists at `pr-1533-ec2-instance-connect-ssh.md`
|
||||
|
||||
Route both to DevRel together. Content Marketer uses #1531 as one sentence in the EC2 Instance Connect SSH blog post.
|
||||
|
||||
---
|
||||
|
||||
## Sign-off
|
||||
|
||||
- [x] PMM positioning: approved
|
||||
- [ ] Marketing Lead: pending
|
||||
- [ ] DevRel: note in release notes + coordinate with #1533
|
||||
|
||||
---
|
||||
|
||||
*PMM — this PR is a prerequisite. Coordinate release note entry with #1533. Close when routed.*
|
||||
@ -1,152 +0,0 @@
|
||||
# Positioning Brief: EC2 Instance Connect SSH
|
||||
**PR:** [#1533](https://github.com/Molecule-AI/molecule-core/pull/1533) — `feat(terminal): remote path via aws ec2-instance-connect + pty`
|
||||
**Merged:** 2026-04-22
|
||||
**Owner:** PMM | **Status:** APPROVED — routing to team
|
||||
|
||||
---
|
||||
|
||||
## Situation
|
||||
|
||||
When workspace provisioning moved from local Docker to the SaaS control plane (Fly Machines / EC2), a gap opened: Docker workspaces had a canvas terminal tab. SaaS-provisioned EC2 workspaces didn't — there was no path to exec into a cloud VM from the browser without a public IP, pre-configured SSH keys, or a bastion host.
|
||||
|
||||
PR #1533 closes that gap using **EC2 Instance Connect Endpoint (EICE)** — a purpose-built AWS service for IAM-authenticated, key-free SSH access to instances, including those in private subnets.
|
||||
|
||||
---
|
||||
|
||||
## Problem Statement
|
||||
|
||||
Getting a terminal into a SaaS-provisioned EC2 workspace requires infrastructure that most users don't have set up. The options available before this PR:
|
||||
|
||||
| Option | What's needed | Works for agents? |
|
||||
|--------|---------------|---------------------|
|
||||
| Direct SSH | Public IP + keypair + key distribution | No — no public IP on private-subnet EC2s |
|
||||
| Bastion host | Separate EC2 + SSH config + key for bastion | No — extra infra, adds attack surface |
|
||||
| SSM Session Manager | SSM agent installed + IAM profile + session document | Partially — requires pre-config per instance |
|
||||
| EC2 Instance Connect CLI | `aws ec2-instance-connect ssh` — but must be run from a machine with the right IAM | Designed for humans, not agent runtimes |
|
||||
|
||||
For an agent runtime that spins up workspaces dynamically, none of these are acceptable. EC2 Instance Connect via EICE is the right fit: it requires only IAM permissions and a VPC Endpoint (already available in the SaaS VPC), and the session is initiated server-side by the platform — not by the agent's laptop.
|
||||
|
||||
---
|
||||
|
||||
## Solution
|
||||
|
||||
CP-provisioned workspaces (those with an `instance_id` in the workspaces table) get a terminal tab in the canvas automatically. The platform handles the EICE handshake and proxies the PTY over the WebSocket — the user sees a fully interactive terminal with no configuration required.
|
||||
|
||||
```
|
||||
User opens terminal tab in canvas
|
||||
→ platform checks workspace.instance_id
|
||||
→ instance_id found → spawn aws ec2-instance-connect ssh --connection-type eice
|
||||
→ PTY bridged to canvas WebSocket
|
||||
→ user gets interactive shell in < 3 seconds
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Core Claims
|
||||
|
||||
### Claim 1: No SSH keys, no bastion, no public IP
|
||||
|
||||
EC2 Instance Connect pushes a temporary RSA key to the instance metadata via the AWS API, valid for 60 seconds. The session uses that key — no pre-shared key on disk, no key rotation to manage, no key distribution to instances. The platform initiates the connection; users never touch an SSH key.
|
||||
|
||||
### Claim 2: Private subnet instances work out of the box
|
||||
|
||||
EICE (EC2 Instance Connect Endpoint) routes the connection through AWS's internal network — no internet egress, no public IP, no ingress security group rules. The only requirement is a VPC Endpoint for EC2 Instance Connect in the same VPC as the target instance. The SaaS VPC already has this.
|
||||
|
||||
### Claim 3: Zero per-user configuration
|
||||
|
||||
The terminal tab appears for every CP-provisioned workspace automatically. No IAM role setup by the user, no SSM configuration, no bastion. The platform's IAM credentials (the same ones used to provision the instance) are used for EICE — the user doesn't need to know anything about AWS IAM policies to get a shell.
|
||||
|
||||
---
|
||||
|
||||
## Target Audience
|
||||
|
||||
**Primary:** DevOps and platform engineers managing SaaS-provisioned workspaces on EC2. They want browser-based terminal access without SSH key overhead. They likely already have IAM roles set up for their AWS environment and will recognise EICE as the right primitive.
|
||||
|
||||
**Secondary:** Enterprise security reviewers evaluating Molecule AI's SaaS offering. The ability to connect to cloud VMs via IAM — not shared SSH keys — is a meaningful signal. It aligns with the enterprise governance narrative and per-workspace auth token story.
|
||||
|
||||
**Not the audience:** Self-hosted users (Docker workspaces already have terminal via `docker exec`). The value proposition is SaaS/Control Plane-specific.
|
||||
|
||||
---
|
||||
|
||||
## Competitive Angle
|
||||
|
||||
EC2 Instance Connect integration for browser-based terminal access is not documented for any competitor:
|
||||
|
||||
- **LangGraph**: No terminal integration. Users who want shell access to provisioned resources must SSH manually or use SSM Session Manager via the AWS CLI.
|
||||
- **CrewAI**: No cloud VM terminal story. Enterprise tier has SaaS management UI, but no browser-based shell access.
|
||||
- **AutoGen (Microsoft)**: No EC2 integration documented. Relies on user-managed infrastructure.
|
||||
- **Custom/self-rolled agent platforms**: Must implement EICE or SSM themselves. Molecule AI ships it as a product feature.
|
||||
|
||||
This is an uncontested claim for the AWS-aligned segment. It belongs in press briefings and analyst conversations as a concrete example of the SaaS control plane doing work users would otherwise have to do themselves.
|
||||
|
||||
---
|
||||
|
||||
## Messaging Tier
|
||||
|
||||
**Feature tier: Enhancement** (not a standalone product launch)
|
||||
|
||||
EC2 Instance Connect SSH is a meaningful UX improvement to the SaaS workspace experience. It belongs in:
|
||||
- Phase 30 remote workspaces narrative as "SaaS terminal access"
|
||||
- SaaS onboarding copy ("your EC2 workspace has a terminal tab — no SSH keys needed")
|
||||
- Release notes (not a press release)
|
||||
|
||||
**Do not frame as:**
|
||||
- A new standalone product
|
||||
- A replacement for local Docker terminal
|
||||
- A competitor-specific feature (lead with the benefit, not the AWS integration)
|
||||
|
||||
---
|
||||
|
||||
## Taglines
|
||||
|
||||
Primary: *"Your SaaS workspace has a terminal tab. No SSH keys required."*
|
||||
|
||||
Secondary: *"Connect to any EC2 workspace from the canvas — IAM-authorized, no bastion, no public IP."*
|
||||
|
||||
Fallback (technical): *"CP-provisioned workspaces get browser-based terminal via AWS EC2 Instance Connect Endpoint. No keypair on disk. No bastion. No configuration."*
|
||||
|
||||
---
|
||||
|
||||
## Channel Coverage
|
||||
|
||||
| Channel | Asset | Owner | Status |
|
||||
|---------|-------|-------|--------|
|
||||
| Blog post | "How to access your EC2 workspace terminal from the canvas" | Content Marketer | Blocked: needs DevRel code demo first (#1545) |
|
||||
| Social launch thread | 5 posts: problem → solution → claim 1 → claim 2 → CTA | Social Media Brand | ✅ APPROVED — copy at `docs/marketing/social/2026-04-22-ec2-instance-connect-ssh/social-copy.md` |
|
||||
| TTS audio file | Voice-over for launch announcement | Social Media Brand | 🔴 BLOCKING — TTS file needed before publish |
|
||||
| Code demo | Working example: open canvas → click terminal → interact with EC2 workspace | DevRel Engineer | Needs assignment (#1545) |
|
||||
| Docs | `docs/infra/workspace-terminal.md` | DevRel Engineer | ✅ Shipped in PR #1533 |
|
||||
|
||||
**Coverage decision:** Blog post + social thread. Not a standalone campaign. Frame as "SaaS workspace terminal" within the Phase 30 remote workspaces narrative.
|
||||
|
||||
---
|
||||
|
||||
## Positioning Alignment
|
||||
|
||||
- **Phase 30 remote workspaces**: EICE terminal completes the remote workspace UX — agents register, accept tasks, and now also have a terminal, all without leaving the canvas
|
||||
- **Per-workspace auth tokens**: The same IAM-scoped credentials that authorize A2A also authorize EICE — the platform manages the credential lifecycle, not the user
|
||||
- **Enterprise governance**: No SSH keys means no orphaned keys in AWS IAM. Connection authorization via IAM is auditable in CloudTrail. This is a governance argument as much as a UX argument.
|
||||
|
||||
---
|
||||
|
||||
## Open Questions
|
||||
|
||||
- [x] Does the terminal UI expose EC2 Instance Connect as a distinct connection type? → No — seamless; the platform handles it transparently
|
||||
- [x] Is there a docs page? → Yes: `docs/infra/workspace-terminal.md` (shipped in PR #1533)
|
||||
- [x] Social Media Brand: confirm launch thread length (5 posts recommended)
|
||||
- [ ] Confirm EICE VPC Endpoint is present in the SaaS production VPC (DevOps/ops check)
|
||||
- [x] Social copy status → APPROVED (social-copy.md on staging, 2026-04-22)
|
||||
- [ ] 🔴 TTS audio file: Social Media Brand needs TTS generation before publish
|
||||
|
||||
---
|
||||
|
||||
## Sign-off
|
||||
|
||||
- [x] PMM positioning: approved
|
||||
- [ ] Marketing Lead: pending
|
||||
- [ ] DevRel: needs assignment (#1545)
|
||||
- [ ] Content Marketer: blocked on DevRel code demo
|
||||
|
||||
---
|
||||
|
||||
*PMM — routing to DevRel (#1545 code demo) → Content Marketer (#1546 blog) → Social Media Brand (#1547 launch thread). Close when all routed.*
|
||||
@ -1,117 +0,0 @@
|
||||
# Chrome DevTools MCP — Social Copy
|
||||
**Source:** PR #1306 merged to origin/main (2026-04-21)
|
||||
**Status:** MERGED — awaiting Marketing Lead approval for publishing
|
||||
|
||||
---
|
||||
|
||||
## X (140–280 chars)
|
||||
|
||||
### Version A — Governance angle
|
||||
```
|
||||
Chrome DevTools MCP gives agents full browser control. Screenshot, DOM, JS execution — all through a standard interface.
|
||||
|
||||
Raw CDP is all-or-nothing. Molecule AI adds the governance layer: which agents get access, what they can do, how to revoke it.
|
||||
|
||||
Audit trail included.
|
||||
```
|
||||
|
||||
### Version B — Production use cases
|
||||
```
|
||||
Three things you couldn't automate before Chrome DevTools MCP + Molecule AI governance:
|
||||
|
||||
1. Lighthouse CI/CD audits — agent opens Chrome, runs Lighthouse, posts score to PR
|
||||
2. Visual regression testing — screenshot diffs across agent workflow runs
|
||||
3. Authenticated session scraping — agent behind a login with managed cookies
|
||||
|
||||
All with org API key audit trail.
|
||||
```
|
||||
|
||||
### Version C — Problem framing
|
||||
```
|
||||
Chrome DevTools MCP: browser automation as a first-class MCP tool.
|
||||
|
||||
For prototypes: great. For production: you need something between no browser and full admin. That's the gap Molecule AI's MCP governance fills.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## LinkedIn (100–200 words)
|
||||
|
||||
Chrome DevTools MCP shipped in early 2026 — and browser automation is now a standard tool for any compatible AI agent.
|
||||
|
||||
Screenshot. DOM inspection. Network interception. JavaScript execution. No custom wrappers, no browser-driver installation.
|
||||
|
||||
That's the prototype story. For production — especially anything touching customer-facing workflows or authenticated sessions — all-or-nothing CDP access is a governance gap.
|
||||
|
||||
Molecule AI's MCP governance layer answers the production questions:
|
||||
- Which agents can open a browser?
|
||||
- What can they do with it?
|
||||
- How do you revoke access?
|
||||
- When something goes wrong, who accessed what session data?
|
||||
|
||||
Real-world use cases the layer enables: automated Lighthouse performance audits in CI/CD, screenshot-based visual regression testing, and authenticated session scraping — agents operating behind a login with cookies managed through the platform's secrets system.
|
||||
|
||||
Every action is logged. Every browser operation is attributed to an org API key and workspace ID.
|
||||
|
||||
Chrome DevTools MCP plus Molecule AI's governance layer: browser automation that meets production standards.
|
||||
|
||||
---
|
||||
|
||||
## Image suggestions
|
||||
|
||||
| Post | Image |
|
||||
|---|---|
|
||||
| X Version A | Fleet diagram: `marketing/assets/phase30-fleet-diagram.png` (reusable) |
|
||||
| X Version B | Custom: 3-item checklist graphic — "Lighthouse / Regression / Auth Scraping" |
|
||||
| X Version C | Quote card: "something between no browser and full admin" |
|
||||
| LinkedIn | Quote card or the checklist graphic |
|
||||
|
||||
---
|
||||
|
||||
## Hashtags
|
||||
|
||||
`#MCP` `#BrowserAutomation` `#AIAgents` `#MoleculeAI` `#DevOps` `#QA` `#CICD`
|
||||
|
||||
---
|
||||
|
||||
## Blog canonical URL
|
||||
|
||||
`docs.moleculesai.app/blog/browser-automation-ai-agents-mcp`
|
||||
|
||||
---
|
||||
|
||||
## MCP Server List Explainer
|
||||
**File:** `docs/marketing/campaigns/mcp-server-list/social-copy.md` (staging, commit `0d3ad96`)
|
||||
**Status:** COPY READY — awaiting visual assets + X credentials
|
||||
**Canonical URL:** `docs.molecule.ai/blog/mcp-server-list`
|
||||
**Owner:** Social Media Brand | **Day:** Ready once visual assets done
|
||||
|
||||
5-post X thread + LinkedIn post. Full copy on staging.
|
||||
|
||||
---
|
||||
|
||||
## Discord Adapter Day 2
|
||||
**File:** `discord-adapter-social-copy.md` (local)
|
||||
**Status:** COPY READY — awaiting visual assets + X credentials
|
||||
**Canonical URL:** `docs.molecule.ai/blog/discord-adapter` (live, PR #1301 merged)
|
||||
**Owner:** Social Media Brand | **Day:** Ready once visual assets done
|
||||
|
||||
See `discord-adapter-social-copy.md` for full copy (4 X variants + LinkedIn draft).
|
||||
|
||||
---
|
||||
|
||||
## Fly.io Deploy Anywhere (T+3 catch-up)
|
||||
**Source:** Blog live 2026-04-17 | Social delayed 5 days
|
||||
**File:** `fly-deploy-anywhere-social-copy.md` (local)
|
||||
**Status:** COPY READY — PMM executing Option A (retrospective catch-up). Awaiting X credentials.
|
||||
**Canonical URL:** `moleculesai.app/blog/deploy-anywhere`
|
||||
**Owner:** Social Media Brand | **Day:** Queue immediately after Chrome DevTools MCP Day 1 posts
|
||||
**Decision:** PMM chose Option A per decision brief. Frame: "we shipped this last week."
|
||||
|
||||
Retrospective framing: "Week in review: we shipped Fly.io Deploy Anywhere last week. Here's what it means for your agent infrastructure."
|
||||
|
||||
Social Media Brand: hold Fly.io post until Chrome DevTools MCP Day 1 posts land, then queue Fly.io in the same session.
|
||||
|
||||
---
|
||||
|
||||
## EC2 Instance Connect SSH (PR #1533)
|
||||
@ -1,148 +0,0 @@
|
||||
# EC2 Instance Connect SSH — Social Copy
|
||||
Campaign: ec2-instance-connect-ssh | PR: molecule-core#1533
|
||||
Publish day: 2026-04-22 (today)
|
||||
Assets: `marketing/devrel/campaigns/ec2-instance-connect-ssh/assets/`
|
||||
Status: Draft — pending Marketing Lead approval + credential availability
|
||||
|
||||
---
|
||||
|
||||
## X (Twitter) — Primary thread (5 posts)
|
||||
|
||||
### Post 1 — Hook
|
||||
|
||||
> Your AI agent has a workspace on an EC2 instance.
|
||||
>
|
||||
> How do you get a shell inside it right now?
|
||||
>
|
||||
> Old answer: copy the IP, find the key, `ssh -i key.pem ec2-user@X.X.X.X`, hope your
|
||||
> security group is right.
|
||||
>
|
||||
> New answer: click Terminal in Canvas.
|
||||
>
|
||||
> Molecule AI now speaks AWS EC2 Instance Connect.
|
||||
|
||||
---
|
||||
|
||||
### Post 2 — The problem it solves
|
||||
|
||||
> SSH into a cloud agent workspace sounds simple.
|
||||
>
|
||||
> It's not.
|
||||
>
|
||||
> → Instance IP changes on restart
|
||||
> → Key management across your whole agent fleet
|
||||
> → Security group rules you have to get right every time
|
||||
> → No audit trail on who SSH'd in and when
|
||||
>
|
||||
> EC2 Instance Connect handles all of it. Molecule AI wires it up so
|
||||
> your agent workspace is one Terminal tab away.
|
||||
|
||||
---
|
||||
|
||||
### Post 3 — How it works
|
||||
|
||||
> Molecule AI + EC2 Instance Connect:
|
||||
>
|
||||
> → Workspace provisioned in your VPC, instance_id stored
|
||||
> → Click Terminal tab in Canvas → WebSocket opens
|
||||
> → Platform calls `aws ec2-instance-connect ssh` under the hood
|
||||
> → EIC Endpoint opens a tunnel, EC2 Instance Connect pushes a temporary key (60s expiry)
|
||||
> → PTY bridges directly to the Canvas terminal
|
||||
>
|
||||
> No keys to manage. No IP to find. No security group dance.
|
||||
> One click.
|
||||
|
||||
---
|
||||
|
||||
### Post 4 — Security angle
|
||||
|
||||
> Every SSH access to a cloud agent workspace should be attributable.
|
||||
>
|
||||
> With EC2 Instance Connect:
|
||||
>
|
||||
> → IAM policy gates access (condition: `Role=workspace` tag)
|
||||
> → EC2 Instance Connect temporary key, auto-expires in 60 seconds
|
||||
> → EIC audit log shows which principal requested the tunnel
|
||||
> → No long-lived SSH keys anywhere
|
||||
>
|
||||
> Your security team will appreciate this.
|
||||
|
||||
---
|
||||
|
||||
### Post 5 — CTA
|
||||
|
||||
> EC2 Instance Connect SSH is live in Molecule AI (PR #1533).
|
||||
>
|
||||
> Provision a CP-managed workspace → open the Terminal tab → you're in.
|
||||
>
|
||||
> If you're still `ssh -i key.pem` into your agent fleet — there's a better way.
|
||||
>
|
||||
> [CTA: docs.molecule.ai/infra/workspace-terminal — pending docs publish]
|
||||
> #AgenticAI #MoleculeAI #AWS #DevOps #PlatformEngineering
|
||||
|
||||
---
|
||||
|
||||
## LinkedIn — Single post
|
||||
|
||||
**Title:** We gave AI agents their own terminal tab — powered by AWS EC2 Instance Connect
|
||||
|
||||
**Body:**
|
||||
|
||||
Getting a shell inside a cloud-hosted AI agent used to mean: find the instance IP, locate the SSH key, configure the security group, run `ssh`, hope nothing broke.
|
||||
|
||||
That's now one click inside Molecule AI.
|
||||
|
||||
We shipped EC2 Instance Connect SSH integration (PR #1533). Here's what changed:
|
||||
|
||||
**The old flow:**
|
||||
Copy the EC2 IP → find the SSH key → configure the security group to allow port 22 → `ssh -i key.pem ec2-user@X.X.X.X` → verify you're connected
|
||||
|
||||
**The new flow:**
|
||||
Provision a workspace in Canvas → click Terminal → you have a bash prompt
|
||||
|
||||
What makes this possible is AWS EC2 Instance Connect. The platform stores the `instance_id` from provisioning, calls `aws ec2-instance-connect ssh --connection-type eice` on your behalf, and the EIC Endpoint opens a tunnel using a temporary key pushed via the EC2 Instance Connect API. The PTY bridges straight into the Canvas Terminal tab.
|
||||
|
||||
Why this matters beyond convenience:
|
||||
|
||||
→ No long-lived SSH keys to manage or rotate
|
||||
→ IAM policy controls access (condition on `aws:ResourceTag/Role=workspace`)
|
||||
→ EIC audit log gives you provenance on every tunnel open event
|
||||
→ Temporary keys auto-expire
|
||||
|
||||
Your agent workspaces are now as easy to access as your browser tab — with better audit trails than a manually managed SSH key rotation process.
|
||||
|
||||
EC2 Instance Connect SSH is live now for all CP-provisioned workspaces.
|
||||
|
||||
---
|
||||
|
||||
## Visual Asset Specifications
|
||||
|
||||
1. **Terminal demo GIF** — Canvas Terminal tab showing bash prompt inside an EC2 workspace:
|
||||
- Canvas UI with a workspace node selected
|
||||
- Terminal tab open, showing `ec2-user@ip-10-0-x-x:~$` prompt
|
||||
- Optional: running `whoami` or `hostname` to show EC2 context
|
||||
- Format: GIF or looping MP4, max 10s
|
||||
- Dark theme, molecule navy background
|
||||
|
||||
2. **Architecture diagram** (optional for LI):
|
||||
- Canvas (browser) → WebSocket → Platform (Go) → `aws ec2-instance-connect ssh` → EIC Endpoint → EC2 Instance
|
||||
- Shows the tunnel path for audience who wants to understand the mechanism
|
||||
|
||||
---
|
||||
|
||||
## Campaign notes
|
||||
|
||||
**Audience:** DevOps, platform engineers, ML infrastructure teams running agents in AWS
|
||||
**Tone:** Practical — the IAM/audit story is the differentiator for security-conscious buyers; the "one click" story is the differentiator for developer audience
|
||||
**Differentiation:** No manual SSH key management vs. traditional bastion host approach
|
||||
**Hashtags:** #AgenticAI #MoleculeAI #AWS #EC2InstanceConnect #PlatformEngineering #DevOps
|
||||
**CTA links:** docs pending (workspace-terminal.md docs need to be published)
|
||||
|
||||
---
|
||||
|
||||
## Self-review applied
|
||||
|
||||
- No timeline claims ("today", "just shipped", etc.) beyond what's confirmed in PR state
|
||||
- No person names
|
||||
- No benchmarks or performance claims
|
||||
- CTA links marked as pending until docs confirm live
|
||||
@ -1,83 +0,0 @@
|
||||
# EC2 Console Output — Social Copy
|
||||
Campaign: EC2 Console Output | Source: PR #1178
|
||||
Publish day: 2026-04-24 (Day 4)
|
||||
Status: ✅ APPROVED — Marketing Lead 2026-04-22 (PM confirmed)
|
||||
Assets: `ec2-console-output-canvas.png` (1200×800, dark mode)
|
||||
|
||||
---
|
||||
|
||||
## X (Twitter) — Primary thread (4 posts)
|
||||
|
||||
### Post 1 — Hook
|
||||
Your workspace failed.
|
||||
You already know that.
|
||||
What you don't know is *why* — and right now that means switching to the AWS Console, finding the instance, pulling the console output, and switching back.
|
||||
|
||||
That's about to get better.
|
||||
|
||||
---
|
||||
|
||||
### Post 2 — The old workflow
|
||||
Before this fix:
|
||||
Click failed workspace → tab switch → AWS Console → log in → find instance → Actions → Get system log.
|
||||
|
||||
You're in the right place. You have the output. But you're also outside Canvas — you've lost the context of what the agent was doing, which workspace it was, and what the last_sample_error said.
|
||||
|
||||
Still doable. Still a minute of your time. Still a context switch.
|
||||
|
||||
---
|
||||
|
||||
### Post 3 — The new workflow
|
||||
After PR #1178:
|
||||
Click failed workspace → EC2 Console tab → full instance boot log, colorized by level, directly in Canvas.
|
||||
|
||||
Same output as AWS Console. Same detail. No tab switch. No context loss.
|
||||
|
||||
Thirty seconds to root cause, if that.
|
||||
|
||||
---
|
||||
|
||||
### Post 4 — CTA
|
||||
EC2 Console Output is now in Canvas — no AWS Console required.
|
||||
|
||||
Works for any workspace: local Docker, remote EC2, on-prem VM.
|
||||
If Molecule AI manages the instance, the console log is one click away.
|
||||
|
||||
→ [See how it works](https://docs.molecule.ai/docs/guides/remote-workspaces)
|
||||
|
||||
---
|
||||
|
||||
## LinkedIn — Single post
|
||||
|
||||
**Title:** The fastest way to debug a failed AI agent workspace
|
||||
|
||||
When an AI agent workspace fails in production, the debugging question is always the same: what happened on the instance?
|
||||
|
||||
Before this week, the answer required leaving the canvas. Log into AWS. Find the instance. Pull the system log. Cross-reference with the workspace ID. Piece together what the agent was doing.
|
||||
|
||||
That workflow just changed.
|
||||
|
||||
Molecule AI now surfaces EC2 Console Output directly in the Canvas workspace detail panel. Full instance boot log, colorized by log level — INFO, WARN, ERROR — without leaving your workflow.
|
||||
|
||||
The practical difference: root cause in thirty seconds instead of three minutes. No tab switch. No losing the workspace context you were already looking at.
|
||||
|
||||
Works for any workspace Molecule AI manages: local Docker, remote EC2, on-prem VM. The console output is there when you need it.
|
||||
|
||||
EC2 Console Output ships with Phase 30.
|
||||
|
||||
→ [Read the docs](https://docs.molecule.ai/docs/guides/remote-workspaces)
|
||||
→ [Molecule AI on GitHub](https://github.com/Molecule-AI/molecule-core)
|
||||
|
||||
#AIAgents #DevOps #AWS #CloudComputing #MoleculeAI
|
||||
|
||||
---
|
||||
|
||||
## Campaign notes
|
||||
|
||||
**Audience:** Platform engineers, DevOps, MLOps (X + LinkedIn)
|
||||
**Tone:** Operational. Concrete. Shows the workflow, not the feature announcement.
|
||||
**Differentiation:** EC2 Console Output in Canvas is a canvas/workspace UX differentiator — directly in the operator's workflow, not in a separate AWS tab.
|
||||
**CTA:** /docs/guides/remote-workspaces — ties back to Phase 30 Remote Workspaces
|
||||
**Coordinate with:** Day 4 of Phase 30 social campaign. Post after Discord Adapter (Day 2) and Org API Keys (Day 3).
|
||||
|
||||
*Draft by Marketing Lead 2026-04-21 — based on PR #1178 + EC2 Console demo storyboard*
|
||||
@ -1,156 +0,0 @@
|
||||
# Org-Scoped API Keys — Social Copy
|
||||
Campaign: org-scoped-api-keys | Source: PR #1105
|
||||
Publish day: 2026-04-25 (Day 5)
|
||||
Status: ✅ Approved by Marketing Lead — 2026-04-21
|
||||
|
||||
---
|
||||
|
||||
## Feature summary (source: PR #1105)
|
||||
- Org-scoped API keys: named, revocable, audited credentials replacing the shared ADMIN_TOKEN
|
||||
- Mint from Canvas UI or `POST /org/tokens`
|
||||
- sha256 hash stored server-side, plaintext shown once on creation
|
||||
- Prefix visible in every audit log line
|
||||
- Immediate revocation — next request, key is dead
|
||||
- Works across all workspaces AND workspace sub-routes
|
||||
- Scoped roles (read-only, workspace-write) on the roadmap
|
||||
|
||||
**Angle:** "Your AI agent now has its own org-admin identity — named, revocable, audited. No more shared ADMIN_TOKEN."
|
||||
|
||||
---
|
||||
|
||||
## X (Twitter) — Primary thread (5 posts)
|
||||
|
||||
### Post 1 — Hook
|
||||
You have 20 agents running in production.
|
||||
|
||||
One of them is making calls you can't trace.
|
||||
|
||||
That's not a hypothetical. That's what happens when you scale past
|
||||
"one ADMIN_TOKEN works fine" — and it usually happens the week before
|
||||
a compliance review.
|
||||
|
||||
Molecule AI org-scoped API keys: named, revocable, audit-attributable
|
||||
credentials for every integration.
|
||||
|
||||
→ [blog post link]
|
||||
|
||||
---
|
||||
|
||||
### Post 2 — Problem framing
|
||||
ADMIN_TOKEN works great — until it doesn't.
|
||||
|
||||
→ Can't rotate without downtime (10 agents use it simultaneously)
|
||||
→ Can't attribute which integration made a call (no prefix in logs)
|
||||
→ Can't revoke just one (one compromised token compromises everything)
|
||||
|
||||
Org-scoped API keys fix all three.
|
||||
|
||||
→ [blog post link]
|
||||
|
||||
---
|
||||
|
||||
### Post 3 — How it works (the product)
|
||||
Molecule AI org API keys:
|
||||
|
||||
→ Mint via Canvas UI or POST /org/tokens
|
||||
→ sha256 hash stored server-side, plaintext shown once
|
||||
→ Prefix visible in every audit log line
|
||||
→ Immediate revocation — next request, key is dead
|
||||
→ Works across all workspaces AND workspace sub-routes
|
||||
|
||||
Rotate without downtime. Attribute every call. Revoke instantly.
|
||||
|
||||
→ [blog post link]
|
||||
|
||||
---
|
||||
|
||||
### Post 4 — Compliance angle
|
||||
"We need to know which integration called that API endpoint."
|
||||
|
||||
Org-scoped API keys: every call tagged with the key's display prefix
|
||||
in the audit log. Full provenance in `created_by` — which admin minted
|
||||
the key, when, what it's been calling.
|
||||
|
||||
That's the answer your compliance team needs.
|
||||
|
||||
→ [blog post link]
|
||||
|
||||
---
|
||||
|
||||
### Post 5 — CTA
|
||||
Org-scoped API keys are live on all Molecule AI deployments.
|
||||
|
||||
If you're running multi-agent infrastructure and still using a single
|
||||
ADMIN_TOKEN — fix that.
|
||||
|
||||
→ [org API keys docs link]
|
||||
|
||||
---
|
||||
|
||||
## LinkedIn — Single post
|
||||
|
||||
**Title:** One ADMIN_TOKEN across your whole agent fleet is a compliance risk, not a convenience
|
||||
|
||||
**Body:**
|
||||
|
||||
At two agents, one ADMIN_TOKEN feels fine.
|
||||
|
||||
At twenty agents, it's a single point of failure that you can't rotate,
|
||||
can't audit, and can't compartmentalize.
|
||||
|
||||
Molecule AI's org-scoped API keys change the model:
|
||||
|
||||
→ One credential per integration — "ci-deploy-bot", "devops-rev-proxy",
|
||||
not "the ADMIN_TOKEN"
|
||||
|
||||
→ Every API call tagged with the key's prefix in your audit logs
|
||||
|
||||
→ Instant revocation — one key compromised, one key revoked,
|
||||
zero downtime for other integrations
|
||||
|
||||
→ `created_by` provenance on every key — which admin created it,
|
||||
when, and what it can reach
|
||||
|
||||
The keys work across every workspace in your org — including workspace
|
||||
sub-routes, not just admin endpoints.
|
||||
|
||||
This is the credential model that makes multi-agent infrastructure
|
||||
defensible at scale.
|
||||
|
||||
Org-scoped API keys are available now on all Molecule AI deployments.
|
||||
|
||||
→ [org API keys docs link]
|
||||
|
||||
UTM: `?utm_source=linkedin&utm_medium=social&utm_campaign=org-scoped-api-keys`
|
||||
|
||||
---
|
||||
|
||||
## Visual Asset Requirements
|
||||
|
||||
1. **Canvas UI screenshot** — Org API Keys tab showing key list
|
||||
(name, prefix, created date, last used)
|
||||
2. **Before/after credential model** — "ADMIN_TOKEN (single, shared,
|
||||
un-auditable)" vs "Org-scoped API keys (one per integration,
|
||||
named, revocable, attributed)"
|
||||
3. **Audit log terminal output** — key prefix, workspace ID, timestamp
|
||||
in every line
|
||||
|
||||
---
|
||||
|
||||
## Campaign Notes
|
||||
|
||||
- **Publish day:** 2026-04-25 (Day 5)
|
||||
- **Hashtags:** #AgenticAI #MoleculeAI #DevOps #PlatformEngineering
|
||||
- **X platform tone:** Lead with attribution — "which agent made that call?"
|
||||
resonates with developer/DevOps audience
|
||||
- **LinkedIn platform tone:** Lead with compliance/risk — "one ADMIN_TOKEN
|
||||
is a single point of failure" resonates with enterprise audience
|
||||
- **Key naming examples:** `ci-deploy-bot`, `devops-rev-proxy` — concrete,
|
||||
relatable for target audience
|
||||
- **Self-review applied:** no timeline claims, no person names, no benchmarks
|
||||
- **CTA links:** org API keys docs page — pending live URL
|
||||
|
||||
---
|
||||
|
||||
*Source: Molecule-AI/internal `marketing/devrel/social/gh-issue-pr1105-org-api-keys-launch.md`*
|
||||
*Status: ✅ Approved by Marketing Lead 2026-04-21 — ready for Social Media Brand to publish once credentials are provisioned — Marketing Lead approval required before publish*
|
||||
@ -1,145 +0,0 @@
|
||||
# Discord Adapter — Social Copy
|
||||
**Feature:** Discord channel adapter (inbound via Interactions webhook, outbound via Incoming Webhooks)
|
||||
**Campaign:** Discord Adapter | **Docs:** `docs/agent-runtime/social-channels.md` (Discord Setup section)
|
||||
**Canonical URL:** `github.com/Molecule-AI/molecule-core/blob/main/docs/agent-runtime/social-channels.md` (moleculesai.app TBD — outage confirmed)
|
||||
**Status:** APPROVED (PMM proxy — Marketing Lead offline) | Reddit/HN copy ADDED by PMM
|
||||
**Owner:** PMM → Social Media Brand | **Day:** Ready to post once X credentials are restored
|
||||
|
||||
---
|
||||
|
||||
## X (140–280 chars)
|
||||
|
||||
### Version A — Slash commands for agents
|
||||
```
|
||||
Your Discord community just got an agent layer.
|
||||
|
||||
Connect a Molecule AI workspace to any Discord channel. Members query your agents via slash commands — no bot token setup for outbound.
|
||||
|
||||
Governance included. Audit trail included.
|
||||
```
|
||||
|
||||
### Version B — Multi-channel agent access
|
||||
```
|
||||
Your AI agents can already handle Telegram, email, and Slack.
|
||||
Now add Discord — without changing how agents work.
|
||||
|
||||
Slash commands → agent workspace → response to any channel.
|
||||
One protocol. Any channel. Molecule AI's channel adapter.
|
||||
```
|
||||
|
||||
### Version C — Developer angle
|
||||
```
|
||||
Setting up an AI agent in Discord used to mean: create app, configure intents, handle events.
|
||||
|
||||
Molecule AI's Discord adapter: paste a webhook URL. Done.
|
||||
|
||||
Inbound via Interactions. Outbound via Incoming Webhook. Zero bot token management.
|
||||
```
|
||||
|
||||
### Version D — Platform angle
|
||||
```
|
||||
Discord communities can now talk to your agent fleet.
|
||||
|
||||
Molecule AI's channel adapter: one workspace, any social platform. Telegram, Slack, Discord — all the same agent underneath.
|
||||
|
||||
Your agents. Your channels. One canvas.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## LinkedIn (100–200 words)
|
||||
|
||||
```
|
||||
Connecting your AI agent fleet to Discord just got simpler — and more powerful.
|
||||
|
||||
Molecule AI's Discord adapter ships today. Here's what that means in practice:
|
||||
|
||||
Outbound messages: paste an Incoming Webhook URL. That's it. No Discord bot app, no OAuth token, no intent configuration — just a webhook URL and your agent is live in any channel.
|
||||
|
||||
Inbound: slash commands and message components arrive as signed Interactions payloads. The adapter parses them, forwards them to the workspace agent, and routes the response back to Discord.
|
||||
|
||||
Your Discord community gets access to the same agent capabilities as your Telegram users, your Slack channels, and your Canvas — without duplicating the agent logic or managing separate bot tokens.
|
||||
|
||||
One protocol. Any channel. Molecule AI's channel adapter layer makes social platforms first-class channels for your agent fleet.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Image suggestions
|
||||
|
||||
| Post | Image | Source |
|
||||
|---|---|---|
|
||||
| X Version A | Slash command dropdown screenshot — `/agent` in Discord | Custom: Discord UI screenshot |
|
||||
| X Version B | Multi-channel diagram: Telegram + Slack + Discord → same workspace agent | Custom: platform diagram |
|
||||
| X Version C | Before/after: complex bot setup vs "paste webhook URL" | Custom: simple comparison card |
|
||||
| X Version D | Canvas Channels tab with Discord connected | Custom: Canvas screenshot |
|
||||
| LinkedIn | Multi-platform diagram | Custom |
|
||||
|
||||
---
|
||||
|
||||
## Hashtags
|
||||
|
||||
`#MoleculeAI` `#Discord` `#AIAgents` `#MCP` `#SocialChannels` `#MultiChannel` `#AgentPlatform` `#DevOps`
|
||||
|
||||
---
|
||||
|
||||
## CTA
|
||||
|
||||
`moleculesai.app/docs/agent-runtime/social-channels`
|
||||
|
||||
---
|
||||
|
||||
## Campaign timing
|
||||
|
||||
Ready to post once:
|
||||
1. X consumer credentials (`X_API_KEY` + `X_API_SECRET`) are restored to Social Media Brand workspace — blocking all posts
|
||||
2. Discord Adapter Day 2 copy is approved by Marketing Lead (coordinate with Social Media Brand)
|
||||
|
||||
---
|
||||
|
||||
*PMM drafted 2026-04-22 — no prior social copy file found for Discord adapter*
|
||||
*Positioning note: Discord adapter is outbound-primary (no separate bot token for outbound); inbound via Interactions webhook — leverage this simplicity in copy*
|
||||
|
||||
---
|
||||
|
||||
## Reddit Post (r/LocalLLaMA or r/MachineLearning)
|
||||
```
|
||||
Molecule AI just shipped a Discord adapter for AI agent fleets.
|
||||
|
||||
The setup: paste a webhook URL. That's it — no Discord bot app, no OAuth token, no intent configuration.
|
||||
|
||||
Inbound: slash commands and message components arrive as signed Interactions payloads. The adapter parses them, forwards to your workspace agent, routes the response back to Discord.
|
||||
|
||||
Outbound: same incoming webhook, no separate bot token needed.
|
||||
|
||||
One workspace. Any channel. Your Telegram, Slack, and Discord users all hit the same agent underneath — no duplicated logic, no separate bot tokens per platform.
|
||||
|
||||
GitHub: github.com/Molecule-AI/molecule-core
|
||||
Docs: github.com/Molecule-AI/molecule-core/blob/main/docs/agent-runtime/social-channels.md
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Hacker News — Show HN
|
||||
```
|
||||
Show HN: Molecule AI Discord adapter — webhook URL setup, zero bot token management
|
||||
|
||||
Molecule AI shipped a Discord channel adapter for AI agent fleets.
|
||||
|
||||
The problem it solves: connecting Discord to an AI agent fleet usually means creating a Discord app, configuring intents, handling events, managing token rotation. The agent logic isn't the hard part — the integration is.
|
||||
|
||||
What we built: a Discord adapter that uses Discord's Interactions webhooks for inbound and Incoming Webhooks for outbound. No Discord bot app required. No OAuth token. No intent configuration.
|
||||
|
||||
Setup: paste an Incoming Webhook URL. Done.
|
||||
|
||||
Inbound: slash commands and message components arrive as signed Interactions payloads. The adapter parses them, forwards to your workspace agent, routes the response back to the channel.
|
||||
|
||||
Outbound: same incoming webhook. No separate bot token for outbound messages.
|
||||
|
||||
What this means in practice: your Discord community gets access to the same agent capabilities as your Telegram users, your Slack channels, and your Canvas — without duplicating the agent logic or managing separate bot tokens per platform.
|
||||
|
||||
Under 100 lines to add Discord to an existing Molecule AI workspace. Full source in the linked repo.
|
||||
|
||||
GitHub: github.com/Molecule-AI/molecule-core
|
||||
Docs: github.com/Molecule-AI/molecule-core/blob/main/docs/agent-runtime/social-channels.md
|
||||
```
|
||||
@ -1,132 +0,0 @@
|
||||
# EC2 Instance Connect SSH — Social Copy
|
||||
**Feature:** PR #1533 — `feat(terminal): remote path via aws ec2-instance-connect + pty`
|
||||
**Campaign:** EC2 Instance Connect SSH | **Blog:** `docs/infra/workspace-terminal.md` (shipped in PR #1533)
|
||||
**Canonical URL:** `moleculesai.app/docs/infra/workspace-terminal`
|
||||
**Status:** APPROVED — unblocked for Social Media Brand
|
||||
**Owner:** PMM → Social Media Brand | **Day:** Blocked on DevRel code demo (#1545) + Content Marketer blog (#1546)
|
||||
**Positioning approved by:** PMM (GH issue #1637)
|
||||
|
||||
---
|
||||
|
||||
## Headline Angle: "No SSH keys, no bastion, no public IP"
|
||||
**Primary security differentiator:** Ephemeral keys (60-second RSA key lifespan via AWS API — no persistent key on disk, no rotation, no orphaned credential risk)
|
||||
|
||||
Secondary angle: Zero key rot — the 60-second key window means there's nothing to rotate, nothing to revoke, nothing exposed on developer machines.
|
||||
|
||||
---
|
||||
|
||||
## X / Twitter (140–280 chars)
|
||||
|
||||
### Version A — Infrastructure angle ✅ (ops simplicity, approved primary)
|
||||
```
|
||||
Your SaaS-provisioned EC2 workspace has a terminal tab. No SSH keys needed.
|
||||
|
||||
Molecule AI connects via EC2 Instance Connect Endpoint — IAM-authorized, no bastion, no public IP required.
|
||||
|
||||
One click. You're in.
|
||||
```
|
||||
|
||||
### Version B — Zero credential overhead (ops simplicity)
|
||||
```
|
||||
Connecting to a cloud VM used to mean: SSH key, bastion host, public IP, and a security review.
|
||||
|
||||
EC2 Instance Connect changes that. Your IAM role is the auth layer. No keys on disk. No rotation. No gap.
|
||||
|
||||
The terminal just works.
|
||||
```
|
||||
|
||||
### Version C — Developer angle (DX)
|
||||
```
|
||||
Your agent's EC2 workspace just got a terminal tab.
|
||||
|
||||
No pre-configured SSH keys. No bastion. No public IP needed.
|
||||
|
||||
Molecule AI handles EC2 Instance Connect for you — IAM-authorized, PTY over WebSocket, in the canvas.
|
||||
|
||||
That's the SaaS difference.
|
||||
```
|
||||
|
||||
### Version D — Security / Enterprise (zero key rot) ✅
|
||||
```
|
||||
SSH key left on a laptop. Former employee. Rotation takes a week.
|
||||
|
||||
EC2 Instance Connect: every connection uses an ephemeral key pushed to instance metadata — valid 60 seconds, never touches a developer machine.
|
||||
|
||||
No orphaned keys. No rotation SLAs. IAM is the auth layer.
|
||||
|
||||
Security teams notice this architecture.
|
||||
```
|
||||
|
||||
### Version E — Ephemeral key story (new — security lead)
|
||||
```
|
||||
Traditional SSH: key lives on disk, gets shared, gets forgotten, becomes a liability.
|
||||
|
||||
EC2 Instance Connect SSH in Molecule AI: a temporary RSA key appears in instance metadata for 60 seconds, then disappears.
|
||||
|
||||
No key on disk. No key rotation. No blast radius when someone leaves.
|
||||
|
||||
The terminal just works. The key doesn't outlast the session.
|
||||
```
|
||||
|
||||
### Version F — Problem → solution (ops lead)
|
||||
```
|
||||
Problem: SaaS-provisioned EC2 workspaces don't have a terminal tab without SSH keys, a bastion, and a public IP.
|
||||
|
||||
Solution: EC2 Instance Connect Endpoint. IAM-authorized. Platform-initiated. No user-side key management.
|
||||
|
||||
Your canvas workspace just got a shell.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## LinkedIn (100–200 words)
|
||||
|
||||
```
|
||||
Getting a terminal into a cloud VM shouldn't require a security review, a bastion host, and an SSH keypair.
|
||||
|
||||
For SaaS-provisioned workspaces — the ones running on Fly Machines or EC2 — that was the reality until this week. Connecting to a remote VM meant: pre-configured keys, a jump box, and either a public IP or an SSM agent installed per instance.
|
||||
|
||||
EC2 Instance Connect Endpoint changes this. The platform's IAM credentials authorize the connection. A temporary RSA key appears in the instance metadata (valid for 60 seconds), and the session is proxied over WebSocket to the canvas terminal tab. No keys on disk. No bastion. No configuration required.
|
||||
|
||||
The terminal tab appears automatically for every CP-provisioned workspace. The connection is IAM-authorized, so every session is attributable in CloudTrail. Revocation is immediate — stop the IAM role, the connection stops.
|
||||
|
||||
This is what SaaS terminal access looks like when it's designed for agents, not humans with SSH config files.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Image suggestions
|
||||
|
||||
| Post | Image | Source |
|
||||
|---|---|---|
|
||||
| X Version A | Canvas screenshot: terminal tab open on a REMOTE badge workspace | Custom: needs DevRel code demo screenshot |
|
||||
| X Version D | Timeline graphic: "Key pushed to metadata → 60s window → key invalidated" | Custom: AWS/EC2 flow diagram |
|
||||
| X Version E | Before/after: key-on-disk vs ephemeral key lifecycle | Custom graphic |
|
||||
| X Version F | Problem/solution card: "Before: bastion + keys + public IP" vs "After: one click, canvas terminal" | Custom graphic |
|
||||
| LinkedIn | Canvas terminal screenshot with REMOTE badge | Custom |
|
||||
|
||||
---
|
||||
|
||||
## Hashtags
|
||||
|
||||
`#MoleculeAI` `#AWS` `#EC2` `#AIInfrastructure` `#AgentPlatform` `#DevOps` `#Security` `#A2A` `#RemoteWorkspaces`
|
||||
|
||||
**Note:** `#AgenticAI` removed — does not appear in Phase 30 positioning brief; keep messaging consistent.
|
||||
|
||||
---
|
||||
|
||||
## CTA
|
||||
|
||||
`moleculesai.app/docs/infra/workspace-terminal`
|
||||
|
||||
---
|
||||
|
||||
## Campaign timing
|
||||
|
||||
Dependent on: DevRel code demo (#1545) → Content Marketer blog (#1546) → Social Media Brand launch thread.
|
||||
Recommended: Coordinate with DevRel screencast; social posts should reference the demo for credibility.
|
||||
|
||||
---
|
||||
|
||||
*PMM drafted 2026-04-22 — updated 2026-04-22 (GH issue #1637 positioning decision: lead with ops simplicity, highlight ephemeral key property in security-focused posts)*
|
||||
*Positioning brief: `docs/marketing/launches/pr-1533-ec2-instance-connect-ssh.md`*
|
||||
@ -1,91 +0,0 @@
|
||||
# Fly.io Deploy Anywhere — Social Copy
|
||||
**Campaign:** Fly.io Deploy Anywhere | **Blog:** `docs/blog/2026-04-17-deploy-anywhere/index.md`
|
||||
**Canonical URL:** `moleculesai.app/blog/deploy-anywhere`
|
||||
**Status:** DRAFT — PMM wrote this copy; no file existed anywhere before this entry
|
||||
**Owner:** PMM → Social Media Brand | **Day:** T+3 (campaign delayed from April 17)
|
||||
|
||||
---
|
||||
|
||||
## X (140–280 chars)
|
||||
|
||||
### Version A — Infrastructure freedom
|
||||
```
|
||||
Your cloud. Your choice.
|
||||
|
||||
Molecule AI workspaces now run on Docker, Fly.io, or your control plane — with one config change. No agent code changes. No migration tax.
|
||||
|
||||
Your agents. Your infra.
|
||||
```
|
||||
|
||||
### Version B — Developer pain
|
||||
```
|
||||
Setting up AI agent infrastructure on Fly.io took a week. With Molecule AI it takes three environment variables.
|
||||
|
||||
Three variables. Done. That's it.
|
||||
```
|
||||
|
||||
### Version C — Multi-cloud reality
|
||||
```
|
||||
Most agent platforms assume you run Docker. Molecule AI doesn't.
|
||||
|
||||
Docker, Fly.io, or control plane — the backend is a runtime choice, not an architectural commitment. Your agent code stays the same.
|
||||
```
|
||||
|
||||
### Version D — Indie dev angle
|
||||
```
|
||||
Fly.io's economics for AI agents — scale to zero when nobody's working, pay per use.
|
||||
|
||||
Molecule AI workspaces run on Fly Machines. Zero config. One env var. Production-ready from day one.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## LinkedIn (100–200 words)
|
||||
|
||||
```
|
||||
Your infrastructure choice just got decoupled from your agent platform choice.
|
||||
|
||||
Molecule AI ships three production-ready workspace backends — Docker, Fly.io, and a control plane — and switching between them takes a single environment variable. Your agent code, model choices, and workspace topology stay exactly the same.
|
||||
|
||||
Until this week, if you wanted Fly.io's economics — pay-per-use compute, fast cold starts, scale to zero when nobody's working — you had to migrate your agent platform. That trade-off is gone.
|
||||
|
||||
Today: set three environment variables on your Molecule AI tenant instance, and your workspaces provision as Fly Machines. No separate Docker host. No idle infrastructure. Your agents run on Fly.io with Molecule AI's canvas, A2A protocol, and auth model — same platform, different backend.
|
||||
|
||||
Set it and forget it — until you want to switch back.
|
||||
|
||||
Molecule AI workspace backends: Docker, Fly.io, Control Plane. One config change.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Image suggestions
|
||||
|
||||
| Post | Image |
|
||||
|---|---|
|
||||
| X Version A | Comparison card: Docker vs Fly.io vs Control Plane — three boxes, same logo |
|
||||
| X Version B | Terminal: 3 env vars → workspace online on Fly.io |
|
||||
| X Version C | Diagram: "Backend = runtime choice" — agent code central, 3 arrows to Docker/Fly.io/Control Plane |
|
||||
| LinkedIn | Fleet diagram (reusable from Phase 30 — same visual, different caption) |
|
||||
|
||||
---
|
||||
|
||||
## Hashtags
|
||||
|
||||
`#MoleculeAI` `#FlyIO` `#AIInfrastructure` `#AgentPlatform` `#DevOps` `#AIAgents` `#A2A` `#RemoteWorkspaces`
|
||||
|
||||
**Note:** `#AgenticAI` removed per Phase 30 positioning brief. `#AIAgents` and `#A2A` added for cross-campaign consistency.
|
||||
|
||||
---
|
||||
|
||||
## Campaign timing note
|
||||
|
||||
Blog went live April 17. As of April 22 this campaign is 5 days stale. Recommend one of:
|
||||
- Fold into Phase 30 social push as a variant (low effort, reuse fleet diagram)
|
||||
- Hold for a Fly Machines pricing/GA moment
|
||||
- Drop from active queue
|
||||
|
||||
Confirm with Marketing Lead.
|
||||
|
||||
---
|
||||
|
||||
*PMM drafted 2026-04-21 — no prior social copy file found anywhere in workspace*
|
||||
@ -1,91 +0,0 @@
|
||||
# Phase 30 — Short-Form Social Copy
|
||||
**Source:** PR #1306 merged to origin/main (2026-04-21)
|
||||
**Status:** MERGED — awaiting Marketing Lead approval for publishing
|
||||
|
||||
---
|
||||
|
||||
## X (140–280 chars)
|
||||
|
||||
### Version A — Technical
|
||||
```
|
||||
Phase 30 ships: Molecule AI remote workspaces are GA.
|
||||
|
||||
Agents running on your laptop, AWS, GCP, or on-prem now register to the same org as your Docker agents. Same A2A. Same auth. Same canvas.
|
||||
|
||||
Remote badge. That's the only difference.
|
||||
→ docs: https://moleculesai.app/docs/guides/remote-workspaces
|
||||
```
|
||||
|
||||
### Version B — Product
|
||||
```
|
||||
Your laptop is now a valid Molecule AI runtime.
|
||||
|
||||
One org. Mixed fleet: Docker agents on the platform, remote agents wherever your infrastructure lives. One canvas. One audit trail.
|
||||
|
||||
Phase 30 is live.
|
||||
```
|
||||
|
||||
### Version C — Developer
|
||||
```
|
||||
How to run a Molecule AI agent on your laptop in 3 steps:
|
||||
|
||||
1. Create a workspace (runtime: external)
|
||||
2. Run the Python SDK
|
||||
3. Watch it appear on the canvas
|
||||
|
||||
That's it. Phase 30 is live.
|
||||
docs → https://moleculesai.app/docs/guides/remote-workspaces
|
||||
```
|
||||
|
||||
### Version D — Enterprise
|
||||
```
|
||||
Multi-cloud AI agent fleets, single governance plane.
|
||||
|
||||
Phase 30: agents on AWS, GCP, on-prem, your laptop — all visible in one canvas, all governed by the same platform auth, all auditable.
|
||||
|
||||
GA today.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## LinkedIn (150–300 words)
|
||||
|
||||
```
|
||||
We're launching Phase 30: Remote Workspaces.
|
||||
|
||||
Most AI agent platforms assume all agents run in the same environment as the control plane. Molecule AI didn't — but until today, that's where the story ended.
|
||||
|
||||
Phase 30 changes that. Your agent can now run anywhere:
|
||||
|
||||
- On a developer's laptop, for local iteration and debugging
|
||||
- On AWS or GCP, for production workloads in your cloud
|
||||
- On an on-premises server, for enterprise environments with data residency requirements
|
||||
- On a third-party endpoint, for existing SaaS integrations
|
||||
|
||||
And from the canvas, you can't tell the difference. Same workspace card. Same status. Same chat tab. Same audit trail. The only visible signal: a purple REMOTE badge.
|
||||
|
||||
The governance is the same. The A2A protocol is the same. The auth contract is the same. Where the agent runs is a deployment detail — not an architectural constraint.
|
||||
|
||||
Phase 30 is generally available today.
|
||||
|
||||
See the quick start → [link]
|
||||
Read the guide → [link]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Image suggestions per post
|
||||
|
||||
| Post | Best image |
|
||||
|---|---|
|
||||
| X Version A (Technical) | Fleet diagram: `marketing/assets/phase30-fleet-diagram.png` |
|
||||
| X Version B (Product) | Canvas screenshot: `marketing/assets/phase30-canvas-remote-badge.png` (once captured) |
|
||||
| X Version C (Developer) | Terminal screenshot: `python3 run.py` + canvas showing REMOTE badge |
|
||||
| X Version D (Enterprise) | Fleet diagram (same as A) |
|
||||
| LinkedIn | Fleet diagram OR canvas screenshot |
|
||||
|
||||
---
|
||||
|
||||
## Hashtags
|
||||
|
||||
`#MoleculeAI` `#RemoteWorkspaces` `#AIAgents` `#AgentFleet` `#AIPlatform` `#MCP` `#A2A` `#MultiCloud`
|
||||
@ -7,6 +7,28 @@ ROOT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
|
||||
echo "==> Ensuring shared docker network exists..."
docker network create molecule-monorepo-net 2>/dev/null || true

# Populate the template / plugin registry.
# workspace-configs-templates/, org-templates/, and plugins/ are intentionally
# gitignored; the curated set is described by manifest.json as external repos.
# Without them the Canvas template palette is empty and workspace provisioning
# falls through to a bare default. clone-manifest.sh skips directories that
# already have content, so re-running setup.sh is safe (idempotent).
if [ -f "$ROOT_DIR/manifest.json" ] && [ -f "$ROOT_DIR/scripts/clone-manifest.sh" ]; then
  if command -v jq >/dev/null 2>&1; then
    echo "==> Populating template / plugin registry from manifest.json..."
    bash "$ROOT_DIR/scripts/clone-manifest.sh" \
      "$ROOT_DIR/manifest.json" \
      "$ROOT_DIR/workspace-configs-templates" \
      "$ROOT_DIR/org-templates" \
      "$ROOT_DIR/plugins"
  else
    # clone-manifest.sh needs jq to parse manifest.json; skip with guidance
    # rather than failing the whole setup — the populate step can be rerun
    # manually after installing jq.
    echo "==> NOTE: jq not installed — skipping template registry populate."
    echo "    Install with: brew install jq (macOS) / apt install jq (Debian)"
    echo "    Then rerun: bash scripts/clone-manifest.sh manifest.json \\"
    echo "    workspace-configs-templates/ org-templates/ plugins/"
  fi
fi

echo "==> Starting infrastructure..."
docker compose -f "$ROOT_DIR/docker-compose.infra.yml" up -d
|
||||
|
||||
|
||||
@ -1,108 +0,0 @@
|
||||
DESIGN NOTES — phase30-fleet-diagram.png
|
||||
=========================================
|
||||
Generated by: /workspace/gen_fleet_diagram.py (matplotlib / Python)
|
||||
Output size: 128,967 bytes · nominal 1800×1050 px at 150 dpi
|
||||
|
||||
────────────────────────────────────────────────────────────────────────────
|
||||
COLOUR PALETTE
|
||||
────────────────────────────────────────────────────────────────────────────
|
||||
Background / Canvas
|
||||
#0A0E1A Deep navy Background (facecolor on save)
|
||||
|
||||
Panel interiors
|
||||
#111827 Dark steel Agent boxes, Platform box interior fill
|
||||
|
||||
Panel borders / accents
|
||||
#1F2A40 Muted slate PANEL_EDGE (reserved, not rendered)
|
||||
|
||||
Brand colours used
|
||||
#4A90D9 Molecule Blue Platform box border, A2A Proxy sections,
|
||||
connector line: platform → canvas
|
||||
|
||||
#8B5CF6 Purple REMOTE Agent Fleet boxes (border, header tint,
|
||||
dashed outer ring, fan-in connector lines)
|
||||
|
||||
#22C55E Green Online/active status dots (×5 per agent),
|
||||
"Canvas" box border, canvas live dot,
|
||||
"One canvas / All agents" text
|
||||
|
||||
#F59E0B Amber/Orange "WebSocket Fanout" label inside platform box
|
||||
|
||||
Supporting neutrals
|
||||
#94A3B8 Steel gray Body text, sub-labels (endpoint paths,
|
||||
Secrets Management, State Polling, etc.)
|
||||
|
||||
#F1F5F9 Near white Title text, "Molecule AI Platform" header,
|
||||
box title labels
|
||||
|
||||
#60A5FA Light blue Section headers inside platform (A2A Proxy,
|
||||
Registry + Heartbeat) — lighter tint of
|
||||
Molecule Blue for sub-panel hierarchy
|
||||
|
||||
────────────────────────────────────────────────────────────────────────────
|
||||
TYPOGRAPHY
|
||||
────────────────────────────────────────────────────────────────────────────
|
||||
Font family : DejaVu Sans (matplotlib default, no custom .ttf loaded)
|
||||
Title : 13 pt bold #F1F5F9
|
||||
Subtitle : 7 pt #94A3B8
|
||||
Box titles : 9 pt bold #F1F5F9
|
||||
Section heads: 7.5 pt bold #60A5FA (inside platform box)
|
||||
Body labels : 5.5–5.8 pt #94A3B8
|
||||
Agent names : 6.5 pt bold #F1F5F9
|
||||
FW pill : 6.0 pt bold #F1F5F9 (on purple background)
|
||||
Legend items : 6.5 pt #94A3B8
|
||||
Version tag : 5.5 pt #94A3B8 (60% alpha)
|
||||
|
||||
────────────────────────────────────────────────────────────────────────────
|
||||
VISUAL ELEMENTS
|
||||
────────────────────────────────────────────────────────────────────────────
|
||||
Agent icon labels (top-left of each agent box)
|
||||
EC2 — AWS EC2
|
||||
Mac — Laptop/Mac
|
||||
Srv — On-Prem
|
||||
|
||||
Framework pills (centre of each agent box, purple background)
|
||||
[ LangGraph ] [Claude Code] [ CrewAI ]
|
||||
|
||||
Status dots — 5 green dots per agent box, radius 0.075 figure-units,
|
||||
spaced 0.26 apart, centred horizontally in each box.
|
||||
|
||||
Dashed outer rings — thin (0.9 pt) dashed border surrounds each box
|
||||
category with its brand colour at ~45–50% alpha.
|
||||
|
||||
Connector lines — solid, 1.2 pt purple for fan-in from agents to platform;
|
||||
1.4 pt blue for platform-to-canvas drop. Arrowhead at destination end.
|
||||
|
||||
────────────────────────────────────────────────────────────────────────────
|
||||
REFINEMENT CHECKLIST (design team)
|
||||
────────────────────────────────────────────────────────────────────────────
|
||||
□ Swap "EC2 / Mac / Srv" labels for proper cloud-platform icons
|
||||
(AWS logo, macOS logo, server rack icon) via a bundled .ttf or
|
||||
matplotlib.matplotlib_fname() + FontManager trick to register a
|
||||
custom icon font such as Font Awesome 6 Free.
|
||||
□ Align connector lines to converge at a single "junction point" above
|
||||
the platform box rather than three separate lines fanning from
|
||||
identical y-coordinates — more closely mirrors the ASCII diagram.
|
||||
□ Add a faint grid or dot-grid background texture to the figure canvas
|
||||
to reinforce the tech/diagram aesthetic.
|
||||
□ Consider a subtle horizontal divider inside each agent box between
|
||||
the header band and the framework pill / dots area.
|
||||
□ Source the Molecule AI wordmark / logo SVG and embed it in the
|
||||
platform title bar (requires converting SVG → matplotlib transforms
|
||||
or rasterising to a numpy array via Pillow).
|
||||
□ Export at 300 dpi for print-ready assets; current 150 dpi is
|
||||
optimised for screen/web display.
|
||||
□ Validate colour contrast ratios (WCAG AA) for body text on dark bg —
|
||||
#94A3B8 on #0A0E1A should be re-checked; may need to shift body
|
||||
text to #B0BEC5 or lighter for legibility.
|
||||
|
||||
────────────────────────────────────────────────────────────────────────────
|
||||
REPRODUCIBILITY
|
||||
────────────────────────────────────────────────────────────────────────────
|
||||
matplotlib >= 3.5
|
||||
numpy (bundled with matplotlib)
|
||||
Python >= 3.8
|
||||
Pillow (not required for this script — pure matplotlib rendering)
|
||||
|
||||
Run: python /workspace/gen_fleet_diagram.py
|
||||
Output: /workspace/marketing/assets/phase30-fleet-diagram.png
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 126 KiB |
@ -1,15 +0,0 @@
|
||||
Every time an agent in your Molecule AI organization does something — delegates a task, calls a tool, reads a secret, or makes an external API call — that event is written to an append-only audit log. That log is chained with HMAC-SHA256 so that any tampering with past entries is detectable, provable, and logged.
|
||||
|
||||
This post explains how that system works, and what changed in Pull Request number one-three-three-nine.
|
||||
|
||||
The problem with plain audit logs is that nothing stops someone with database access from editing past rows. A malicious actor can remove or modify entries, and the log looks perfectly fine.
|
||||
|
||||
Molecule AI's audit ledger is an append-only, chain-verified log. Each entry contains the event data, an HMAC-SHA256 of the current entry signed with a server-side secret, and the HMAC of the previous entry embedded as part of the signing context. This creates a chain — every entry's HMAC depends on the previous entry's HMAC. If you change any past entry, its HMAC changes. That breaks the chain at the next verification step.
|
||||
|
||||
Verify Audit Chain walks the log from the beginning, recomputing each HMAC and comparing it against the stored value. If every entry verifies, the chain is intact. If an entry fails to verify, the function returns false. The tamper is detectable.
|
||||
|
||||
Organization-scoped API keys are the attribution layer on top of the integrity layer. Each organization key carries a name, a hash, and a prefix. Every authenticated call carries that prefix in the audit row. Combined with the HMAC chain, you get integrity and attribution simultaneously.
|
||||
|
||||
The bug Pull Request one-three-three-nine fixed was a panic vulnerability. In Go, slicing a string beyond its length causes a panic. Verify Audit Chain was using a twelve-character truncation on HMACs for log readability. But if an audit row had been corrupted, the stored HMAC could be shorter than twelve bytes, and the verification pass would crash. The fix adds a length check before truncation. The logic is unchanged — if the HMAC is long enough, the same twelve-character prefix is logged. If it is short or missing, a shorter prefix is logged. Either way, the chain verification still runs, and mismatches still fail correctly.
|
||||
|
||||
For teams running SOC two or ISO twenty-seven-zero-zero-one, this is the difference between, here's a log, and, here is a cryptographically verifiable, attributable record of everything that happened.
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -1,13 +0,0 @@
|
||||
Molecule AI ships Phase 30 today — Remote Workspaces is generally available.
|
||||
|
||||
Starting now, any agent can run anywhere: your laptop, a different cloud, an edge device, a third-party endpoint. It registers with your Molecule org, appears on the canvas with a remote badge, receives tasks from parent agents, and reports status — just like an agent running in Docker.
|
||||
|
||||
The auth contract is the same. The A2A protocol is the same. The canvas experience is the same. The only difference is where the agent's process lives.
|
||||
|
||||
Here's what Phase 30 delivers. Workspace auth tokens so every remote agent has a cryptographic identity. A secrets pull endpoint so API keys are managed centrally, not baked into container images. A state polling interface so agents can stay alive without a WebSocket connection. And an SDK — Python, dependency-light, just requests — that wraps all of it.
|
||||
|
||||
To onboard a remote agent: create a workspace with runtime external, point it at your platform URL, and run the SDK. Within seconds it shows up on the canvas, purple badge and all.
|
||||
|
||||
Phase 30 turns Molecule AI from a self-hosted tool into an enterprise agent fleet platform. Agents run anywhere. Governance stays in one place.
|
||||
|
||||
Learn more at moleculesai dot app, and check the docs for the quick start guide.
|
||||
@ -1,7 +0,0 @@
|
||||
Phase 30 正式发布 — Molecule AI 远程工作区现已全面可用。
|
||||
|
||||
笔记本、云服务器、本地机房 — 都可以是 Molecule AI 的运行时。代理注册到组织,在画布上显示远程徽章,接收任务并上报状态。相同的 A2A 协议,相同的身份认证。
|
||||
|
||||
接入只需三步:将工作区运行时设为 external,指向平台地址,运行 SDK。几秒内出现在画布上,紫色徽章,一切就绪。
|
||||
|
||||
代理运行在任何地方,治理收于一处。
|
||||
Binary file not shown.
@ -1,7 +0,0 @@
|
||||
Phase 30 is live — Molecule AI Remote Workspaces is generally available.
|
||||
|
||||
Your laptop, a cloud instance, an on-premises server — any of these is now a valid Molecule AI runtime. An agent running anywhere registers with your org, appears on the canvas with a remote badge, receives tasks from parent agents, and reports status. Same A2A protocol. Same auth contract. Same canvas.
|
||||
|
||||
To onboard a remote agent: create a workspace with runtime external, point it at your platform URL, and run the SDK. Within seconds, it shows up on the canvas — purple badge and all.
|
||||
|
||||
Agents run anywhere. Governance stays in one place.
|
||||
Binary file not shown.
Binary file not shown.
@ -1,124 +0,0 @@
|
||||
# Phase 30 Launch — Community Announcements
|
||||
|
||||
> **For:** DevRel / Community Manager | **Status:** Draft
|
||||
> **Channels:** Discord, Slack (public channels), relevant forums
|
||||
|
||||
---
|
||||
|
||||
## Discord — #announcements
|
||||
|
||||
**Subject:** Phase 30 is GA — Remote Workspaces are live
|
||||
|
||||
```
|
||||
Phase 30 is generally available as of today.
|
||||
|
||||
Remote Workspaces let you run Molecule AI agents on any machine — your laptop, a cloud VM, an on-prem server — and they show up in Canvas like every other workspace. Same auth, same A2A protocol, same audit trail.
|
||||
|
||||
Quickstart → https://moleculesai.app/docs/guides/remote-workspaces
|
||||
|
||||
Two features that shipped with Phase 30 are worth highlighting:
|
||||
• AGENTS.md auto-generation — peer agents can read each other's manifest without system prompts (AAIF standard)
|
||||
• Cloudflare Artifacts integration — workspace state can be versioned in a git repo, forked into new agents
|
||||
|
||||
Demo walkthroughs → https://moleculesai.app/docs/marketing/demos
|
||||
|
||||
Questions? Drop them here or in #support.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Discord — #remote-workspaces (new or existing channel)
|
||||
|
||||
```
|
||||
Heads up: Remote Workspaces are now GA in Phase 30.
|
||||
|
||||
If you've been waiting for a way to run agents locally (for debugging) or in your own cloud account, this is the release.
|
||||
|
||||
What changed:
|
||||
• Agent runtime: remote (connects via WSS, no inbound ports needed)
|
||||
• Auth: org-scoped bearer token — same as container workspaces
|
||||
• Canvas: REMOTE badge shows the runtime type
|
||||
• A2A: works across container/remote without code changes
|
||||
|
||||
Docs → https://moleculesai.app/docs/guides/remote-workspaces
|
||||
FAQ → https://moleculesai.app/docs/guides/remote-workspaces-faq
|
||||
|
||||
Known issues → reply here or ping me.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Slack — #general or #launch (public org Slack)
|
||||
|
||||
```
|
||||
Phase 30 is live.
|
||||
|
||||
Remote Workspaces are now generally available. You can run Molecule AI agents on your own infrastructure — laptop, cloud VM, on-prem — and they'll register to your org and appear in Canvas.
|
||||
|
||||
Key detail for teams evaluating data residency: agent compute can stay on your infrastructure. The platform handles orchestration, auth, and coordination.
|
||||
|
||||
Docs: https://moleculesai.app/docs/guides/remote-workspaces
|
||||
Quickstart: https://moleculesai.app/docs/guides/remote-workspaces#quick-start
|
||||
Launch post: https://moleculesai.app/blog/remote-workspaces-ga
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Slack — #devrel / #community (ecosystem channels)
|
||||
|
||||
```
|
||||
Phase 30 is GA.
|
||||
|
||||
Two things that shipped that the agent ecosystem community might care about:
|
||||
|
||||
1. AGENTS.md is now auto-generated at workspace boot — implements the AAIF / Linux Foundation standard. Peer agents can discover each other's identity and tools without reading system prompts. PR: molecule-core#763
|
||||
|
||||
2. Cloudflare Artifacts git integration — every workspace can have a git repo for versioned state snapshots. Fork the repo to bootstrap a new agent from any checkpoint. PR: molecule-core#641
|
||||
|
||||
Working demos with full API examples: https://moleculesai.app/docs/marketing/demos
|
||||
|
||||
If you're building agent coordination tooling, these two features should make your life easier.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Reddit — r/MachineLearning / r/LocalLLaMA (if applicable)
|
||||
|
||||
**Post title:** Molecule AI Phase 30: Remote Workspaces are GA — agents that run on your own infrastructure
|
||||
|
||||
**Body:**
|
||||
|
||||
|
||||
Molecule AI Phase 30 launched today — Remote Workspaces are now generally available.
|
||||
|
||||
Until now, all Molecule AI agents ran on the platform's Docker infrastructure. Phase 30 lets you run agents on your own hardware: your laptop, a cloud VM, an on-prem server. They register to your org and appear in the same Canvas as your platform agents.
|
||||
|
||||
What this means practically:
|
||||
- Run agents locally for debugging with your IDE, while they participate in your org's task pipeline
|
||||
- Keep agent compute on your own infrastructure for data-residency requirements
|
||||
- Mix Docker and remote agents in the same org — Canvas shows them identically, A2A works across both
|
||||
|
||||
The auth model is the same: workspace-scoped bearer tokens, revocable per-agent. No shared secrets.
|
||||
|
||||
Quick-start is under 5 minutes. Install the Python SDK, create a workspace with `runtime: external`, and your agent registers and appears in Canvas.
|
||||
|
||||
Docs: https://moleculesai.app/docs/guides/remote-workspaces
|
||||
GitHub: https://github.com/Molecule-AI/molecule-core
|
||||
|
||||
(PRs #1075-1083, 1085-1100)
|
||||
|
||||
---
|
||||
*[Short version — trim to 300 words for r/MachineLearning if needed.]*
|
||||
|
||||
---
|
||||
|
||||
## Notes
|
||||
|
||||
- Post Discord/Slack announcements the morning of launch day (09:00 UTC window)
|
||||
- Reddit posts should go up after Discord/Slack (don't want to look like spam across channels simultaneously)
|
||||
- Customize [CHANNEL-WELCOME-TONE] per channel — `#general` should be accessible, `#engineering` can be more technical
|
||||
- All links assume docs site is live — confirm before posting
|
||||
|
||||
---
|
||||
|
||||
*Drafted by DevRel. Customize sender name and channel-specific opening lines before posting.*
|
||||
@ -1,108 +0,0 @@
|
||||
# Phase 30 Launch — Hacker News Submission Guide
|
||||
|
||||
> **For:** DevRel / whoever submits | **Status:** Draft — submit when ready
|
||||
> **Trigger:** After blog post is live on docs site
|
||||
|
||||
---
|
||||
|
||||
## Why HN?
|
||||
|
||||
Hacker News has a large developer and technical audience that overlaps with Molecule AI's target users: platform engineers, indie developers building with AI, and technical evaluators. A well-crafted HN post can drive significant docs traffic and signups.
|
||||
|
||||
---
|
||||
|
||||
## What to Submit
|
||||
|
||||
**URL:** The Phase 30 launch blog post at `https://moleculesai.app/blog/remote-workspaces-ga`
|
||||
|
||||
**Title options:**
|
||||
|
||||
| Option | Title | When to use |
|
||||
|---|---|---|
|
||||
| A | Show HN — Phase 30: run AI agents on your laptop, your cloud, anywhere | Standard launch |
|
||||
| B | Show HN — Molecule AI launches Remote Workspaces (GA) | If the "Show HN" prefix is too meta |
|
||||
| C | Show HN — We built a fleet management layer for AI agents | Developer-heavy audience, less marketing |
|
||||
|
||||
**Recommended:** Option A — HN readers respond well to technical products with a clear "what it does" title.
|
||||
|
||||
---
|
||||
|
||||
## What to Write in the HN Post Body
|
||||
|
||||
The blog post is the destination. The HN post body is a 2–3 paragraph pitch that earns the click. Write it yourself — don't paste the full blog post.
|
||||
|
||||
**Template:**
|
||||
|
||||
```
|
||||
We just shipped Phase 30 — Remote Workspaces is now GA.
|
||||
|
||||
Most AI agent platforms assume all agents run inside the platform's infrastructure. Phase 30 lets agents run anywhere: your laptop, a VM in your own cloud account, an on-prem server. They register to the same org, appear in the same Canvas, and communicate via the same A2A protocol.
|
||||
|
||||
The governance is the same. The auth contract is the same. The only visible difference is a REMOTE badge on the workspace card.
|
||||
|
||||
Quickstart is under 5 minutes:
|
||||
pip install molecule-ai-sdk
|
||||
python3 run.py --runtime remote
|
||||
|
||||
Docs, demo, and quickstart guide in the link.
|
||||
|
||||
(I'm [NAME] from the Molecule AI team — AMA.)
|
||||
```
|
||||
|
||||
**Key HN-specific rules:**
|
||||
- Don't use "I" too many times — but the "(I'm ... AMA)" close is expected and encouraged
|
||||
- Don't hard-sell or use marketing language — just describe the product
|
||||
- Be specific about what it does ("A2A protocol", "workspace auth tokens") — that signals technical depth
|
||||
- Keep it short — 2–3 paragraphs, not an essay
|
||||
|
||||
---
|
||||
|
||||
## When to Submit
|
||||
|
||||
**Timing matters:**
|
||||
|
||||
- Submit when HN traffic is high but not oversaturated
|
||||
- **Best window:** Tuesday–Thursday, 10:00–13:00 UTC (roughly when US East Coast is morning and Europe is mid-day)
|
||||
- **Avoid:** Mondays (low traffic), Fridays (weekend readers don't upvote), major news events
|
||||
- **Recommended day:** Wednesday of launch week, 11:00 UTC
|
||||
|
||||
---
|
||||
|
||||
## What Happens After Submitting
|
||||
|
||||
1. **Monitor for 2–4 hours** after submission — respond to comments, answer technical questions
|
||||
2. **Don't be defensive** if criticism comes — acknowledge legitimate issues, don't argue
|
||||
3. **Don't solicit upvotes** — your submission already carries one automatic vote; asking colleagues to upvote trips HN's voting-ring detection and penalizes the post
|
||||
4. **If it hits the front page:** brace for volume — keep at least one team member monitoring
|
||||
|
||||
---
|
||||
|
||||
## Comment Templates for Common Questions
|
||||
|
||||
**"How is this different from Modal / Railway?"**
|
||||
> Modal and Railway run your code on their infrastructure. Molecule AI Remote Workspaces run on yours — you own the compute, the data stays on your machine. We're an orchestration layer, not an inference platform.
|
||||
|
||||
**"How is this different from Cursor / Copilot?"**
|
||||
> Cursor and Copilot are individual developer tools — one human, one AI. Molecule AI is an agent orchestration platform — multiple autonomous agents coordinating with each other. Remote Workspaces are about running *agents* that collaborate, not just one developer and one AI pairing.
|
||||
|
||||
**"Why would I want agents on my laptop?"**
|
||||
> Local iteration + debugging with your IDE, while the agent still participates in your org's task pipeline. Also useful for data-residency requirements — agent compute on your infrastructure while orchestration stays on the platform.
|
||||
|
||||
**"Is this production-ready?"**
|
||||
> Yes — Phase 30 is generally available. Remote Workspaces are in the same GA release as container workspaces.
|
||||
|
||||
---
|
||||
|
||||
## Alternate: "Ask HN"
|
||||
|
||||
If the team prefers an "Ask HN" format (more engagement, more questions):
|
||||
|
||||
**Title:** Ask HN — What would you build with a remote AI agent that runs on your own infrastructure?
|
||||
|
||||
**Body:** Short framing paragraph + question. This format tends to get high comment volume. Risk: less control over the narrative.
|
||||
|
||||
**Recommended format for launch:** Standard URL submission. More traffic, cleaner signal.
|
||||
|
||||
---
|
||||
|
||||
*Replace [NAME] with actual submitter name before posting.*
|
||||
@ -1,76 +0,0 @@
|
||||
# Phase 30 — Remote Workspaces: Landing Page Copy
|
||||
|
||||
> **Cycle:** Marketing work cycle — offline content prep
|
||||
> **Status:** Draft for Marketing Lead review
|
||||
> **Placement:** Primary CTA section and benefit blocks on the Phase 30 landing page
|
||||
|
||||
---
|
||||
|
||||
## Hero Section
|
||||
|
||||
**Headline:** Agents that run where you need them.
|
||||
|
||||
**Subheadline:** Remote Workspaces let your AI agents operate on any machine — your laptop, a data-center VM, a customer environment — while staying fully managed in Molecule AI Canvas.
|
||||
|
||||
---
|
||||
|
||||
## Primary CTA Block
|
||||
|
||||
**Button:** Start with Remote Workspaces →
|
||||
|
||||
**Microcopy below button:** No credit card. 5-minute setup. Runs on any machine with outbound HTTPS.
|
||||
|
||||
---
|
||||
|
||||
## Benefit Block 1 — "Your environment. Your agents."
|
||||
|
||||
**Headline:** Works where your data lives.
|
||||
|
||||
**Body:** Remote Workspaces run on-premises or in your own cloud account. No data leaves your infrastructure — the platform only coordinates the agent. Ideal for regulated environments, on-prem workloads, and data-locality requirements.
|
||||
|
||||
---
|
||||
|
||||
## Benefit Block 2 — "One Canvas. Every runtime."
|
||||
|
||||
**Headline:** Mixed fleets without compromise.
|
||||
|
||||
**Body:** Run container workspaces for ephemeral tasks and remote workspaces for persistent, environment-specific agents — all visible in the same Canvas. A2A coordination works across runtimes without code changes.
|
||||
|
||||
---
|
||||
|
||||
## Benefit Block 3 — "Enterprise controls, everywhere."
|
||||
|
||||
**Headline:** Same governance, any infrastructure.
|
||||
|
||||
**Body:** MCP plugin allowlists, org-scoped API keys, workspace audit logs, and session-tier access controls apply to remote workspaces identically. The remote runtime is a transport — not a separate security model.
|
||||
|
||||
---
|
||||
|
||||
## Social Proof / Trust Bar
|
||||
|
||||
*For [Company] and [Company], data residency isn't optional.*
|
||||
*Remote Workspaces keep agent compute on your infrastructure — while Canvas keeps you in control.*
|
||||
|
||||
---
|
||||
|
||||
## Bottom CTA
|
||||
|
||||
**Headline:** Ready to expand your fleet?
|
||||
|
||||
**Body:** Remote Workspaces are in GA. Self-serve setup in minutes. Talk to us if you need a custom enterprise deployment.
|
||||
|
||||
**Button:** Talk to Sales | View the Docs
|
||||
|
||||
---
|
||||
|
||||
## X/LinkedIn Short-form Variant
|
||||
|
||||
**Option A (technical audience):**
|
||||
> Your agents. Your infra. Remote Workspaces are GA — run AI agents on any machine while Canvas keeps them coordinated. MCP governance travels with the agent. → [docs link]
|
||||
|
||||
**Option B (buyer audience):**
|
||||
> Phase 30 is live: Remote Workspaces let your AI agents run where your data lives — on-prem, in your cloud, anywhere. One Canvas. Mixed fleet. Enterprise controls. → [link]
|
||||
|
||||
---
|
||||
|
||||
*Needs Marketing Lead review for brand voice consistency and competitive callouts.*
|
||||
@ -1,65 +0,0 @@
|
||||
# Phase 30 Demos — DevRel Package
|
||||
|
||||
Demo specs for two Phase 30-adjacent features requiring working demonstrations.
|
||||
|
||||
---
|
||||
|
||||
## Demo 1: #1172 — AGENTS.md Auto-Generation
|
||||
|
||||
**Issue:** `Molecule-AI/internal#1172`
|
||||
**PR:** `molecule-core#763`
|
||||
**Feature:** `workspace/agents_md.py` — auto-generates `AGENTS.md` at boot using the AAIF standard
|
||||
**Acceptance:** working demo + repo link + 1-min screencast
|
||||
|
||||
### Files
|
||||
| File | Description |
|
||||
|---|---|
|
||||
| `marketing/demos/agents-md-auto-generation/README.md` | Full working demo, API calls, screencast outline, TTS narration |
|
||||
| `marketing/demos/agents-md-auto-generation/narration.mp3` | 30s narration audio |
|
||||
|
||||
### Screencast (1 min)
|
||||
1. Canvas: pm-agent + researcher online
|
||||
2. Terminal: read PM's AGENTS.md via platform files API
|
||||
3. AGENTS.md output shown: role, A2A endpoint, tools
|
||||
4. Researcher sends A2A task to PM using discovered endpoint
|
||||
5. Canvas shows both active — close on "agents that can read each other"
|
||||
|
||||
### Repo link
|
||||
`workspace/agents_md.py` on `molecule-core` main
|
||||
Direct: `workspace/agents_md.py`
|
||||
|
||||
---
|
||||
|
||||
## Demo 2: #1173 — Cloudflare Artifacts Integration
|
||||
|
||||
**Issue:** `Molecule-AI/internal#1173`
|
||||
**PR:** `molecule-core#641`
|
||||
**Feature:** `POST/GET /workspaces/:id/artifacts`, fork, token endpoints — "Git for agents"
|
||||
**Acceptance:** workspace snapshot to/from CF Artifacts + 1-min screencast
|
||||
|
||||
### Files
|
||||
| File | Description |
|
||||
|---|---|
|
||||
| `marketing/demos/cloudflare-artifacts/README.md` | Full working demo, API calls, screencast outline, TTS narration |
|
||||
| `marketing/demos/cloudflare-artifacts/narration.mp3` | 30s narration audio |
|
||||
|
||||
### Screencast (1 min)
|
||||
1. Canvas: workspace online
|
||||
2. Terminal: `POST /workspaces/:id/artifacts` — repo created, remote URL returned
|
||||
3. Mint git credential, `git clone` with authenticated URL
|
||||
4. Write snapshot, `git push` — push succeeds
|
||||
5. Fork call: `POST /workspaces/:id/artifacts/fork` — new repo created
|
||||
6. Close on "versioned agent state, built into the platform"
|
||||
|
||||
### Repo link
|
||||
`workspace-server/internal/handlers/artifacts.go` on `molecule-core` main
|
||||
Direct: `workspace-server/internal/handlers/artifacts.go`
|
||||
|
||||
---
|
||||
|
||||
## Audio Assets
|
||||
|
||||
| File | Duration | Voice | Description |
|
||||
|---|---|---|---|
|
||||
| `agents-md-auto-generation/narration.mp3` | ~30s | en-US-AriaNeural | AGENTS.md auto-generation narration |
|
||||
| `cloudflare-artifacts/narration.mp3` | ~30s | en-US-AriaNeural | Cloudflare Artifacts narration |
|
||||
@ -1,178 +0,0 @@
|
||||
# AGENTS.md Auto-Generation — Working Demo
|
||||
|
||||
> **PR:** #763 — AGENTS.md auto-generation for Molecule AI workspaces
|
||||
> **What it ships:** `workspace/agents_md.py` — generates `AGENTS.md` at boot
|
||||
> **Acceptance criteria:** working demo + repo link + 1-min screencast
|
||||
|
||||
---
|
||||
|
||||
## What This Demo Shows
|
||||
|
||||
An AI agent (the "coordinator") reads another agent's `AGENTS.md` file to discover its identity, A2A endpoint, and toolset — without reading the full system prompt. This is the AAIF / Linux Foundation AGENTS.md standard in action.
|
||||
|
||||
**The flow:**
|
||||
1. A PM workspace starts up — `agents_md.py` auto-generates `AGENTS.md`
|
||||
2. A researcher workspace starts up — same process
|
||||
3. The researcher reads the PM's `AGENTS.md` to understand what tools it has and how to reach it
|
||||
4. The researcher dispatches a task to the PM via A2A using the discovered endpoint
|
||||
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Molecule AI platform running (`go run ./cmd/server` from `workspace-server/`)
|
||||
- Canvas open at `http://localhost:3000`
|
||||
- Two workspaces: one running as PM role, one as researcher
|
||||
- For the script demo: `python3` and `requests`
|
||||
|
||||
---
|
||||
|
||||
## Working Demo Script
|
||||
|
||||
### 1. Check the AGENTS.md file on a running workspace
|
||||
|
||||
On the PM workspace container:
|
||||
|
||||
```bash
|
||||
# Inside the PM workspace container
|
||||
cat /workspace/AGENTS.md
|
||||
```
|
||||
|
||||
Expected output:
|
||||
```markdown
|
||||
# pm-agent
|
||||
|
||||
**Role:** Project Manager
|
||||
|
||||
## Description
|
||||
PM agent — coordinates tasks, dispatches to reports, manages timeline.
|
||||
|
||||
## A2A Endpoint
|
||||
http://pm-workspace:8000/a2a
|
||||
|
||||
## MCP Tools
|
||||
- delegate_to_workspace
|
||||
- check_delegation_status
|
||||
- commit_memory
|
||||
- recall_memory
|
||||
```
|
||||
|
||||
The file was generated automatically at boot by `agents_md.py`. It reflects the workspace `config.yaml` in real time — any change to the role, description, or plugins is reflected on the next regeneration.
|
||||
|
||||
---
|
||||
|
||||
### 2. See the generation in the workspace logs
|
||||
|
||||
The workspace startup log includes:
|
||||
|
||||
```
|
||||
INFO agents_md: Generated AGENTS.md at /workspace/AGENTS.md for workspace 'pm-agent'
|
||||
```
|
||||
|
||||
This confirms `generate_agents_md()` ran as part of `main.py` startup.
|
||||
|
||||
---
|
||||
|
||||
### 3. See the regeneration on config change
|
||||
|
||||
If you edit `config.yaml` and call `generate_agents_md()` again:
|
||||
|
||||
```bash
|
||||
# On the PM workspace
|
||||
python3 -c "
|
||||
from agents_md import generate_agents_md
|
||||
generate_agents_md('/configs', '/workspace/AGENTS.md')
|
||||
print('Regenerated')
|
||||
"
|
||||
cat /workspace/AGENTS.md
|
||||
```
|
||||
|
||||
The file reflects the updated role or description immediately.
|
||||
|
||||
---
|
||||
|
||||
### 4. See a peer agent read the AGENTS.md (demo scenario)
|
||||
|
||||
This is the coordination moment — the scenario from issue #1172.
|
||||
|
||||
```python
|
||||
# Researcher workspace: read PM's AGENTS.md via the platform files API
|
||||
|
||||
import requests, base64
|
||||
|
||||
PLATFORM_URL = "http://localhost:8080"
|
||||
WORKSPACE_TOKEN = "researcher-workspace-token"
|
||||
|
||||
# Get the PM workspace ID (known from canvas or registry)
|
||||
# For this demo: PM workspace ID = ws-pm-123
|
||||
|
||||
# Read PM's AGENTS.md via the platform's file API
|
||||
resp = requests.get(
|
||||
f"{PLATFORM_URL}/workspaces/ws-pm-123/files/AGENTS.md",
|
||||
headers={"Authorization": f"Bearer {WORKSPACE_TOKEN}"},
|
||||
)
|
||||
print(resp.json()["content"])
|
||||
```
|
||||
|
||||
Parses the PM's `AGENTS.md`:
|
||||
```markdown
|
||||
# pm-agent
|
||||
|
||||
**Role:** Project Manager
|
||||
|
||||
## Description
|
||||
PM agent — coordinates tasks, dispatches to reports, manages timeline.
|
||||
|
||||
## A2A Endpoint
|
||||
http://pm-workspace:8000/a2a
|
||||
|
||||
## MCP Tools
|
||||
- delegate_to_workspace
|
||||
- check_delegation_status
|
||||
```
|
||||
|
||||
Now the researcher knows:
|
||||
- PM's role is "Project Manager" → it dispatches, not executes
|
||||
- PM's A2A endpoint → where to send coordination requests
|
||||
- PM has `delegate_to_workspace` tool → it can cascade tasks to reports
|
||||
|
||||
The researcher then uses this to coordinate: sends a status report to the PM, knowing the PM will route it up or dispatch a follow-up task.
|
||||
|
||||
---
|
||||
|
||||
## Screencast Outline (1 min)
|
||||
|
||||
**0:00–0:10** Canvas shows two workspaces online — pm-agent and researcher. Researcher node shows current task: "idle".
|
||||
|
||||
**0:10–0:25** Terminal on researcher workspace: `curl` or Python script reads PM's `AGENTS.md` via the platform files API. Output shows the PM's role, A2A endpoint, and tools.
|
||||
|
||||
**0:25–0:40** Researcher sends an A2A task to the PM: "Status: data pipeline complete, ready for review." PM receives it in its canvas chat.
|
||||
|
||||
**0:40–0:55** PM's `AGENTS.md` is shown briefly in the researcher terminal — the researcher used it to understand PM's capabilities before sending the task.
|
||||
|
||||
**0:55–1:00** Canvas shows both workspaces active. Narration: *"AGENTS.md means every agent knows what its peers can do — without reading system prompts."*
|
||||
|
||||
---
|
||||
|
||||
## Code Reference
|
||||
|
||||
| File | What it does |
|
||||
|---|---|
|
||||
| `workspace/agents_md.py` | `generate_agents_md()` — reads `config.yaml`, writes `AGENTS.md` |
|
||||
| `workspace/main.py` | Calls `generate_agents_md()` at startup |
|
||||
| `config.py` | `load_config()` — reads `config.yaml` |
|
||||
|
||||
**Source:** `workspace/agents_md.py` (PR #763)
|
||||
|
||||
```python
|
||||
from agents_md import generate_agents_md
|
||||
|
||||
# Called automatically at startup; can be called again on config change
|
||||
generate_agents_md(config_dir="/configs", output_path="/workspace/AGENTS.md")
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## TTS Narration Script (30s)
|
||||
|
||||
> When a PM agent starts up in Molecule AI, it generates an AGENTS.md file automatically — not manually written, not kept in sync by hand. It reflects the workspace config in real time. Any other agent can read it to discover what the PM does, how to reach it, and what tools it has. No system prompts, no guessing. Just the facts. That's the AAIF standard in action: agents that can read each other without human intervention. AGENTS.md auto-generation, from the Molecule AI workspace runtime.
|
||||
Binary file not shown.
@ -1,163 +0,0 @@
|
||||
# Screencast Storyboard — AGENTS.md Auto-Generation
|
||||
|
||||
> **PR:** #763 | **Feature:** `workspace/agents_md.py` | **Duration:** 60 seconds
|
||||
> **Format:** Terminal-led with Canvas overlay cuts
|
||||
|
||||
---
|
||||
|
||||
## Pre-roll (0:00–0:03)
|
||||
|
||||
**Canvas — full screen**
|
||||
Two workspace cards in Canvas: `pm-agent [ONLINE]` and `researcher [IDLE]`.
|
||||
|
||||
Narration (VO, 0:00–0:03):
|
||||
> "Two agents. The PM coordinates. The researcher does the work. They need to talk to each other — without humans in the loop."
|
||||
|
||||
**Camera:** Static Canvas view. No cursor movement. Clean frame.
|
||||
|
||||
---
|
||||
|
||||
## Moment 1 — PM boots, AGENTS.md generated (0:03–0:12)
|
||||
|
||||
**Cut to:** Terminal window, terminal prompt: `agent@pm-workspace:~$`
|
||||
|
||||
```bash
|
||||
# Simulate the workspace startup — truncated log
|
||||
INFO main: Starting workspace pm-agent
|
||||
INFO agents_md: Generating AGENTS.md for workspace 'pm-agent'
|
||||
INFO agents_md: Generated AGENTS.md at /workspace/AGENTS.md
|
||||
INFO a2a: A2A server listening on :8000
|
||||
INFO main: Workspace 'pm-agent' online
|
||||
```
|
||||
|
||||
**Camera:** Type-in animation. Cursor blinks. Text appears line by line (simulate with playback speed 2x).
|
||||
|
||||
Narration (0:06–0:12):
|
||||
> "When the PM workspace starts up, AGENTS.md is generated automatically — from the config file, not a human."
|
||||
|
||||
**Highlight:** `INFO agents_md: Generated AGENTS.md at /workspace/AGENTS.md` — brief yellow highlight ring (1s).
|
||||
|
||||
---
|
||||
|
||||
## Moment 2 — Researcher reads PM's AGENTS.md (0:12–0:25)
|
||||
|
||||
**Cut to:** Second terminal tab. Prompt: `agent@researcher:~$`
|
||||
|
||||
```python
|
||||
import requests
|
||||
|
||||
resp = requests.get(
|
||||
"https://acme.moleculesai.app/workspaces/ws-pm-123/files/AGENTS.md",
|
||||
headers={"Authorization": "Bearer researcher-token-xxx"},
|
||||
)
|
||||
print(resp.json()["content"])
|
||||
```
|
||||
|
||||
**Cut to:** Terminal output (scroll):
|
||||
|
||||
```
|
||||
# pm-agent
|
||||
|
||||
**Role:** Project Manager
|
||||
|
||||
## Description
|
||||
PM agent — coordinates tasks, dispatches to reports, manages timeline.
|
||||
|
||||
## A2A Endpoint
|
||||
http://pm-workspace:8000/a2a
|
||||
|
||||
## MCP Tools
|
||||
- delegate_to_workspace
|
||||
- check_delegation_status
|
||||
```
|
||||
|
||||
**Camera:** Scroll to show the full file. Hold 2s.
|
||||
|
||||
Narration (0:14–0:22):
|
||||
> "The researcher reads the PM's AGENTS.md — through the platform API. Instantly knows the PM's role, its A2A endpoint, and the tools it has."
|
||||
|
||||
**Highlight:** `A2A Endpoint` and `MCP Tools` lines — brief underline pulse.
|
||||
|
||||
**Callout text appears bottom-left:**
|
||||
`No system prompts. No documentation lookup. Just the facts.`
|
||||
|
||||
---
|
||||
|
||||
## Moment 3 — Researcher dispatches A2A task (0:25–0:42)
|
||||
|
||||
**Terminal continues:**
|
||||
|
||||
```python
|
||||
from a2a import A2ATask
|
||||
|
||||
task = A2ATask(
|
||||
to="http://pm-workspace:8000/a2a",
|
||||
type="status_report",
|
||||
payload={
|
||||
"milestone": "data-pipeline",
|
||||
"status": "complete",
|
||||
"artifacts": ["dataset-v3.parquet"],
|
||||
}
|
||||
)
|
||||
result = task.send()
|
||||
print(result)
|
||||
```
|
||||
|
||||
**Terminal output:**
|
||||
|
||||
```
|
||||
{"task_id": "task-abc-456", "status": "queued", "pm_receipt": "2026-04-21T00:00:22Z"}
|
||||
```
|
||||
|
||||
**Camera:** Type-in animation. Brief hold on result JSON.
|
||||
|
||||
Narration (0:27–0:35):
|
||||
> "Now the researcher has everything it needs. It sends an A2A task to the PM — using the endpoint it discovered from AGENTS.md. No hardcoded addresses."
|
||||
|
||||
---
|
||||
|
||||
## Moment 4 — PM receives task (0:42–0:52)
|
||||
|
||||
**Cut to:** Canvas — pm-agent card.
|
||||
|
||||
New message bubble appears in pm-agent's canvas chat:
|
||||
`researcher: Status report — data-pipeline complete. 1 artifact ready.`
|
||||
|
||||
Below the message, status indicator changes: `pm-agent [ACTIVE]`
|
||||
|
||||
Researcher card shows: `researcher [DISPATCHED]`
|
||||
|
||||
Narration (0:42–0:48):
|
||||
> "The PM receives it in Canvas. Status updated. The coordination happened without human input — AAIF in action."
|
||||
|
||||
---
|
||||
|
||||
## Close (0:52–1:00)
|
||||
|
||||
**Canvas — full frame.** Both cards visible. `pm-agent [ACTIVE]` + `researcher [DISPATCHED]`.
|
||||
|
||||
Narration (0:52–0:58):
|
||||
> "AGENTS.md means every agent knows what its peers can do — without reading system prompts. Auto-generated. Always current. That's the AAIF standard, from Molecule AI."
|
||||
|
||||
**End card:**
|
||||
|
||||
```
|
||||
AGENTS.md Auto-Generation
|
||||
workspace/agents_md.py — molecule-core#763
|
||||
```
|
||||
|
||||
**Fade to black.**
|
||||
|
||||
---
|
||||
|
||||
## Production Notes
|
||||
|
||||
- **Terminal theme:** Dark, monospace, minimal chrome. Use `ITerm2` profile "Molecule Dark" or equivalent.
|
||||
- **Font:** SF Mono 14pt or JetBrains Mono 13pt.
|
||||
- **Canvas cutaways:** Use the dev canvas at `localhost:3000` with two workspaces in active states. Pre-record these moments.
|
||||
- **Camera:** Screenflow or Camtasia for macOS. Record at 1440×900, export at 1080p.
|
||||
- **VO recording:** Record after final edit is locked. Use `en-US-AriaNeural` as reference voice for timing.
|
||||
- **Narration pacing:** Read the script against the timeline before locking the VO session.
|
||||
- **Music:** No music — keep it clean and technical. Consider a subtle 2s click sound at 0:03 (boot log) to anchor the start.
|
||||
- **Highlights:** Use a yellow/amber ring `#E8A000` with 1s fade-in/out for callouts.
|
||||
- **End card:** Centered, white text on dark background. 1080p canvas.
|
||||
@ -1,199 +0,0 @@
|
||||
# Cloudflare Artifacts — Working Demo
|
||||
|
||||
> **PR:** #641 — Cloudflare Artifacts demo integration
|
||||
> **What it ships:** `POST/GET /workspaces/:id/artifacts`, `POST /workspaces/:id/artifacts/fork`, `POST /workspaces/:id/artifacts/token`
|
||||
> **Concept:** "Git for agents" — versioned workspace snapshot storage
|
||||
> **Acceptance criteria:** working demo showing workspace snapshot to/from Cloudflare Artifacts + 1-min screencast
|
||||
|
||||
---
|
||||
|
||||
## What This Demo Shows
|
||||
|
||||
A workspace links to a Cloudflare Artifacts git repo. The agent can push snapshots (git commits) and later fork the repo to bootstrap a new workspace. This is versioned workspace state — like `git init` for agent memory.
|
||||
|
||||
**The flow:**
|
||||
1. Attach a CF Artifacts repo to a workspace (or import an existing Git repo)
|
||||
2. Mint a short-lived git credential via the platform
|
||||
3. Agent clones the repo, writes a snapshot, pushes
|
||||
4. Fork the repo to bootstrap a new workspace
|
||||
|
||||
---
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Molecule AI platform with `CF_ARTIFACTS_API_TOKEN` and `CF_ARTIFACTS_NAMESPACE` set
|
||||
- A running workspace with a bearer token
|
||||
- `git` and `curl` on the caller machine
|
||||
|
||||
---
|
||||
|
||||
## Working Demo Script
|
||||
|
||||
### 1. Attach / create a CF Artifacts repo to a workspace
|
||||
|
||||
```bash
|
||||
# Admin token or workspace token
|
||||
WORKSPACE_ID=ws-abc123
|
||||
PLATFORM=https://acme.moleculesai.app
|
||||
TOKEN=your-workspace-or-admin-token
|
||||
|
||||
# Create (or import) the repo
|
||||
curl -s -X POST "$PLATFORM/workspaces/$WORKSPACE_ID/artifacts" \
|
||||
-H "Authorization: Bearer $TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "my-workspace-snapshots",
|
||||
"description": "Versioned snapshots of workspace state"
|
||||
}' | jq
|
||||
```
|
||||
|
||||
Response (201):
|
||||
```json
|
||||
{
|
||||
"id": "art-uuid-456",
|
||||
"workspace_id": "ws-abc123",
|
||||
"cf_repo_name": "my-workspace-snapshots",
|
||||
"cf_namespace": "my-namespace",
|
||||
"remote_url": "https://hash.artifacts.cloudflare.net/git/my-workspace-snapshots.git",
|
||||
"description": "Versioned snapshots of workspace state",
|
||||
"created_at": "2026-04-20T12:00:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
The repo was created in Cloudflare Artifacts and linked to the workspace. No separate CF dashboard login needed.
|
||||
|
||||
---
|
||||
|
||||
### 2. Import an existing GitHub repo instead
|
||||
|
||||
```bash
|
||||
curl -s -X POST "$PLATFORM/workspaces/$WORKSPACE_ID/artifacts" \
|
||||
-H "Authorization: Bearer $TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "researcher-agent",
|
||||
"description": "Researcher agent workspace",
|
||||
"import_url": "https://github.com/myorg/researcher-agent.git",
|
||||
"import_branch": "main",
|
||||
"import_depth": 1
|
||||
}' | jq
|
||||
```
|
||||
|
||||
The platform calls the CF Artifacts API to import the GitHub repo. The workspace now has a full git history of the agent's work.
|
||||
|
||||
---
|
||||
|
||||
### 3. Mint a git credential
|
||||
|
||||
```bash
|
||||
curl -s -X POST "$PLATFORM/workspaces/$WORKSPACE_ID/artifacts/token" \
|
||||
-H "Authorization: Bearer $TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"scope": "write", "ttl": 3600}' | jq
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"token": "cf_at_xxxxx...xxxx",
|
||||
"scope": "write",
|
||||
"expires_at": "2026-04-20T13:00:00Z",
|
||||
"clone_url": "https://x:cf_at_xxxxx...xxxx@artifacts.cloudflare.net/git/my-workspace-snapshots.git",
|
||||
"message": "Save this token — it cannot be retrieved again."
|
||||
}
|
||||
```
|
||||
|
||||
The `clone_url` is the authenticated git remote. Use it directly:
|
||||
|
||||
```bash
|
||||
git clone https://x:cf_at_xxxxx@artifacts.cloudflare.net/git/my-workspace-snapshots.git
|
||||
```
|
||||
|
||||
The token is scoped to this workspace's repo only. It expires in 1 hour (configurable up to 7 days).
|
||||
|
||||
---
|
||||
|
||||
### 4. Clone, snapshot, push
|
||||
|
||||
```bash
|
||||
# Clone the workspace repo
|
||||
git clone "https://x:cf_at_xxxxx@artifacts.cloudflare.net/git/my-workspace-snapshots.git" \
|
||||
/tmp/workspace-snapshots
|
||||
|
||||
cd /tmp/workspace-snapshots
|
||||
|
||||
# Agent writes a snapshot: memory dump, active task state, config
|
||||
echo "current_task: researching competitor X" > snapshot.md
|
||||
echo "uptime_seconds: 3600" >> snapshot.md
|
||||
echo "memory_summary: analyzed 12 sources, 3 key findings" >> snapshot.md
|
||||
|
||||
git add snapshot.md
|
||||
git commit -m "snapshot: researching competitor X — 3 findings ready"
|
||||
git push origin main
|
||||
```
|
||||
|
||||
The workspace state is now in Cloudflare Artifacts — versioned, accessible to other workspaces, recoverable.
|
||||
|
||||
---
|
||||
|
||||
### 5. Fork the repo for a new workspace
|
||||
|
||||
```bash
|
||||
# Researcher wants to start from the PM's workspace snapshot
|
||||
curl -s -X POST "$PLATFORM/workspaces/ws-pm-123/artifacts/fork" \
|
||||
-H "Authorization: Bearer $TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "researcher-from-pm",
|
||||
"description": "Forked from pm-agent workspace",
|
||||
"default_branch_only": true
|
||||
}' | jq
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"fork": {
|
||||
"name": "researcher-from-pm",
|
||||
"namespace": "my-namespace",
|
||||
"remote_url": "https://hash2.artifacts.cloudflare.net/git/researcher-from-pm.git"
|
||||
},
|
||||
"object_count": 47,
|
||||
"remote_url": "https://hash2.artifacts.cloudflare.net/git/researcher-from-pm.git"
|
||||
}
|
||||
```
|
||||
|
||||
The forked repo is a separate Cloudflare Artifacts repository with the full snapshot history. A new workspace can clone it and pick up where the PM left off.
|
||||
|
||||
---
|
||||
|
||||
## Screencast Outline (1 min)
|
||||
|
||||
**0:00–0:10** Canvas: a workspace is online. Terminal: `curl POST /workspaces/:id/artifacts` — repo created, response shows CF Artifacts remote URL.
|
||||
|
||||
**0:10–0:25** Terminal: mint a git credential. `clone_url` shown in response. `git clone` runs, repo clones in <5s.
|
||||
|
||||
**0:25–0:40** Agent writes a workspace snapshot to the repo. `echo` → `git add` → `git commit` → `git push`. Output shows the push succeeded.
|
||||
|
||||
**0:40–0:55** Canvas: fork call. `POST /workspaces/:id/artifacts/fork` → new repo created in CF Artifacts. The new repo's remote URL and object count are returned.
|
||||
|
||||
**0:55–1:00** Narration: *"Every workspace can have its own git history. Snapshot state, version it, fork it into a new agent. Git for agents, built into the platform."*
|
||||
|
||||
---
|
||||
|
||||
## TTS Narration Script (30s)
|
||||
|
||||
> Cloudflare Artifacts turns your Molecule AI workspace into a versioned git repository. Attach a repo, mint a short-lived credential, and the agent can push snapshots — memory dumps, task state, config — and other agents can fork the history to bootstrap from the same point. No external git service configuration. No separate dashboard. The platform manages the credential lifecycle and the repo link. Versioned agent state, built into the platform. That's the first-mover advantage: Git for agents, from Molecule AI.
|
||||
|
||||
---
|
||||
|
||||
## API Reference
|
||||
|
||||
| Method | Path | What |
|
||||
|---|---|---|
|
||||
| `POST` | `/workspaces/:id/artifacts` | Attach/create CF Artifacts repo |
|
||||
| `GET` | `/workspaces/:id/artifacts` | Get linked repo info |
|
||||
| `POST` | `/workspaces/:id/artifacts/fork` | Fork repo to new workspace |
|
||||
| `POST` | `/workspaces/:id/artifacts/token` | Mint short-lived git credential |
|
||||
|
||||
**Source:** `workspace-server/internal/handlers/artifacts.go` (PR #641)
|
||||
Binary file not shown.
@ -1,201 +0,0 @@
|
||||
# Screencast Storyboard — Cloudflare Artifacts Integration
|
||||
|
||||
> **PR:** #641 | **Feature:** `POST/GET /workspaces/:id/artifacts`, `/artifacts/fork`, `/artifacts/token` | **Duration:** 60 seconds
|
||||
> **Format:** Terminal-led, clean dark theme
|
||||
|
||||
---
|
||||
|
||||
## Pre-roll (0:00–0:04)
|
||||
|
||||
**Canvas — full screen**
|
||||
Single workspace card in Canvas: `data-agent [ONLINE]`. Status: `idle`.
|
||||
|
||||
Narration (0:00–0:04):
|
||||
> "This data-agent has been running for three hours. It has context, task state, memory. What happens when it disconnects?"
|
||||
|
||||
**Camera:** Static Canvas frame. 3-second hold. No cursor.
|
||||
|
||||
---
|
||||
|
||||
## Moment 1 — Attach a CF Artifacts repo (0:04–0:16)
|
||||
|
||||
**Cut to:** Terminal window, dark theme.
|
||||
|
||||
Prompt: `agent@data-agent:~$`
|
||||
|
||||
```bash
|
||||
WORKSPACE_ID="ws-data-agent-001"
|
||||
PLATFORM="https://acme.moleculesai.app"
|
||||
TOKEN="Bearer ws-token-xxx"
|
||||
|
||||
curl -s -X POST "$PLATFORM/workspaces/$WORKSPACE_ID/artifacts" \
|
||||
-H "Authorization: $TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "data-agent-snapshots",
|
||||
"description": "Versioned snapshots of data-agent workspace"
|
||||
}' | jq
|
||||
```
|
||||
|
||||
**Terminal output (JSON, formatted):**
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "art-uuid-789",
|
||||
"workspace_id": "ws-data-agent-001",
|
||||
"cf_repo_name": "data-agent-snapshots",
|
||||
"cf_namespace": "acme-production",
|
||||
"remote_url": "https://hash.artifacts.cloudflare.net/git/data-agent-snapshots.git",
|
||||
"created_at": "2026-04-21T00:00:10Z"
|
||||
}
|
||||
```
|
||||
|
||||
**Camera:** Cursor to `remote_url` field, highlight ring. Hold 1s.
|
||||
|
||||
Narration (0:06–0:14):
|
||||
> "One API call attaches a Cloudflare Artifacts git repo to the workspace. A remote URL is returned — no CF dashboard required."
|
||||
|
||||
**Callout text (bottom-left):**
|
||||
`Git for agents. No separate setup.`
|
||||
|
||||
---
|
||||
|
||||
## Moment 2 — Mint a credential, clone the repo (0:16–0:28)
|
||||
|
||||
**Terminal continues:**
|
||||
|
||||
```bash
|
||||
# Mint a short-lived git credential
|
||||
TOKEN_RESP=$(curl -s -X POST "$PLATFORM/workspaces/$WORKSPACE_ID/artifacts/token" \
|
||||
-H "Authorization: $TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"scope": "write", "ttl": 3600}')
|
||||
|
||||
CLONE_URL=$(echo "$TOKEN_RESP" | jq -r '.clone_url')
|
||||
|
||||
# Clone the workspace repo
|
||||
git clone "$CLONE_URL" /tmp/data-agent-snapshots
|
||||
```
|
||||
|
||||
**Terminal output:**
|
||||
|
||||
```
|
||||
Cloning into '/tmp/data-agent-snapshots'...
|
||||
remote: Enumerating objects: 12, done.
|
||||
remote: Counting objects: 100% (12/12), done.
|
||||
Receiving objects: 100% (12/12), 12.00 KiB, done.
|
||||
```
|
||||
|
||||
**Camera:** Scroll through git clone output. Brief hold on `Receiving objects: 100%`. Clean finish.
|
||||
|
||||
Narration (0:18–0:26):
|
||||
> "A short-lived git credential is minted — valid for one hour. The agent clones the repo. Cloudflare Artifacts handles the git transport."
|
||||
|
||||
---
|
||||
|
||||
## Moment 3 — Agent writes a snapshot (0:28–0:44)
|
||||
|
||||
**Terminal continues:**
|
||||
|
||||
```bash
|
||||
cd /tmp/data-agent-snapshots
|
||||
|
||||
# Agent writes its state to the repo
|
||||
echo "# Workspace State — 2026-04-21" > snapshot.md
|
||||
echo "current_task: analyzing sales pipeline Q1" >> snapshot.md
|
||||
echo "data_sources_analyzed: 8" >> snapshot.md
|
||||
echo "key_findings: [revenue-drop-may, churn-signal-3pc, upsell-opportunity]" >> snapshot.md
|
||||
echo "uptime_seconds: 10800" >> snapshot.md
|
||||
echo "last_status: COMPLETE" >> snapshot.md
|
||||
|
||||
git add snapshot.md
|
||||
git commit -m "snapshot: pipeline analysis complete — 3 key findings"
|
||||
git push origin main
|
||||
```
|
||||
|
||||
**Terminal output:**
|
||||
|
||||
```
|
||||
[main abc1234] snapshot: pipeline analysis complete — 3 key findings
|
||||
 1 file changed, 6 insertions(+)
|
||||
Counting objects: 100% (3/3), done.
|
||||
Writing objects: 100% (3/3), done.
|
||||
remote: success
|
||||
```
|
||||
|
||||
**Camera:** Full commit → push sequence. Hold on `remote: success`. Green checkmark indicator.
|
||||
|
||||
Narration (0:30–0:40):
|
||||
> "The agent writes a snapshot — current task, data sources, key findings — commits and pushes. The state is now in Cloudflare Artifacts. Versioned. Recoverable."
|
||||
|
||||
**Callout text:**
|
||||
`Versioned agent state — every push is a checkpoint.`
|
||||
|
||||
---
|
||||
|
||||
## Moment 4 — Fork the repo for a new workspace (0:44–0:54)
|
||||
|
||||
**Terminal:**
|
||||
|
||||
```bash
|
||||
# A new researcher workspace forks the data-agent's repo
|
||||
curl -s -X POST "$PLATFORM/workspaces/$WORKSPACE_ID/artifacts/fork" \
|
||||
-H "Authorization: $TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "researcher-from-data-agent",
|
||||
"description": "Forked from data-agent workspace",
|
||||
"default_branch_only": true
|
||||
}' | jq
|
||||
```
|
||||
|
||||
**Terminal output:**
|
||||
|
||||
```json
|
||||
{
|
||||
"fork": {
|
||||
"name": "researcher-from-data-agent",
|
||||
"namespace": "acme-production",
|
||||
"remote_url": "https://hash2.artifacts.cloudflare.net/git/researcher-from-data-agent.git"
|
||||
},
|
||||
"object_count": 47,
|
||||
"remote_url": "https://hash2.artifacts.cloudflare.net/git/researcher-from-data-agent.git"
|
||||
}
|
||||
```
|
||||
|
||||
**Camera:** Highlight the `remote_url` and `object_count` fields. Hold 2s.
|
||||
|
||||
Narration (0:45–0:52):
|
||||
> "Another agent forks the repo — a separate, isolated copy. 47 objects transferred. The new workspace can clone it and continue from the same point."
|
||||
|
||||
---
|
||||
|
||||
## Close (0:54–1:00)
|
||||
|
||||
**Terminal clean frame.** Cursor at prompt.
|
||||
|
||||
Narration (0:54–0:58):
|
||||
> "Every workspace can have its own git history. Snapshot state, version it, fork it into a new agent. Git for agents, built into the platform."
|
||||
|
||||
**End card:**
|
||||
|
||||
```
|
||||
Cloudflare Artifacts Integration
|
||||
workspace-server/internal/handlers/artifacts.go — molecule-core#641
|
||||
```
|
||||
|
||||
**Fade to black.**
|
||||
|
||||
---
|
||||
|
||||
## Production Notes
|
||||
|
||||
- **Terminal theme:** Same as AGENTS.md storyboard — dark, SF Mono / JetBrains Mono 14pt.
|
||||
- **Canvas cutaway (pre-roll + close):** Use dev canvas with one workspace in active state. Pre-record before the session.
|
||||
- **Camera:** Screenflow / Camtasia. 1440×900 record → 1080p export.
|
||||
- **Callout text:** Amber ring `#E8A000`, 1s fade-in/out, positioned bottom-left at 90% opacity on semi-transparent dark background.
|
||||
- **Green success indicator:** On the `git push` moment, use a green ring pulse (`#22C55E`) for the `remote: success` line — 1.5s hold.
|
||||
- **JSON jq output:** Use `jq` with the `-C` (`--color-output`) flag or `--monochrome-output` to keep it clean and readable in dark theme.
|
||||
- **VO recording:** Match VO session with AGENTS.md storyboard — use the same voice talent and consistent pacing.
|
||||
- **Music:** No music. Consider a subtle single-tone click at 0:04 (repo attached) and 0:54 (end card) for visual rhythm.
|
||||
- **Speed:** The curl/git clone/push sequence should run at 2x playback in moments 1–4 for pacing. VO rides over the cuts.
|
||||
@ -1,178 +0,0 @@
|
||||
# Phase 30 Launch — DevRel Asset Inventory
|
||||
|
||||
> **Cycle:** Marketing work cycle — offline asset tracking
|
||||
> **Status:** Master list, update as content ships
|
||||
> **Branch:** `content/blog/memory-backup-restore` (9 commits ahead of main; push blocked on GH_TOKEN)
|
||||
|
||||
Use this as the source of truth for what DevRel has produced this cycle, what's pending review, what's staged, and what's blocked.
|
||||
|
||||
---
|
||||
|
||||
## How to Read This Doc
|
||||
|
||||
- **✅ LIVE** — published to docs site or social channels
|
||||
- **🔍 REVIEW** — written, needs eyes from Marketing Lead / Doc Specialist / Support
|
||||
- **📦 STAGED** — committed to `content/blog/memory-backup-restore`, ready to push
|
||||
- **🔒 BLOCKED** — requires action (GH_TOKEN refresh, design team screenshot, PMM asset)
|
||||
|
||||
---
|
||||
|
||||
## Blog Posts
|
||||
|
||||
| File | Status | Owner | Needs |
|
||||
|---|---|---|---|
|
||||
| `docs/blog/2026-04-20-remote-workspaces/index.md` | 📦 STAGED | DevRel | Marketing Lead final read |
|
||||
| `docs/blog/2026-04-20-chrome-devtools-mcp/index.md` | 📦 STAGED | DevRel | Technical accuracy check |
|
||||
| `docs/blog/2026-04-20-container-vs-remote/index.md` | 📦 STAGED | DevRel | Marketing Lead voice review |
|
||||
| `docs/blog/2026-04-20-secure-by-design/index.md` | 📦 STAGED | DevRel | Security Lead accuracy review |
|
||||
| `docs/blog/2026-04-17-deploy-anywhere/index.md` | ✅ pre-existing | — | — |
|
||||
|
||||
---
|
||||
|
||||
## Docs & Guides
|
||||
|
||||
| File | Status | Owner | Needs |
|
||||
|---|---|---|---|
|
||||
| `docs/guides/remote-workspaces.md` | 📦 STAGED | DevRel | Doc Specialist final review |
|
||||
| `docs/guides/same-origin-canvas-fetches.md` | 📦 STAGED | DevRel | Security Lead sign-off on `/cp/*` allowlist section |
|
||||
| `docs/guides/remote-workspaces-faq.md` | 📦 STAGED | DevRel | Marketing Lead (voice), Doc Specialist (technical), Support (troubleshooting) |
|
||||
| `docs/marketing/seo/keywords.md` | 🔍 REVIEW | SEO Analyst | SEO Analyst to surface and publish |
|
||||
|
||||
---
|
||||
|
||||
## Marketing / Social Copy
|
||||
|
||||
| File | Status | Owner | Needs |
|
||||
|---|---|---|---|
|
||||
| `marketing/devrel/phase30-social-copy.md` | 📦 STAGED | DevRel | PMM or CM to schedule posts (X all 4 versions, LinkedIn) |
|
||||
| `marketing/devrel/chrome-devtools-mcp-social-copy.md` | 📦 STAGED | DevRel | CM to schedule alongside blog post |
|
||||
| `marketing/copy/phase30-landing-copy.md` | 📦 STAGED | DevRel | Marketing Lead brand voice review |
|
||||
|
||||
---
|
||||
|
||||
## Demos — Working Demos + Screencasts
|
||||
|
||||
### Demo 1: AGENTS.md Auto-Generation (#1172, PR #763)
|
||||
|
||||
| Asset | Status | Notes |
|
||||
|---|---|---|
|
||||
| `marketing/demos/agents-md-auto-generation/README.md` | 📦 STAGED | 4 scenario working demo + 1-min screencast outline + TTS script |
|
||||
| `marketing/demos/agents-md-auto-generation/storyboard.md` | 📦 STAGED | Full production storyboard (camera, VO pacing, highlights, 4 moments) |
|
||||
| `marketing/demos/agents-md-auto-generation/narration.mp3` | 📦 STAGED | 30s TTS (en-US-AriaNeural) |
|
||||
| Repo link | 📦 STAGED | `workspace/agents_md.py` on `molecule-core` main |
|
||||
| **GitHub issue comment** | 🔒 BLOCKED | `comment-1172.json` staged; `post-issue-comments.sh` ready; GH_TOKEN must refresh |
|
||||
| ASSET: Canvas screenshot (pm-agent + researcher) | 🔒 BLOCKED | Design team needs live canvas + ngrok access |
|
||||
|
||||
### Demo 2: Cloudflare Artifacts (#1173, PR #641)
|
||||
|
||||
| Asset | Status | Notes |
|
||||
|---|---|---|
|
||||
| `marketing/demos/cloudflare-artifacts/README.md` | 📦 STAGED | 5 scenario working demo + 1-min screencast outline + TTS script |
|
||||
| `marketing/demos/cloudflare-artifacts/storyboard.md` | 📦 STAGED | Full production storyboard (camera, VO pacing, green success pulse, 4 moments) |
|
||||
| `marketing/demos/cloudflare-artifacts/narration.mp3` | 📦 STAGED | 30s TTS (en-US-AriaNeural) |
|
||||
| Repo link | 📦 STAGED | `workspace-server/internal/handlers/artifacts.go` on `molecule-core` main |
|
||||
| **GitHub issue comment** | 🔒 BLOCKED | `comment-1173.json` staged; GH_TOKEN must refresh |
|
||||
|
||||
---
|
||||
|
||||
## Audio / Video Assets
|
||||
|
||||
| File | Duration | Voice | Status | Needs |
|
||||
|---|---|---|---|---|
|
||||
| `marketing/audio/phase30-announce.mp3` | ~30s | en-US-AriaNeural | 📦 STAGED | CM to pair with social copy |
|
||||
| `marketing/audio/phase30-video-vo.mp3` | ~67–75s | en-US-AriaNeural | 📦 STAGED | Video Editor to lock against timeline |
|
||||
| `marketing/audio/phase30-video-vo-mandarin.mp3` | ~70s | zh-CN-XiaoxiaoNeural | 📦 STAGED | PMM to confirm authoritative script |
|
||||
| `marketing/audio/chrome-devtools-mcp-summary.mp3` | ~77s | en-US-AriaNeural (+30%) | 📦 STAGED | Slightly over 65–75s target; trim 2s if needed |
|
||||
| `marketing/audio/quickstart-audio.mp3` | ~67–75s | en-US-AriaNeural | 📦 STAGED | CM to pair with quickstart guide |
|
||||
| `marketing/audio/phase30-video-vo-mandarin-script.txt` | 188 chars | — | 📦 STAGED | PMM to confirm path + authoritative script |
|
||||
|
||||
---
|
||||
|
||||
## Visual Assets
|
||||
|
||||
| File | Status | Notes |
|
||||
|---|---|---|
|
||||
| `marketing/assets/phase30-fleet-diagram.png` | 📦 STAGED | 126KB matplotlib; dark navy, purple REMOTE, blue platform; design notes in `phase30-fleet-diagram-notes.txt` |
|
||||
| ASSET: Canvas screenshot (remote badge) | 🔒 BLOCKED | Design team needs live canvas + ngrok |
|
||||
| ASSET: `phase30-canvas-remote-badge.png` | 🔒 BLOCKED | Same blocker as above |
|
||||
|
||||
---
|
||||
|
||||
## Launch Execution
|
||||
|
||||
| File | Status | Notes |
|
||||
|---|---|---|
|
||||
| `marketing/drip/post-push-checklist.md` | 📦 STAGED | 6-phase sequencing: push → PR → docs → social → email → verify |
|
||||
| `marketing/drip/phase30-email-drip.md` | 📦 STAGED | 3-email CRM sequence (Day 1/3–4/7) with placeholders |
|
||||
| `marketing/community/hacker-news-launch.md` | 📦 STAGED | HN guide, 3 title options, post body template, comment responses |
|
||||
| `marketing/community/community-announcements.md` | 📦 STAGED | Discord + Slack + Reddit copy, channel-by-channel |
|
||||
|
||||
## Sales Enablement
|
||||
|
||||
| File | Status | Notes |
|
||||
|---|---|---|
|
||||
| `marketing/sales/phase30-sales-enablement.md` | 📦 STAGED | 4 competitive battlecards, 5 objection handlers, 3-min demo script |
|
||||
| `marketing/sales/phase30-one-pager.md` | 📦 STAGED | 1-page PDF-ready asset with feature table, pricing, quick-start |
|
||||
|
||||
---
|
||||
|
||||
## Scripts & Helpers
|
||||
|
||||
| File | Status | Notes |
|
||||
|---|---|---|
|
||||
| `marketing/demos/post-issue-comments.sh` | 📦 STAGED | curl-based helper to post comments to #1172 + #1173 once GH_TOKEN refreshes |
|
||||
| `comment-1172.json` | 📦 STAGED | Raw JSON body for #1172 comment |
|
||||
| `comment-1173.json` | 📦 STAGED | Raw JSON body for #1173 comment |
|
||||
|
||||
---
|
||||
|
||||
## Pending Actions by Owner
|
||||
|
||||
### DevRel (this workspace)
|
||||
- [ ] None currently — all deliverables committed
|
||||
|
||||
### Marketing Lead
|
||||
- [ ] Review `docs/guides/remote-workspaces-faq.md` — voice + technical accuracy
|
||||
- [ ] Review `marketing/copy/phase30-landing-copy.md` — brand voice
|
||||
- [ ] Review `docs/blog/2026-04-20-remote-workspaces/index.md` — final read before publish
|
||||
- [ ] Post `phase30-social-copy.md` — schedule X posts (all 4 versions) + LinkedIn post
|
||||
- [ ] Post `chrome-devtools-mcp-social-copy.md` — schedule alongside blog post
|
||||
- [ ] Schedule 3-email drip sequence after blog post is live
|
||||
- [ ] Submit or assign Hacker News post (see `hacker-news-launch.md`)
|
||||
|
||||
### Community Manager
|
||||
- [ ] Schedule social copy posts (see Marketing Lead row)
|
||||
- [ ] Post community announcements per `community-announcements.md`
|
||||
|
||||
### Video Editor
|
||||
- [ ] Begin Phase 30 video assembly per `phase30-video-production.md`
|
||||
|
||||
### Sales / Solutions Engineering
|
||||
- [ ] Review `phase30-sales-enablement.md` — customize talk tracks to seller voice
|
||||
- [ ] Review `phase30-one-pager.md` — replace link placeholders before distributing
|
||||
|
||||
### PMM
|
||||
- [ ] Confirm authoritative path for `marketing/social/phase30-launch-plan.md` (currently confirmed missing from internal repo)
|
||||
- [ ] Confirm `phase30-video-vo-mandarin-script.txt` is the right script (188-char DevRel-authored placeholder)
|
||||
- [ ] Supply canvas screenshot (`phase30-canvas-remote-badge.png`) using live canvas + ngrok
|
||||
|
||||
### Design Team
|
||||
- [ ] Capture canvas screenshot showing REMOTE badge on workspace card
|
||||
- [ ] Refine `phase30-fleet-diagram.png` per `phase30-fleet-diagram-notes.txt` design checklist
|
||||
|
||||
### SEO Analyst
|
||||
- [ ] Surface and publish `docs/marketing/seo/keywords.md`
|
||||
|
||||
### Support
|
||||
- [ ] Review troubleshooting section of `docs/guides/remote-workspaces-faq.md`
|
||||
|
||||
### Security Lead
|
||||
- [ ] Review `/cp/*` allowlist section in `docs/guides/same-origin-canvas-fetches.md`
|
||||
- [ ] Review `docs/blog/2026-04-20-secure-by-design/index.md`
|
||||
|
||||
### CEO / Token Owner
|
||||
- [ ] **CRITICAL:** Refresh `GH_TOKEN` — all pushes and issue comments are blocked until this is done
|
||||
|
||||
---
|
||||
|
||||
*Maintained by DevRel. Update status columns as content ships or blockers clear.*
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 34 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 24 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 53 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 30 KiB |
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user