Merge remote-tracking branch 'origin/main' into trig-171

This commit is contained in:
Molecule AI Core Platform Lead 2026-05-09 21:48:43 +00:00
commit ca74a9c064
23 changed files with 156 additions and 50 deletions

View File

@ -51,7 +51,7 @@ name: E2E API Smoke Test
# * Pre-pull `alpine:latest` so the platform-server's provisioner
# (`internal/handlers/container_files.go`) can stand up its
# ephemeral token-write helper without a daemon.io round-trip.
# * Create `molecule-monorepo-net` bridge network if missing so the
# * Create `molecule-core-net` bridge network if missing so the
# provisioner's container.HostConfig {NetworkMode: ...} attach
# succeeds.
# Item #1 (timeouts) — evidence on recent runs (77/3191, ae/4270, 0e/
@ -163,12 +163,12 @@ jobs:
# when the image is already present.
docker pull alpine:latest >/dev/null
# Provisioner attaches workspace containers to
# molecule-monorepo-net (workspace-server/internal/provisioner/
# molecule-core-net (workspace-server/internal/provisioner/
# provisioner.go::DefaultNetwork). The bridge already exists on
# the operator host's docker daemon — `network create` is
# idempotent via `|| true`.
docker network create molecule-monorepo-net >/dev/null 2>&1 || true
echo "alpine:latest pre-pulled; molecule-monorepo-net ensured."
docker network create molecule-core-net >/dev/null 2>&1 || true
echo "alpine:latest pre-pulled; molecule-core-net ensured."
- name: Start Postgres (docker)
if: needs.detect-changes.outputs.api == 'true'
run: |

View File

@ -34,7 +34,7 @@ name: Handlers Postgres Integration
# So we sidestep `services:` entirely. The job container still uses
# host-net (inherited from runner config; required for cache server
# discovery on the bridge IP 172.18.0.17:42631). We launch a sibling
# postgres on the existing `molecule-monorepo-net` bridge with a
# postgres on the existing `molecule-core-net` bridge with a
# UNIQUE name per run — `pg-handlers-${RUN_ID}-${RUN_ATTEMPT}` — and
# read its bridge IP via `docker inspect`. A host-net job container
# can reach a bridge-net container directly via the bridge IP (verified
@ -44,7 +44,7 @@ name: Handlers Postgres Integration
# + No host-port collision; N parallel runs share the bridge cleanly
# + `if: always()` cleanup runs even on test-step failure
# - One more step in the workflow (+~3 lines)
# - Requires `molecule-monorepo-net` to exist on the operator host
# - Requires `molecule-core-net` to exist on the operator host
# (it does; declared in docker-compose.yml + docker-compose.infra.yml)
#
# Class B Hongming-owned CICD red sweep, 2026-05-08.
@ -96,7 +96,7 @@ jobs:
PG_NAME: pg-handlers-${{ github.run_id }}-${{ github.run_attempt }}
# Bridge network already exists on the operator host (declared
# in docker-compose.yml + docker-compose.infra.yml).
PG_NETWORK: molecule-monorepo-net
PG_NETWORK: molecule-core-net
defaults:
run:
working-directory: workspace-server

View File

@ -284,7 +284,7 @@ cp .env.example .env
./infra/scripts/setup.sh
# Boots Postgres (:5432), Redis (:6379), Langfuse (:3001),
# and Temporal (:7233 gRPC, :8233 UI) on the shared
# `molecule-monorepo-net` Docker network. Temporal runs with
# `molecule-core-net` Docker network. Temporal runs with
# no auth on localhost — dev-only; production must gate it.
#
# Also populates the template/plugin registry by cloning every repo

View File

@ -283,7 +283,7 @@ cp .env.example .env
./infra/scripts/setup.sh
# 启动 Postgres (:5432)、Redis (:6379)、Langfuse (:3001)
# 以及 Temporal (:7233 gRPC, :8233 UI),全部挂在共享的
# `molecule-monorepo-net` Docker 网络上。Temporal 默认无鉴权,
# `molecule-core-net` Docker 网络上。Temporal 默认无鉴权,
# 仅用于本地开发;生产环境必须加 mTLS / API Key。
#
# 同时会根据 manifest.json 拉取所有模板/插件仓库到

View File

@ -7,6 +7,22 @@ export default defineConfig({
test: {
environment: 'node',
exclude: ['e2e/**', 'node_modules/**', '**/dist/**'],
// Issue #22 / vitest pool investigation:
//
// The forks pool spawns one Node.js worker per concurrent slot.
// Each jsdom-environment worker bootstraps a full DOM (~30-50 MB resident
// set) at cold-start. With the default maxWorkers derived from CPU
// count, multiple jsdom workers can start simultaneously, exhausting
// memory on the 2-CPU Gitea Actions runner and causing pool workers to
// fail to respond with "[vitest-pool]: Timeout starting … runner."
//
// Fix: cap maxWorkers at 1 so only one worker is alive at any time.
// Tests still run in parallel within that single worker's process (via
// node's EventLoop) — this is the same parallelism as the `threads`
// pool but without the per-worker jsdom cold-start overhead. 51 test
// files that previously took 50–70 s with 5 failures now run
// sequentially through one worker, eliminating the memory spike.
maxWorkers: 1,
// CI-conditional test timeout (issue #96).
//
// Vitest's 5000ms default is too tight for the first test in any

View File

@ -119,7 +119,7 @@ services:
networks:
default:
name: molecule-monorepo-net
name: molecule-core-net
external: true
volumes:

View File

@ -1,3 +1,7 @@
# Include infra services (Temporal, Langfuse) so `docker compose up` starts the full stack.
include:
- docker-compose.infra.yml
services:
# --- Infrastructure ---
postgres:
@ -12,7 +16,7 @@ services:
volumes:
- pgdata:/var/lib/postgresql/data
networks:
- molecule-monorepo-net
- molecule-core-net
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-dev}"]
@ -40,7 +44,7 @@ services:
psql -h postgres -U "$${POSTGRES_USER}" -d postgres -c "CREATE DATABASE langfuse"
fi
networks:
- molecule-monorepo-net
- molecule-core-net
redis:
image: redis:7-alpine
@ -50,7 +54,7 @@ services:
volumes:
- redisdata:/data
networks:
- molecule-monorepo-net
- molecule-core-net
restart: unless-stopped
healthcheck:
test: ["CMD", "redis-cli", "ping"]
@ -68,7 +72,7 @@ services:
volumes:
- clickhousedata:/var/lib/clickhouse
networks:
- molecule-monorepo-net
- molecule-core-net
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://127.0.0.1:8123/ping || exit 1"]
interval: 5s
@ -97,7 +101,7 @@ services:
ports:
- "3001:3000"
networks:
- molecule-monorepo-net
- molecule-core-net
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:3000/api/public/health || exit 1"]
interval: 10s
@ -217,7 +221,7 @@ services:
ports:
- "${PLATFORM_PUBLISH_PORT:-8080}:${PLATFORM_PORT:-8080}"
networks:
- molecule-monorepo-net
- molecule-core-net
restart: unless-stopped
healthcheck:
# Plain GET — `--spider` would issue HEAD, which returns 404 because
@ -258,7 +262,7 @@ services:
ports:
- "${CANVAS_PUBLISH_PORT:-3000}:${CANVAS_PORT:-3000}"
networks:
- molecule-monorepo-net
- molecule-core-net
healthcheck:
test: ["CMD-SHELL", "wget -qO /dev/null --tries=1 http://127.0.0.1:${CANVAS_PORT:-3000} || exit 1"]
interval: 10s
@ -291,7 +295,7 @@ services:
OPENROUTER_API_KEY: ${OPENROUTER_API_KEY:-}
LITELLM_MASTER_KEY: ${LITELLM_MASTER_KEY:-sk-molecule}
networks:
- molecule-monorepo-net
- molecule-core-net
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:4000/health || exit 1"]
@ -316,7 +320,7 @@ services:
volumes:
- ollamadata:/root/.ollama
networks:
- molecule-monorepo-net
- molecule-core-net
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "ollama list || exit 1"]
@ -326,8 +330,8 @@ services:
start_period: 20s
networks:
molecule-monorepo-net:
name: molecule-monorepo-net
molecule-core-net:
name: molecule-core-net
volumes:
pgdata:

View File

@ -67,7 +67,7 @@ On-demand fits naturally with how agents work — an agent only needs to know ab
This is acceptable for MVP because:
- All workspaces are provisioned by the same platform on trusted infrastructure
- Docker network isolation (`molecule-monorepo-net`) limits who can reach workspace endpoints
- Docker network isolation (`molecule-core-net`) limits who can reach workspace endpoints
- The tool is self-hosted — the operator controls the network
**Known gap:** Once workspace A caches workspace B's URL, nothing stops A from calling B directly even after the hierarchy changes and A is no longer supposed to reach B. The cached URL remains valid until the container is restarted or the URL changes.

View File

@ -124,7 +124,7 @@ Six runtime adapters ship production-ready on `main`: LangGraph, DeepAgents, Cla
| Platform ↔ Redis | TCP | Ephemeral state (liveness TTL), caching, pub/sub |
| Workspace ↔ Workspace | HTTP (A2A JSON-RPC 2.0) | Direct peer-to-peer, **platform not in data path** |
| Workspace → Langfuse | HTTP | Automatic OpenTelemetry tracing |
| Docker Network | `molecule-monorepo-net` | Internal-only by default, no exposed DB/Redis ports |
| Docker Network | `molecule-core-net` | Internal-only by default, no exposed DB/Redis ports |
### Core Components
@ -465,7 +465,7 @@ Unknown tier values default to T2 for safety. Applied via `provisioner.ApplyTier
### Docker Networking
- All containers join `molecule-monorepo-net` private network
- All containers join `molecule-core-net` private network
- Container naming: `ws-{workspace_id[:12]}`
- Ephemeral host port binding: `127.0.0.1:0→8000/tcp`

View File

@ -19,7 +19,7 @@ The provisioner is the platform component that deploys workspace containers and
## Docker Networking (Tier 1-3, Tier 4 uses host)
All workspace containers join the `molecule-monorepo-net` Docker network. Containers are named `ws-{id[:12]}` (first 12 chars of workspace UUID). Two exported helpers in `provisioner` package provide the canonical naming:
All workspace containers join the `molecule-core-net` Docker network. Containers are named `ws-{id[:12]}` (first 12 chars of workspace UUID). Two exported helpers in `provisioner` package provide the canonical naming:
- `provisioner.ContainerName(workspaceID)` → `ws-{id[:12]}`
- `provisioner.InternalURL(workspaceID)` → `http://ws-{id[:12]}:8000`
@ -38,7 +38,7 @@ This URL is pre-stored in both Postgres and Redis before the agent registers. Wh
**Why not use Docker-internal URLs?** In local dev, the platform runs on the host (not in Docker), so it cannot resolve Docker container hostnames. The ephemeral port mapping lets the A2A proxy reach agents via localhost. In production (platform in Docker), the Docker-internal URL (`http://ws-{id}:8000`) would work directly.
**Workspace-to-workspace discovery:** When a workspace discovers another workspace (via `X-Workspace-ID` header on `GET /registry/discover/:id`), the platform returns the Docker-internal URL (`http://ws-{first12chars}:8000`) so containers can reach each other directly on `molecule-monorepo-net`. The internal URL is cached in Redis at provision time and also synthesized as a fallback if the cache misses (only for online/degraded workspaces).
**Workspace-to-workspace discovery:** When a workspace discovers another workspace (via `X-Workspace-ID` header on `GET /registry/discover/:id`), the platform returns the Docker-internal URL (`http://ws-{first12chars}:8000`) so containers can reach each other directly on `molecule-core-net`. The internal URL is cached in Redis at provision time and also synthesized as a fallback if the cache misses (only for online/degraded workspaces).
For external HTTPS access (multi-host mode), Nginx on the host handles TLS termination and proxies to the container.

View File

@ -73,7 +73,7 @@ These are applied after CORS middleware on every response.
## 14. No Exposed Database Ports
Postgres and Redis must not expose host ports. They communicate exclusively over the internal Docker network (`molecule-monorepo-net`). Use `docker compose exec` for direct access during development.
Postgres and Redis must not expose host ports. They communicate exclusively over the internal Docker network (`molecule-core-net`). Use `docker compose exec` for direct access during development.
## Related Docs

View File

@ -73,19 +73,19 @@ runner-wide setting, not per-job. Source: gitea/act_runner config docs
Flipping the global `container.network` to `bridge` would break every
other workflow in the repo (cache server discovery,
`molecule-monorepo-net` peer access during integration tests, etc.) —
`molecule-core-net` peer access during integration tests, etc.) —
unacceptable blast radius for a per-test bug.
## Fix shape
`handlers-postgres-integration.yml` no longer uses `services: postgres:`.
It launches a sibling postgres container manually on the existing
`molecule-monorepo-net` bridge network with a per-run unique name:
`molecule-core-net` bridge network with a per-run unique name:
```yaml
env:
PG_NAME: pg-handlers-${{ github.run_id }}-${{ github.run_attempt }}
PG_NETWORK: molecule-monorepo-net
PG_NETWORK: molecule-core-net
steps:
- name: Start sibling Postgres on bridge network
@ -117,7 +117,7 @@ host-network runner config. Translate using this same pattern:
1. Drop the `services:` block.
2. Use `${{ github.run_id }}-${{ github.run_attempt }}` for unique
container name.
3. Launch on `molecule-monorepo-net` (already trusted bridge in
3. Launch on `molecule-core-net` (already trusted bridge in
`docker-compose.infra.yml`).
4. Read back the bridge IP via `docker inspect` and export as a step env.
5. `if: always()` cleanup step at the end.
@ -131,7 +131,7 @@ in one place.
- Issue #88 (closed by #92): localhost → 127.0.0.1 fix that unmasked
this collision; the IPv6 fix is correct, port collision is the new
layer.
- Issue #94 created `molecule-monorepo-net` + `alpine:latest` as
- Issue #94 created `molecule-core-net` + `alpine:latest` as
prereqs.
- Saved memory `feedback_act_runner_github_server_url` documents
another act_runner-vs-GHA divergence (server URL).

View File

@ -5,7 +5,7 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
echo "==> Ensuring shared docker network exists..."
docker network create molecule-monorepo-net 2>/dev/null || true
docker network create molecule-core-net 2>/dev/null || true
# Populate the template / plugin registry.
# workspace-configs-templates/, org-templates/, and plugins/ are intentionally

View File

@ -24,7 +24,7 @@ echo "=== NUKE ==="
docker compose -f "$ROOT/docker-compose.yml" down -v 2>/dev/null || true
docker ps -a --format "{{.Names}}" | grep "^ws-" | xargs -r docker rm -f 2>/dev/null || true
docker volume ls --format "{{.Name}}" | grep "^ws-" | xargs -r docker volume rm 2>/dev/null || true
docker network rm molecule-monorepo-net 2>/dev/null || true
docker network rm molecule-core-net 2>/dev/null || true
echo " cleaned"
echo "=== POPULATE MANIFEST DIRS ==="

View File

@ -25,6 +25,35 @@ import (
"github.com/Molecule-AI/molecule-monorepo/platform/internal/registry"
"github.com/google/uuid"
)
// insertMCPDelegationRow writes a delegation activity row so the canvas
// Agent Comms tab can show the task text for MCP-initiated delegations.
// Mirrors insertDelegationRow (delegation.go) for the MCP tool path.
//
// The row is inserted with status 'pending'; callers transition it later
// via updateMCPDelegationStatus. Note workspaceID is bound twice ($1 and
// $2) — the delegating workspace is both the row owner and the source.
// delegationID is stored inside request_body (not a dedicated column) so
// the later status UPDATE locates the row via request_body->>'delegation_id'.
func insertMCPDelegationRow(ctx context.Context, db *sql.DB, workspaceID, targetID, delegationID, task string) error {
// Marshal error deliberately ignored: both values are plain strings,
// so encoding this map cannot fail.
taskJSON, _ := json.Marshal(map[string]interface{}{
"task": task,
"delegation_id": delegationID,
})
_, err := db.ExecContext(ctx, `
INSERT INTO activity_logs (workspace_id, activity_type, method, source_id, target_id, summary, request_body, status)
VALUES ($1, 'delegation', 'delegate', $2, $3, $4, $5::jsonb, 'pending')
`, workspaceID, workspaceID, targetID, "Delegating to "+targetID, string(taskJSON))
return err
}
// updateMCPDelegationStatus updates a delegation activity row's status.
// Mirrors updateDelegationStatus (delegation.go) for the MCP tool path.
//
// The target row is matched by workspace_id + method='delegate' + the
// delegation_id stored inside request_body (written by
// insertMCPDelegationRow). An empty errorDetail preserves the existing
// error_detail value (the CASE guard); a non-empty one overwrites it.
// Failures are logged rather than returned — the status update is
// best-effort and must not fail the surrounding delegation call.
func updateMCPDelegationStatus(ctx context.Context, db *sql.DB, workspaceID, delegationID, status, errorDetail string) {
if _, err := db.ExecContext(ctx, `
UPDATE activity_logs
SET status = $1, error_detail = CASE WHEN $2 = '' THEN error_detail ELSE $2 END
WHERE workspace_id = $3
AND method = 'delegate'
AND request_body->>'delegation_id' = $4
`, status, errorDetail, workspaceID, delegationID); err != nil {
log.Printf("MCP Delegation %s: status update failed: %v", delegationID, err)
}
}
// ─────────────────────────────────────────────────────────────────────────────
// Tool implementations
// ─────────────────────────────────────────────────────────────────────────────
@ -154,6 +183,13 @@ func (h *MCPHandler) toolDelegateTask(ctx context.Context, callerID string, args
return "", fmt.Errorf("workspace %s is not authorised to communicate with %s", callerID, targetID)
}
// Issue #158: write delegation row so canvas Agent Comms tab shows the task text.
delegationID := uuid.New().String()
if err := insertMCPDelegationRow(ctx, h.database, callerID, targetID, delegationID, task); err != nil {
log.Printf("MCP delegate_task: failed to record delegation row: %v", err)
// Non-fatal: still make the A2A call even if activity log write fails.
}
agentURL, err := mcpResolveURL(ctx, h.database, targetID)
if err != nil {
return "", err
@ -197,10 +233,16 @@ func (h *MCPHandler) toolDelegateTask(ctx context.Context, callerID string, args
resp, err := http.DefaultClient.Do(httpReq)
if err != nil {
updateMCPDelegationStatus(ctx, h.database, callerID, delegationID, "failed", err.Error())
return "", fmt.Errorf("A2A call failed: %w", err)
}
defer func() { _ = resp.Body.Close() }()
// A 200/500 from the peer still means the call was dispatched — only
// network errors are truly "failed". Status 'dispatched' is correct for
// any HTTP response (peer's A2A layer handles the actual processing).
updateMCPDelegationStatus(ctx, h.database, callerID, delegationID, "dispatched", "")
body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20))
if err != nil {
return "", fmt.Errorf("failed to read response: %w", err)
@ -223,7 +265,16 @@ func (h *MCPHandler) toolDelegateTaskAsync(ctx context.Context, callerID string,
return "", fmt.Errorf("workspace %s is not authorised to communicate with %s", callerID, targetID)
}
taskID := uuid.New().String()
delegationID := uuid.New().String()
// Issue #158: write delegation row so canvas Agent Comms tab shows the task text.
// Insert with 'dispatched' status since the goroutine won't update it.
if err := insertMCPDelegationRow(ctx, h.database, callerID, targetID, delegationID, task); err != nil {
log.Printf("MCP delegate_task_async: failed to record delegation row: %v", err)
// Non-fatal: still fire the A2A call.
} else {
updateMCPDelegationStatus(ctx, h.database, callerID, delegationID, "dispatched", "")
}
// Fire and forget in a detached goroutine. Use a background context so
// the call is not cancelled when the HTTP request completes.
@ -244,7 +295,7 @@ func (h *MCPHandler) toolDelegateTaskAsync(ctx context.Context, callerID string,
a2aBody, _ := json.Marshal(map[string]interface{}{
"jsonrpc": "2.0",
"id": taskID,
"id": delegationID,
"method": "message/send",
"params": map[string]interface{}{
"message": map[string]interface{}{
@ -273,7 +324,7 @@ func (h *MCPHandler) toolDelegateTaskAsync(ctx context.Context, callerID string,
_, _ = io.Copy(io.Discard, resp.Body)
}()
return fmt.Sprintf(`{"task_id":%q,"status":"dispatched","target_id":%q}`, taskID, targetID), nil
return fmt.Sprintf(`{"task_id":%q,"status":"dispatched","target_id":%q}`, delegationID, targetID), nil
}
func (h *MCPHandler) toolCheckTaskStatus(ctx context.Context, callerID string, args map[string]interface{}) (string, error) {

View File

@ -607,7 +607,16 @@ func (h *OrgHandler) Import(c *gin.Context) {
orgFile := filepath.Join(orgBaseDir, "org.yaml")
data, err := os.ReadFile(orgFile)
if err != nil {
c.JSON(http.StatusNotFound, gin.H{"error": fmt.Sprintf("org template not found: %s", body.Dir)})
// Audit 2026-05-09 (Core-Security): the prior message echoed
// the user-supplied `body.Dir` verbatim. Path traversal is
// already blocked by resolveInsideRoot above, but echoing
// the raw input back lets a client probe for the existence
// of relative paths inside h.orgDir (a 404 with the input
// vs. a 400 from resolveInsideRoot is itself a signal).
// Drop the input from the message; log full context server-
// side via the resolved path for operator triage.
log.Printf("OrgImport: failed to read %s (requested dir=%q): %v", orgFile, body.Dir, err)
c.JSON(http.StatusNotFound, gin.H{"error": "org template not found"})
return
}
// Expand !include directives before unmarshal. Splits org.yaml

View File

@ -134,7 +134,7 @@ func (h *TranscriptHandler) Get(c *gin.Context) {
// - block cloud metadata endpoints (IMDS, GCP, Azure)
// - block link-local IPs (169.254/16 IPv4, fe80::/10 IPv6)
// - loopback is allowed — local dev runs workspaces on 127.0.0.1
// - Docker internal hostnames (host.docker.internal, *.molecule-monorepo-net)
// - Docker internal hostnames (host.docker.internal, *.molecule-core-net)
// are allowed; the whole threat model assumes the platform already
// trusts peers on that network
func validateWorkspaceURL(u *url.URL) error {

View File

@ -331,8 +331,14 @@ func (h *WorkspaceHandler) Delete(c *gin.Context) {
// stay in this handler.
descendantIDs, stopErrs, err := h.CascadeDelete(ctx, id)
if err != nil {
// Audit 2026-05-09 (Core-Security): raw `err.Error()` here was
// exposed to HTTP clients verbatim, including wrapped lib/pq
// driver strings that disclose schema column names + index
// hints. Log full error server-side; return a sanitized message
// to the client. Operators trace via the log line below using
// the workspace id.
log.Printf("Delete: CascadeDelete(%s) failed: %v", id, err)
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error processing delete request"})
return
}
allIDs := append([]string{id}, descendantIDs...)

View File

@ -173,7 +173,7 @@ func (h *WorkspaceHandler) provisionWorkspaceOpts(workspaceID, templatePath stri
log.Printf("Provisioner: failed to cache URL for %s: %v", workspaceID, cacheErr)
}
// Also cache the Docker-internal URL for workspace-to-workspace discovery.
// Containers on molecule-monorepo-net can reach each other by container name.
// Containers on molecule-core-net can reach each other by container name.
internalURL := provisioner.InternalURL(workspaceID)
if cacheErr := db.CacheInternalURL(ctx, workspaceID, internalURL); cacheErr != nil {
log.Printf("Provisioner: failed to cache internal URL for %s: %v", workspaceID, cacheErr)

View File

@ -67,7 +67,7 @@ var DefaultImage = RuntimeImage(defaultRuntime)
const (
// DefaultNetwork is the Docker network workspaces join.
DefaultNetwork = "molecule-monorepo-net"
DefaultNetwork = "molecule-core-net"
// DefaultPort is the port the A2A server listens on inside the container.
DefaultPort = "8000"
@ -405,7 +405,7 @@ func (p *Provisioner) Start(ctx context.Context, cfg WorkspaceConfig) (string, e
// Apply tier-based container configuration
ApplyTierConfig(hostCfg, cfg, configMount, name)
// Network config — join molecule-monorepo-net with container name as alias
// Network config — join molecule-core-net with container name as alias
networkCfg := &network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
DefaultNetwork: {

View File

@ -43,11 +43,29 @@ if [ "$(id -u)" = "0" ]; then
ln -sfn /root/.claude/sessions /home/agent/.claude/sessions
fi
# --- Per-persona git identity (closes molecule-core#155) ---
# Without this, every team commit lands with an empty author and Gitea
# attributes the work to the founder PAT instead of the persona that
# actually authored it. Same fingerprint that got us suspended on GitHub
# 2026-05-06. GITEA_USER is injected by the provisioner from the
# workspace_secrets table; bot.moleculesai.app is the agent-only domain
# so commits are clearly distinguishable from human authors.
if [ -n "${GITEA_USER:-}" ]; then
git config --global user.name "${GITEA_USER}"
git config --global user.email "${GITEA_USER}@bot.moleculesai.app"
fi
# --- GitHub credential helper setup (issue #547 / #613) ---
# Configure git to use the molecule credential helper for github.com.
# This runs as root so the global gitconfig is written before we drop
# to agent. The helper fetches fresh GitHub App installation tokens
# from the platform API, with caching and env-var fallback.
#
# NOTE: post-suspension (2026-05-06), github.com/Molecule-AI is gone;
# the helper's platform endpoint also 500s (internal#187). The helper
# block is kept for legacy boxes that still have a working token chain;
# post-suspension provisioner injects GITEA_TOKEN directly so this
# path's failure is non-fatal. Full removal tracked under #171.
if [ -x /app/scripts/molecule-git-token-helper.sh ]; then
# Set credential helper for github.com only (not all hosts).
# The '!' prefix tells git to run the command as a shell command.
@ -55,11 +73,13 @@ if [ "$(id -u)" = "0" ]; then
"!/app/scripts/molecule-git-token-helper.sh"
# Disable other credential helpers for github.com to avoid conflicts.
git config --global "credential.https://github.com.useHttpPath" true
# Move gitconfig to agent's home so it takes effect after gosu.
if [ -f /root/.gitconfig ]; then
cp /root/.gitconfig /home/agent/.gitconfig
chown agent:agent /home/agent/.gitconfig
fi
fi
# Move gitconfig to agent's home so it takes effect after gosu —
# done unconditionally so the per-persona identity survives the drop
# even when the github.com helper block is skipped.
if [ -f /root/.gitconfig ]; then
cp /root/.gitconfig /home/agent/.gitconfig
chown agent:agent /home/agent/.gitconfig
fi
# Create the token cache directory for the agent user.
mkdir -p /home/agent/.molecule-token-cache

View File

@ -434,7 +434,7 @@ async def main(): # pragma: no cover
async def _transcript_handler(request):
# Require workspace bearer token — the same token issued at registration
# and stored in /configs/.auth_token. Any container on molecule-monorepo-net
# and stored in /configs/.auth_token. Any container on molecule-core-net
# could otherwise read the full session log. Closes #287.
#
# #328: fail CLOSED when the token file is unavailable. get_token()

View File

@ -3,7 +3,7 @@ the workspace auth token is not yet on disk.
Prior behaviour (regressed in #287): `if expected:` skipped the auth
check when `get_token()` returned None, so any container on
`molecule-monorepo-net` could read the full session log during the
`molecule-core-net` could read the full session log during the
bootstrap window. The fix lifts the guard into transcript_auth.py for
testability.
"""