Compare commits

..

No commits in common. "main" and "fix/files-tab-test-missing-helper" have entirely different histories.

145 changed files with 917 additions and 10097 deletions

View File

@ -1 +0,0 @@
refire:1778784369

View File

@ -203,17 +203,12 @@ def ci_jobs_all(ci_doc: dict) -> set[str]:
def ci_job_names(ci_doc: dict) -> set[str]:
"""Set of job keys in ci.yml MINUS the sentinel itself MINUS jobs
whose `if:` gates on `github.event_name` or `github.ref` (those are
event-scoped and can legitimately be `skipped` for a given trigger;
if we required them under the sentinel `needs:`, every PR-only job
whose `if:` gates on `github.event_name` (those are event-scoped
and can legitimately be `skipped` for a given trigger; if we
required them under the sentinel `needs:`, every PR-only job
would be `skipped` on push and the sentinel would interpret
`skipped != success` as failure). RFC §4 spec.
`github.ref` is the companion gate for jobs that run only on direct
pushes to specific branches (e.g. `github.ref == 'refs/heads/main'`).
These never execute in a PR context, so flagging them as missing
from `all-required.needs:` is a false positive (mc#958 / mc#959).
Used for F1 (jobs missing from sentinel needs). NOT used for F1b
(typos in needs) see `ci_jobs_all` for that."""
jobs = ci_doc.get("jobs")
@ -226,9 +221,7 @@ def ci_job_names(ci_doc: dict) -> set[str]:
continue
if isinstance(v, dict):
gate = v.get("if")
if isinstance(gate, str) and (
"github.event_name" in gate or "github.ref" in gate
):
if isinstance(gate, str) and "github.event_name" in gate:
continue
names.add(k)
return names

View File

@ -47,15 +47,6 @@ REQUIRED_CONTEXTS_RAW = _env(
"sop-checklist / all-items-acked (pull_request)"
),
)
# Required contexts for push (main/staging) runs. The push CI uses the same
# aggregator names with " (push)" suffix. Checking these explicitly instead of
# the combined state avoids false-pause when non-blocking jobs (e.g. Platform
# Go with continue-on-error: true due to mc#774) have failed — their failures
# pollute the combined state but do not block merges.
PUSH_REQUIRED_CONTEXTS_RAW = _env(
"PUSH_REQUIRED_CONTEXTS",
default="CI / all-required (push)",
)
OWNER, NAME = (REPO.split("/", 1) + [""])[:2] if REPO else ("", "")
API = f"https://{GITEA_HOST}/api/v1" if GITEA_HOST else ""
@ -127,24 +118,16 @@ def required_contexts(raw: str) -> list[str]:
return [part.strip() for part in raw.split(",") if part.strip()]
def push_required_contexts() -> list[str]:
    """Required contexts for push (branch) CI runs. See PUSH_REQUIRED_CONTEXTS_RAW.

    Returns the comma-separated PUSH_REQUIRED_CONTEXTS value parsed via
    required_contexts(); whitespace is stripped and empty entries dropped.
    """
    return required_contexts(PUSH_REQUIRED_CONTEXTS_RAW)
def status_state(status: dict) -> str:
    """Normalized (lowercase) state string for a Gitea status object.

    Gitea payloads carry the value under `status` or `state` depending on
    the endpoint; prefer `status`, fall back to `state`, else "".
    """
    for field in ("status", "state"):
        value = status.get(field)
        if value:
            return str(value).lower()
    return ""
def latest_statuses_by_context(statuses: list[dict]) -> dict[str, dict]:
# Gitea /statuses endpoint returns entries in ascending id order (oldest
# first). We need the LAST occurrence of each context, so iterate in
# reverse to prefer newer entries.
latest: dict[str, dict] = {}
for status in reversed(statuses):
for status in statuses:
context = status.get("context")
if isinstance(context, str):
latest[context] = status # overwrite: reverse order → newest wins
if isinstance(context, str) and context not in latest:
latest[context] = status
return latest
@ -210,23 +193,16 @@ def evaluate_merge_readiness(
required_contexts: list[str],
pr_has_current_base: bool,
) -> MergeDecision:
# Check push-required contexts explicitly instead of combined state.
# Combined state can be "failure" due to non-blocking jobs
# (continue-on-error: true) that don't actually gate merges.
# CI / all-required (push) is the authoritative gate — it respects
# continue-on-error and correctly aggregates all blocking failures.
main_latest = latest_statuses_by_context(main_status.get("statuses") or [])
main_ok, main_bad = required_contexts_green(main_latest, push_required_contexts())
if not main_ok:
return MergeDecision(False, "pause", "main required contexts not green: " + ", ".join(main_bad))
main_state = str(main_status.get("state") or "").lower()
if main_state != "success":
return MergeDecision(False, "pause", f"main status is {main_state or 'missing'}")
if not pr_has_current_base:
return MergeDecision(False, "update", "PR head does not contain current main")
# Check explicit required contexts instead of combined state. Combined state
# can be "failure" due to non-blocking jobs with continue-on-error: true
# (e.g. publish-runtime-autobump/pr-validate, qa-review on stale tokens).
# The required_contexts list is the authoritative gate — it includes only
# the checks that actually block merges.
pr_state = str(pr_status.get("state") or "").lower()
if pr_state != "success":
return MergeDecision(False, "wait", f"PR combined status is {pr_state or 'missing'}")
latest = latest_statuses_by_context(pr_status.get("statuses") or [])
ok, missing_or_bad = required_contexts_green(latest, required_contexts)
if not ok:
@ -244,37 +220,10 @@ def get_branch_head(branch: str) -> str:
def get_combined_status(sha: str) -> dict:
"""Combined status + all individual statuses for `sha`.
The /status endpoint caps the `statuses` array at 30 entries (Gitea
default page size), so we fetch the full list via /statuses with a
higher limit. The combined `state` still comes from /status.
"""
_, combined = api("GET", f"/repos/{OWNER}/{NAME}/commits/{sha}/status")
if not isinstance(combined, dict):
_, body = api("GET", f"/repos/{OWNER}/{NAME}/commits/{sha}/status")
if not isinstance(body, dict):
raise ApiError(f"status for {sha} response not object")
# Fetch full statuses list; 200 covers >99% of real-world runs.
# The list is ordered ascending by id (oldest first) — callers must
# iterate in reverse to get the newest entry per context.
# Best-effort: large repos (main with 550+ statuses) may time out.
# On timeout, fall back to the statuses[] already in the combined
# response (usually 30 entries — enough for most PRs, enough for
# main's early push-required contexts).
try:
_, all_statuses = api(
"GET",
f"/repos/{OWNER}/{NAME}/commits/{sha}/statuses",
query={"limit": "50"},
)
if isinstance(all_statuses, list):
combined["statuses"] = all_statuses
except (ApiError, urllib.error.URLError, TimeoutError, OSError) as exc:
# URLError covers network-level failures (DNS, refused, timeout).
# TimeoutError and OSError cover socket-level timeouts.
sys.stderr.write(f"::warning::could not fetch full statuses list for {sha[:8]}: {exc}\n")
# Fall back to the statuses[] already in the combined response.
pass
return combined
return body
def list_queued_issues() -> list[dict]:
@ -345,12 +294,8 @@ def process_once(*, dry_run: bool = False) -> int:
contexts = required_contexts(REQUIRED_CONTEXTS_RAW)
main_sha = get_branch_head(WATCH_BRANCH)
main_status = get_combined_status(main_sha)
# Check push-required contexts explicitly instead of combined state.
# See evaluate_merge_readiness for rationale.
main_latest = latest_statuses_by_context(main_status.get("statuses") or [])
main_ok, main_bad = required_contexts_green(main_latest, push_required_contexts())
if not main_ok:
print(f"::notice::queue paused: {WATCH_BRANCH}@{main_sha[:8]} required contexts not green: {', '.join(main_bad)}")
if str(main_status.get("state") or "").lower() != "success":
print(f"::notice::queue paused: {WATCH_BRANCH}@{main_sha[:8]} is not green")
return 0
issue = choose_next_queued_issue(
@ -417,21 +362,7 @@ def main() -> int:
parser.add_argument("--dry-run", action="store_true")
args = parser.parse_args()
_require_runtime_env()
try:
return process_once(dry_run=args.dry_run)
except ApiError as exc:
# API errors (401/403/404/500) are transient for a queue tick —
# log and exit 0 so the workflow is not marked failed and the next
# tick can retry. Returning non-zero would permanently fail the
# workflow run, blocking future ticks.
sys.stderr.write(f"::error::queue API error: {exc}\n")
return 0
except urllib.error.URLError as exc:
sys.stderr.write(f"::error::queue network error: {exc}\n")
return 0
except TimeoutError as exc:
sys.stderr.write(f"::error::queue timeout: {exc}\n")
return 0
return process_once(dry_run=args.dry_run)
if __name__ == "__main__":

View File

@ -36,9 +36,6 @@ Rules (4 fatal + 1 fatal cross-file + 1 heuristic-warn):
raw `.error` fields into CI logs/summaries.
9. Production deploy/redeploy workflows must expose an operational control:
kill switch for auto deploys or rollback tag for manual deploys.
10. Docker health checks must not run `docker info | head` under pipefail.
`head` closes the pipe early, `docker info` can exit nonzero from
SIGPIPE, and the step can falsely report Docker daemon failure.
Per `feedback_smoke_test_vendor_truth_not_shape_match`: fixtures used to
validate this lint must mirror real Gitea 1.22.6 YAML semantics, not
@ -228,24 +225,6 @@ def _iter_uses(doc: Any) -> Iterable[str]:
yield step["uses"]
def _iter_run_blocks(doc: Any) -> Iterable[str]:
    """Yield every shell `run:` block from job steps in a workflow document.

    Tolerant of malformed YAML shapes: any node that is not the expected
    dict/list structure is silently skipped rather than raising.
    """
    jobs = doc.get("jobs") if isinstance(doc, dict) else None
    if not isinstance(jobs, dict):
        return
    for job_def in jobs.values():
        steps = job_def.get("steps") if isinstance(job_def, dict) else None
        if not isinstance(steps, list):
            continue
        for step in steps:
            if not isinstance(step, dict):
                continue
            run = step.get("run")
            if isinstance(run, str):
                yield run
def check_cross_repo_uses(filename: str, doc: Any) -> list[str]:
"""Return per-violation error lines for cross-repo `uses:` references."""
errors: list[str] = []
@ -285,10 +264,6 @@ GITHUB_API_REF_RE = re.compile(
PROD_CP_URL_RE = re.compile(r"https://api\.moleculesai\.app\b")
REDEPLOY_FLEET_RE = re.compile(r"\b/cp/admin/tenants/redeploy-fleet\b")
RUN_SETS_PIPEFAIL_RE = re.compile(r"(?m)^\s*set\s+-[^\n]*o\s+pipefail\b")
DOCKER_INFO_HEAD_PIPE_RE = re.compile(
r"(?m)^\s*docker\s+info\b[^\n|]*\|\s*head\b"
)
RAW_CP_RESPONSE_RE = re.compile(
r"""(?x)
(?:\bjq\s+\.\s+["']?\$HTTP_RESPONSE["']?)
@ -408,30 +383,6 @@ def check_production_operational_control(filename: str, raw: str) -> list[str]:
return errors
# ---------------------------------------------------------------------------
# Rule 10 — docker info piped to head under pipefail
# ---------------------------------------------------------------------------
def check_docker_info_head_pipefail(filename: str, doc: Any) -> list[str]:
    """Rule 10: forbid `docker info | head` in a pipefail-enabled run block.

    Under `set -o pipefail`, `head` closes the pipe early, `docker info`
    can exit nonzero from SIGPIPE, and the step falsely reports a Docker
    daemon failure (rule text at the top of this file).

    Returns at most ONE error line — the `break` stops at the first
    offending run block, matching the per-file granularity of the
    `::error file=` annotation.

    Fix: the annotation previously hardcoded `file=(unknown)` even though
    `filename` is passed in (and is otherwise unused); interpolate it so
    the error is attributed to the right workflow file like the other
    check_* rules do.
    """
    errors: list[str] = []
    for run_block in _iter_run_blocks(doc):
        # Only flag when BOTH conditions hold in the same run block:
        # pipefail is enabled AND `docker info` is piped into `head`.
        if not (
            RUN_SETS_PIPEFAIL_RE.search(run_block)
            and DOCKER_INFO_HEAD_PIPE_RE.search(run_block)
        ):
            continue
        errors.append(
            f"::error file={filename}::Rule 10 (FATAL): workflow runs "
            f"`docker info | head` after enabling `pipefail`. `head` can "
            f"close the pipe early, making `docker info` exit nonzero and "
            f"falsely fail the Docker daemon health check. Capture "
            f"`docker_info=\"$(docker info 2>&1)\"` first, then print a "
            f"bounded preview with `printf ... | sed -n '1,5p'`."
        )
        break
    return errors
# ---------------------------------------------------------------------------
# Driver
# ---------------------------------------------------------------------------
@ -485,7 +436,6 @@ def main(argv: list[str] | None = None) -> int:
fatal_errors.extend(check_production_concurrency(rel, doc, raw))
fatal_errors.extend(check_production_raw_response_logging(rel, raw))
fatal_errors.extend(check_production_operational_control(rel, raw))
fatal_errors.extend(check_docker_info_head_pipefail(rel, doc))
warnings.extend(check_github_server_url_missing(rel, doc, raw))
# Cross-file checks

View File

@ -145,7 +145,7 @@ if [ -z "$PR_AUTHOR" ] || [ -z "$PR_HEAD_SHA" ]; then
fi
# --- RFC#324 §N/A follow-up: check N/A declarations status ---
# sop-checklist.py posts `sop-checklist / na-declarations (pull_request)`
# sop-checklist-gate.py posts `sop-checklist / na-declarations (pull_request)`
# status when a peer posts /sop-n/a <gate>. If our gate is declared N/A,
# the requirement for a Gitea APPROVE review is waived.
NA_STATUSES_TMP=$(mktemp)

View File

@ -1,81 +0,0 @@
#!/usr/bin/env bash
# Re-run review-check.sh for a slash-command refire and post the protected
# pull_request status context to the PR head SHA.
#
# Required environment (fail fast if missing):
#   GITEA_TOKEN — API token used for the PR lookup and the status POST
#   GITEA_HOST  — Gitea hostname, no scheme
#   REPO        — "<owner>/<name>"
#   PR_NUMBER   — pull request index
#   TEAM        — prefix of the posted status context
# Optional:
#   COMMENT_AUTHOR — echoed into the status description on success
set -euo pipefail
: "${GITEA_TOKEN:?GITEA_TOKEN required}"
: "${GITEA_HOST:?GITEA_HOST required}"
: "${REPO:?REPO required}"
: "${PR_NUMBER:?PR_NUMBER required}"
: "${TEAM:?TEAM required}"
OWNER="${REPO%%/*}"
NAME="${REPO##*/}"
API="https://${GITEA_HOST}/api/v1"
CONTEXT="${TEAM}-review / approved (pull_request)"
TARGET_URL="https://${GITEA_HOST}/${OWNER}/${NAME}/pulls/${PR_NUMBER}"
# Temp files: curl auth config, PR-lookup body, status-POST body.
authfile=$(mktemp)
prfile=$(mktemp)
postfile=$(mktemp)
# shellcheck disable=SC2329 # invoked by EXIT trap
cleanup() {
  rm -f "$authfile" "$prfile" "$postfile"
}
trap cleanup EXIT
# Pass the token via a 0600 curl config file (-K) so it never appears in
# argv / process listings.
chmod 600 "$authfile"
printf 'header = "Authorization: token %s"\n' "$GITEA_TOKEN" > "$authfile"
code=$(curl -sS -o "$prfile" -w '%{http_code}' -K "$authfile" \
  "${API}/repos/${OWNER}/${NAME}/pulls/${PR_NUMBER}")
if [ "$code" != "200" ]; then
  echo "::error::GET /pulls/${PR_NUMBER} returned HTTP ${code}"
  # Bounded body preview for debugging; never fail the error path itself.
  head -c 200 "$prfile" >&2 || true
  exit 1
fi
head_sha=$(jq -r '.head.sha // ""' "$prfile")
state=$(jq -r '.state // ""' "$prfile")
if [ -z "$head_sha" ] || [ "$head_sha" = "null" ]; then
  echo "::error::Could not resolve PR head SHA for PR ${PR_NUMBER}"
  exit 1
fi
# Closed/merged PRs get no status refire — exit 0 so the workflow stays green.
if [ "$state" != "open" ]; then
  echo "::notice::PR ${PR_NUMBER} is ${state}; ${TEAM}-review refire is a no-op"
  exit 0
fi
# Run the actual review check; capture rc without tripping `set -e` so we
# can still POST a failure status before exiting.
set +e
bash .gitea/scripts/review-check.sh
rc=$?
set -e
if [ "$rc" -eq 0 ]; then
  status_state="success"
  description="Refired via /${TEAM}-recheck by ${COMMENT_AUTHOR:-unknown}"
else
  status_state="failure"
  description="Refired via /${TEAM}-recheck; ${TEAM}-review failed"
fi
# Build the status payload with jq so quoting/escaping is always valid JSON.
body=$(jq -nc \
  --arg state "$status_state" \
  --arg context "$CONTEXT" \
  --arg description "$description" \
  --arg target_url "$TARGET_URL" \
  '{state:$state, context:$context, description:$description, target_url:$target_url}')
code=$(curl -sS -o "$postfile" -w '%{http_code}' -X POST \
  -K "$authfile" -H "Content-Type: application/json" \
  -d "$body" \
  "${API}/repos/${OWNER}/${NAME}/statuses/${head_sha}")
# Gitea returns 201 on create; accept 200 too for proxy/version variance.
if [ "$code" != "200" ] && [ "$code" != "201" ]; then
  echo "::error::POST /statuses/${head_sha} returned HTTP ${code}"
  head -c 200 "$postfile" >&2 || true
  exit 1
fi
echo "::notice::posted ${status_state} for context=\"${CONTEXT}\" on sha=${head_sha}"
# Propagate the review-check result so the workflow run reflects it.
exit "$rc"

View File

@ -1,11 +1,11 @@
#!/usr/bin/env python3
# sop-checklist — evaluate whether a PR has peer-acked each
# sop-checklist-gate — evaluate whether a PR has peer-acked each
# SOP-checklist item. Posts a commit-status that branch protection
# can require.
#
# RFC#351 Step 2 of 6 (implementation MVP).
#
# Invoked by .gitea/workflows/sop-checklist.yml on:
# Invoked by .gitea/workflows/sop-checklist-gate.yml on:
# - pull_request_target: [opened, edited, synchronize, reopened]
# - issue_comment: [created, edited, deleted]
#
@ -109,57 +109,58 @@ def normalize_slug(raw: str, numeric_aliases: dict[int, str] | None = None) -> s
# Optional trailing note after the slug for /sop-ack and required reason
# for /sop-revoke (RFC#351 open question 4 — reason is captured but not
# yet validated; future iteration may require a min-length).
#
# /sop-n/a <gate> [reason] — declares a gate as not-applicable.
# <gate> is a canonical gate name (qa-review, security-review).
# The declaring user must be in one of the gate's required_teams.
# Most-recent per-user declaration wins (revoke semantics mirror ack).
_DIRECTIVE_RE = re.compile(
r"^[ \t]*/(sop-ack|sop-revoke)[ \t]+([A-Za-z0-9_\- ]+?)(?:[ \t]+(.*))?[ \t]*$",
re.MULTILINE,
)
_NA_DIRECTIVE_RE = re.compile(
r"^[ \t]*/sop-n/?a[ \t]+([A-Za-z0-9_\-]+)(?:[ \t]+(.*))?[ \t]*$",
re.MULTILINE,
)
def parse_directives(
comment_body: str,
numeric_aliases: dict[int, str],
) -> list[tuple[str, str, str]]:
"""Extract /sop-ack and /sop-revoke directives from a comment body.
) -> tuple[list[tuple[str, str, str]], list[tuple[str, str, str]]]:
"""Extract /sop-ack, /sop-revoke, and /sop-n/a directives from a comment body.
Returns a list of (kind, canonical_slug, note) tuples where:
kind is "sop-ack" or "sop-revoke"
canonical_slug is the normalized form (or "" if unparseable)
note is the trailing free-text (may be "")
Returns a tuple of two lists:
0. list of (kind, canonical_slug, note) for sop-ack/sop-revoke
1. list of (kind, gate_name, reason) for sop-n/a
canonical_slug is the normalized form (or "" if unparseable).
note/reason is the trailing free-text (may be "").
"""
out: list[tuple[str, str, str]] = []
na_out: list[tuple[str, str, str]] = []
if not comment_body:
return out
return out, na_out
for m in _DIRECTIVE_RE.finditer(comment_body):
kind = m.group(1)
raw_slug = (m.group(2) or "").strip()
# If the raw match included trailing words, the regex non-greedy
# captured only the first token; strip again for safety.
# We split on whitespace to keep the FIRST word as the slug, and
# everything after as the note.
parts = raw_slug.split()
if not parts:
continue
first = parts[0]
# If the slug-capture greedily matched multiple words (e.g.
# "comprehensive testing"), preserve normalize behavior: join
# the WHOLE first-word-token only; trailing words get appended to
# the note. The regex limits group(2) to [A-Za-z0-9_\- ] so we
# may have multi-word forms here — normalize handles them.
if len(parts) > 1:
# User wrote "/sop-ack comprehensive testing extra-note"
# → treat "comprehensive testing" as the slug source if it
# normalizes to a known item; otherwise treat "comprehensive"
# as slug and "testing extra-note" as note. We defer the
# disambiguation to the caller via the returned canonical
# slug. For simplicity: try the WHOLE captured string first.
canonical = normalize_slug(raw_slug, numeric_aliases)
else:
canonical = normalize_slug(first, numeric_aliases)
note_from_group = (m.group(3) or "").strip()
# If we collapsed multi-word slug into kebab and there's a
# trailing-text group too, append it.
out.append((kind, canonical, note_from_group))
return out
for m in _NA_DIRECTIVE_RE.finditer(comment_body):
gate = (m.group(1) or "").strip().lower()
reason = (m.group(2) or "").strip()
na_out.append(("sop-n/a", gate, reason))
return out, na_out
# ---------------------------------------------------------------------------
@ -230,9 +231,8 @@ def compute_ack_state(
{
"comprehensive-testing": {
"ackers": ["bob"], # non-author, team-verified
"rejected_ackers": { # debugging info
"rejected": {
"self_ack": ["alice"],
"unknown_slug": [],
"not_in_team": ["eve"],
}
},
@ -249,7 +249,8 @@ def compute_ack_state(
user = (c.get("user") or {}).get("login", "")
if not user:
continue
for kind, slug, _note in parse_directives(body, numeric_aliases):
directives, _na_directives = parse_directives(body, numeric_aliases)
for kind, slug, _note in directives:
if not slug:
unparseable_per_user[user] = unparseable_per_user.get(user, 0) + 1
continue
@ -259,25 +260,19 @@ def compute_ack_state(
# Filter out self-acks and unknown slugs.
ackers_per_slug: dict[str, list[str]] = {s: [] for s in items_by_slug}
rejected_self: dict[str, list[str]] = {s: [] for s in items_by_slug}
rejected_unknown: dict[str, list[str]] = {s: [] for s in items_by_slug}
pending_team_check: dict[str, list[str]] = {s: [] for s in items_by_slug}
for (user, slug), kind in latest_directive.items():
if kind != "sop-ack":
continue # revokes leave the (user,slug) state as "no ack"
if slug not in items_by_slug:
# Slug normalized to something not in our config — store
# under a synthetic key for diagnostic surfacing. Don't add
# to any item.
continue
if user == pr_author:
rejected_self[slug].append(user)
continue
pending_team_check[slug].append(user)
# Step 3: team membership probe per slug (batched per slug to keep
# API call count down — same user may ack multiple items but the
# required_teams differ per item, so we MUST probe per (user, item)).
# Step 3: team membership probe per slug.
rejected_not_in_team: dict[str, list[str]] = {s: [] for s in items_by_slug}
for slug, candidates in pending_team_check.items():
if not candidates:
@ -286,7 +281,6 @@ def compute_ack_state(
approved = team_membership_probe(slug, candidates) # returns subset
rejected_not_in_team[slug] = [u for u in candidates if u not in approved]
ackers_per_slug[slug] = approved
# Stash required teams for description rendering.
items_by_slug[slug]["_required_resolved"] = required
return {
@ -301,6 +295,113 @@ def compute_ack_state(
}
def compute_na_state(
    comments: list[dict[str, Any]],
    pr_author: str,
    na_gates: dict[str, dict[str, Any]],
    numeric_aliases: dict[int, str],
    team_membership_probe: "callable[[str, list[str]], list[str]]",
    client: "GiteaClient",
    org: str,
) -> dict[str, dict[str, Any]]:
    """Compute per-gate N/A declaration state.

    Returns a dict keyed by gate name:
    {
      "qa-review": {
        "declared": ["alice"],  # non-author, team-verified
        "rejected": ["eve (not-in-team)", "bob (self-decl)"],
        "reason": "pure-infra change — no qa surface",
      },
      ...
    }

    A gate is N/A-satisfied when at least one declaration from a valid
    team member exists and has not been revoked by the same user.

    NOTE(review): no revoke directive for N/A is parsed below — every
    stored kind is the literal "sop-n/a" — so the "not revoked" clause
    in the contract is aspirational; confirm against the directive
    grammar before relying on it.

    NOTE(review): `team_membership_probe` is accepted but never used;
    membership is probed directly via `client`. Confirm whether the
    parameter is a leftover or intended for a future refactor.
    """
    # No configured N/A gates → nothing to compute.
    if not na_gates:
        return {}
    # Collapse directives per (commenter, gate) — most recent wins.
    # Comments are processed in list order, so later entries overwrite
    # earlier ones for the same (user, gate) key.
    latest_na: dict[tuple[str, str], str] = {}  # (user, gate) → "sop-n/a"
    latest_na_reason: dict[tuple[str, str], str] = {}  # (user, gate) → reason
    for c in comments:
        body = c.get("body", "") or ""
        user = (c.get("user") or {}).get("login", "")
        if not user:
            continue
        # Only the N/A directives matter here; ack/revoke are handled
        # by compute_ack_state.
        _directives, na_directives = parse_directives(body, numeric_aliases)
        for _kind, gate, reason in na_directives:
            # Ignore declarations for gates not in the configuration.
            if gate not in na_gates:
                continue
            latest_na[(user, gate)] = "sop-n/a"
            latest_na_reason[(user, gate)] = reason
    # Determine candidate declarers per gate.
    na_state: dict[str, dict[str, Any]] = {
        gate: {"declared": [], "rejected": [], "reason": ""}
        for gate in na_gates
    }
    pending_per_gate: dict[str, list[str]] = {gate: [] for gate in na_gates}
    for (user, gate), kind in latest_na.items():
        # Currently always "sop-n/a" (see NOTE in docstring); guard kept
        # for a future revoke kind.
        if kind != "sop-n/a":
            continue
        # PR authors may not declare their own gates N/A.
        if user == pr_author:
            na_state[gate]["rejected"].append(f"{user} (self-decl)")
            continue
        pending_per_gate[gate].append(user)
    # Probe team membership per gate using that gate's required_teams.
    for gate, candidates in pending_per_gate.items():
        if not candidates:
            continue
        required_teams = na_gates[gate].get("required_teams", [])
        # Resolve team names → ids using the client's resolver.
        team_ids: list[int] = []
        for tn in required_teams:
            tid = client.resolve_team_id(org, tn)
            if tid is not None:
                team_ids.append(tid)
        # No resolvable team → fail-closed: reject every candidate.
        if not team_ids:
            na_state[gate]["rejected"].extend(
                f"{u} (no-team-id)" for u in candidates
            )
            continue
        for u in candidates:
            in_any_team = False
            for tid in team_ids:
                # is_team_member returns True/False, or None on a 403.
                result = client.is_team_member(tid, u)
                if result is True:
                    in_any_team = True
                    break
                if result is None:
                    # 403 — token owner not in team. Fail-closed.
                    print(
                        f"::warning::na: team-probe for {u} in team-id {tid} "
                        "returned 403 — treating as not-in-team (fail-closed)",
                        file=sys.stderr,
                    )
            if in_any_team:
                na_state[gate]["declared"].append(u)
            else:
                na_state[gate]["rejected"].append(f"{u} (not-in-team)")
    # Build per-gate reason string from declared users: "user: reason"
    # when a reason was given, bare username otherwise, joined by "; ".
    for gate in na_gates:
        decl = na_state[gate]["declared"]
        if decl:
            reasons: list[str] = []
            for u in decl:
                r = latest_na_reason.get((u, gate), "")
                if r:
                    reasons.append(f"{u}: {r}")
                else:
                    reasons.append(u)
            na_state[gate]["reason"] = "; ".join(reasons)
    return na_state
# ---------------------------------------------------------------------------
# Gitea API client
# ---------------------------------------------------------------------------
@ -698,6 +799,7 @@ def main(argv: list[str] | None = None) -> int:
numeric_aliases = {
int(it["numeric_alias"]): it["slug"] for it in items if it.get("numeric_alias")
}
na_gates: dict[str, dict[str, Any]] = cfg.get("n/a_gates") or {}
client = GiteaClient(args.gitea_host, token) if token else None
if not client:
@ -717,6 +819,8 @@ def main(argv: list[str] | None = None) -> int:
print("::error::PR payload missing user.login or head.sha", file=sys.stderr)
return 1
target_url = f"https://{args.gitea_host}/{args.owner}/{args.repo}/pulls/{args.pr}"
comments = client.get_issue_comments(args.owner, args.repo, args.pr)
# Build team-membership probe closure that caches results per
@ -774,6 +878,47 @@ def main(argv: list[str] | None = None) -> int:
ack_state = compute_ack_state(comments, author, items_by_slug, numeric_aliases, probe)
body_state = {it["slug"]: section_marker_present(body, it["pr_section_marker"]) for it in items}
# --- N/A gate state (RFC#324 §N/A follow-up) ---
na_state: dict[str, dict[str, Any]] = {}
if na_gates:
na_state = compute_na_state(
comments, author, na_gates, numeric_aliases,
probe, client, args.owner,
)
# Post N/A declarations status (read by review-check.sh).
na_satisfied = [g for g, s in na_state.items() if s["declared"]]
na_missing = [g for g, s in na_state.items() if not s["declared"]]
if na_satisfied:
na_desc = f"N/A: {', '.join(na_satisfied)}"
na_post_state = "success"
elif na_missing:
na_desc = f"awaiting /sop-n/a declaration for: {', '.join(na_missing)}"
na_post_state = "pending"
else:
# Configured but no declarations yet.
na_desc = "no /sop-n/a declarations yet"
na_post_state = "pending"
na_context = "sop-checklist / na-declarations (pull_request)"
print(f"::notice::na-declarations status: {na_post_state}{na_desc}")
if not args.dry_run:
client.post_status(
args.owner, args.repo, head_sha,
state=na_post_state, context=na_context,
description=na_desc,
target_url=target_url,
)
print(f"::notice::na-declarations status posted: {na_context}{na_post_state}")
# Log per-gate diagnostics.
for gate in na_gates:
s = na_state.get(gate, {})
if s.get("declared"):
print(f"::notice:: [PASS] gate={gate} — N/A declared by {','.join(s['declared'])}"
+ (f" ({s['reason']})" if s.get("reason") else ""))
else:
extra = f" — rejected: {', '.join(s.get('rejected', []))}" if s.get("rejected") else ""
print(f"::notice:: [WAIT] gate={gate} — no valid N/A declaration yet{extra}")
state, description = render_status(items, ack_state, body_state)
mode = get_tier_mode(pr, cfg)
if mode == "soft":
@ -808,7 +953,6 @@ def main(argv: list[str] | None = None) -> int:
return 0 if state in ("success", "pending") else 1
return 0
target_url = f"https://{args.gitea_host}/{args.owner}/{args.repo}/pulls/{args.pr}"
client.post_status(
args.owner, args.repo, head_sha,
state=state, context=args.status_context,

View File

@ -133,9 +133,6 @@ PUSH_COMPENSATION_DESCRIPTION = (
"Compensated by status-reaper (workflow has no push: trigger; "
"Gitea 1.22.6 hardcoded-suffix bug — see .gitea/scripts/status-reaper.py)"
)
# Backward-compatible alias for older tests/tooling that predate the split
# between push-suffix compensation and pull-request-shadow compensation.
COMPENSATION_DESCRIPTION = PUSH_COMPENSATION_DESCRIPTION
PR_SHADOW_COMPENSATION_DESCRIPTION = (
"Compensated by status-reaper (default-branch pull_request status "
"shadowed by successful push status on same SHA; see "
@ -614,10 +611,11 @@ def list_recent_commit_shas(branch: str, limit: int) -> list[str]:
(verified via vendor-truth probe 2026-05-11 against
git.moleculesai.app `feedback_smoke_test_vendor_truth_not_shape_match`).
Raises ApiError on non-2xx OR on unexpected response shape. The
branch-level caller soft-skips this tick because the next scheduled
tick can safely retry the listing. Per-SHA status/write errors remain
separate and must not be mislabeled as commit-list outages.
Raises ApiError on non-2xx OR on unexpected response shape. This is
a HARD halt — without the commit list the sweep can't proceed. (The
per-SHA error isolation downstream is a different concern: tolerating
a transient 5xx on ONE commit's status is best-effort; losing the
commit list itself means we don't even know which commits to try.)
"""
_, body = api(
"GET",
@ -658,27 +656,7 @@ def reap_branch(
- compensated_per_sha: {<sha_full>: [<context>, ...]} only
SHAs that actually got at least one compensation are included
"""
try:
shas = list_recent_commit_shas(branch, limit)
except ApiError as e:
print(
"::warning::status-reaper skipped this tick because the "
f"commit list could not be read after retries: {e}"
)
return {
"scanned_shas": 0,
"compensated": 0,
"preserved_real_push": 0,
"preserved_unknown": 0,
"preserved_non_failure": 0,
"preserved_non_push_suffix": 0,
"preserved_unparseable": 0,
"compensated_pr_shadowed_by_push_success": 0,
"preserved_pr_without_push_success": 0,
"compensated_per_sha": {},
"skipped": True,
"skip_reason": "commit-list-api-error",
}
shas = list_recent_commit_shas(branch, limit)
aggregate: dict[str, Any] = {
"scanned_shas": 0,

View File

@ -85,10 +85,7 @@ def test_pr_needs_update_when_base_sha_absent_from_commits():
def test_merge_decision_requires_main_green_pr_green_and_current_base():
required = ["CI / all-required (pull_request)"]
main_status = {
"state": "success",
"statuses": [{"context": "CI / all-required (push)", "status": "success"}],
}
main_status = {"state": "success", "statuses": []}
pr_status = {
"state": "success",
"statuses": [{"context": "CI / all-required (pull_request)", "status": "success"}],
@ -107,10 +104,7 @@ def test_merge_decision_requires_main_green_pr_green_and_current_base():
def test_merge_decision_updates_stale_pr_before_merge():
decision = mq.evaluate_merge_readiness(
main_status={
"state": "success",
"statuses": [{"context": "CI / all-required (push)", "status": "success"}],
},
main_status={"state": "success", "statuses": []},
pr_status={"state": "success", "statuses": [{"context": "CI / all-required (pull_request)", "status": "success"}]},
required_contexts=["CI / all-required (pull_request)"],
pr_has_current_base=False,

View File

@ -1,8 +1,8 @@
#!/usr/bin/env python3
# Unit tests for sop-checklist.py
# Unit tests for sop-checklist-gate.py
#
# Run: python3 .gitea/scripts/tests/test_sop_checklist.py
# or: pytest .gitea/scripts/tests/test_sop_checklist.py
# Run: python3 .gitea/scripts/tests/test_sop_checklist_gate.py
# or: pytest .gitea/scripts/tests/test_sop_checklist_gate.py
#
# RFC#351 Step 2 of 6 — implementation MVP. Tests cover:
# - slug normalization (the 4 example variants in the script header)
@ -33,7 +33,7 @@ sys.path.insert(0, PARENT)
import importlib.util # noqa: E402
_spec = importlib.util.spec_from_file_location(
"sop_checklist", os.path.join(PARENT, "sop-checklist.py")
"sop_checklist_gate", os.path.join(PARENT, "sop-checklist-gate.py")
)
sop = importlib.util.module_from_spec(_spec)
_spec.loader.exec_module(sop) # type: ignore[union-attr]
@ -134,22 +134,18 @@ class TestParseDirectives(unittest.TestCase):
def setUp(self):
self.aliases = _numeric_aliases()
def parse_ack_revoke(self, body):
directives, na_directives = sop.parse_directives(body, self.aliases)
self.assertEqual(na_directives, [])
return directives
def test_simple_ack(self):
d = self.parse_ack_revoke("/sop-ack comprehensive-testing")
d = sop.parse_directives("/sop-ack comprehensive-testing", self.aliases)
self.assertEqual(d, [("sop-ack", "comprehensive-testing", "")])
def test_simple_revoke(self):
d = self.parse_ack_revoke("/sop-revoke staging-smoke")
d = sop.parse_directives("/sop-revoke staging-smoke", self.aliases)
self.assertEqual(d, [("sop-revoke", "staging-smoke", "")])
def test_ack_with_note(self):
d = self.parse_ack_revoke(
"/sop-ack comprehensive-testing LGTM the test covers all edge cases"
d = sop.parse_directives(
"/sop-ack comprehensive-testing LGTM the test covers all edge cases",
self.aliases,
)
self.assertEqual(len(d), 1)
self.assertEqual(d[0][0], "sop-ack")
@ -157,12 +153,13 @@ class TestParseDirectives(unittest.TestCase):
self.assertIn("LGTM", d[0][2])
def test_numeric_shorthand(self):
d = self.parse_ack_revoke("/sop-ack 1")
d = sop.parse_directives("/sop-ack 1", self.aliases)
self.assertEqual(d, [("sop-ack", "comprehensive-testing", "")])
def test_revoke_with_reason(self):
d = self.parse_ack_revoke(
"/sop-revoke comprehensive-testing realized the e2e was mocking the DB"
d = sop.parse_directives(
"/sop-revoke comprehensive-testing realized the e2e was mocking the DB",
self.aliases,
)
self.assertEqual(d[0][0], "sop-revoke")
self.assertEqual(d[0][1], "comprehensive-testing")
@ -174,7 +171,7 @@ class TestParseDirectives(unittest.TestCase):
"/sop-ack comprehensive-testing\n"
"Will follow up on the doc nit separately."
)
d = self.parse_ack_revoke(body)
d = sop.parse_directives(body, self.aliases)
self.assertEqual(len(d), 1)
self.assertEqual(d[0][1], "comprehensive-testing")
@ -183,7 +180,7 @@ class TestParseDirectives(unittest.TestCase):
"/sop-ack comprehensive-testing\n"
"/sop-ack local-postgres-e2e\n"
)
d = self.parse_ack_revoke(body)
d = sop.parse_directives(body, self.aliases)
self.assertEqual(len(d), 2)
slugs = {x[1] for x in d}
self.assertEqual(slugs, {"comprehensive-testing", "local-postgres-e2e"})
@ -192,21 +189,21 @@ class TestParseDirectives(unittest.TestCase):
# A directive embedded mid-line is not honored (prevents review
# comments like "to /sop-ack you need..." from acting as acks).
body = "If you want to /sop-ack comprehensive-testing reply in this thread"
d = self.parse_ack_revoke(body)
d = sop.parse_directives(body, self.aliases)
self.assertEqual(d, [])
def test_leading_whitespace_allowed(self):
body = " /sop-ack comprehensive-testing"
d = self.parse_ack_revoke(body)
d = sop.parse_directives(body, self.aliases)
self.assertEqual(len(d), 1)
def test_empty_body(self):
self.assertEqual(sop.parse_directives("", self.aliases), ([], []))
self.assertEqual(sop.parse_directives(None, self.aliases), ([], []))
self.assertEqual(sop.parse_directives("", self.aliases), [])
self.assertEqual(sop.parse_directives(None, self.aliases), [])
def test_normalization_applied(self):
# /sop-ack Comprehensive_Testing → canonical comprehensive-testing
d = self.parse_ack_revoke("/sop-ack Comprehensive_Testing")
d = sop.parse_directives("/sop-ack Comprehensive_Testing", self.aliases)
self.assertEqual(d[0][1], "comprehensive-testing")

View File

@ -32,7 +32,6 @@ THIS_DIR="$(cd "$(dirname "$0")" && pwd)"
SCRIPT_DIR="$(cd "$THIS_DIR/.." && pwd)"
WORKFLOW_DIR="$(cd "$THIS_DIR/../../workflows" && pwd)"
WORKFLOW="$WORKFLOW_DIR/sop-tier-refire.yml"
DISPATCH_WORKFLOW="$WORKFLOW_DIR/review-refire-comments.yml"
SCRIPT="$SCRIPT_DIR/sop-tier-refire.sh"
PASS=0
@ -88,7 +87,6 @@ assert_file_exists() {
echo
echo "== existence =="
assert_file_exists "workflow file exists" "$WORKFLOW"
assert_file_exists "dispatcher workflow file exists" "$DISPATCH_WORKFLOW"
assert_file_exists "script file exists" "$SCRIPT"
if [ "$FAIL" -gt 0 ]; then
echo
@ -106,44 +104,30 @@ echo "== T6/T7 workflow yaml =="
PARSE_OUT=$(python3 -c 'import sys,yaml;yaml.safe_load(open(sys.argv[1]).read());print("ok")' "$WORKFLOW" 2>&1 || true)
assert_eq "T7 workflow parses as YAML" "ok" "$PARSE_OUT"
# The old per-workflow issue_comment listener caused queue storms because
# Gitea queues jobs before evaluating job-level `if:`. The script remains,
# but comment-triggered refires route through the single dispatcher.
# Three required gates in the `if:` expression
WORKFLOW_CONTENT=$(cat "$WORKFLOW")
if printf '%s' "$WORKFLOW_CONTENT" | grep -q '^ issue_comment:'; then
echo " FAIL T6a manual fallback workflow must not listen on issue_comment"
FAIL=$((FAIL + 1))
FAILED_TESTS="${FAILED_TESTS} T6a"
else
echo " PASS T6a manual fallback workflow does not listen on issue_comment"
PASS=$((PASS + 1))
fi
assert_contains "T6b workflow exposes workflow_dispatch" \
"workflow_dispatch" "$WORKFLOW_CONTENT"
assert_contains "T6c workflow documents unsupported manual inputs" \
"workflow_dispatch inputs" "$WORKFLOW_CONTENT"
assert_contains "T6a workflow if: contains author_association gate" \
"github.event.comment.author_association" "$WORKFLOW_CONTENT"
assert_contains "T6b workflow if: gates on MEMBER/OWNER/COLLABORATOR" \
'["MEMBER","OWNER","COLLABORATOR"]' "$WORKFLOW_CONTENT"
assert_contains "T6c workflow if: contains slash-command trigger" \
"/refire-tier-check" "$WORKFLOW_CONTENT"
assert_contains "T6d workflow if: gates on PR-not-issue" \
"github.event.issue.pull_request" "$WORKFLOW_CONTENT"
assert_contains "T6e workflow listens on issue_comment" \
"issue_comment" "$WORKFLOW_CONTENT"
assert_contains "T6f workflow requests statuses:write permission" \
"statuses: write" "$WORKFLOW_CONTENT"
# Does NOT check out PR HEAD (security)
if grep -q 'ref: \${{ github.event.pull_request.head' "$WORKFLOW"; then
echo " FAIL T6d workflow MUST NOT check out PR head (security)"
echo " FAIL T6g workflow MUST NOT check out PR head (security)"
FAIL=$((FAIL + 1))
FAILED_TESTS="${FAILED_TESTS} T6d"
FAILED_TESTS="${FAILED_TESTS} T6g"
else
echo " PASS T6d workflow does not check out PR head"
echo " PASS T6g workflow does not check out PR head"
PASS=$((PASS + 1))
fi
DISPATCH_PARSE_OUT=$(python3 -c 'import sys,yaml;yaml.safe_load(open(sys.argv[1]).read());print("ok")' "$DISPATCH_WORKFLOW" 2>&1 || true)
assert_eq "T6e dispatcher workflow parses as YAML" "ok" "$DISPATCH_PARSE_OUT"
DISPATCH_CONTENT=$(cat "$DISPATCH_WORKFLOW")
assert_contains "T6f dispatcher listens on issue_comment" \
"issue_comment" "$DISPATCH_CONTENT"
assert_contains "T6g dispatcher handles /qa-recheck" \
"/qa-recheck" "$DISPATCH_CONTENT"
assert_contains "T6h dispatcher handles /security-recheck" \
"/security-recheck" "$DISPATCH_CONTENT"
assert_contains "T6i dispatcher handles /refire-tier-check" \
"/refire-tier-check" "$DISPATCH_CONTENT"
# T1-T5 — script behavior against a local Gitea-fixture
echo
echo "== T1-T5 script behavior (vs local fixture) =="

View File

@ -111,7 +111,7 @@ items:
# N/A gate declarations (RFC#324 §N/A follow-up).
# PRs where a gate genuinely does not apply (e.g., pure-infra with no
# qa surface, or docs-only) can be declared N/A by a non-author peer
# who is in one of the gate's required_teams. The sop-checklist
# who is in one of the gate's required_teams. The sop-checklist-gate
# posts a `sop-checklist / na-declarations (pull_request)` status that
# review-check.sh reads to skip the Gitea-APPROVE requirement.
#

View File

@ -133,49 +133,59 @@ jobs:
# the name match works on PRs that don't touch workspace-server/).
platform-build:
name: Platform (Go)
needs: changes
runs-on: ubuntu-latest
# mc#774 (closed 2026-05-14): Phase 4 flip of the platform-build job.
# Phase 4 (#656) originally flipped this to continue-on-error: false based on
# Phase-3-masked "green on main 2026-05-12". Two failure classes then surfaced:
# (1) 4x delegation_test.go sqlmock gaps (PR #669 / #634 fix-forward, closed).
# (2) TestMCPHandler_CommitMemory_GlobalScope_Blocked (mcp_test.go:433):
# OFFSEC-001 hardening collided with test assertion; tracked in mc#762.
# Fix-forward for (1) landed in PR #669. The mc#762 gap (2) is a separate
# issue — it does NOT block this flip because the test is already wrapped in
# the diagnostic step with its own continue-on-error: true (line 203).
# Flip confirmed by CI / Platform (Go) status = success on main HEAD 363905d3.
continue-on-error: false
# Job-level ceiling. The go test step below runs with a per-step 10m timeout;
# this cap catches any step that leaks past that. Set well above 10m so
# the per-step timeout is the active constraint.
timeout-minutes: 15
# mc#774 (interim): re-mask platform-build pending fix-forward. Phase 4
# (#656) flipped this to continue-on-error: false based on a Phase-3-masked
# "green on main 2026-05-12" — the prior continue-on-error: true had
# been hiding failing tests in workspace-server/internal/handlers/.
# Two distinct failure classes surfaced on 0e5152c3:
# (1) 4x delegation_test.go (lines 1110/1176/1228/1271): helpers
# expectExecuteDelegationBase/Success/Failed are missing sqlmock
# expectations for queries production has issued since ~2026-04-21
# (last_outbound_at UPDATE, lookupDeliveryMode/Runtime SELECTs,
# a2a_receive INSERT activity_logs, recordLedgerStatus writes).
# Halt cond #3 applies (regression > 7 days → broader sweep).
# (2) 1x mcp_test.go:433 (TestMCPHandler_CommitMemory_GlobalScope_Blocked):
# commit 7d1a189f (2026-05-10) hardened mcp.go to scrub err.Error()
# from JSON-RPC responses (OFFSEC-001), but the test asserts the
# error message contains "GLOBAL". Production-vs-test contract
# collision — needs design call, not mock update.
# Time-boxed Option A (90 min) did not fit the cross-cutting scope.
# This is a sequenced revert→fix→reflip per
# feedback_strict_root_only_after_class_a emergency clause — NOT
# a permanent re-mask. Re-flip blocked on mc#774 fix-forward landing.
# Other 4 #656 flips (changes, canvas-build, shellcheck, python-lint)
# retain continue-on-error: false; only platform-build regresses.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true # mc#774 fix-forward in flight; re-flip when mc#774 lands (PR #669 → rebase after #709)
defaults:
run:
working-directory: workspace-server
steps:
- if: false
- if: needs.changes.outputs.platform != 'true'
working-directory: .
run: echo "No platform/** changes — skipping real build steps; this job always runs to satisfy the required-check name on branch protection."
- if: always()
- if: needs.changes.outputs.platform == 'true'
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- if: always()
- if: needs.changes.outputs.platform == 'true'
uses: actions/setup-go@40f1582b2485089dde7abd97c1529aa768e1baff # v5
with:
go-version: 'stable'
- if: always()
- if: needs.changes.outputs.platform == 'true'
run: go mod download
- if: always()
- if: needs.changes.outputs.platform == 'true'
run: go build ./cmd/server
# CLI (molecli) moved to standalone repo: git.moleculesai.app/molecule-ai/molecule-cli
- if: always()
- if: needs.changes.outputs.platform == 'true'
run: go vet ./...
- if: always()
- if: needs.changes.outputs.platform == 'true'
name: Install golangci-lint
run: go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.12.2
- if: always()
- if: needs.changes.outputs.platform == 'true'
name: Run golangci-lint
run: $(go env GOPATH)/bin/golangci-lint run --timeout 3m ./...
- if: always()
- if: needs.changes.outputs.platform == 'true'
name: Diagnostic — per-package verbose 60s
run: |
set +e
@ -191,15 +201,11 @@ jobs:
echo "::endgroup::"
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
- if: always()
- if: needs.changes.outputs.platform == 'true'
name: Run tests with race detection and coverage
# Explicit timeout: cold runner cache causes OOM kills at ~4m39s on the
# full ./... suite with race detection + coverage. A 10m per-step timeout
# lets the suite complete on cold cache (~5-7m) while failing cleanly
# instead of OOM-killing. The job-level timeout (15m) is a backstop.
run: go test -race -timeout 10m -coverprofile=coverage.out ./...
run: go test -race -coverprofile=coverage.out ./...
- if: always()
- if: needs.changes.outputs.platform == 'true'
name: Per-file coverage report
# Advisory — lists every source file with its coverage so reviewers
# can see at-a-glance where gaps are. Sorted ascending so the worst
@ -213,7 +219,7 @@ jobs:
END {for (f in s) printf "%6.1f%% %s\n", s[f]/c[f], f}' \
| sort -n
- if: always()
- if: needs.changes.outputs.platform == 'true'
name: Check coverage thresholds
# Enforces two gates from #1823 Layer 1:
# 1. Total floor (25% — ratchet plan in COVERAGE_FLOOR.md).
@ -301,28 +307,28 @@ jobs:
# siblings — verified empirically on PR #2314).
canvas-build:
name: Canvas (Next.js)
needs: changes
runs-on: ubuntu-latest
timeout-minutes: 20
# Phase 4 (RFC #219 §1): confirmed green on main 2026-05-12.
continue-on-error: false
defaults:
run:
working-directory: canvas
steps:
- if: false
- if: needs.changes.outputs.canvas != 'true'
working-directory: .
run: echo "No canvas/** changes — skipping real build steps; this job always runs to satisfy the required-check name on branch protection."
- if: always()
- if: needs.changes.outputs.canvas == 'true'
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- if: always()
- if: needs.changes.outputs.canvas == 'true'
uses: actions/setup-node@48b55a011bda9f5d6aeb4c2d9c7362e8dae4041e # v6.4.0
with:
node-version: '22'
- if: always()
- if: needs.changes.outputs.canvas == 'true'
run: rm -f package-lock.json && npm install
- if: always()
- if: needs.changes.outputs.canvas == 'true'
run: npm run build
- if: always()
- if: needs.changes.outputs.canvas == 'true'
name: Run tests with coverage
# Coverage instrumentation is configured in canvas/vitest.config.ts
# (provider: v8, reporters: text + html + json-summary). Step 2 of
@ -331,7 +337,7 @@ jobs:
# tracked in #1815) after the team sees what current coverage is.
run: npx vitest run --coverage
- name: Upload coverage summary as artifact
if: always()
if: needs.changes.outputs.canvas == 'true' && always()
# Pinned to v3 for Gitea act_runner v0.6 compatibility — v4+ uses
# the GHES 3.10+ artifact protocol that Gitea 1.22.x does NOT
# implement, surfacing as `GHESNotSupportedError: @actions/artifact
@ -348,15 +354,16 @@ jobs:
# Shellcheck (E2E scripts) — required check, always runs.
shellcheck:
name: Shellcheck (E2E scripts)
needs: changes
runs-on: ubuntu-latest
# Phase 4 (RFC #219 §1): confirmed green on main 2026-05-12.
continue-on-error: false
steps:
- if: false
- if: needs.changes.outputs.scripts != 'true'
run: echo "No tests/e2e/ or infra/scripts/ changes — skipping real shellcheck; this job always runs to satisfy the required-check name on branch protection."
- if: always()
- if: needs.changes.outputs.scripts == 'true'
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- if: always()
- if: needs.changes.outputs.scripts == 'true'
name: Run shellcheck on tests/e2e/*.sh and infra/scripts/*.sh
# shellcheck is pre-installed on ubuntu-latest runners (via apt).
# infra/scripts/ is included because setup.sh + nuke.sh gate the
@ -367,48 +374,29 @@ jobs:
find tests/e2e infra/scripts -type f -name '*.sh' -print0 \
| xargs -0 shellcheck --severity=warning
- if: always()
- if: needs.changes.outputs.scripts == 'true'
name: Lint cleanup-trap hygiene (RFC #2873)
run: bash tests/e2e/lint_cleanup_traps.sh
- if: always()
- if: needs.changes.outputs.scripts == 'true'
name: Run E2E bash unit tests (no live infra)
run: |
bash tests/e2e/test_model_slug.sh
- if: always()
name: Test ECR promote-tenant-image script (mock-driven, no live infra)
# Covers scripts/promote-tenant-image.sh — the codified
# :staging-latest → :latest ECR promote + tenant fleet redeploy
# closing molecule-ai/molecule-core#660. 40 mock-driven cases
# exercise every exit path (preflight, snapshot, promote, redeploy
# 403→SSM-refresh, verify, rollback). No live AWS/CP/SSM calls.
run: |
bash scripts/test-promote-tenant-image.sh
- if: always()
name: Shellcheck promote-tenant-image script
# scripts/ is excluded from the bulk shellcheck pass above (legacy
# SC3040/SC3043 cleanup pending). Run shellcheck explicitly on
# the promote script + its test harness so regressions there are
# caught by the required check.
run: |
shellcheck --severity=warning \
scripts/promote-tenant-image.sh \
scripts/test-promote-tenant-image.sh
canvas-deploy-reminder:
name: Canvas Deploy Reminder
runs-on: ubuntu-latest
# This job must run on PRs because all-required needs it. The step exits
# 0 when it is not a main push, giving branch protection a green no-op
# instead of a skipped/missing required dependency.
needs: canvas-build
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
needs: [changes, canvas-build]
# Keep the job itself always runnable. Gitea 1.22.6 leaves job-level
# event/ref `if:` gates as pending on PRs, which blocks the combined
# status even though this reminder is intentionally non-required.
steps:
- name: Write deploy reminder to step summary
env:
COMMIT_SHA: ${{ github.sha }}
CANVAS_CHANGED: "true"
CANVAS_CHANGED: ${{ needs.changes.outputs.canvas }}
EVENT_NAME: ${{ github.event_name }}
REF_NAME: ${{ github.ref }}
# github.server_url resolves via the workflow-level env override
@ -453,6 +441,7 @@ jobs:
# Python Lint & Test — required check, always runs.
python-lint:
name: Python Lint & Test
needs: changes
runs-on: ubuntu-latest
# Phase 4 (RFC #219 §1): confirmed green on main 2026-05-12.
continue-on-error: false
@ -462,25 +451,25 @@ jobs:
run:
working-directory: workspace
steps:
- if: false
- if: needs.changes.outputs.python != 'true'
working-directory: .
run: echo "No workspace/** changes — skipping real lint+test; this job always runs to satisfy the required-check name on branch protection."
- if: always()
- if: needs.changes.outputs.python == 'true'
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- if: always()
- if: needs.changes.outputs.python == 'true'
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
with:
python-version: '3.11'
cache: pip
cache-dependency-path: workspace/requirements.txt
- if: always()
- if: needs.changes.outputs.python == 'true'
run: pip install -r requirements.txt pytest pytest-asyncio pytest-cov sqlalchemy>=2.0.0
# Coverage flags + fail-under floor moved into workspace/pytest.ini
# (issue #1817) so local `pytest` and CI use identical config.
- if: always()
- if: needs.changes.outputs.python == 'true'
run: python -m pytest --tb=short
- if: always()
- if: needs.changes.outputs.python == 'true'
name: Per-file critical-path coverage (MCP / inbox / auth)
# MCP-critical Python files have a per-file floor on top of the
# 86% total floor in pytest.ini. See issue #2790 for full rationale.
@ -545,104 +534,84 @@ jobs:
# red silently merged through. See internal#286 for the three concrete
# tonight-of-2026-05-11 incidents that prompted the emergency bump.
#
# This job deliberately has no `needs:`. Gitea 1.22/act_runner can mark a
# job-level `if: always()` + `needs:` sentinel as skipped before upstream
# jobs settle, leaving branch protection with a permanent pending
# `CI / all-required` context. Instead, this independent sentinel polls the
# required commit-status contexts for this SHA and fails if any fail, skip,
# or never emit.
# Three properties of this job each close a failure mode:
#
# canvas-deploy-reminder is intentionally NOT included in all-required.needs.
# It is an informational main-push reminder, not a PR quality gate. Keeping
# it in this dependency list lets a skipped reminder skip the required
# sentinel before the `always()` guard can emit a branch-protection status.
# 1. `if: always()` — runs even when an upstream fails. Without it the
# sentinel is `skipped` and protection treats that as missing → merge
# ungated.
#
# 2. Assertion is `result == "success"` per dep, NOT `!= "failure"`.
# A `skipped` upstream (job gated by `if:` evaluating false, matrix
# entry that couldn't run) must NOT silently pass through.
# `skipped`-as-green is exactly the failure mode this gate closes.
#
# 3. `needs:` is the canonical list of "what counts as required."
# status_check_contexts will reference only `ci/all-required` (Step 5
# follow-up — branch-protection PATCH is Owners-tier per
# `feedback_never_admin_merge_bypass`, separate PR); a new job is
# added simply by listing it in `needs:` here.
# `.gitea/workflows/ci-required-drift.yml` files a [ci-drift] issue
# hourly if this list diverges from status_check_contexts or from
# audit-force-merge.yml's REQUIRED_CHECKS env (RFC §4 + §6).
#
# Excluded from `needs:`: `canvas-deploy-reminder` — it is an
# operational reminder, not a CI prerequisite. Keep that job runnable
# on PRs with an internal no-op guard; job-level event/ref `if:` gates
# are a Gitea 1.22.6 pending-status trap.
#
# Phase 3 (RFC #219 §1) safety: underlying build jobs carry
# continue-on-error: true so their failures are masked to null (2026-05-12: re-enabled mc#774 interim)
# (Gitea suppresses status reporting for CoE jobs). This sentinel
# runs with continue-on-error: false so it always reports its
# result to the API — without this, the required-status entry
# (CI / all-required (pull_request)) is never created, which
# blocks PR merges. When Phase 3 ends, flip underlying jobs to
# continue-on-error: false; this sentinel can then be flipped to
# continue-on-error: true if a Phase-4 regression requires it.
continue-on-error: false
runs-on: ubuntu-latest
timeout-minutes: 45
timeout-minutes: 1
needs:
- changes
- platform-build
- canvas-build
- shellcheck
- python-lint
if: ${{ always() }}
steps:
- name: Wait for required CI contexts
env:
GITEA_TOKEN: ${{ secrets.GITHUB_TOKEN }}
API_ROOT: ${{ github.server_url }}/api/v1
REPOSITORY: ${{ github.repository }}
COMMIT_SHA: ${{ github.sha }}
EVENT_NAME: ${{ github.event_name }}
- name: Assert every required dependency succeeded
run: |
set -euo pipefail
python3 - <<'PY'
import json
import os
import sys
import time
import urllib.error
import urllib.request
token = os.environ["GITEA_TOKEN"]
api_root = os.environ["API_ROOT"].rstrip("/")
repo = os.environ["REPOSITORY"]
sha = os.environ["COMMIT_SHA"]
event = os.environ["EVENT_NAME"]
required = [
f"CI / Detect changes ({event})",
f"CI / Platform (Go) ({event})",
f"CI / Canvas (Next.js) ({event})",
f"CI / Shellcheck (E2E scripts) ({event})",
f"CI / Python Lint & Test ({event})",
]
terminal_bad = {"failure", "error"}
deadline = time.time() + 40 * 60
last_summary = None
def fetch_statuses():
statuses = []
for page in range(1, 6):
url = f"{api_root}/repos/{repo}/commits/{sha}/statuses?page={page}&limit=100"
req = urllib.request.Request(url, headers={"Authorization": f"token {token}"})
with urllib.request.urlopen(req, timeout=10) as resp:
chunk = json.load(resp)
if not chunk:
break
statuses.extend(chunk)
latest = {}
for item in statuses:
ctx = item.get("context")
if not ctx:
continue
prev = latest.get(ctx)
if prev is None or (item.get("updated_at") or item.get("created_at") or "") >= (prev.get("updated_at") or prev.get("created_at") or ""):
latest[ctx] = item
return latest
while True:
try:
latest = fetch_statuses()
except (TimeoutError, OSError, urllib.error.URLError) as exc:
if time.time() >= deadline:
print(f"FAIL: status polling did not recover before deadline: {exc}", file=sys.stderr)
sys.exit(1)
print(f"WARN: status poll failed, retrying: {exc}", flush=True)
time.sleep(15)
continue
states = {ctx: (latest.get(ctx) or {}).get("status") or (latest.get(ctx) or {}).get("state") or "missing" for ctx in required}
summary = ", ".join(f"{ctx}={state}" for ctx, state in states.items())
if summary != last_summary:
print(summary, flush=True)
last_summary = summary
bad = {ctx: state for ctx, state in states.items() if state in terminal_bad}
if bad:
print("FAIL: required CI context failed:", file=sys.stderr)
for ctx, state in bad.items():
desc = (latest.get(ctx) or {}).get("description") or ""
print(f" - {ctx}: {state} {desc}", file=sys.stderr)
sys.exit(1)
if all(state == "success" for state in states.values()):
print(f"OK: all {len(required)} required CI contexts succeeded")
sys.exit(0)
if time.time() >= deadline:
print("FAIL: timed out waiting for required CI contexts:", file=sys.stderr)
for ctx, state in states.items():
print(f" - {ctx}: {state}", file=sys.stderr)
sys.exit(1)
time.sleep(15)
PY
# `needs.*.result` is one of: success | failure | cancelled | skipped | null.
# We assert success per dep (not != failure) — see RFC §2 reasoning above.
# Null results are skipped: they come from Phase 3 (continue-on-error: true
# suppresses status) or from jobs still in-flight. The sentinel succeeds
# rather than blocking PRs on Phase 3 noise.
results='${{ toJSON(needs) }}'
echo "$results"
echo "$results" | python3 -c '
import json, sys
ns = json.load(sys.stdin)
# Phase 3 masked: jobs with continue-on-error: true may report "failure"
# Remove when mc#774 handler test failures are resolved.
PHASE3_MASKED = {"platform-build"}
# Exclude null (Phase 3 suppressed / in-flight) from the bad list.
bad = [(k, v.get("result")) for k, v in ns.items()
if v.get("result") not in ("success", None, "cancelled", "skipped") and k not in PHASE3_MASKED]
if bad:
print(f"FAIL: jobs not green:", file=sys.stderr)
for k, r in bad:
print(f" - {k}: {r}", file=sys.stderr)
sys.exit(1)
pending = [(k, v.get("result")) for k, v in ns.items()
if v.get("result") is None]
cancelled = [(k, v.get("result")) for k, v in ns.items()
if v.get("result") == "cancelled"]
if pending:
print(f"WARN: {len(pending)} job(s) still in-flight (result=null): " +
", ".join(k for k, _ in pending), file=sys.stderr)
if cancelled:
print(f"INFO: {len(cancelled)} job(s) masked by continue-on-error: " +
", ".join(k for k, _ in cancelled), file=sys.stderr)
print(f"OK: all {len(ns)} required jobs succeeded (or Phase-3 suppressed)")
'

View File

@ -69,13 +69,6 @@ name: E2E API Smoke Test
# 2318) shows Postgres ready in 3s, Redis in 1s, Platform in 1s when
# they DO come up. Timeouts are not the bottleneck; not bumped.
#
# Item #1046 (fixed 2026-05-14): Stale platform-server from cancelled runs
# lingers on :8080 after "Stop platform" step is skipped (workflow cancelled
# before reaching line 335). Added a pre-start "Kill stale platform-server"
# step (line 286) that scans /proc for zombie platform-server processes
# and kills them before the port probe or bind. Makes the ephemeral port
# probe + start sequence deterministic.
#
# Item explicitly NOT fixed here: failing test `Status back online`
# fails because the platform's langgraph workspace template image
# (ghcr.io/molecule-ai/workspace-template-langgraph:latest) returns
@ -290,35 +283,6 @@ jobs:
echo "PORT=${PLATFORM_PORT}" >> "$GITHUB_ENV"
echo "BASE=http://127.0.0.1:${PLATFORM_PORT}" >> "$GITHUB_ENV"
echo "Platform host port: ${PLATFORM_PORT}"
- name: Kill stale platform-server before start (issue #1046)
if: needs.detect-changes.outputs.api == 'true'
run: |
# Concurrent runs on the same host-network act_runner can leave a
# zombie platform-server from a cancelled/timeout run. Cancelled
# runs never reach the "Stop platform" step (line 335), so the
# old process lingers. Kill it before the ephemeral port probe
# or start so the port is definitively free.
#
# /proc scan — works on any Linux without pkill/lsof/ss.
# comm field is truncated to 15 chars: "platform-serve" matches
# "platform-server". Verify with cmdline to avoid false positives.
killed=0
for pid in $(grep -l "platform-serve" /proc/[0-9]*/comm 2>/dev/null); do
kpid="${pid%/comm}"
kpid="${kpid##*/}"
cmdline=$(cat "/proc/${kpid}/cmdline" 2>/dev/null | tr '\0' ' ')
if echo "$cmdline" | grep -q "platform-server"; then
echo "Killing stale platform-server pid ${kpid}: ${cmdline}"
kill "$kpid" 2>/dev/null || true
killed=$((killed + 1))
fi
done
if [ "$killed" -gt 0 ]; then
sleep 2
echo "Killed $killed stale process(es); port(s) released."
else
echo "No stale platform-server found."
fi
- name: Start platform (background)
if: needs.detect-changes.outputs.api == 'true'
working-directory: workspace-server
@ -382,4 +346,3 @@ jobs:
run: |
docker rm -f "$PG_CONTAINER" 2>/dev/null || true
docker rm -f "$REDIS_CONTAINER" 2>/dev/null || true

View File

@ -83,41 +83,25 @@ jobs:
REPO: ${{ github.repository }}
run: |
set -euo pipefail
# Fetch all open PRs and run gate-check on each. This scheduled
# refresher is advisory; a transient Gitea list timeout must not turn
# main red. PR-specific gate-check runs still use normal failure
# semantics.
# Fetch all open PRs and run gate-check on each
# socket.setdefaulttimeout(15): defence-in-depth for missing SOP_TIER_CHECK_TOKEN.
# gate_check.py uses timeout=15 on every urlopen call; this catches the
# inline Python polling loop too (issue #603).
pr_numbers=$(python3 <<'PY'
import json
import os
import socket
import sys
import time
import urllib.error
import urllib.request
socket.setdefaulttimeout(30)
socket.setdefaulttimeout(15)
token = os.environ["GITEA_TOKEN"]
repo = os.environ["REPO"]
url = f"https://git.moleculesai.app/api/v1/repos/{repo}/pulls?state=open&limit=100"
last_error = None
for attempt in range(1, 4):
req = urllib.request.Request(
url,
headers={"Authorization": f"token {token}", "Accept": "application/json"},
)
try:
with urllib.request.urlopen(req, timeout=30) as r:
prs = json.loads(r.read())
break
except (TimeoutError, OSError, urllib.error.URLError, urllib.error.HTTPError) as exc:
last_error = exc
print(f"warning: PR list fetch attempt {attempt}/3 failed: {exc}", file=sys.stderr)
if attempt < 3:
time.sleep(2 * attempt)
else:
print(f"warning: skipped scheduled gate-check refresh; failed to list open PRs after 3 attempts: {last_error}", file=sys.stderr)
raise SystemExit(0)
req = urllib.request.Request(
f"https://git.moleculesai.app/api/v1/repos/{repo}/pulls?state=open&limit=100",
headers={"Authorization": f"token {token}", "Accept": "application/json"},
)
with urllib.request.urlopen(req) as r:
prs = json.loads(r.read())
for pr in prs:
print(pr["number"])
PY

View File

@ -48,9 +48,4 @@ jobs:
REQUIRED_CONTEXTS: >-
CI / all-required (pull_request),
sop-checklist / all-items-acked (pull_request)
# Push-side required contexts. Checking CI / all-required (push)
# explicitly instead of the combined state avoids false-pause when
# non-blocking jobs (continue-on-error: true) have failed — those
# failures pollute combined state but do not gate merges.
PUSH_REQUIRED_CONTEXTS: CI / all-required (push)
run: python3 .gitea/scripts/gitea-merge-queue.py

View File

@ -86,33 +86,22 @@ jobs:
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
# A full-history checkout can exceed the runner's quiet/startup
# window before the path filter emits logs. Fetch the common push
# case cheaply; the script below fetches the exact BASE SHA if it is
# not present in the shallow checkout.
fetch-depth: 2
fetch-depth: 0
- id: filter
# Inline replacement for dorny/paths-filter — see e2e-api.yml.
run: |
# Gitea Actions evaluates github.event.before to empty string in shell
# scripts. Use GITHUB_EVENT_BEFORE shell env var instead (Gitea
# correctly populates it for push events). PR case uses template var.
BASE=""
BASE="${GITHUB_BASE_REF:-${{ github.event.before }}}"
if [ "${{ github.event_name }}" = "pull_request" ] && [ -n "${{ github.event.pull_request.base.sha }}" ]; then
BASE="${{ github.event.pull_request.base.sha }}"
elif [ -n "$GITHUB_EVENT_BEFORE" ]; then
BASE="$GITHUB_EVENT_BEFORE"
fi
if [ -z "$BASE" ] || echo "$BASE" | grep -qE '^0+$'; then
echo "handlers=true" >> "$GITHUB_OUTPUT"
exit 0
fi
# timeout 30 guards against the case where BASE points to a ref that
# git can resolve but cat-file hangs (rare on corrupted objects).
if ! timeout 30 git cat-file -e "$BASE" 2>/dev/null; then
if ! git cat-file -e "$BASE" 2>/dev/null; then
git fetch --depth=1 origin "$BASE" 2>/dev/null || true
fi
if ! timeout 30 git cat-file -e "$BASE" 2>/dev/null; then
if ! git cat-file -e "$BASE" 2>/dev/null; then
echo "handlers=true" >> "$GITHUB_OUTPUT"
exit 0
fi

View File

@ -93,7 +93,7 @@ jobs:
lint:
name: lint-continue-on-error-tracking
runs-on: ubuntu-latest
timeout-minutes: 20
timeout-minutes: 10
# Phase 3 (RFC #219 §1): surface masked defects without blocking
# PRs. Pre-existing continue-on-error: true directives on main
# all violate this lint at first — intentional. Flip to false

View File

@ -37,6 +37,12 @@ name: publish-workspace-server-image
on:
push:
branches: [main]
paths:
- 'workspace-server/**'
- 'canvas/**'
- 'manifest.json'
- 'scripts/**'
- '.gitea/workflows/publish-workspace-server-image.yml'
workflow_dispatch:
# No `concurrency:` block here. Gitea 1.22.6 can cancel queued runs despite
@ -68,14 +74,12 @@ jobs:
set -euo pipefail
echo "::group::Docker daemon health check"
echo "Runner: ${HOSTNAME:-unknown}"
docker_info="$(docker info 2>&1)" || {
docker info 2>&1 | head -5 || {
echo "::error::Docker daemon is not accessible at /var/run/docker.sock"
echo "::error::Runner: ${HOSTNAME:-unknown}"
printf '%s\n' "${docker_info}"
echo "::error::Check: (1) daemon is running, (2) runner user is in docker group, (3) sock permissions are 660+"
exit 1
}
printf '%s\n' "${docker_info}" | sed -n '1,5p'
echo "Docker daemon OK"
echo "::endgroup::"

View File

@ -9,10 +9,10 @@
# Triggers on:
# - `pull_request_target`: opened, synchronize, reopened
# → initial status posts when PR opens / re-pushes
# - comment refires are handled by `review-refire-comments.yml`
# → a single issue_comment dispatcher prevents every SOP/review
# comment from enqueueing separate qa/security/tier jobs on
# Gitea 1.22.6 before job-level `if:` can skip them.
# - `issue_comment`: /qa-recheck slash-command on the PR
# → manual re-fire after a QA reviewer clicks APPROVE
# (Gitea 1.22.6 doesn't re-fire on pull_request_review, per
# go-gitea/gitea#33700 + feedback_pull_request_review_no_refire)
# Workflow name = `qa-review` ; job name = `approved`.
# The job's own pass/fail conclusion publishes the status context
# `qa-review / approved (<event>)` — NO `POST /statuses` call → NO
@ -85,6 +85,8 @@ name: qa-review
on:
pull_request_target:
types: [opened, synchronize, reopened]
issue_comment:
types: [created]
permissions:
contents: read
@ -95,10 +97,16 @@ jobs:
approved:
# Gate the job:
# - On pull_request_target events: always run.
# Comment-triggered refires live in review-refire-comments.yml. Keeping
# this workflow PR-only avoids comment-triggered queue storms.
# - On issue_comment events: only when it's a PR comment and the body
# contains the slash-command. NO privilege gate at the step level
# (RFC#324 v1.3 §A1.1): a non-collaborator's /qa-recheck is fine
# because the eval is read-only and idempotent — re-running it
# just re-confirms whether a real team-member APPROVE exists.
if: |
github.event_name == 'pull_request_target'
github.event_name == 'pull_request_target' ||
(github.event_name == 'issue_comment' &&
github.event.issue.pull_request != null &&
startsWith(github.event.comment.body, '/qa-recheck'))
runs-on: ubuntu-latest
steps:
- name: Privilege check (A1.1 — INFORMATIONAL log only, NOT a gate)

View File

@ -9,17 +9,19 @@ name: redeploy-tenants-on-main
# - Workflow-level env.GITHUB_SERVER_URL pinned per
# feedback_act_runner_github_server_url.
# - `continue-on-error: true` on each job (RFC §1 contract).
# - Dropped unsupported `workflow_run` (task #81).
# - Later changed to manual-only after publish-workspace-server-image.yml
# gained an integrated ordered production deploy job.
# - ~~**Gitea workflow_run trigger limitation**~~ FIXED: replaced with
# push+paths filter per this PR. Gitea 1.22.6 does not support
# `workflow_run` (task #81). The push trigger fires on every
# commit to publish-workspace-server-image.yml which is the
# same signal (only successful runs commit to main).
#
# Manual production tenant redeploy/rollback helper.
# Auto-refresh prod tenant EC2s after every main merge.
#
# Why this workflow is manual-only: publish-workspace-server-image now owns
# the ordered build -> push -> production auto-deploy sequence in one workflow.
# A separate push-triggered redeploy workflow races before the new ECR image
# exists and can paint main red with a false deployment failure.
# Why this workflow exists: publish-workspace-server-image builds and
# pushes a new platform-tenant :<sha> to ECR on every merge to main,
# but running tenants pulled their image once at boot and never re-pull.
# Users see stale code indefinitely.
#
# This workflow closes the gap by calling the control-plane admin
# endpoint that performs a canary-first, batched, health-gated rolling
@ -32,11 +34,16 @@ name: redeploy-tenants-on-main
# Gitea suspension migration. The staging-verify.yml promote step now
# uses the same redeploy-fleet endpoint (fixes the silent-GHCR gap).
#
# Runtime ordering for automatic deploys now lives in
# publish-workspace-server-image.yml:
# 1. build-and-push creates new :staging-<sha> images in ECR.
# 2. deploy-production waits for required push contexts on that SHA.
# 3. deploy-production calls redeploy-fleet canary-first.
# Runtime ordering:
# 1. publish-workspace-server-image completes → new :staging-<sha> in ECR.
# 2. The merge that updates publish-workspace-server-image.yml triggers
# this push/path-filtered workflow, which calls redeploy-fleet with
# target_tag=staging-<sha>. No CDN propagation wait needed — ECR image
# manifest is consistent immediately after push.
# 3. Calls redeploy-fleet with canary_slug (if set) and a soak
# period. Canary proves the image boots; batches follow.
# 4. Any failure aborts the rollout and leaves older tenants on the
# prior image — safer default than half-and-half state.
#
# Rollback path: set PROD_MANUAL_REDEPLOY_TARGET_TAG as a repo/org
# variable or secret, run workflow_dispatch, then unset it after the
@ -44,14 +51,21 @@ name: redeploy-tenants-on-main
# re-pulling the pinned image on every tenant.
on:
push:
branches: [main]
paths:
- '.gitea/workflows/publish-workspace-server-image.yml'
workflow_dispatch:
permissions:
contents: read
# No write scopes needed — the workflow hits an external CP endpoint,
# not the GitHub API.
# Serialize manual redeploys so two operator-triggered rollbacks do not
# overlap and cause confusing per-tenant SSM state.
# Serialize redeploys so two rapid main pushes' redeploys don't overlap
# and cause confusing per-tenant SSM state. Without this, GitHub's
# implicit workflow_run queueing would *probably* serialize them, but
# the explicit block makes the invariant defensible. Mirrors the
# concurrency block on redeploy-tenants-on-staging.yml for shape parity.
#
# NOTE: cancel-in-progress: false removed (Rule 7 fix). Gitea 1.22.6
# cancels queued runs regardless of this setting, so it provides no
@ -67,15 +81,18 @@ env:
jobs:
# bp-exempt: production redeploy is a side-effect workflow, not a merge gate.
redeploy:
if: ${{ github.event_name == 'workflow_dispatch' }}
# Gitea 1.22.6 does not support workflow_run. This workflow is now
# controlled by push/path triggers plus an explicit kill switch.
if: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }}
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
timeout-minutes: 25
env:
# Rule 9 fix: keep the same operational kill switch surface as the
# integrated auto-deploy workflow.
# Rule 9 fix: operational kill switch for auto-triggered deployments.
# Set repo variable or secret PROD_AUTO_DEPLOY_DISABLED=true to prevent
# this workflow from redeploying. Manual workflow_dispatch bypasses this.
PROD_AUTO_DEPLOY_DISABLED: ${{ vars.PROD_AUTO_DEPLOY_DISABLED || secrets.PROD_AUTO_DEPLOY_DISABLED || '' }}
steps:
- name: Kill-switch guard
@ -97,8 +114,13 @@ jobs:
# tag) → used verbatim. Lets ops pin `latest` for emergency
# rollback to last canary-verified digest, or pin a specific
# `staging-<sha>` to roll back to a known-good build.
# 2. Default → `staging-<short_head_sha>` for manual reruns from
# the current default-branch SHA.
# 2. Default → `staging-<short_head_sha>`. The just-published
# digest. Bypasses the `:latest` retag path that's currently
# dead (staging-verify soft-skips without canary fleet, so
# the only thing retagging `:latest` today is the manual
# promote-latest.yml — last run 2026-04-28). Auto-trigger
# from the main push uses github.sha; manual
# dispatch with no variable falls through to github.sha.
env:
PROD_MANUAL_REDEPLOY_TARGET_TAG: ${{ vars.PROD_MANUAL_REDEPLOY_TARGET_TAG || secrets.PROD_MANUAL_REDEPLOY_TARGET_TAG || '' }}
HEAD_SHA: ${{ github.sha }}
@ -252,11 +274,13 @@ jobs:
# fail the workflow, which is what `ok=true` should have
# guaranteed all along.
#
# When the redeploy is triggered manually with a specific tag
# (target_tag != "latest"), the expected SHA may not equal
# ${{ github.sha }}.
# When the redeploy was triggered by workflow_dispatch with a
# specific tag (target_tag != "latest"), the expected SHA may
# not equal ${{ github.sha }} — in that case we resolve via
# GHCR's manifest. For workflow_run (default :latest) the
# workflow_run.head_sha is the SHA that just published.
env:
EXPECTED_SHA: ${{ github.sha }}
EXPECTED_SHA: ${{ github.event.workflow_run.head_sha || github.sha }}
TARGET_TAG: ${{ steps.tag.outputs.target_tag }}
# Tenant subdomain template — slugs from the response are
# appended. Production CP issues `<slug>.moleculesai.app`;

View File

@ -1,113 +0,0 @@
# Consolidated comment dispatcher for manual review/tier refires.
#
# Gitea 1.22 queues one run per workflow subscribed to `issue_comment` before
# evaluating job-level `if:`. SOP-heavy PRs therefore created queue storms when
# qa-review, security-review, sop-checklist, and sop-tier-refire all
# listened to comments. This workflow is the single non-SOP comment subscriber:
# ordinary comments no-op quickly; slash commands post the required status
# contexts to the PR head SHA.
name: review-refire-comments
on:
issue_comment:
types: [created]
permissions:
contents: read
pull-requests: read
statuses: write
concurrency:
group: ${{ github.repository }}-${{ github.workflow }}-${{ github.event.issue.number || github.ref }}
cancel-in-progress: true
jobs:
dispatch:
runs-on: ubuntu-latest
steps:
- name: Classify comment
id: classify
env:
COMMENT_BODY: ${{ github.event.comment.body }}
IS_PR: ${{ github.event.issue.pull_request != null }}
run: |
set -euo pipefail
{
echo "run_qa=false"
echo "run_security=false"
echo "run_tier=false"
} >> "$GITHUB_OUTPUT"
if [ "$IS_PR" != "true" ]; then
echo "::notice::not a PR comment; no-op"
exit 0
fi
first_line=$(printf '%s\n' "$COMMENT_BODY" | sed -n '1p')
case "$first_line" in
/qa-recheck*)
echo "run_qa=true" >> "$GITHUB_OUTPUT"
;;
/security-recheck*)
echo "run_security=true" >> "$GITHUB_OUTPUT"
;;
/refire-tier-check*)
echo "run_tier=true" >> "$GITHUB_OUTPUT"
;;
*)
echo "::notice::no supported review refire slash command; no-op"
;;
esac
- name: Check out BASE ref for trusted scripts
if: |
steps.classify.outputs.run_qa == 'true' ||
steps.classify.outputs.run_security == 'true' ||
steps.classify.outputs.run_tier == 'true'
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
ref: ${{ github.event.repository.default_branch }}
- name: Refire qa-review status
if: steps.classify.outputs.run_qa == 'true'
env:
GITEA_TOKEN: ${{ secrets.RFC_324_TEAM_READ_TOKEN || secrets.GITHUB_TOKEN }}
GITEA_HOST: git.moleculesai.app
REPO: ${{ github.repository }}
PR_NUMBER: ${{ github.event.issue.number }}
DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
TEAM: qa
TEAM_ID: '20'
REVIEW_CHECK_DEBUG: '0'
REVIEW_CHECK_STRICT: '0'
COMMENT_AUTHOR: ${{ github.event.comment.user.login }}
run: |
set -euo pipefail
.gitea/scripts/review-refire-status.sh
- name: Refire security-review status
if: steps.classify.outputs.run_security == 'true'
env:
GITEA_TOKEN: ${{ secrets.RFC_324_TEAM_READ_TOKEN || secrets.GITHUB_TOKEN }}
GITEA_HOST: git.moleculesai.app
REPO: ${{ github.repository }}
PR_NUMBER: ${{ github.event.issue.number }}
DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
TEAM: security
TEAM_ID: '21'
REVIEW_CHECK_DEBUG: '0'
REVIEW_CHECK_STRICT: '0'
COMMENT_AUTHOR: ${{ github.event.comment.user.login }}
run: |
set -euo pipefail
.gitea/scripts/review-refire-status.sh
- name: Refire sop-tier-check status
if: steps.classify.outputs.run_tier == 'true'
env:
GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}
GITEA_HOST: git.moleculesai.app
REPO: ${{ github.repository }}
PR_NUMBER: ${{ github.event.issue.number }}
COMMENT_AUTHOR: ${{ github.event.comment.user.login }}
SOP_DEBUG: '0'
run: bash .gitea/scripts/sop-tier-refire.sh

View File

@ -66,28 +66,19 @@ jobs:
# PR#372's ci.yml port used. Diffs against the PR base or the
# previous push SHA, then matches against the wheel-relevant
# path set.
#
# NOTE: Gitea Actions does not expose github.event.before as a
# shell environment variable. The ${{ github.event.before }} template
# expression works inside YAML run: blocks but is evaluated to an
# empty string for push events, making the ${VAR:-fallback} always
# use the fallback. Use GITHUB_EVENT_BEFORE instead — it IS set in
# the runner's shell environment for push events.
BASE=""
if [ "${{ github.event_name }}" = "pull_request" ]; then
BASE="${GITHUB_BASE_REF:-${{ github.event.before }}}"
if [ "${{ github.event_name }}" = "pull_request" ] && [ -n "${{ github.event.pull_request.base.sha }}" ]; then
BASE="${{ github.event.pull_request.base.sha }}"
elif [ -n "$GITHUB_EVENT_BEFORE" ]; then
BASE="$GITHUB_EVENT_BEFORE"
fi
if [ -z "$BASE" ] || echo "$BASE" | grep -qE '^0+$'; then
# New branch or no previous SHA: treat as wheel-relevant.
echo "wheel=true" >> "$GITHUB_OUTPUT"
exit 0
fi
if ! timeout 30 git cat-file -e "$BASE" 2>/dev/null; then
if ! git cat-file -e "$BASE" 2>/dev/null; then
git fetch --depth=1 origin "$BASE" 2>/dev/null || true
fi
if ! timeout 30 git cat-file -e "$BASE" 2>/dev/null; then
if ! git cat-file -e "$BASE" 2>/dev/null; then
echo "wheel=true" >> "$GITHUB_OUTPUT"
exit 0
fi

View File

@ -12,6 +12,8 @@ name: security-review
on:
pull_request_target:
types: [opened, synchronize, reopened]
issue_comment:
types: [created]
permissions:
contents: read
@ -20,10 +22,13 @@ permissions:
jobs:
# bp-exempt: PR security review bot signal; required merge state is enforced by CI / all-required.
approved:
# Comment-triggered refires live in review-refire-comments.yml. Keeping
# this workflow PR-only avoids comment-triggered queue storms.
# See qa-review.yml header for full A1-α / A1.1 (v1.3 — informational
# log only, NOT a gate) / A4 / A5 design rationale.
if: |
github.event_name == 'pull_request_target'
github.event_name == 'pull_request_target' ||
(github.event_name == 'issue_comment' &&
github.event.issue.pull_request != null &&
startsWith(github.event.comment.body, '/security-recheck'))
runs-on: ubuntu-latest
steps:
- name: Privilege check (A1.1 — INFORMATIONAL log only, NOT a gate)

View File

@ -1,4 +1,4 @@
# sop-checklist — peer-ack merge gate for SOP-checklist items.
# sop-checklist-gate — peer-ack merge gate for SOP-checklist items.
#
# RFC#351 Step 2 of 6 (implementation MVP).
#
@ -65,15 +65,7 @@
# membership, compute, post status). Re-running on any event is safe —
# the new status overwrites the previous one for the same context.
name: sop-checklist
# Cancel any in-progress runs for the same PR to prevent
# stale runs from overwriting newer status contexts.
concurrency:
group: ${{ github.repository }}-${{ github.workflow }}-${{ github.event.pull_request.number || github.event.issue.number || github.ref }}
cancel-in-progress: true
# bp-required: yes ← emits sop-checklist / all-items-acked (pull_request)
name: sop-checklist-gate
on:
pull_request_target:
@ -91,7 +83,7 @@ permissions:
statuses: write
jobs:
all-items-acked:
gate:
# Run on pull_request_target events always. On issue_comment events,
# only when the comment is on a PR (issue_comment fires for issues
# too) and the body contains one of the slash-commands.
@ -114,7 +106,7 @@ jobs:
# qa-review.yml so the script source is always trusted.
ref: ${{ github.event.repository.default_branch }}
- name: Run sop-checklist
- name: Run sop-checklist-gate
env:
GITEA_TOKEN: ${{ secrets.SOP_CHECKLIST_GATE_TOKEN || secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number || github.event.issue.number }}
@ -122,7 +114,7 @@ jobs:
REPO_NAME: ${{ github.event.repository.name }}
run: |
set -euo pipefail
python3 .gitea/scripts/sop-checklist.py \
python3 .gitea/scripts/sop-checklist-gate.py \
--owner "$OWNER" \
--repo "$REPO_NAME" \
--pr "$PR_NUMBER" \

View File

@ -61,10 +61,6 @@ on:
pull_request_review:
types: [submitted, dismissed, edited]
concurrency:
group: ${{ github.repository }}-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
tier-check:
runs-on: ubuntu-latest

View File

@ -1,4 +1,4 @@
# sop-tier-refire — manual fallback for sop-tier-check refire.
# sop-tier-refire — issue_comment-triggered refire of sop-tier-check.
#
# Closes internal#292. Gitea 1.22.6 doesn't refire workflows on the
# `pull_request_review` event (go-gitea/gitea#33700); the `sop-tier-check`
@ -8,12 +8,12 @@
# to merge is the admin force-merge path (audited via `audit-force-merge`
# but the audit trail keeps growing; see `feedback_never_admin_merge_bypass`).
#
# Comment-triggered refires now live in `review-refire-comments.yml`. Gitea
# queues issue_comment workflows before evaluating job-level `if:`, so having
# qa-review, security-review, sop-checklist, and sop-tier-refire all subscribe
# to every comment caused queue storms on SOP-heavy PRs. This workflow is a
# non-automatic breadcrumb only; Gitea 1.22.6 does not support
# workflow_dispatch inputs, so real refires must use `/refire-tier-check`.
# Workaround pattern from `feedback_pull_request_review_no_refire`:
# `issue_comment` events DO fire reliably on 1.22.6. When a repo
# MEMBER/OWNER/COLLABORATOR comments `/refire-tier-check` on a PR, this
# workflow re-runs the sop-tier-check logic and POSTs the resulting
# status to the PR head SHA directly. No empty commit, no git history
# bloat, no cascade re-fire of every other workflow on the PR.
#
# SECURITY MODEL:
#
@ -37,16 +37,43 @@
# Rate-limit: a 1s pre-sleep + a "skip if status posted in last 30s"
# guard prevents comment-spam from thrashing the status. See the script.
name: sop-tier-check refire (manual)
name: sop-tier-check refire (issue_comment)
on:
workflow_dispatch:
issue_comment:
types: [created]
jobs:
refire:
# Three gates, all required:
# - comment is on a PR (not a plain issue)
# - commenter is MEMBER, OWNER, or COLLABORATOR
# - comment body contains the slash-command trigger
if: |
github.event.issue.pull_request != null &&
contains(fromJson('["MEMBER","OWNER","COLLABORATOR"]'), github.event.comment.author_association) &&
contains(github.event.comment.body, '/refire-tier-check')
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
statuses: write
steps:
- name: Explain supported refire path
run: |
echo "::error::Gitea 1.22.6 does not support workflow_dispatch inputs here; comment /refire-tier-check on the PR instead."
exit 1
- name: Check out base branch (for the script)
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
# Load the script from the default branch (main), matching the
# sop-tier-check.yml security model.
ref: ${{ github.event.repository.default_branch }}
- name: Re-evaluate sop-tier-check and POST status
env:
# Same org-level secret sop-tier-check.yml + audit-force-merge.yml use.
# Fallback to GITHUB_TOKEN with a clear error if missing.
GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}
GITEA_HOST: git.moleculesai.app
REPO: ${{ github.repository }}
PR_NUMBER: ${{ github.event.issue.number }}
COMMENT_AUTHOR: ${{ github.event.comment.user.login }}
# Set to '1' for diagnostic per-API-call output. Off by default.
SOP_DEBUG: '0'
run: bash .gitea/scripts/sop-tier-refire.sh

View File

@ -1 +1 @@
staging trigger 2026-05-14T17:35:02Z
staging trigger

View File

@ -1 +0,0 @@
trigger

View File

@ -8,17 +8,11 @@ import type { AuditEntry, AuditResponse } from "@/types/audit";
type EventFilter = "all" | AuditEntry["event_type"];
// Contrast note: text is rendered on near-black bg (bg-*-950/40). Every text
// color below is chosen to pass WCAG 2.1 AA 4.5:1 on that background:
// blue-300 ( delegation ) ≈ 8.8:1
// violet-300 ( decision ) ≈ 9.5:1
// yellow-200 ( gate ) ≈ 11.5:1
// orange-300 ( hitl ) ≈ 9.1:1
const BADGE_COLORS: Record<AuditEntry["event_type"], { text: string; bg: string; border: string }> = {
delegation: { text: "text-blue-300", bg: "bg-blue-950/40", border: "border-blue-800/40" },
decision: { text: "text-violet-300", bg: "bg-violet-950/40", border: "border-violet-800/40" },
gate: { text: "text-yellow-200", bg: "bg-yellow-950/40", border: "border-yellow-800/40" },
hitl: { text: "text-orange-300", bg: "bg-orange-950/40", border: "border-orange-800/40" },
delegation: { text: "text-accent", bg: "bg-blue-950/40", border: "border-blue-800/40" },
decision: { text: "text-violet-400", bg: "bg-violet-950/40", border: "border-violet-800/40" },
gate: { text: "text-yellow-400", bg: "bg-yellow-950/40", border: "border-yellow-800/40" },
hitl: { text: "text-orange-400", bg: "bg-orange-950/40", border: "border-orange-800/40" },
};
const FILTERS: { id: EventFilter; label: string }[] = [
@ -251,6 +245,7 @@ export function AuditEntryRow({ entry, now }: AuditEntryRowProps) {
{/* Event-type badge */}
<span
className={`shrink-0 text-[9px] font-semibold uppercase tracking-wider px-1.5 py-0.5 rounded border ${badge.text} ${badge.bg} ${badge.border}`}
aria-label={`Event type: ${entry.event_type}`}
>
{entry.event_type}
</span>

View File

@ -100,8 +100,8 @@ export function BatchActionBar() {
aria-label="Batch workspace actions"
className="fixed bottom-6 left-1/2 -translate-x-1/2 z-[200] flex items-center gap-3 px-4 py-2.5 rounded-2xl bg-surface-sunken/95 border border-line/70 shadow-2xl shadow-black/50 backdrop-blur-md"
>
{/* Selection count badge — bg-zinc-700 passes 7.2:1 on white text */}
<span className="text-[12px] font-semibold text-white bg-zinc-700 px-2.5 py-0.5 rounded-full tabular-nums">
{/* Selection count badge */}
<span className="text-[12px] font-semibold text-white bg-accent-strong/80 px-2.5 py-0.5 rounded-full tabular-nums">
{count} selected
</span>
@ -112,7 +112,7 @@ export function BatchActionBar() {
type="button"
disabled={busy}
onClick={() => setPending("restart")}
className="flex items-center gap-1.5 px-3 py-1.5 rounded-lg text-[12px] font-medium text-white bg-sky-900/30 hover:bg-sky-800/50 border border-sky-700/30 hover:border-sky-600/50 transition-colors disabled:opacity-50 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-sky-500/70"
className="flex items-center gap-1.5 px-3 py-1.5 rounded-lg text-[12px] font-medium text-sky-300 bg-sky-900/30 hover:bg-sky-800/50 border border-sky-700/30 hover:border-sky-600/50 transition-colors disabled:opacity-50 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-sky-500/70"
>
<span aria-hidden="true"></span>
Restart All
@ -122,7 +122,7 @@ export function BatchActionBar() {
type="button"
disabled={busy}
onClick={() => setPending("pause")}
className="flex items-center gap-1.5 px-3 py-1.5 rounded-lg text-[12px] font-medium text-white bg-amber-900/30 hover:bg-amber-800/50 border border-amber-700/30 hover:border-amber-600/50 transition-colors disabled:opacity-50 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-amber-500/70"
className="flex items-center gap-1.5 px-3 py-1.5 rounded-lg text-[12px] font-medium text-warm bg-amber-900/30 hover:bg-amber-800/50 border border-amber-700/30 hover:border-amber-600/50 transition-colors disabled:opacity-50 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-amber-500/70"
>
<span aria-hidden="true"></span>
Pause All
@ -132,7 +132,7 @@ export function BatchActionBar() {
type="button"
disabled={busy}
onClick={() => setPending("delete")}
className="flex items-center gap-1.5 px-3 py-1.5 rounded-lg text-[12px] font-medium text-white bg-red-900/30 hover:bg-red-800/50 border border-red-700/30 hover:border-red-600/50 transition-colors disabled:opacity-50 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-red-500/70"
className="flex items-center gap-1.5 px-3 py-1.5 rounded-lg text-[12px] font-medium text-bad bg-red-900/30 hover:bg-red-800/50 border border-red-700/30 hover:border-red-600/50 transition-colors disabled:opacity-50 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-red-500/70"
>
<span aria-hidden="true"></span>
Delete All

View File

@ -318,7 +318,7 @@ export function ContextMenu() {
aria-hidden="true"
className={`w-1.5 h-1.5 rounded-full ${statusDotClass(contextMenu.nodeData.status)}`}
/>
<span className="text-[10px] text-ink">{contextMenu.nodeData.status}</span>
<span className="text-[10px] text-ink-mid">{contextMenu.nodeData.status}</span>
</div>
</div>

View File

@ -187,7 +187,7 @@ export function ConversationTraceModal({ open, workspaceId: _workspaceId, onClos
isError
? "bg-red-950/50 text-bad"
: isSend
? "bg-cyan-950 text-cyan-300"
? "bg-cyan-950/50 text-cyan-400"
: isReceive
? "bg-blue-950/50 text-accent"
: "bg-surface-card text-ink-mid"
@ -251,7 +251,7 @@ export function ConversationTraceModal({ open, workspaceId: _workspaceId, onClos
{/* Error */}
{isError && entry.error_detail && (
<div className="text-[10px] text-bad mt-1 truncate">
<div className="text-[10px] text-bad/80 mt-1 truncate">
{entry.error_detail.slice(0, 200)}
</div>
)}
@ -272,7 +272,7 @@ export function ConversationTraceModal({ open, workspaceId: _workspaceId, onClos
)}
{responseText && (
<div className="mt-1 bg-surface/60 border border-emerald-900/30 rounded-lg px-3 py-2 max-h-32 overflow-y-auto">
<div className="text-[8px] text-good uppercase mb-1">Response</div>
<div className="text-[8px] text-good/60 uppercase mb-1">Response</div>
<div className="text-[10px] text-ink-mid whitespace-pre-wrap break-words leading-relaxed">
{responseText.slice(0, 2000)}
{responseText.length > 2000 && (

View File

@ -126,8 +126,8 @@ export function DeleteCascadeConfirmDialog({
{/* Cascade warning */}
<div className="rounded border border-red-900/40 bg-red-950/20 px-3 py-2.5 mb-4">
<p className="text-[12px] text-red-300 leading-relaxed">
Deleting will cascade <strong className="text-red-100">all child workspaces and their data will be permanently removed.</strong> This cannot be undone.
<p className="text-[12px] text-bad/80 leading-relaxed">
Deleting will cascade <strong className="text-red-200">all child workspaces and their data will be permanently removed.</strong> This cannot be undone.
</p>
</div>
@ -170,7 +170,7 @@ export function DeleteCascadeConfirmDialog({
className={`px-3.5 py-1.5 text-[13px] rounded-lg transition-colors focus:outline-none focus-visible:ring-2 focus-visible:ring-red-500/60 focus-visible:ring-offset-2 focus-visible:ring-offset-surface-sunken
${checked
? "bg-red-700 hover:bg-red-600 text-white cursor-pointer"
: "bg-red-900/30 text-red-400 cursor-not-allowed"
: "bg-red-900/30 text-bad/40 cursor-not-allowed"
}`}
>
Delete All

View File

@ -76,7 +76,7 @@ export class ErrorBoundary extends React.Component<
<p className="text-sm text-ink-mid mb-1">
An unexpected error occurred while rendering the application.
</p>
<p className="text-xs text-bad mb-6 font-mono break-all">
<p className="text-xs text-bad/80 mb-6 font-mono break-all">
{this.state.error?.message ?? "Unknown error"}
</p>
<div className="flex items-center justify-center gap-3">

View File

@ -360,7 +360,7 @@ function SnippetBlock({
<button
type="button"
onClick={onCopy}
className="text-xs px-2 py-1 rounded bg-accent text-white hover:bg-accent-strong transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-1"
className="text-xs px-2 py-1 rounded bg-accent-strong/80 hover:bg-accent text-white focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-1"
>
{copied ? "Copied!" : "Copy"}
</button>

View File

@ -451,7 +451,7 @@ function ProviderPickerModal({
<button
onClick={() => handleSaveKey(index)}
disabled={!entry.value.trim() || entry.saving}
className="px-3 py-1.5 bg-accent-strong hover:bg-accent text-[11px] rounded text-white disabled:opacity-30 transition-colors shrink-0 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-1"
className="px-3 py-1.5 bg-accent-strong hover:bg-accent text-[11px] rounded text-white disabled:opacity-30 transition-colors shrink-0"
>
{entry.saving ? "..." : "Save"}
</button>
@ -492,7 +492,7 @@ function ProviderPickerModal({
!selectorValue.providerId ||
(showModelInput && model.trim() === "")
}
className="px-3.5 py-1.5 text-[12px] bg-accent-strong hover:bg-accent text-white rounded-lg transition-colors disabled:opacity-40 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-1"
className="px-3.5 py-1.5 text-[12px] bg-accent-strong hover:bg-accent text-white rounded-lg transition-colors disabled:opacity-40"
>
{allSaved ? "Deploy" : entries.length > 1 ? "Add Keys" : "Add Key"}
</button>

View File

@ -420,7 +420,7 @@ export function ProviderModelSelector({
spellCheck={false}
autoComplete="off"
data-testid="model-input"
className="w-full bg-surface-sunken border border-line rounded px-2 py-1.5 text-[11px] text-ink font-mono focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-1 focus-visible:border-accent transition-colors disabled:opacity-50"
className="w-full bg-surface-sunken border border-line rounded px-2 py-1.5 text-[11px] text-ink font-mono focus:outline-none focus:border-accent focus:ring-1 focus:ring-accent/20 transition-colors disabled:opacity-50"
/>
<p className="text-[9px] text-ink-mid mt-1 leading-relaxed">
{selected?.wildcard

View File

@ -61,22 +61,9 @@ export function ThemeToggle({ className = "" }: { className?: string }) {
return;
}
setTheme(OPTIONS[next].value);
// Move focus to the new button so arrow-key navigation is continuous.
// Use direct-child query to scope strictly to this radiogroup's buttons
// and avoid accidentally focusing unrelated [role=radio] elements
// elsewhere in the DOM (e.g. React Flow canvas nodes).
// Guard: skip focus if the current target is no longer in the document
// (e.g. React StrictMode double-invokes handlers during re-render).
if (!e.currentTarget.isConnected) return;
const radiogroup = e.currentTarget.closest("[role=radiogroup]") as HTMLElement | null;
if (!radiogroup) return;
// Use children[] instead of querySelectorAll("> [role=radio]") to avoid
// jsdom's child-combinator selector parsing issues in test environments.
const btns = Array.from(radiogroup.children).filter(
(el): el is HTMLButtonElement =>
el.tagName === "BUTTON" && el.getAttribute("role") === "radio"
);
if (next < btns.length) btns[next]?.focus();
// Move focus to the new button so arrow-key navigation is continuous
const btns = (e.currentTarget.closest("[role=radiogroup]") as HTMLElement)?.querySelectorAll<HTMLButtonElement>("[role=radio]");
btns?.[next]?.focus();
},
[]
);

View File

@ -24,12 +24,8 @@ vi.mock("@/lib/theme-provider", () => ({
})),
}));
// Wrap cleanup in act() so any pending React state updates (e.g. from
// keyDown handlers that call setTheme) flush before DOM unmount. Without
// this, cleanup() can race against pending renders and cause INDEX_SIZE_ERR
// when the handleKeyDown callback tries to query the DOM mid-teardown.
afterEach(() => {
act(() => { cleanup(); });
cleanup();
vi.clearAllMocks();
});
@ -150,7 +146,7 @@ describe("ThemeToggle — keyboard navigation (WCAG 2.1.1 / ARIA radiogroup)", (
const radios = screen.getAllByRole("radio");
// dark (index 2) is current; ArrowRight should wrap to light (index 0)
act(() => { radios[2].focus(); });
act(() => { fireEvent.keyDown(radios[2], { key: "ArrowRight" }); });
fireEvent.keyDown(radios[2], { key: "ArrowRight" });
expect(mockSetTheme).toHaveBeenCalledWith("light");
});
@ -164,7 +160,7 @@ describe("ThemeToggle — keyboard navigation (WCAG 2.1.1 / ARIA radiogroup)", (
const radios = screen.getAllByRole("radio");
// light (index 0) is current; ArrowLeft should go to dark (index 2)
act(() => { radios[0].focus(); });
act(() => { fireEvent.keyDown(radios[0], { key: "ArrowLeft" }); });
fireEvent.keyDown(radios[0], { key: "ArrowLeft" });
expect(mockSetTheme).toHaveBeenCalledWith("dark");
});
@ -178,7 +174,7 @@ describe("ThemeToggle — keyboard navigation (WCAG 2.1.1 / ARIA radiogroup)", (
const radios = screen.getAllByRole("radio");
// light (index 0) is current; ArrowDown should go to system (index 1)
act(() => { radios[0].focus(); });
act(() => { fireEvent.keyDown(radios[0], { key: "ArrowDown" }); });
fireEvent.keyDown(radios[0], { key: "ArrowDown" });
expect(mockSetTheme).toHaveBeenCalledWith("system");
});
@ -191,7 +187,7 @@ describe("ThemeToggle — keyboard navigation (WCAG 2.1.1 / ARIA radiogroup)", (
render(<ThemeToggle />);
const radios = screen.getAllByRole("radio");
act(() => { radios[2].focus(); });
act(() => { fireEvent.keyDown(radios[2], { key: "Home" }); });
fireEvent.keyDown(radios[2], { key: "Home" });
expect(mockSetTheme).toHaveBeenCalledWith("light");
});
@ -204,14 +200,14 @@ describe("ThemeToggle — keyboard navigation (WCAG 2.1.1 / ARIA radiogroup)", (
render(<ThemeToggle />);
const radios = screen.getAllByRole("radio");
act(() => { radios[0].focus(); });
act(() => { fireEvent.keyDown(radios[0], { key: "End" }); });
fireEvent.keyDown(radios[0], { key: "End" });
expect(mockSetTheme).toHaveBeenCalledWith("dark");
});
it("does nothing on unrelated keys", () => {
render(<ThemeToggle />);
const radios = screen.getAllByRole("radio");
act(() => { fireEvent.keyDown(radios[0], { key: "Enter" }); });
fireEvent.keyDown(radios[0], { key: "Enter" });
expect(mockSetTheme).not.toHaveBeenCalled();
});
});

View File

@ -64,7 +64,6 @@ export function DropTargetBadge() {
{ghostVisible && (
<div
data-testid="ghost-slot"
aria-hidden="true"
className="pointer-events-none absolute z-40 rounded-lg border-2 border-dashed border-emerald-400/70 bg-emerald-500/10"
style={{
left: slotTL.x,
@ -76,8 +75,6 @@ export function DropTargetBadge() {
)}
<div
data-testid="drop-badge"
role="status"
aria-label={`Drop target: ${targetName}`}
className="pointer-events-none absolute z-50 -translate-x-1/2 -translate-y-full rounded-md bg-emerald-700 px-2 py-0.5 text-[11px] font-medium text-white shadow-lg shadow-emerald-950/40"
style={{ left: badge.x, top: badge.y - 6 }}
>

View File

@ -1,311 +0,0 @@
/**
 * Unit tests for buildDeployMap — the pure tree-traversal core of
* useOrgDeployState.
*
* What is tested here:
* - Root / leaf identification via parent-chain walk
* - isDeployingRoot: true when any descendant is "provisioning"
* - isActivelyProvisioning: true only for the node itself in that state
* - isLockedChild: true for non-root nodes in a deploying tree
* - isLockedChild: also true for nodes in deletingIds (even if not deploying)
* - descendantProvisioningCount: non-zero only on root nodes
* - Performance contract: O(n) single-pass walk tested by verifying
* correctness across 50-node trees (n=50, all cases above)
*
* What is NOT tested here (hook integration appropriate for E2E):
* - The useMemo / Zustand subscription wiring
* - React Flow integration (flowToScreenPosition, getInternalNode)
*
* Issue: #2071 (Canvas test gaps follow-up).
*/
import { describe, expect, it } from "vitest";
import { buildDeployMap, type OrgDeployState } from "../useOrgDeployState";
// ── Helpers ──────────────────────────────────────────────────────────────────
// Minimal projection row shape these tests feed to buildDeployMap:
// node id, parent link (null for roots), and a free-form status string.
type Projection = { id: string; parentId: string | null; status: string };
function proj(
  id: string,
  parentId: string | null,
  status: string,
): Projection {
  // Terse fixture-row constructor — keeps the tree tables in the tests compact.
  const row: Projection = { id, parentId, status };
  return row;
}
/** Build a deploy map from plain projection rows; the string[] of deleting
 * ids is converted to the Set that buildDeployMap expects. */
function m(
  ps: Projection[],
  deletingIds: string[] = [],
): Map<string, OrgDeployState> {
  return buildDeployMap(ps, new Set(deletingIds));
}
function s(
  map: Map<string, OrgDeployState>,
  id: string,
): OrgDeployState {
  // Non-optional lookup: fail loudly instead of letting `undefined`
  // leak into the expect() calls below.
  const entry = map.get(id);
  if (!entry) throw new Error(`no entry for id=${id}`);
  return entry;
}
// ── Empty / trivial ───────────────────────────────────────────────────────────
describe("buildDeployMap — empty", () => {
  it("returns empty map for empty projections", () => {
    // No input rows → no entries at all.
    const deployMap = m([]);
    expect(deployMap.size).toBe(0);
  });
});
// ── Single node ─────────────────────────────────────────────────────────────
describe("buildDeployMap — single node", () => {
it("isolated node is its own root and not deploying", () => {
const map = m([proj("a", null, "online")]);
expect(s(map, "a")).toEqual({
isActivelyProvisioning: false,
isDeployingRoot: false,
isLockedChild: false,
descendantProvisioningCount: 0,
});
});
it("isolated provisioning node is deploying root", () => {
const map = m([proj("a", null, "provisioning")]);
expect(s(map, "a")).toEqual({
isActivelyProvisioning: true,
isDeployingRoot: true,
isLockedChild: false,
descendantProvisioningCount: 1,
});
});
});
// ── Parent / child chains ─────────────────────────────────────────────────────
describe("buildDeployMap — parent / child chains", () => {
  // Arrows in the ASCII sketches point parent ──► child.
  it("root with online child: root is not deploying, child is not locked", () => {
    // A ──► B
    const map = m([
      proj("A", null, "online"),
      proj("B", "A", "online"),
    ]);
    expect(s(map, "A")).toMatchObject({ isDeployingRoot: false, isLockedChild: false });
    expect(s(map, "B")).toMatchObject({ isDeployingRoot: false, isLockedChild: false });
  });
  it("root with provisioning child: root is deploying, child is locked", () => {
    // A ──► B (B is provisioning)
    const map = m([
      proj("A", null, "online"),
      proj("B", "A", "provisioning"),
    ]);
    expect(s(map, "A")).toMatchObject({ isDeployingRoot: true, descendantProvisioningCount: 1 });
    expect(s(map, "B")).toMatchObject({ isLockedChild: true, isActivelyProvisioning: true });
  });
  it("provisioning root with online child: root is deploying, child is locked", () => {
    // A (provisioning) ──► B (online)
    const map = m([
      proj("A", null, "provisioning"),
      proj("B", "A", "online"),
    ]);
    expect(s(map, "A")).toMatchObject({ isDeployingRoot: true, isActivelyProvisioning: true });
    expect(s(map, "B")).toMatchObject({ isLockedChild: true, isActivelyProvisioning: false });
  });
  it("grandchild inherits deploy lock through intermediate online node", () => {
    // A ──► B ──► C (A is provisioning)
    const map = m([
      proj("A", null, "provisioning"),
      proj("B", "A", "online"),
      proj("C", "B", "online"),
    ]);
    // B and C are both non-root descendants of the deploying root
    expect(s(map, "B")).toMatchObject({ isLockedChild: true });
    expect(s(map, "C")).toMatchObject({ isLockedChild: true });
    expect(s(map, "A")).toMatchObject({ isDeployingRoot: true, descendantProvisioningCount: 1 });
  });
  it("deep chain: only the topmost node with a null parent counts as root", () => {
    // A ──► B ──► C ──► D (A is provisioning)
    const map = m([
      proj("A", null, "provisioning"),
      proj("B", "A", "online"),
      proj("C", "B", "online"),
      proj("D", "C", "online"),
    ]);
    // Filter keeps the ids whose state claims root-ness; only A may qualify.
    const roots = ["A", "B", "C", "D"].filter((id) => s(map, id).isDeployingRoot);
    expect(roots).toEqual(["A"]);
  });
});
// ── Sibling branching ─────────────────────────────────────────────────────────
describe("buildDeployMap — sibling branching", () => {
  // Locking is tree-wide: once any member provisions, every non-root node is
  // locked, and the root alone aggregates the provisioning count.
  it("parent with multiple children: deploying root propagates to all children", () => {
    // A (provisioning)
    //   /  \
    //  B    C
    const map = m([
      proj("A", null, "provisioning"),
      proj("B", "A", "online"),
      proj("C", "A", "online"),
    ]);
    expect(s(map, "B")).toMatchObject({ isLockedChild: true });
    expect(s(map, "C")).toMatchObject({ isLockedChild: true });
    expect(s(map, "A")).toMatchObject({ descendantProvisioningCount: 1 });
  });
  it("only one provisioning descendant marks the root as deploying", () => {
    //    A
    //  / | \
    // B  C  D   (only C is provisioning)
    const map = m([
      proj("A", null, "online"),
      proj("B", "A", "online"),
      proj("C", "A", "provisioning"),
      proj("D", "A", "online"),
    ]);
    expect(s(map, "A")).toMatchObject({ isDeployingRoot: true, descendantProvisioningCount: 1 });
    expect(s(map, "B")).toMatchObject({ isLockedChild: true });
    expect(s(map, "C")).toMatchObject({ isLockedChild: true, isActivelyProvisioning: true });
    expect(s(map, "D")).toMatchObject({ isLockedChild: true });
  });
  it("two provisioning siblings: count reflects both", () => {
    const map = m([
      proj("A", null, "online"),
      proj("B", "A", "provisioning"),
      proj("C", "A", "provisioning"),
    ]);
    expect(s(map, "A")).toMatchObject({ descendantProvisioningCount: 2 });
    expect(s(map, "B")).toMatchObject({ isActivelyProvisioning: true });
    expect(s(map, "C")).toMatchObject({ isActivelyProvisioning: true });
  });
});
// ── Multiple disjoint trees ───────────────────────────────────────────────────
describe("buildDeployMap — multiple disjoint trees", () => {
  it("each tree has its own root; deploying nodes are independent", () => {
    // Forest: X (provisioning) ──► Y, and a second tree P ──► Q with
    // nothing provisioning — the X-tree's lock must not bleed into P's.
    const forest = [
      proj("X", null, "provisioning"),
      proj("Y", "X", "online"),
      proj("P", null, "online"),
      proj("Q", "P", "online"),
    ];
    const map = m(forest);
    expect(s(map, "X")).toMatchObject({ isDeployingRoot: true });
    expect(s(map, "Y")).toMatchObject({ isLockedChild: true });
    for (const id of ["P", "Q"]) {
      expect(s(map, id)).toMatchObject({ isDeployingRoot: false, isLockedChild: false });
    }
  });
});
// ── Deleting nodes ────────────────────────────────────────────────────────────
describe("buildDeployMap — deletingIds", () => {
  // Deletion locking is orthogonal to provisioning: membership in the
  // deletingIds set alone is enough to lock a node.
  it("node in deletingIds is locked even if tree is not deploying", () => {
    const map = m(
      [
        proj("A", null, "online"),
        proj("B", "A", "online"),
      ],
      ["B"], // B is being deleted
    );
    expect(s(map, "A")).toMatchObject({ isLockedChild: false });
    expect(s(map, "B")).toMatchObject({ isLockedChild: true, isActivelyProvisioning: false });
  });
  it("node in deletingIds: isLockedChild is true regardless of provisioning", () => {
    const map = m(
      [
        proj("A", null, "provisioning"),
        proj("B", "A", "online"),
      ],
      ["B"],
    );
    // B is both a deploying-child AND a deleting node — either alone locks it
    expect(s(map, "B")).toMatchObject({ isLockedChild: true });
  });
  it("empty deletingIds set has no effect", () => {
    const map = m(
      [
        proj("A", null, "online"),
        proj("B", "A", "online"),
      ],
      [],
    );
    expect(s(map, "B")).toMatchObject({ isLockedChild: false });
  });
});
// ── descendantProvisioningCount ───────────────────────────────────────────────
describe("buildDeployMap — descendantProvisioningCount", () => {
  it("is 0 for non-root nodes", () => {
    // Counts aggregate onto roots only — a provisioning child reports zero.
    const deployMap = m([proj("A", null, "provisioning"), proj("B", "A", "provisioning")]);
    expect(s(deployMap, "B").descendantProvisioningCount).toBe(0);
  });
  it("includes the root's own status when provisioning", () => {
    // A is simultaneously root and provisioning, so it counts itself.
    const deployMap = m([proj("A", null, "provisioning"), proj("B", "A", "online")]);
    expect(s(deployMap, "A").descendantProvisioningCount).toBe(1);
  });
  it("accumulates all provisioning descendants (not just immediate children)", () => {
    // C is a grandchild of A — its state still rolls up to the root.
    const deployMap = m([
      proj("A", null, "online"),
      proj("B", "A", "online"),
      proj("C", "B", "provisioning"),
    ]);
    expect(s(deployMap, "A").descendantProvisioningCount).toBe(1);
  });
});
// ── O(n) performance ─────────────────────────────────────────────────────────
describe("buildDeployMap — O(n) performance contract", () => {
  it("handles a 50-node three-level tree without incorrect node assignments", () => {
    // Shape: 1 provisioning root → 7 online mid-level nodes → 42 online
    // leaves (6 per mid-level node) = 50 nodes total.
    const midIds = Array.from({ length: 7 }, (_, i) => `l1-${i}`);
    const leafIds = Array.from({ length: 42 }, (_, i) => `l2-${i}`);
    const projections: Projection[] = [
      proj("root", null, "provisioning"),
      ...midIds.map((id) => proj(id, "root", "online")),
      ...leafIds.map((id, i) => proj(id, `l1-${Math.floor(i / 6)}`, "online")),
    ];
    const map = m(projections);
    // The root is the only deploying node and counts itself exactly once.
    expect(s(map, "root")).toMatchObject({
      isDeployingRoot: true,
      isLockedChild: false,
      descendantProvisioningCount: 1,
    });
    // All 49 descendants are locked children and never roots.
    for (const id of [...midIds, ...leafIds]) {
      expect(s(map, id)).toMatchObject({ isLockedChild: true, isDeployingRoot: false });
    }
  });
});

View File

@ -5,7 +5,7 @@
// that the desktop ChatTab uses, but with a slimmer surface: no
// attachments, no A2A topology overlay, no conversation tracing.
import { useCallback, useEffect, useRef, useState } from "react";
import { useEffect, useRef, useState } from "react";
import { api } from "@/lib/api";
import { useCanvasStore } from "@/store/canvas";
@ -50,13 +50,26 @@ export function MobileChat({
}) {
const p = usePalette(dark);
const node = useCanvasStore((s) => s.nodes.find((n) => n.id === agentId));
const [messages, setMessages] = useState<ChatMessage[]>([]);
// Bootstrap from the canvas store's per-workspace message buffer so the
// user sees their prior thread on entry. The store is updated by the
// socket → ChatTab flows the desktop runs; on mobile we read from the
// same buffer to keep state coherent across viewports.
// NOTE: selector returns undefined (stable) — do NOT use ?? [] here,
// that creates a new [] reference on every store update when the key is
// absent, causing infinite re-render (React error #185).
const storedMessages = useCanvasStore((s) => s.agentMessages[agentId]);
const [messages, setMessages] = useState<ChatMessage[]>(() =>
(storedMessages ?? []).map((m) => ({
id: m.id,
role: "agent",
text: m.content,
ts: formatStoredTimestamp(m.timestamp),
})),
);
const [draft, setDraft] = useState("");
const [tab, setTab] = useState<SubTab>("my");
const [sending, setSending] = useState(false);
const [error, setError] = useState<string | null>(null);
const [historyLoading, setHistoryLoading] = useState(true);
const [historyError, setHistoryError] = useState<string | null>(null);
const scrollRef = useRef<HTMLDivElement>(null);
// Synchronous re-entry guard. `setSending(true)` schedules a state
// update but doesn't flush before a second tap can fire send() — a ref
@ -82,74 +95,6 @@ export function MobileChat({
}
}, [messages]);
// Load chat history on mount / agent switch.
const loadHistory = useCallback(async () => {
setHistoryLoading(true);
setHistoryError(null);
try {
const resp = await api.get<{
messages: Array<{
id: string;
role: string;
content: string;
timestamp: string;
}>;
}>(`/workspaces/${agentId}/chat-history?limit=50`);
const loaded = (resp.messages ?? []).map((m) => ({
id: m.id,
role: m.role as "user" | "agent" | "system",
text: m.content,
ts: formatStoredTimestamp(m.timestamp),
}));
setMessages(loaded);
} catch (e) {
setHistoryError(e instanceof Error ? e.message : "Failed to load history");
} finally {
setHistoryLoading(false);
}
}, [agentId]);
useEffect(() => {
let cancelled = false;
loadHistory().then(() => {
if (cancelled) return;
// Consume any agent messages that arrived while history was loading.
const consume = useCanvasStore.getState().consumeAgentMessages;
const msgs = consume(agentId);
if (msgs.length > 0) {
setMessages((prev) => [
...prev,
...msgs.map((m) => ({
id: m.id,
role: "agent" as const,
text: m.content,
ts: formatStoredTimestamp(m.timestamp),
})),
]);
}
});
return () => { cancelled = true; };
}, [agentId, loadHistory]);
// Consume live agent pushes while the panel is mounted.
const pendingAgentMsgs = useCanvasStore((s) => s.agentMessages[agentId]);
useEffect(() => {
if (!pendingAgentMsgs || pendingAgentMsgs.length === 0) return;
const consume = useCanvasStore.getState().consumeAgentMessages;
const msgs = consume(agentId);
if (msgs.length > 0) {
setMessages((prev) => [
...prev,
...msgs.map((m) => ({
id: m.id,
role: "agent" as const,
text: m.content,
ts: formatStoredTimestamp(m.timestamp),
})),
]);
}
}, [pendingAgentMsgs, agentId]);
if (!node) {
return (
<div
@ -363,17 +308,7 @@ export function MobileChat({
Agent Comms peer-to-peer A2A traffic surfaces in the Comms tab.
</div>
)}
{tab === "my" && historyLoading && (
<div style={{ padding: "20px 4px", textAlign: "center", color: p.text3, fontSize: 13 }}>
Loading chat history
</div>
)}
{tab === "my" && !historyLoading && historyError && messages.length === 0 && (
<div style={{ padding: "20px 4px", textAlign: "center", color: p.text3, fontSize: 13 }}>
{historyError}
</div>
)}
{tab === "my" && !historyLoading && !historyError && messages.length === 0 && (
{tab === "my" && messages.length === 0 && (
<div style={{ padding: "20px 4px", textAlign: "center", color: p.text3, fontSize: 13 }}>
Send a message to start chatting.
</div>

View File

@ -12,7 +12,6 @@ import { useEffect, useState } from "react";
import { api } from "@/lib/api";
import { type Template } from "@/lib/deploy-preflight";
import { isSaaSTenant } from "@/lib/tenant";
import { tierCode } from "./palette";
import { MOBILE_FONT_MONO, MOBILE_FONT_SANS, type MobilePalette, usePalette } from "./palette";
@ -27,7 +26,6 @@ const TIER_LABEL: Record<"T1" | "T2" | "T3" | "T4", string> = {
export function MobileSpawn({ dark, onClose }: { dark: boolean; onClose: () => void }) {
const p = usePalette(dark);
const isSaaS = isSaaSTenant();
const [templates, setTemplates] = useState<Template[]>([]);
const [loadingTemplates, setLoadingTemplates] = useState(true);
const [tplId, setTplId] = useState<string | null>(null);
@ -45,7 +43,7 @@ export function MobileSpawn({ dark, onClose }: { dark: boolean; onClose: () => v
setTemplates(list);
if (list.length > 0) {
setTplId(list[0].id);
setTier(isSaaS ? "T4" : tierCode(list[0].tier));
setTier(tierCode(list[0].tier));
}
})
.catch(() => {
@ -57,7 +55,7 @@ export function MobileSpawn({ dark, onClose }: { dark: boolean; onClose: () => v
return () => {
cancelled = true;
};
}, [isSaaS]);
}, []);
const handleSpawn = async () => {
if (busy || !tplId) return;
@ -69,7 +67,7 @@ export function MobileSpawn({ dark, onClose }: { dark: boolean; onClose: () => v
await api.post<{ id: string }>("/workspaces", {
name: (name.trim() || chosen.name),
template: chosen.id,
tier: isSaaS ? 4 : Number(tier.slice(1)),
tier: Number(tier.slice(1)),
canvas: {
x: Math.random() * 400 + 100,
y: Math.random() * 300 + 100,
@ -205,7 +203,7 @@ export function MobileSpawn({ dark, onClose }: { dark: boolean; onClose: () => v
>
{templates.map((t) => {
const on = tplId === t.id;
const tCode = isSaaS ? "T4" : tierCode(t.tier);
const tCode = tierCode(t.tier);
return (
<button
key={t.id}

View File

@ -8,7 +8,7 @@
* NOTE: No @testing-library/jest-dom use DOM APIs.
*/
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { cleanup, render, waitFor } from "@testing-library/react";
import { cleanup, render } from "@testing-library/react";
import React from "react";
import { MobileChat } from "../MobileChat";
@ -33,12 +33,7 @@ const mockStoreState = {
vi.mock("@/store/canvas", () => ({
useCanvasStore: Object.assign(
vi.fn((sel) => sel(mockStoreState)),
{
getState: () => ({
...mockStoreState,
consumeAgentMessages: vi.fn(() => []),
}),
},
{ getState: () => mockStoreState },
),
summarizeWorkspaceCapabilities: vi.fn((data: Record<string, unknown>) => {
const agentCard = data.agentCard as Record<string, unknown> | null;
@ -65,12 +60,8 @@ const { mockApiPost } = vi.hoisted(() => ({
mockApiPost: vi.fn().mockResolvedValue({ result: { parts: [] } }),
}));
const { mockApiGet } = vi.hoisted(() => ({
mockApiGet: vi.fn().mockResolvedValue({ messages: [] }),
}));
vi.mock("@/lib/api", () => ({
api: { get: mockApiGet, post: mockApiPost },
api: { post: mockApiPost },
}));
// ─── Fixtures ────────────────────────────────────────────────────────────────
@ -157,7 +148,6 @@ function renderChat(agentId: string, dark = false) {
beforeEach(() => {
mockOnBack.mockClear();
mockApiGet.mockClear();
mockStoreState.nodes = [];
mockStoreState.agentMessages = {};
mockApiPost.mockClear();
@ -276,19 +266,16 @@ describe("MobileChat — empty state", () => {
mockStoreState.nodes = [onlineNode];
});
it('shows "Send a message to start chatting." when no messages', async () => {
it('shows "Send a message to start chatting." when no messages', () => {
const { container } = renderChat(mockAgentId);
await waitFor(() =>
expect(container.textContent ?? "").toContain("Send a message to start chatting."),
);
expect(container.textContent ?? "").toContain("Send a message to start chatting.");
});
it("shows no messages when agentMessages[agentId] is absent (undefined)", async () => {
it("shows no messages when agentMessages[agentId] is absent (undefined)", () => {
// Explicitly set to empty to simulate no stored messages
mockStoreState.agentMessages = {};
const { container } = renderChat(mockAgentId);
await waitFor(() =>
expect(container.textContent ?? "").toContain("Send a message to start chatting."),
);
expect(container.textContent ?? "").toContain("Send a message to start chatting.");
});
});

View File

@ -307,7 +307,7 @@ function ActivityRow({
{/* Error detail */}
{isError && entry.error_detail && (
<div className="text-[9px] text-bad mt-1 truncate">
<div className="text-[9px] text-bad/80 mt-1 truncate">
{entry.error_detail}
</div>
)}
@ -358,10 +358,10 @@ function A2AErrorPreview({ label, raw }: { label: string; raw: string }) {
const hint = inferA2AErrorHint(detail);
return (
<div>
<div className="text-[8px] text-bad uppercase tracking-wider mb-1">{label} delivery failed</div>
<div className="text-[8px] text-bad/80 uppercase tracking-wider mb-1">{label} delivery failed</div>
<div className="text-[10px] text-bad bg-red-950/30 border border-red-800/40 rounded p-2 space-y-1.5">
<div className="font-mono whitespace-pre-wrap break-words max-h-32 overflow-y-auto">{detail}</div>
<div className="text-[9px] text-bad leading-relaxed border-t border-red-800/30 pt-1.5">{hint}</div>
<div className="text-[9px] text-bad/70 leading-relaxed border-t border-red-800/30 pt-1.5">{hint}</div>
</div>
</div>
);

View File

@ -962,32 +962,6 @@ function MyChatPanel({ workspaceId, data }: Props) {
</div>
</div>
)}
{/* talk_to_user disabled banner shown when the workspace has
talk_to_user_enabled=false. The agent cannot send canvas messages;
the user can re-enable the ability from here without opening settings. */}
{data.talkToUserEnabled === false && (
<div className="flex items-center gap-2 px-3 py-2 bg-surface-sunken border-b border-line/40 shrink-0">
<svg width="14" height="14" viewBox="0 0 16 16" fill="none" aria-hidden="true" className="shrink-0 text-ink-mid">
<path d="M8 1a7 7 0 1 0 0 14A7 7 0 0 0 8 1Zm0 10.5a.75.75 0 1 1 0-1.5.75.75 0 0 1 0 1.5ZM8 4a.75.75 0 0 1 .75.75v4a.75.75 0 0 1-1.5 0v-4A.75.75 0 0 1 8 4Z" fill="currentColor"/>
</svg>
<span className="text-[10px] text-ink-mid flex-1">
Agent is not enabled to chat with you.
</span>
<button
onClick={async () => {
try {
await api.patch(`/workspaces/${workspaceId}/abilities`, { talk_to_user_enabled: true });
useCanvasStore.getState().updateNodeData(workspaceId, { talkToUserEnabled: true });
} catch {
// ignore — user will see no change and can retry
}
}}
className="px-2 py-0.5 text-[10px] font-medium bg-accent/10 hover:bg-accent/20 text-accent rounded border border-accent/30 transition-colors shrink-0"
>
Enable
</button>
</div>
)}
{/* Messages */}
<div ref={containerRef} className="flex-1 overflow-y-auto p-3 space-y-3">
{loading && (
@ -1003,7 +977,7 @@ function MyChatPanel({ workspaceId, data }: Props) {
</p>
<button
onClick={loadInitial}
className="text-[10px] px-2 py-0.5 rounded bg-red-800 text-red-200 hover:bg-red-700 transition-colors"
className="text-[10px] px-2 py-0.5 rounded bg-red-800/40 text-bad hover:bg-red-700/50 transition-colors"
>
Retry
</button>
@ -1155,7 +1129,7 @@ function MyChatPanel({ workspaceId, data }: Props) {
))}
</div>
)}
<div className={`text-[9px] mt-1 ${msg.role === "user" ? "text-white/80" : "text-ink-mid"}`}>
<div className={`text-[9px] mt-1 ${msg.role === "user" ? "text-white/70" : "text-ink-mid"}`}>
{new Date(msg.timestamp).toLocaleTimeString()}
</div>
</div>
@ -1195,11 +1169,11 @@ function MyChatPanel({ workspaceId, data }: Props) {
{error && (
<div className="px-3 py-2 bg-red-900/20 border-t border-red-800/30">
<div className="flex items-center justify-between">
<span className="text-[10px] text-red-300">{error}</span>
<span className="text-[10px] text-bad">{error}</span>
{!isOnline && (
<button
onClick={() => setConfirmRestart(true)}
className="text-[11px] px-2 py-0.5 bg-red-800 text-red-200 rounded hover:bg-red-700"
className="text-[11px] px-2 py-0.5 bg-red-800/40 text-bad rounded hover:bg-red-700/50"
>
Restart
</button>

View File

@ -226,7 +226,7 @@ function PlatformOwnedFilesTab({ workspaceId }: { workspaceId: string }) {
<div role="alertdialog" aria-labelledby="files-delete-all-msg" className="mx-3 mt-2 px-3 py-2 bg-red-950/30 border border-red-800/40 rounded space-y-1.5">
<p id="files-delete-all-msg" className="text-xs text-bad">Delete all {files.filter((f) => !f.dir).length} files? This cannot be undone.</p>
<div className="flex gap-2">
<button type="button" onClick={() => { handleDeleteAll(); setShowDeleteAll(false); }} className="px-2 py-0.5 bg-red-700 hover:bg-red-600 text-[10px] rounded text-white transition-colors focus:outline-none focus-visible:ring-2 focus-visible:ring-red-500/60 focus-visible:ring-offset-1 focus-visible:ring-offset-surface">Delete All</button>
<button type="button" onClick={() => { handleDeleteAll(); setShowDeleteAll(false); }} className="px-2 py-0.5 bg-red-600 hover:bg-red-700 text-[10px] rounded text-white transition-colors focus:outline-none focus-visible:ring-2 focus-visible:ring-red-500/60 focus-visible:ring-offset-1 focus-visible:ring-offset-surface">Delete All</button>
<button type="button" onClick={() => setShowDeleteAll(false)} className="px-2 py-0.5 bg-surface-card hover:bg-surface-elevated hover:text-ink text-[10px] rounded text-ink-mid transition-colors focus:outline-none focus-visible:ring-2 focus-visible:ring-accent/40 focus-visible:ring-offset-1 focus-visible:ring-offset-surface">Cancel</button>
</div>
</div>
@ -240,7 +240,7 @@ function PlatformOwnedFilesTab({ workspaceId }: { workspaceId: string }) {
<div role="alertdialog" aria-labelledby="files-delete-one-msg" className="mx-3 mt-2 px-3 py-2 bg-amber-950/30 border border-amber-800/40 rounded space-y-1.5">
<p id="files-delete-one-msg" className="text-xs text-warm">Delete <span className="font-mono">{confirmDelete}</span>{files.find((f) => f.path === confirmDelete && f.dir) ? " and all its contents" : ""}?</p>
<div className="flex gap-2">
<button type="button" onClick={confirmDeleteFile} className="px-2 py-0.5 bg-red-700 hover:bg-red-600 text-[10px] rounded text-white transition-colors focus:outline-none focus-visible:ring-2 focus-visible:ring-red-500/60 focus-visible:ring-offset-1 focus-visible:ring-offset-surface">Delete</button>
<button type="button" onClick={confirmDeleteFile} className="px-2 py-0.5 bg-red-600 hover:bg-red-700 text-[10px] rounded text-white transition-colors focus:outline-none focus-visible:ring-2 focus-visible:ring-red-500/60 focus-visible:ring-offset-1 focus-visible:ring-offset-surface">Delete</button>
<button type="button" onClick={() => setConfirmDelete(null)} className="px-2 py-0.5 bg-surface-card hover:bg-surface-elevated hover:text-ink text-[10px] rounded text-ink-mid transition-colors focus:outline-none focus-visible:ring-2 focus-visible:ring-accent/40 focus-visible:ring-offset-1 focus-visible:ring-offset-surface">Cancel</button>
</div>
</div>

View File

@ -32,7 +32,7 @@ export function FilesToolbar({
value={root}
onChange={(e) => setRoot(e.target.value)}
aria-label="File root directory"
className="text-[10px] bg-surface-card text-ink-mid border border-line rounded px-1.5 py-0.5 focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-1"
className="text-[10px] bg-surface-card text-ink-mid border border-line rounded px-1.5 py-0.5 outline-none"
>
<option value="/configs">/configs</option>
<option value="/home">/home</option>

View File

@ -332,13 +332,6 @@ export function ScheduleTab({ workspaceId }: Props) {
<div className="flex items-center gap-1.5">
<button
onClick={() => handleToggle(sched)}
aria-label={
sched.last_status === "error"
? "Last run failed — click to disable"
: sched.last_status === "ok"
? "Last run OK — click to disable"
: "Never run — click to enable"
}
className={`w-2 h-2 rounded-full flex-shrink-0 ${
sched.last_status === "error"
? "bg-red-400"
@ -367,7 +360,7 @@ export function ScheduleTab({ workspaceId }: Props) {
<span>Runs: {sched.run_count}</span>
</div>
{sched.last_error && (
<div className="text-[8px] text-bad mt-0.5 truncate">
<div className="text-[8px] text-bad/70 mt-0.5 truncate">
Error: {sched.last_error}
</div>
)}

View File

@ -492,7 +492,7 @@ export function SkillsTab({ workspaceId, data }: Props) {
<div className="text-[10px] text-bad font-semibold mb-0.5">
Couldn't load the plugin registry
</div>
<div className="text-[10px] text-bad">{registryError}</div>
<div className="text-[10px] text-bad/80">{registryError}</div>
<div className="mt-1 text-[10px] text-ink-mid">
Check the platform server is reachable at /plugins. The Retry button is in the header above.
</div>

View File

@ -8,7 +8,6 @@ import {
type PreflightResult,
type Template,
} from "@/lib/deploy-preflight";
import { isSaaSTenant } from "@/lib/tenant";
import { MissingKeysModal } from "@/components/MissingKeysModal";
/**
@ -106,7 +105,7 @@ export function useTemplateDeploy(
const ws = await api.post<{ id: string }>("/workspaces", {
name: template.name,
template: template.id,
tier: isSaaSTenant() ? 4 : template.tier,
tier: template.tier,
canvas: coords,
...(model ? { model } : {}),
});

View File

@ -1,205 +0,0 @@
// @vitest-environment jsdom
"use client";
/**
* Tests for palette-context.tsx MobileAccentProvider context + usePalette hook.
*
 * Test coverage (8 cases):
* 1. MobileAccentProvider renders children
* 2. usePalette(false) without provider MOL_LIGHT
* 3. usePalette(true) without provider MOL_DARK
* 4. accent=null returns base palette unchanged
* 5. accent=base.accent returns base palette unchanged (identity guard)
* 6. accent="#custom" overrides both accent and online
* 7. MOL_LIGHT singleton never mutated
* 8. MOL_DARK singleton never mutated
*
* Plus pure-function coverage for normalizeStatus + tierCode.
*/
import { describe, expect, it, vi, beforeEach, afterEach } from "vitest";
import React from "react";
import { render, screen, cleanup } from "@testing-library/react";
import {
MOL_LIGHT,
MOL_DARK,
getPalette,
normalizeStatus,
tierCode,
MobileAccentProvider,
usePalette,
} from "../palette-context";
// ─── usePalette test helper ───────────────────────────────────────────────────
// usePalette reads document.documentElement.dataset.theme internally.
// We set this before rendering so the hook sees the right value.
function setDataTheme(theme: "light" | "dark") {
  // Guard clause: silently no-op outside a DOM environment.
  if (typeof document === "undefined") return;
  document.documentElement.dataset.theme = theme;
}
// ─── Pure function tests ──────────────────────────────────────────────────────
describe("normalizeStatus", () => {
  // The dark flag must not change the mapped status color, so each case
  // is checked in both light and dark mode where the original did.
  it("returns emerald-400 for online status", () => {
    for (const dark of [false, true]) {
      expect(normalizeStatus("online", dark)).toBe("bg-emerald-400");
    }
  });
  it("returns emerald-400 for degraded status", () => {
    for (const dark of [false, true]) {
      expect(normalizeStatus("degraded", dark)).toBe("bg-emerald-400");
    }
  });
  it("returns red-400 for failed status", () => {
    for (const dark of [false, true]) {
      expect(normalizeStatus("failed", dark)).toBe("bg-red-400");
    }
  });
  it("returns amber-400 for paused status", () => {
    for (const dark of [false, true]) {
      expect(normalizeStatus("paused", dark)).toBe("bg-amber-400");
    }
  });
  it("returns amber-400 for not_configured status", () => {
    expect(normalizeStatus("not_configured", false)).toBe("bg-amber-400");
  });
  it("returns zinc-400 for unknown status", () => {
    // Unrecognized strings — including empty — fall back to the neutral color.
    expect(normalizeStatus("unknown", false)).toBe("bg-zinc-400");
    expect(normalizeStatus("", false)).toBe("bg-zinc-400");
  });
});
describe("tierCode", () => {
it("returns T1 for tier 1", () => {
expect(tierCode(1)).toBe("T1");
});
it("returns T2 for tier 2", () => {
expect(tierCode(2)).toBe("T2");
});
it("returns T4 for tier 4", () => {
expect(tierCode(4)).toBe("T4");
});
it("returns generic T{n} for non-standard tiers", () => {
expect(tierCode(99)).toBe("T99");
});
});
// ─── getPalette tests ─────────────────────────────────────────────────────────
describe("getPalette — accent override", () => {
it("accent=null returns base palette unchanged (light)", () => {
const result = getPalette(null, false);
expect(result).toEqual({ ...MOL_LIGHT });
expect(result).not.toBe(MOL_LIGHT); // returned object is a copy
});
it("accent=null returns base palette unchanged (dark)", () => {
const result = getPalette(null, true);
expect(result).toEqual({ ...MOL_DARK });
expect(result).not.toBe(MOL_DARK);
});
it("accent=base.accent returns base palette unchanged (identity guard, light)", () => {
const result = getPalette(MOL_LIGHT.accent, false);
expect(result).toEqual({ ...MOL_LIGHT });
expect(result).not.toBe(MOL_LIGHT);
});
it("accent=base.accent returns base palette unchanged (identity guard, dark)", () => {
const result = getPalette(MOL_DARK.accent, true);
expect(result).toEqual({ ...MOL_DARK });
expect(result).not.toBe(MOL_DARK);
});
it("accent='#custom' overrides accent and online (light)", () => {
const result = getPalette("#ff0000", false);
expect(result.accent).toBe("#ff0000");
expect(result.online).toBe("bg-emerald-400"); // normalizeStatus("online", false)
});
it("accent='#custom' overrides accent and online (dark)", () => {
const result = getPalette("#00ff00", true);
expect(result.accent).toBe("#00ff00");
expect(result.online).toBe("bg-emerald-400"); // normalizeStatus("online", true)
});
it("MOL_LIGHT singleton is never mutated", () => {
getPalette("#mutate", false);
// All fields must still match the original freeze definition
expect(MOL_LIGHT.accent).toBe("bg-blue-500");
expect(MOL_LIGHT.online).toBe("bg-emerald-400");
expect(MOL_LIGHT.surface).toBe("bg-zinc-900");
expect(MOL_LIGHT.ink).toBe("text-zinc-100");
expect(MOL_LIGHT.line).toBe("border-zinc-700");
expect(MOL_LIGHT.bg).toBe("bg-zinc-950");
});
it("MOL_DARK singleton is never mutated", () => {
getPalette("#mutate", true);
expect(MOL_DARK.accent).toBe("bg-sky-400");
expect(MOL_DARK.online).toBe("bg-emerald-400");
expect(MOL_DARK.surface).toBe("bg-zinc-800");
expect(MOL_DARK.ink).toBe("text-zinc-100");
expect(MOL_DARK.line).toBe("border-zinc-700");
expect(MOL_DARK.bg).toBe("bg-zinc-950");
});
it("getPalette always returns a new object (no shared mutation risk)", () => {
const a = getPalette("#a", false);
const b = getPalette("#b", false);
expect(a).not.toBe(b);
expect(a.accent).not.toBe(b.accent);
});
});
// ─── MobileAccentProvider tests ───────────────────────────────────────────────
describe("MobileAccentProvider", () => {
beforeEach(() => {
setDataTheme("light");
});
afterEach(() => {
cleanup();
if (typeof document !== "undefined") {
document.documentElement.dataset.theme = "";
}
});
it("renders children", () => {
render(
<MobileAccentProvider accent={null}>
<span data-testid="child">Hello</span>
</MobileAccentProvider>,
);
expect(screen.getByTestId("child")).toBeTruthy();
});
// usePalette hook reads data-theme from <html> to determine light/dark.
// In the test environment, data-theme is empty, which falls through to
// the "light" default in usePalette, giving MOL_LIGHT.
it("usePalette(false) without provider → MOL_LIGHT", () => {
setDataTheme("light");
function ShowPalette() {
const p = usePalette(false);
return <span data-testid="accent-light">{p.accent}</span>;
}
render(<ShowPalette />);
expect(screen.getByTestId("accent-light").textContent).toBe(MOL_LIGHT.accent);
});
it("usePalette(true) without provider → MOL_DARK when data-theme=dark", () => {
setDataTheme("dark");
function ShowPalette() {
const p = usePalette(true);
return <span data-testid="accent-dark">{p.accent}</span>;
}
render(<ShowPalette />);
expect(screen.getByTestId("accent-dark").textContent).toBe(MOL_DARK.accent);
});
});

View File

@ -21,8 +21,8 @@ export function statusDotClass(status: string): string {
export const TIER_CONFIG: Record<number, { label: string; color: string; border: string }> = {
1: { label: "T1", color: "text-ink-mid bg-surface-card border border-line", border: "text-ink-mid border-line" },
2: { label: "T2", color: "text-white bg-accent border border-accent-strong", border: "text-accent border-accent" },
3: { label: "T3", color: "text-white bg-violet-600 border border-violet-700", border: "text-white border-violet-500" },
4: { label: "T4", color: "text-white bg-warm border border-warm", border: "text-white border-warm" },
3: { label: "T3", color: "text-white bg-violet-600 border border-violet-700", border: "text-violet-600 border-violet-500" },
4: { label: "T4", color: "text-white bg-warm border border-warm", border: "text-warm border-warm" },
};
export const COMM_TYPE_LABELS: Record<string, string> = {

View File

@ -1,167 +0,0 @@
"use client";
/**
* palette-context.tsx
*
* Mobile canvas accent palette system.
*
 * - MOL_LIGHT / MOL_DARK — immutable base singletons
 * - getPalette(accent, isDark) — returns base palette or accent-overridden copy
 * - normalizeStatus(status, isDark) — maps workspace status → online dot color
 * - tierCode(tier) — maps tier number → display label
 * - MobileAccentProvider — React context provider that propagates the accent override
 * - usePalette(allowAccentOverride) — hook; returns the effective palette
*/
import { createContext, useContext } from "react";
// ─── Types ─────────────────────────────────────────────────────────────────────
export interface Palette {
  /** Accent colour. NOTE(review): the base palettes store a Tailwind class
   * ("bg-blue-500") while getPalette overrides accept raw CSS colour strings
   * ("#ff0000") — both flow through this field; confirm consumers handle both. */
  accent: string;
  /** Online indicator colour (CSS class string, e.g. "bg-emerald-400"). */
  online: string;
  /** Surface background colour class. */
  surface: string;
  /** Primary text colour class. */
  ink: string;
  /** Border/divider colour class. */
  line: string;
  /** Background colour class. */
  bg: string;
  /** Tier display code, e.g. "T1" (format produced by tierCode). */
  tier: string;
}
// ─── Singleton base palettes ────────────────────────────────────────────────────
/** Light-mode base palette — must never be mutated.
 * NOTE(review): despite the "light" name, these are dark zinc shades
 * (zinc-900/950 surfaces, zinc-100 text) — confirm this is intentional.
 * Do not change the values: tests pin them field-by-field. */
export const MOL_LIGHT: Readonly<Palette> = Object.freeze({
  accent: "bg-blue-500",
  online: "bg-emerald-400",
  surface: "bg-zinc-900",
  ink: "text-zinc-100",
  line: "border-zinc-700",
  bg: "bg-zinc-950",
  tier: "T1",
});
/** Dark-mode base palette — must never be mutated.
 * Differs from MOL_LIGHT only in accent (sky vs blue) and surface shade. */
export const MOL_DARK: Readonly<Palette> = Object.freeze({
  accent: "bg-sky-400",
  online: "bg-emerald-400",
  surface: "bg-zinc-800",
  ink: "text-zinc-100",
  line: "border-zinc-700",
  bg: "bg-zinc-950",
  tier: "T1",
});
// ─── Pure helpers ─────────────────────────────────────────────────────────────
/**
 * Maps a workspace status string → online dot colour class.
 *
 * The mapping is theme-independent: `_isDark` is accepted for interface
 * symmetry with getPalette but does not affect the result.
 */
export function normalizeStatus(
  status: string,
  _isDark: boolean,
): string {
  switch (status) {
    case "online":
    case "degraded":
      return "bg-emerald-400";
    case "failed":
      return "bg-red-400";
    case "paused":
    case "not_configured":
      return "bg-amber-400";
    default:
      // Unknown / empty status falls back to a neutral grey dot.
      return "bg-zinc-400";
  }
}
/**
 * Formats a tier number as its display code, e.g. 1 → "T1".
 */
export function tierCode(tier: number): string {
  return "T".concat(String(tier));
}
/**
 * Returns the effective palette for an accent override.
 *
 * - `accent = null` → base palette (light or dark) unchanged
 * - `accent = basePalette.accent` → base palette unchanged (identity guard)
 * - `accent = "#custom"` → copy with `accent` and `online` overridden
 *
 * Always returns a fresh object; the MOL_LIGHT / MOL_DARK singletons are
 * never mutated.
 */
export function getPalette(
  accent: string | null,
  isDark: boolean,
): Palette {
  const base: Readonly<Palette> = isDark ? MOL_DARK : MOL_LIGHT;
  // No override requested, or the override is already the base accent:
  // hand back a shallow copy so callers may mutate their palette locally.
  if (accent === null || accent === base.accent) {
    return { ...base };
  }
  // Custom accent: override accent + online together so they stay in sync.
  return { ...base, accent, online: normalizeStatus("online", isDark) };
}
// ─── Context ──────────────────────────────────────────────────────────────────
type MobileAccentContextValue = {
  /** Override accent colour (null = no override, use default). */
  accent: string | null;
};
// Default value: no accent override — consumers rendered outside a
// MobileAccentProvider fall back to the base palette.
const MobileAccentContext = createContext<MobileAccentContextValue>({
  accent: null,
});
export { MobileAccentContext };
/**
 * Context provider that propagates the accent override to descendants.
 *
 * @param accent   Override accent colour, or null for no override.
 * @param children Subtree that should observe the override via usePalette.
 */
export function MobileAccentProvider({
  accent,
  children,
}: {
  accent: string | null;
  children: React.ReactNode;
}) {
  const value = { accent };
  return (
    <MobileAccentContext.Provider value={value}>
      {children}
    </MobileAccentContext.Provider>
  );
}
// ─── Hook ─────────────────────────────────────────────────────────────────────
/**
 * Returns the effective `Palette` for the current context.
 *
 * @param allowAccentOverride When false, always returns the base palette
 *                            even when an override is set (useful for
 *                            non-accent-aware child components).
 */
export function usePalette(allowAccentOverride: boolean): Palette {
  const { accent } = useContext(MobileAccentContext);
  // Theme is resolved from the <html data-theme> attribute so the hook stays
  // in sync with the theme system; during SSR (no document) we default to
  // light — the safe default for component-library use.
  const isDark =
    typeof document !== "undefined" &&
    document.documentElement.dataset.theme === "dark";
  return getPalette(allowAccentOverride ? accent : null, isDark);
}

View File

@ -519,10 +519,6 @@ export function buildNodesAndEdges(
// #2054 — server-declared per-workspace provisioning timeout.
// Falls through to the runtime profile when null/absent.
provisionTimeoutMs: ws.provision_timeout_ms ?? null,
// Workspace abilities — defaults preserved for old platform versions
// that don't yet include these columns in the GET response.
broadcastEnabled: ws.broadcast_enabled ?? false,
talkToUserEnabled: ws.talk_to_user_enabled ?? true,
},
};
if (hasParent) {

View File

@ -99,13 +99,6 @@ export interface WorkspaceNodeData extends Record<string, unknown> {
* @/lib/runtimeProfiles. Lets a slow runtime declare its cold-boot
* expectation without a canvas release. */
provisionTimeoutMs?: number | null;
/** When true the workspace may POST /broadcast to send org-wide messages.
* Default false. Toggled by user/admin via PATCH /workspaces/:id/abilities. */
broadcastEnabled?: boolean;
/** When false the workspace cannot deliver canvas chat messages.
* send_message_to_user / POST /notify return 403 and the canvas
* shows a "not enabled" state with a button to re-enable. Default true. */
talkToUserEnabled?: boolean;
}
export type PanelTab = "details" | "skills" | "chat" | "terminal" | "config" | "schedule" | "channels" | "files" | "memory" | "traces" | "events" | "activity" | "audit";

View File

@ -299,9 +299,6 @@ export interface WorkspaceData {
* `@/lib/runtimeProfiles` when absent (the default behavior for any
* template that hasn't yet declared the field). */
provision_timeout_ms?: number | null;
/** Workspace ability flags (migration 20260514). */
broadcast_enabled?: boolean;
talk_to_user_enabled?: boolean;
}
let socket: ReconnectingSocket | null = null;

View File

@ -179,7 +179,6 @@ cp_redeploy_tenant() {
# 1 — any other failure
# stdout = response body. stderr = "HTTP_STATUS=NNN" line.
local slug="$1" tag="$2"
validate_slug "$slug"
_mock_call cp_redeploy_tenant "$slug" "$tag"; local _mrc=$?
[[ $_mrc -ne 99 ]] && return $_mrc
local tok="${!CP_TOKEN_ENV:-}"
@ -205,7 +204,6 @@ cp_redeploy_tenant() {
tenant_buildinfo() {
# args: <slug>; prints JSON
local slug="$1"
validate_slug "$slug"
_mock_call tenant_buildinfo "$slug"; local _mrc=$?
[[ $_mrc -ne 99 ]] && return $_mrc
curl -sf --max-time 10 "https://${slug}.moleculesai.app/buildinfo"
@ -214,7 +212,6 @@ tenant_buildinfo() {
tenant_health() {
# args: <slug>; prints raw response, returns 0 if "ok"
local slug="$1"
validate_slug "$slug"
_mock_call tenant_health "$slug"; local _mrc=$?
[[ $_mrc -ne 99 ]] && return $_mrc
curl -sf --max-time 10 "https://${slug}.moleculesai.app/health"
@ -259,7 +256,6 @@ print(json.dumps({'commands': [ecr_login]}))
resolve_tenant_instance_id() {
# args: <slug>; prints i-xxx
local slug="$1"
validate_slug "$slug"
_mock_call resolve_tenant_instance_id "$slug"; local _mrc=$?
[[ $_mrc -ne 99 ]] && return $_mrc
local tok="${!CP_TOKEN_ENV:-}"
@ -275,19 +271,6 @@ resolve_tenant_instance_id() {
log() { printf '[%s] %s\n' "$(date -u +%H:%M:%SZ)" "$*"; }
err() { printf '[%s] ERROR: %s\n' "$(date -u +%H:%M:%SZ)" "$*" >&2; }
# validate_slug — exit 64 if slug contains characters outside the safe set.
# Prevents SSRF via query-separator injection (?foo) and subdomain takeover
# (@evil) when slug is interpolated into URL paths or subdomains.
# OFFSEC-006 fix.
validate_slug() {
local slug="$1"
if ! [[ "$slug" =~ ^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$ ]]; then
printf '[%s] ERROR: invalid slug: %s\n' \
"$(date -u +%H:%M:%SZ)" "$slug" >&2
exit 64
fi
}
preflight() {
log "preflight: source=$SOURCE_TAG dest=$DEST_TAG repo=$REPO region=$REGION"
local src_manifest
@ -356,7 +339,6 @@ promote() {
redeploy_tenant() {
# args: <slug> — handle the 403→SSM-refresh→retry pattern
local slug="$1"
validate_slug "$slug"
log " redeploy: $slug"
if [[ "$DRY_RUN" == "true" ]]; then
log " [dry-run] would POST /redeploy slug=$slug"
@ -390,7 +372,6 @@ redeploy_tenant() {
verify_tenant() {
local slug="$1"
validate_slug "$slug"
log " verify: $slug"
if [[ "$DRY_RUN" == "true" ]]; then
log " [dry-run] would curl /buildinfo + /health"
@ -417,7 +398,6 @@ rollback() {
rm -f "$mfile"
IFS=',' read -ra slugs <<<"$TENANTS"
for slug in "${slugs[@]}"; do
validate_slug "$slug"
redeploy_tenant "$slug" || err " rollback redeploy failed for $slug"
done
log "rollback: complete"
@ -428,13 +408,6 @@ rollback() {
# ─────────────────────────────────────────────────────────────────────────────
main() {
# OFFSEC-006: validate slugs before any network I/O.
IFS=',' read -ra _slugs <<<"$TENANTS"
for _slug in "${_slugs[@]}"; do
validate_slug "$_slug"
done
unset _slugs _slug
preflight || return 1
snapshot_dest_tag || return 2
promote || return 2
@ -442,15 +415,8 @@ main() {
local promote_rc=0
IFS=',' read -ra slugs <<<"$TENANTS"
for slug in "${slugs[@]}"; do
validate_slug "$slug"
if ! redeploy_tenant "$slug"; then
promote_rc=1
fi
if [[ $promote_rc -eq 0 ]]; then
if ! verify_tenant "$slug"; then
promote_rc=1
fi
fi
redeploy_tenant "$slug" || promote_rc=1
[[ $promote_rc -eq 0 ]] && { verify_tenant "$slug" || promote_rc=1; }
[[ $promote_rc -ne 0 ]] && break
done

View File

@ -267,51 +267,7 @@ else
printf ' ✗ unknown-flag should fail (got %s)\n' "$rc"
fi
printf '\n== Test 9: slug validation — invalid slugs rejected with exit 64 (OFFSEC-006) ==\n'
# Attack vectors: SSRF via ? (curl query separator), subdomain takeover via @,
# path traversal via /, shell metacharacters. Use a newline-delimited temp file
# so slugs containing spaces are NOT split by shell word-splitting.
_invalid_tmp=$(mktemp)
cat > "$_invalid_tmp" <<'INVALID_EOF'
a?url=https://evil.com
a&url=https://evil.com
a@evil.com
a/b
a\b
a b
chloe-dong?url=http://evil.com
evil.com@legitimate
INVALID_EOF
while IFS= read -r attack || [[ -n "$attack" ]]; do
set +e
out=$("$SCRIPT" --source-tag x --dest-tag y --tenants "$attack" 2>&1); rc=$?
set -e
if [[ $rc -eq 64 ]] && printf '%s' "$out" | grep -q 'invalid slug'; then
PASS=$((PASS + 1)); printf ' ✓ slug rejected: %s\n' "$(printf '%q' "$attack")"
else
FAIL=$((FAIL + 1)); FAIL_NAMES+=("slug-reject:$attack")
printf ' ✗ slug should be rejected: %s — got exit %s\n' "$(printf '%q' "$attack")" "$rc"
fi
done < "$_invalid_tmp"
rm -f "$_invalid_tmp"
printf '\n== Test 10: slug validation — valid slugs pass through ==\n'
valid_slugs='chloe-dong hongming ab a abc123 my-tenant-42'
for slug in $valid_slugs; do
set +e
out=$("$SCRIPT" --source-tag x --dest-tag y --tenants "$slug" --mock-dir /nonexistent 2>&1); rc=$?
set -e
# valid slugs: script should fail at preflight (no such mock dir / no real infra),
# but NOT at slug validation (exit 64). So we check exit != 64.
if [[ $rc -ne 64 ]]; then
PASS=$((PASS + 1)); printf ' ✓ valid slug accepted: %s\n' "$slug"
else
FAIL=$((FAIL + 1)); FAIL_NAMES+=("slug-accept:$slug")
printf ' ✗ valid slug rejected: %s (should have passed slug check)\n' "$slug"
fi
done
printf '\n== Test 11: ROLLBACK_TAG follows YYYYMMDD via NOW_OVERRIDE_DATE ==\n'
printf '\n== Test 9: ROLLBACK_TAG follows YYYYMMDD via NOW_OVERRIDE_DATE ==\n'
m=$(mkmock)
mock_set "$m" aws_ecr_get_image '{}' 0
mock_set "$m" aws_ecr_describe_image '' 1
@ -333,7 +289,7 @@ fi
assert_calls_contain "rollback tag uses NOW_OVERRIDE_DATE (20260603)" "$m" 'aws_ecr_put_image b-prev-20260603'
rm -rf "$m"
printf '\n== Test 12: empty source manifest fails preflight ==\n'
printf '\n== Test 10: empty source manifest fails preflight ==\n'
m=$(mkmock)
mock_set "$m" aws_ecr_get_image '' 0 # rc=0 but empty body (the "None" case)
out=$(run_script "$m")
@ -341,7 +297,7 @@ assert_exit "empty source manifest fails preflight" "$out" 1
assert_contains "empty manifest message" "$out" 'returned empty manifest'
rm -rf "$m"
printf '\n== Test 13: tenant_buildinfo failure during verify → rollback ==\n'
printf '\n== Test 11: tenant_buildinfo failure during verify → rollback ==\n'
m=$(mkmock)
mock_set "$m" aws_ecr_get_image '{"manifests":[]}' 0
mock_set "$m" aws_ecr_describe_image '' 1
@ -355,7 +311,7 @@ assert_contains "logs buildinfo failure" "$out" '/buildinfo failed for chloe-don
assert_contains "rollback fired after verify fail" "$out" 'ROLLBACK:'
rm -rf "$m"
printf '\n== Test 14: ssm_refresh_ecr_auth JSON escaping (CWE-78 / OFFSEC-001) ==\n'
printf '\n== Test 12: ssm_refresh_ecr_auth JSON escaping (CWE-78 / OFFSEC-001) ==\n'
# Verify the python3 snippet in ssm_refresh_ecr_auth produces valid JSON and
# correctly escapes shell-injection characters in region + account ID fields.
# The fix replaces unquoted shell-printf interpolation with json.dumps.

View File

@ -1,296 +0,0 @@
#!/usr/bin/env bash
# E2E test: workspace broadcast and talk-to-user platform abilities.
#
# What this proves:
# 1. talk_to_user_enabled (default true) — POST /notify works out-of-the-box.
# 2. PATCH /workspaces/:id/abilities { talk_to_user_enabled: false } disables
# delivery: /notify → 403 with error="talk_to_user_disabled" + delegate hint.
# 3. Re-enabling talk_to_user_enabled restores delivery.
# 4. broadcast_enabled (default false) — POST /broadcast → 403 when disabled.
# 5. PATCH { broadcast_enabled: true } enables fan-out.
# 6. POST /broadcast delivers to all non-sender, non-removed workspaces:
# - Returns {"status":"sent","delivered":N}
# - Receiver's activity log has a broadcast_receive entry with the message.
# - Sender's activity log has a broadcast_sent entry.
# 7. The sender itself does NOT receive a broadcast_receive entry.
#
# Usage: tests/e2e/test_workspace_abilities_e2e.sh
# Prereqs: workspace-server on http://localhost:8080, MOLECULE_ENV != production
set -euo pipefail
source "$(dirname "$0")/_lib.sh"
PASS=0
FAIL=0
SENDER_ID=""
RECEIVER_ID=""
cleanup() {
  # Best-effort removal of both workspaces; DELETE failures are swallowed so
  # the EXIT trap never masks the test's real exit status.
  local wid
  for wid in "$SENDER_ID" "$RECEIVER_ID"; do
    [ -n "$wid" ] && curl -s -X DELETE "$BASE/workspaces/$wid?confirm=true" > /dev/null || true
  done
}
trap cleanup EXIT INT TERM
assert() {
  # assert <label> <actual> <expected> — exact string equality.
  # Increments PASS/FAIL and prints a one-line verdict (plus diagnostics on FAIL).
  local label="$1" actual="$2" expected="$3"
  if [ "$actual" != "$expected" ]; then
    echo " FAIL — $label"
    echo " expected: $expected"
    echo " actual: $actual"
    FAIL=$((FAIL+1))
    return 0
  fi
  echo " PASS — $label"
  PASS=$((PASS+1))
}
assert_contains() {
  # assert_contains <label> <haystack> <needle> — fixed-string substring check.
  local label="$1" haystack="$2" needle="$3"
  if echo "$haystack" | grep -qF "$needle"; then
    echo " PASS — $label"
    PASS=$((PASS+1))
    return 0
  fi
  echo " FAIL — $label"
  echo " needle: $needle"
  echo " haystack: $haystack"
  FAIL=$((FAIL+1))
}
assert_not_contains() {
  # assert_not_contains <label> <haystack> <needle> — fails when the needle IS found.
  local label="$1" haystack="$2" needle="$3"
  if echo "$haystack" | grep -qF "$needle"; then
    echo " FAIL — $label (unexpected match)"
    echo " needle: $needle"
    echo " haystack: $haystack"
    FAIL=$((FAIL+1))
  else
    echo " PASS — $label"
    PASS=$((PASS+1))
  fi
}
# ── Pre-sweep: remove any stale leftover workspaces from a prior aborted run ──
echo "=== Setup ==="
for NAME in "Abilities Sender" "Abilities Receiver"; do
PRIOR=$(curl -s "$BASE/workspaces" | python3 -c "
import json, sys
try:
print(' '.join(w['id'] for w in json.load(sys.stdin) if w.get('name') == '$NAME'))
except Exception:
pass
")
for _wid in $PRIOR; do
echo "Sweeping leftover '$NAME' workspace: $_wid"
curl -s -X DELETE "$BASE/workspaces/$_wid?confirm=true" > /dev/null || true
done
done
R=$(curl -s -X POST "$BASE/workspaces" -H "Content-Type: application/json" \
-d '{"name":"Abilities Sender","tier":1}')
SENDER_ID=$(echo "$R" | python3 -c 'import json,sys;print(json.load(sys.stdin)["id"])' 2>/dev/null || true)
[ -n "$SENDER_ID" ] || { echo "Failed to create sender workspace: $R"; exit 1; }
echo "Created sender workspace: $SENDER_ID"
R=$(curl -s -X POST "$BASE/workspaces" -H "Content-Type: application/json" \
-d '{"name":"Abilities Receiver","tier":1}')
RECEIVER_ID=$(echo "$R" | python3 -c 'import json,sys;print(json.load(sys.stdin)["id"])' 2>/dev/null || true)
[ -n "$RECEIVER_ID" ] || { echo "Failed to create receiver workspace: $R"; exit 1; }
echo "Created receiver workspace: $RECEIVER_ID"
# Mint workspace-scoped bearer tokens (test-only endpoint, disabled in prod).
SENDER_TOKEN=$(e2e_mint_test_token "$SENDER_ID")
[ -n "$SENDER_TOKEN" ] || { echo "Failed to mint sender token"; exit 1; }
SENDER_AUTH="Authorization: Bearer $SENDER_TOKEN"
# Admin token — any live workspace bearer satisfies AdminAuth in local dev.
# In production-like envs, set MOLECULE_ADMIN_TOKEN.
ADMIN_TOKEN="${MOLECULE_ADMIN_TOKEN:-$SENDER_TOKEN}"
ADMIN_AUTH="Authorization: Bearer $ADMIN_TOKEN"
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Part 1: talk_to_user ability ==="
echo ""
echo "--- 1a: /notify works with default talk_to_user_enabled=true ---"
CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST "$BASE/workspaces/$SENDER_ID/notify" \
-H "Content-Type: application/json" -H "$SENDER_AUTH" \
-d '{"message":"Hello from sender"}')
assert "POST /notify returns 200 when talk_to_user_enabled=true (default)" "$CODE" "200"
echo ""
echo "--- 1b: Disable talk_to_user ---"
CODE=$(curl -s -o /dev/null -w "%{http_code}" -X PATCH "$BASE/workspaces/$SENDER_ID/abilities" \
-H "Content-Type: application/json" -H "$ADMIN_AUTH" \
-d '{"talk_to_user_enabled": false}')
assert "PATCH /abilities talk_to_user_enabled=false returns 200" "$CODE" "200"
# Verify the flag is reflected in the workspace GET response.
WS=$(curl -s "$BASE/workspaces/$SENDER_ID" -H "$SENDER_AUTH")
FLAG=$(echo "$WS" | python3 -c 'import json,sys;print(json.load(sys.stdin).get("talk_to_user_enabled","MISSING"))')
assert "GET /workspaces/:id reflects talk_to_user_enabled=false" "$FLAG" "False"
echo ""
echo "--- 1c: /notify blocked when talk_to_user disabled ---"
BODY=$(curl -s -w "" -X POST "$BASE/workspaces/$SENDER_ID/notify" \
-H "Content-Type: application/json" -H "$SENDER_AUTH" \
-d '{"message":"Should be blocked"}')
CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST "$BASE/workspaces/$SENDER_ID/notify" \
-H "Content-Type: application/json" -H "$SENDER_AUTH" \
-d '{"message":"Should be blocked"}')
assert "POST /notify returns 403 when talk_to_user_enabled=false" "$CODE" "403"
ERR=$(echo "$BODY" | python3 -c 'import json,sys;print(json.load(sys.stdin).get("error",""))' 2>/dev/null || echo "")
assert_contains "403 body contains talk_to_user_disabled error code" "$ERR" "talk_to_user_disabled"
HINT=$(echo "$BODY" | python3 -c 'import json,sys;print(json.load(sys.stdin).get("hint",""))' 2>/dev/null || echo "")
assert_contains "403 body contains delegate_task hint" "$HINT" "delegate_task"
echo ""
echo "--- 1d: Re-enable talk_to_user and verify /notify works again ---"
CODE=$(curl -s -o /dev/null -w "%{http_code}" -X PATCH "$BASE/workspaces/$SENDER_ID/abilities" \
-H "Content-Type: application/json" -H "$ADMIN_AUTH" \
-d '{"talk_to_user_enabled": true}')
assert "PATCH /abilities talk_to_user_enabled=true returns 200" "$CODE" "200"
CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST "$BASE/workspaces/$SENDER_ID/notify" \
-H "Content-Type: application/json" -H "$SENDER_AUTH" \
-d '{"message":"Re-enabled, should work"}')
assert "POST /notify returns 200 after re-enabling talk_to_user" "$CODE" "200"
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Part 2: broadcast ability ==="
echo ""
echo "--- 2a: Broadcast blocked by default (broadcast_enabled=false) ---"
CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST "$BASE/workspaces/$SENDER_ID/broadcast" \
-H "Content-Type: application/json" -H "$SENDER_AUTH" \
-d '{"message":"Should be blocked"}')
assert "POST /broadcast returns 403 when broadcast_enabled=false (default)" "$CODE" "403"
echo ""
echo "--- 2b: Enable broadcast ---"
CODE=$(curl -s -o /dev/null -w "%{http_code}" -X PATCH "$BASE/workspaces/$SENDER_ID/abilities" \
-H "Content-Type: application/json" -H "$ADMIN_AUTH" \
-d '{"broadcast_enabled": true}')
assert "PATCH /abilities broadcast_enabled=true returns 200" "$CODE" "200"
WS=$(curl -s "$BASE/workspaces/$SENDER_ID" -H "$SENDER_AUTH")
FLAG=$(echo "$WS" | python3 -c 'import json,sys;print(json.load(sys.stdin).get("broadcast_enabled","MISSING"))')
assert "GET /workspaces/:id reflects broadcast_enabled=true" "$FLAG" "True"
echo ""
echo "--- 2c: Successful broadcast fan-out ---"
BCAST=$(curl -s -X POST "$BASE/workspaces/$SENDER_ID/broadcast" \
-H "Content-Type: application/json" -H "$SENDER_AUTH" \
-d '{"message":"Org-wide notice: scheduled maintenance in 5 minutes."}')
BSTATUS=$(echo "$BCAST" | python3 -c 'import json,sys;print(json.load(sys.stdin).get("status",""))' 2>/dev/null || echo "")
BDELIVERED=$(echo "$BCAST" | python3 -c 'import json,sys;print(json.load(sys.stdin).get("delivered","-1"))' 2>/dev/null || echo "-1")
assert "POST /broadcast returns status=sent" "$BSTATUS" "sent"
# delivered count must be >= 1 (the receiver workspace).
echo " INFO — broadcast delivered=$BDELIVERED"
if python3 -c "import sys; sys.exit(0 if int('$BDELIVERED') >= 1 else 1)" 2>/dev/null; then
echo " PASS — delivered count >= 1"
PASS=$((PASS+1))
else
echo " FAIL — expected delivered >= 1, got $BDELIVERED"
FAIL=$((FAIL+1))
fi
echo ""
echo "--- 2d: Receiver activity log has broadcast_receive entry ---"
RECEIVER_TOKEN=$(e2e_mint_test_token "$RECEIVER_ID")
[ -n "$RECEIVER_TOKEN" ] || { echo "Failed to mint receiver token"; exit 1; }
RECEIVER_AUTH="Authorization: Bearer $RECEIVER_TOKEN"
ACT=$(curl -s -H "$RECEIVER_AUTH" "$BASE/workspaces/$RECEIVER_ID/activity?source=agent&limit=20")
ROW=$(echo "$ACT" | python3 -c '
import json, sys
rows = json.load(sys.stdin) or []
for r in rows:
if r.get("activity_type") == "broadcast_receive":
print(json.dumps(r))
break
')
[ -n "$ROW" ] || {
echo " FAIL — could not find broadcast_receive row in receiver activity"
FAIL=$((FAIL+1))
}
if [ -n "$ROW" ]; then
# Message is stored in summary field.
MSG=$(echo "$ROW" | python3 -c 'import json,sys;r=json.load(sys.stdin);print(r.get("summary",""))')
assert_contains "broadcast_receive row summary has original message" "$MSG" "scheduled maintenance"
# Sender ID is stored in source_id field.
SRC=$(echo "$ROW" | python3 -c 'import json,sys;r=json.load(sys.stdin);print(r.get("source_id",""))')
assert "broadcast_receive row source_id is sender workspace" "$SRC" "$SENDER_ID"
fi
echo ""
echo "--- 2e: Sender activity log has broadcast_sent entry ---"
ACT_SENDER=$(curl -s -H "$SENDER_AUTH" "$BASE/workspaces/$SENDER_ID/activity?limit=20")
SENT_ROW=$(echo "$ACT_SENDER" | python3 -c '
import json, sys
rows = json.load(sys.stdin) or []
for r in rows:
if r.get("activity_type") == "broadcast_sent":
print(json.dumps(r))
break
')
[ -n "$SENT_ROW" ] || {
echo " FAIL — could not find broadcast_sent row in sender activity"
FAIL=$((FAIL+1))
}
if [ -n "$SENT_ROW" ]; then
# Delivered count is baked into the summary field (no response_body for sender row).
SUMMARY=$(echo "$SENT_ROW" | python3 -c 'import json,sys;print(json.load(sys.stdin).get("summary",""))')
assert_contains "broadcast_sent summary mentions workspace count" "$SUMMARY" "workspace"
fi
echo ""
echo "--- 2f: Sender does NOT receive a broadcast_receive entry ---"
SELF_RECV=$(echo "$ACT_SENDER" | python3 -c '
import json, sys
rows = json.load(sys.stdin) or []
for r in rows:
if r.get("activity_type") == "broadcast_receive":
print("found")
break
')
assert_not_contains "sender has no broadcast_receive in own activity log" "${SELF_RECV:-}" "found"
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "--- 2g: Empty message is rejected ---"
CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST "$BASE/workspaces/$SENDER_ID/broadcast" \
-H "Content-Type: application/json" -H "$SENDER_AUTH" \
-d '{"message":""}')
assert "POST /broadcast with empty message returns 400" "$CODE" "400"
echo ""
echo "--- 2h: Partial PATCH does not clobber other flags ---"
# Set talk_to_user=false, then patch only broadcast — talk_to_user must stay false.
curl -s -o /dev/null -X PATCH "$BASE/workspaces/$SENDER_ID/abilities" \
-H "Content-Type: application/json" -H "$ADMIN_AUTH" \
-d '{"talk_to_user_enabled": false}'
curl -s -o /dev/null -X PATCH "$BASE/workspaces/$SENDER_ID/abilities" \
-H "Content-Type: application/json" -H "$ADMIN_AUTH" \
-d '{"broadcast_enabled": false}'
WS=$(curl -s "$BASE/workspaces/$SENDER_ID" -H "$SENDER_AUTH")
TUF=$(echo "$WS" | python3 -c 'import json,sys;print(json.load(sys.stdin).get("talk_to_user_enabled","MISSING"))')
BEF=$(echo "$WS" | python3 -c 'import json,sys;print(json.load(sys.stdin).get("broadcast_enabled","MISSING"))')
assert "partial PATCH preserves talk_to_user_enabled=false" "$TUF" "False"
assert "partial PATCH sets broadcast_enabled=false" "$BEF" "False"
# ─────────────────────────────────────────────────────────────────────────────
echo ""
echo "=== Results: $PASS passed, $FAIL failed ==="
[ "$FAIL" -eq 0 ]

View File

@ -545,70 +545,6 @@ def test_rule9_prod_manual_deploy_allows_rollback_control(tmp_path):
assert r.returncode == 0, f"stdout={r.stdout}\nstderr={r.stderr}"
# ---------------------------------------------------------------------------
# Rule 10 — docker info piped to head under pipefail
# ---------------------------------------------------------------------------
DOCKER_INFO_HEAD_BAD = """
name: docker-info-head-bad
on: [push]
jobs:
build:
runs-on: ubuntu-latest
steps:
- run: |
set -euo pipefail
docker info 2>&1 | head -5 || exit 1
"""
DOCKER_INFO_CAPTURE_OK = """
name: docker-info-capture-ok
on: [push]
jobs:
build:
runs-on: ubuntu-latest
steps:
- run: |
set -euo pipefail
docker_info="$(docker info 2>&1)" || exit 1
printf '%s\\n' "${docker_info}" | sed -n '1,5p'
"""
DOCKER_INFO_SEPARATE_STEP_OK = """
name: docker-info-separate-step-ok
on: [push]
jobs:
build:
runs-on: ubuntu-latest
steps:
- run: |
set -euo pipefail
echo setup
- run: |
docker info 2>&1 | head -5 || true
"""
def test_rule10_docker_info_head_under_pipefail_detects_violation(tmp_path):
    """Rule 10 fires: `docker info | head` under pipefail must be flagged."""
    _write(tmp_path, "bad.yml", DOCKER_INFO_HEAD_BAD)
    result = _run_lint(tmp_path)
    assert result.returncode == 1
    # The finding must name both the offending command and the pipefail cause.
    lowered = result.stdout.lower()
    assert "docker info" in lowered
    assert "pipefail" in lowered
def test_rule10_docker_info_capture_passes(tmp_path):
    """Capturing `docker info` output first (no pipe under pipefail) is compliant."""
    _write(tmp_path, "ok.yml", DOCKER_INFO_CAPTURE_OK)
    result = _run_lint(tmp_path)
    assert result.returncode == 0, f"stdout={result.stdout}\nstderr={result.stderr}"
def test_rule10_docker_info_head_in_separate_step_without_pipefail_passes(tmp_path):
    """`docker info | head` in a step that never sets pipefail is compliant."""
    _write(tmp_path, "ok.yml", DOCKER_INFO_SEPARATE_STEP_OK)
    result = _run_lint(tmp_path)
    assert result.returncode == 0, f"stdout={result.stdout}\nstderr={result.stderr}"
# ---------------------------------------------------------------------------
# CI change detector fanout — workflow-only PRs keep required contexts without
# running Go/Canvas/Python/shellcheck heavy steps.

View File

@ -495,7 +495,7 @@ def test_reap_required_check_pull_request_suffix_never_touched(sr_module, monkey
}
counters = sr_module.reap(workflow_map, combined, SHA, dry_run=False)
assert counters["compensated"] == 0
assert counters["preserved_pr_without_push_success"] == 1
assert counters["preserved_non_push_suffix"] == 1
assert calls == []
@ -1009,64 +1009,3 @@ def test_reap_continues_on_per_sha_apierror(sr_module, monkeypatch, capsys):
captured = capsys.readouterr()
assert "::warning::" in captured.out or "::notice::" in captured.out
assert SHA_A[:10] in captured.out
def test_main_soft_skips_when_commit_listing_times_out(sr_module, monkeypatch, capsys):
    """A transient outage while listing recent commits should not paint main red.

    Per-SHA status read failures are already isolated inside `reap_branch`.
    The real 2026-05-14 failure was earlier: `/commits?sha=main&limit=30`
    timed out after all retries, aborting the tick. The next 5-minute tick can
    retry safely, so `main()` should emit an observable warning and return 0.
    """
    def raise_commit_list_timeout(*_args, **_kwargs):
        raise sr_module.ApiError(
            "GET /repos/owner/repo/commits failed after 4 attempts: timed out"
        )

    monkeypatch.setattr(sr_module, "scan_workflows", lambda _: {"workflow-without-push": False})
    monkeypatch.setattr(sr_module, "list_recent_commit_shas", raise_commit_list_timeout)
    monkeypatch.setattr(sys, "argv", ["status-reaper.py"])

    # Soft-skip: exit code 0 plus a machine-readable skip marker in the log.
    assert sr_module.main() == 0
    out = capsys.readouterr().out
    assert "::warning::status-reaper skipped this tick" in out
    assert '"skipped": true' in out
    assert '"skip_reason": "commit-list-api-error"' in out
def test_main_does_not_soft_skip_status_write_failures(sr_module, monkeypatch):
    """Only commit-list read failures are soft-skipped.

    A compensation write failure means the reaper could not repair a red
    status. That must still fail the job loudly instead of being mislabeled as
    a transient commit-list outage.
    """
    # One workflow without a push trigger, and a single recent commit.
    monkeypatch.setattr(sr_module, "scan_workflows", lambda _: {"workflow-without-push": False})
    monkeypatch.setattr(sr_module, "list_recent_commit_shas", lambda *_args, **_kwargs: [SHA_A])
    # The combined status carries a stranded red `(push)` context, so the
    # reaper will attempt to post a compensating status for it...
    monkeypatch.setattr(
        sr_module,
        "get_combined_status",
        lambda _sha: {
            "state": "failure",
            "statuses": [
                {
                    "context": "workflow-without-push / job (push)",
                    "status": "failure",
                    "description": "stranded class-O red",
                }
            ],
        },
    )
    # ...and that write fails (e.g. missing permissions).
    def fake_post_compensating_status(*args, **kwargs):
        raise sr_module.ApiError("POST /statuses failed: 403")
    monkeypatch.setattr(sr_module, "post_compensating_status", fake_post_compensating_status)
    monkeypatch.setattr(sys, "argv", ["status-reaper.py"])
    # The write error must propagate out of main() — no soft-skip path here.
    with pytest.raises(sr_module.ApiError, match="POST /statuses failed"):
        sr_module.main()

View File

@ -18,7 +18,6 @@ require (
github.com/opencontainers/image-spec v1.1.1
github.com/redis/go-redis/v9 v9.19.0
github.com/robfig/cron/v3 v3.0.1
github.com/stretchr/testify v1.11.1
go.moleculesai.app/plugin/gh-identity v0.0.0-20260509010445-788988195fce
golang.org/x/crypto v0.50.0
gopkg.in/yaml.v3 v3.0.1
@ -34,7 +33,6 @@ require (
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
@ -60,7 +58,6 @@ require (
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/quic-go/qpack v0.6.0 // indirect
github.com/quic-go/quic-go v0.59.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect

View File

@ -1,261 +0,0 @@
package bundle
import (
"os"
"path/filepath"
"testing"
)
// ---------------------------------------------------------------------------
// extractDescription
// ---------------------------------------------------------------------------
// extractDescription skips YAML frontmatter; the first line after the closing
// `---` that is neither a `#` comment nor empty is the description.
func TestExtractDescription_WithFrontmatter(t *testing.T) {
	const want = "This is the description line."
	content := "---\ntitle: My Workspace\n---\n# This is a comment\nThis is the description line.\nAnother line."
	if got := extractDescription(content); got != want {
		t.Errorf("got %q, want %q", got, want)
	}
}
// Without frontmatter, the first non-comment, non-empty line is returned.
func TestExtractDescription_NoFrontmatter(t *testing.T) {
	const want = "My workspace description"
	content := "# Copyright header\nMy workspace description\nAnother line."
	if got := extractDescription(content); got != want {
		t.Errorf("got %q, want %q", got, want)
	}
}
// Input consisting solely of comments and blank lines has no description.
func TestExtractDescription_CommentOnly(t *testing.T) {
	content := "# comment only\n# another comment\n"
	if got := extractDescription(content); got != "" {
		t.Errorf("got %q, want empty string", got)
	}
}
// The empty string trivially yields an empty description.
func TestExtractDescription_EmptyInput(t *testing.T) {
	if got := extractDescription(""); got != "" {
		t.Errorf("got %q, want empty string", got)
	}
}
// With no closing `---`, every subsequent line is treated as frontmatter and
// skipped, so the result is "" — the documented behaviour for unterminated
// frontmatter.
func TestExtractDescription_UnclosedFrontmatter(t *testing.T) {
	content := "---\ntitle: No closing delimiter\nThis is the description."
	if got := extractDescription(content); got != "" {
		t.Errorf("unclosed frontmatter: got %q, want empty string", got)
	}
}
// Frontmatter and an interior comment both precede the real description.
func TestExtractDescription_FrontmatterThenCommentThenContent(t *testing.T) {
	const want = "Real description here."
	content := "---\ntags: [test]\n---\n# internal comment\nReal description here.\n"
	if got := extractDescription(content); got != want {
		t.Errorf("got %q, want %q", got, want)
	}
}
// Leading blank lines (length 0) are skipped; the first non-empty,
// non-comment line wins. Whitespace-only lines would NOT be skipped
// because their length is > 0.
func TestExtractDescription_BlankLinesSkipped(t *testing.T) {
	const want = "A. Description"
	got := extractDescription("\n\n\n\nA. Description\nB. Should not be returned.\n")
	if got != want {
		t.Errorf("got %q, want %q", got, want)
	}
}
// ---------------------------------------------------------------------------
// splitLines
// ---------------------------------------------------------------------------
// splitLines on "a\nb\nc" yields the three segments in order.
func TestSplitLines_Basic(t *testing.T) {
	want := []string{"a", "b", "c"}
	got := splitLines("a\nb\nc")
	if len(got) != len(want) {
		t.Fatalf("len=%d, want %d", len(got), len(want))
	}
	for i, w := range want {
		if got[i] != w {
			t.Errorf("got[%d]=%q, want %q", i, got[i], w)
		}
	}
}
// A trailing newline does not create an extra empty segment.
func TestSplitLines_TrailingNewline(t *testing.T) {
	want := []string{"line1", "line2"}
	got := splitLines("line1\nline2\n")
	if len(got) != len(want) {
		t.Errorf("trailing newline: got %v, want %v", got, want)
	}
}
// Input without any newline is returned as a single segment.
func TestSplitLines_NoNewline(t *testing.T) {
	want := []string{"no newline"}
	got := splitLines("no newline")
	if len(got) != 1 || got[0] != want[0] {
		t.Errorf("got %v, want %v", got, want)
	}
}
// Empty input produces no segments at all.
func TestSplitLines_EmptyString(t *testing.T) {
	if got := splitLines(""); len(got) != 0 {
		t.Errorf("empty string: got %v, want []", got)
	}
}
// "\n\n\n" → exactly three empty segments, one per newline; there is no
// trailing fourth segment because the scan position equals len(s) at the end.
func TestSplitLines_OnlyNewlines(t *testing.T) {
	got := splitLines("\n\n\n")
	if len(got) != 3 {
		t.Errorf("only newlines: got %v (len=%d), want 3 empty strings", got, len(got))
	}
	for i, seg := range got {
		if seg != "" {
			t.Errorf("got[%d]=%q, want empty string", i, seg)
		}
	}
}
// Interior consecutive newlines yield empty segments: "a\n\n\nb" → ["a", "", "", "b"].
func TestSplitLines_MultipleConsecutiveNewlines(t *testing.T) {
	got := splitLines("a\n\n\nb")
	if len(got) != 4 {
		t.Errorf("consecutive newlines: got %v (len=%d)", got, len(got))
	}
	if got[0] != "a" || got[3] != "b" {
		t.Errorf("first/last: got %v, want [a, ..., b]", got)
	}
}
// ---------------------------------------------------------------------------
// findConfigDir
// ---------------------------------------------------------------------------
// findConfigDir returns the sub-directory whose config.yaml `name:` matches
// the requested workspace — not merely the first directory with a config.yaml.
func TestFindConfigDir_NameMatch(t *testing.T) {
	tmp := t.TempDir()
	mustMkdir(filepath.Join(tmp, "workspace-a"))
	mustWrite(filepath.Join(tmp, "workspace-a", "config.yaml"),
		"name: other-workspace\ntier: 1\n")
	mustMkdir(filepath.Join(tmp, "workspace-b"))
	mustWrite(filepath.Join(tmp, "workspace-b", "config.yaml"),
		"name: target-workspace\nruntime: claude-code\n")

	want := filepath.Join(tmp, "workspace-b")
	if got := findConfigDir(tmp, "target-workspace"); got != want {
		t.Errorf("got %q, want %q", got, want)
	}
}
// Without an exact name match, the fallback is the first directory that
// contains a config.yaml.
func TestFindConfigDir_NoMatch_UsesFallback(t *testing.T) {
	tmp := t.TempDir()
	mustMkdir(filepath.Join(tmp, "first"))
	mustWrite(filepath.Join(tmp, "first", "config.yaml"), "name: workspace-a\n")
	mustMkdir(filepath.Join(tmp, "second"))
	mustWrite(filepath.Join(tmp, "second", "config.yaml"), "name: workspace-b\n")

	want := filepath.Join(tmp, "first")
	if got := findConfigDir(tmp, "nonexistent"); got != want {
		t.Errorf("no match: got %q, want fallback %q", got, want)
	}
}
// A root directory that does not exist yields the empty string.
func TestFindConfigDir_MissingDir(t *testing.T) {
	got := findConfigDir("/nonexistent/path/for/findConfigDir", "any-name")
	if got != "" {
		t.Errorf("missing dir: got %q, want empty string", got)
	}
}
// An empty root directory offers neither a match nor a fallback.
func TestFindConfigDir_NoSubdirs(t *testing.T) {
	if got := findConfigDir(t.TempDir(), "any"); got != "" {
		t.Errorf("empty dir: got %q, want empty string", got)
	}
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
func mustMkdir(path string) {
os.MkdirAll(path, 0o755)
}
func mustWrite(path, content string) {
os.WriteFile(path, []byte(content), 0o644)
}
// ---------------------------------------------------------------------------
// findConfigDir — additional cases
// ---------------------------------------------------------------------------
// Sub-directories that lack a config.yaml are skipped entirely.
func TestFindConfigDir_SubdirWithoutConfig(t *testing.T) {
	tmp := t.TempDir()
	mustMkdir(filepath.Join(tmp, "empty-skill"))
	if got := findConfigDir(tmp, "any"); got != "" {
		t.Errorf("no config.yaml: got %q, want empty string", got)
	}
}
// When no name matches, the fallback is the FIRST directory holding a
// config.yaml, not the last — confirmed with three ordered candidates.
func TestFindConfigDir_FirstWithConfigIsFallback(t *testing.T) {
	tmp := t.TempDir()
	mustMkdir(filepath.Join(tmp, "a"))
	mustWrite(filepath.Join(tmp, "a", "config.yaml"), "name: alpha\n")
	mustMkdir(filepath.Join(tmp, "b"))
	mustWrite(filepath.Join(tmp, "b", "config.yaml"), "name: beta\n")
	mustMkdir(filepath.Join(tmp, "c"))
	mustWrite(filepath.Join(tmp, "c", "config.yaml"), "name: gamma\n")

	want := filepath.Join(tmp, "a") // first dir with config.yaml
	if got := findConfigDir(tmp, "nonexistent"); got != want {
		t.Errorf("fallback order: got %q, want first-with-config %q", got, want)
	}
}

View File

@ -1,317 +0,0 @@
package bundle
import (
"testing"
)
func TestBuildBundleConfigFiles_EmptyBundle(t *testing.T) {
b := &Bundle{}
files := buildBundleConfigFiles(b)
if len(files) != 0 {
t.Errorf("empty bundle: want 0 files, got %d", len(files))
}
}
// A non-empty SystemPrompt is emitted as exactly one file,
// system-prompt.md, whose content is the prompt verbatim.
func TestBuildBundleConfigFiles_SystemPromptOnly(t *testing.T) {
	b := &Bundle{
		SystemPrompt: "You are a helpful assistant.",
	}
	files := buildBundleConfigFiles(b)
	if n := len(files); n != 1 {
		t.Fatalf("system-prompt only: want 1 file, got %d", n)
	}
	if content, ok := files["system-prompt.md"]; !ok {
		t.Fatal("missing system-prompt.md")
	} else if string(content) != "You are a helpful assistant." {
		t.Errorf("system-prompt content: got %q", string(content))
	}
}
// A "config.yaml" entry in Prompts is written through verbatim as
// config.yaml, and nothing else is produced.
func TestBuildBundleConfigFiles_ConfigYamlOnly(t *testing.T) {
	b := &Bundle{
		Prompts: map[string]string{
			"config.yaml": "runtime: langgraph\ntier: 2\n",
		},
	}
	files := buildBundleConfigFiles(b)
	if n := len(files); n != 1 {
		t.Fatalf("config.yaml only: want 1 file, got %d", n)
	}
	if content, ok := files["config.yaml"]; !ok {
		t.Fatal("missing config.yaml")
	} else if string(content) != "runtime: langgraph\ntier: 2\n" {
		t.Errorf("config.yaml content: got %q", string(content))
	}
}
// SystemPrompt and the config.yaml prompt are independent: both present
// yields both files.
func TestBuildBundleConfigFiles_SystemPromptAndConfigYaml(t *testing.T) {
	b := &Bundle{
		SystemPrompt: "Be concise.",
		Prompts: map[string]string{
			"config.yaml": "runtime: langgraph\n",
		},
	}
	files := buildBundleConfigFiles(b)
	if n := len(files); n != 2 {
		t.Fatalf("system-prompt + config.yaml: want 2 files, got %d", n)
	}
	if _, ok := files["system-prompt.md"]; !ok {
		t.Error("missing system-prompt.md")
	}
	if _, ok := files["config.yaml"]; !ok {
		t.Error("missing config.yaml")
	}
}
// Each skill's files land under skills/<skill-ID>/<relative-path>.
func TestBuildBundleConfigFiles_Skills(t *testing.T) {
	b := &Bundle{
		Skills: []BundleSkill{
			{
				ID:    "web-search",
				Files: map[string]string{"readme.md": "# Web Search\n"},
			},
			{
				ID:    "code-interpreter",
				Files: map[string]string{"readme.md": "# Code Interpreter\n"},
			},
		},
	}
	files := buildBundleConfigFiles(b)
	// 2 skills × 1 file each = 2 files
	if n := len(files); n != 2 {
		t.Fatalf("skills: want 2 files, got %d", n)
	}
	if _, ok := files["skills/web-search/readme.md"]; !ok {
		t.Error("missing skills/web-search/readme.md")
	}
	if _, ok := files["skills/code-interpreter/readme.md"]; !ok {
		t.Error("missing skills/code-interpreter/readme.md")
	}
}
// A single skill may carry several files; each keeps its own relative path
// under the skill's directory.
func TestBuildBundleConfigFiles_SkillSubPaths(t *testing.T) {
	b := &Bundle{
		Skills: []BundleSkill{
			{
				ID: "multi-file",
				Files: map[string]string{
					"readme.md":        "# Multi",
					"instructions.txt": "Step 1, Step 2",
				},
			},
		},
	}
	files := buildBundleConfigFiles(b)
	if n := len(files); n != 2 {
		t.Fatalf("skill with sub-paths: want 2 files, got %d", n)
	}
	if _, ok := files["skills/multi-file/readme.md"]; !ok {
		t.Error("missing skills/multi-file/readme.md")
	}
	if _, ok := files["skills/multi-file/instructions.txt"]; !ok {
		t.Error("missing skills/multi-file/instructions.txt")
	}
}
// An empty SystemPrompt is treated as absent: no system-prompt.md is
// written, only the config.yaml from Prompts.
func TestBuildBundleConfigFiles_EmptySystemPrompt(t *testing.T) {
	b := &Bundle{
		SystemPrompt: "",
		Prompts: map[string]string{
			"config.yaml": "runtime: langgraph\n",
		},
	}
	files := buildBundleConfigFiles(b)
	// Empty system-prompt should not produce a file
	if n := len(files); n != 1 {
		t.Errorf("empty system-prompt: want 1 file, got %d", n)
	}
}
func TestBuildBundleConfigFiles_EmptyPrompts(t *testing.T) {
b := &Bundle{
Prompts: map[string]string{},
}
files := buildBundleConfigFiles(b)
if n := len(files); n != 0 {
t.Errorf("empty prompts map: want 0 files, got %d", n)
}
}
func TestBuildBundleConfigFiles_emptyBundle(t *testing.T) {
b := &Bundle{}
files := buildBundleConfigFiles(b)
if len(files) != 0 {
t.Errorf("expected empty map for empty bundle, got %d entries", len(files))
}
}
func TestBuildBundleConfigFiles_systemPrompt(t *testing.T) {
b := &Bundle{SystemPrompt: "You are a helpful assistant."}
files := buildBundleConfigFiles(b)
if len(files) != 1 {
t.Fatalf("expected 1 file, got %d", len(files))
}
if string(files["system-prompt.md"]) != "You are a helpful assistant." {
t.Errorf("unexpected system prompt content: %q", files["system-prompt.md"])
}
}
func TestBuildBundleConfigFiles_configYaml(t *testing.T) {
b := &Bundle{Prompts: map[string]string{
"config.yaml": "runtime: langgraph\nmodel: claude-sonnet-4-20250514\n",
}}
files := buildBundleConfigFiles(b)
if len(files) != 1 {
t.Fatalf("expected 1 file, got %d", len(files))
}
if string(files["config.yaml"]) != "runtime: langgraph\nmodel: claude-sonnet-4-20250514\n" {
t.Errorf("unexpected config.yaml content: %q", files["config.yaml"])
}
}
// Both SystemPrompt and a config.yaml prompt present → both files emitted.
func TestBuildBundleConfigFiles_systemPromptAndConfigYaml(t *testing.T) {
	b := &Bundle{
		SystemPrompt: "# System",
		Prompts:      map[string]string{"config.yaml": "runtime: langgraph"},
	}
	files := buildBundleConfigFiles(b)
	if len(files) != 2 {
		t.Fatalf("expected 2 files, got %d", len(files))
	}
	if _, ok := files["system-prompt.md"]; !ok {
		t.Error("missing system-prompt.md")
	}
	if _, ok := files["config.yaml"]; !ok {
		t.Error("missing config.yaml")
	}
}
// Skill files are keyed by skills/<ID>/<path>; only ID and Files influence
// the output paths (Name/Description are not file-producing here).
func TestBuildBundleConfigFiles_skills(t *testing.T) {
	b := &Bundle{
		Skills: []BundleSkill{
			{
				ID:          "web-search",
				Name:        "Web Search",
				Description: "Search the web",
				Files:       map[string]string{"readme.md": "# Web Search"},
			},
			{
				ID:          "code-runner",
				Name:        "Code Runner",
				Description: "Execute code",
				Files:       map[string]string{"handler.py": "print('hello')"},
			},
		},
	}
	files := buildBundleConfigFiles(b)
	if len(files) != 2 {
		t.Fatalf("expected 2 skill files, got %d", len(files))
	}
	if content, ok := files["skills/web-search/readme.md"]; !ok {
		t.Error("missing skills/web-search/readme.md")
	} else if string(content) != "# Web Search" {
		t.Errorf("unexpected readme.md: %q", content)
	}
	if _, ok := files["skills/code-runner/handler.py"]; !ok {
		t.Error("missing skills/code-runner/handler.py")
	}
}
// Nested relative paths inside a skill (e.g. src/main.py) are preserved
// under skills/<ID>/.
func TestBuildBundleConfigFiles_skillsWithSubPaths(t *testing.T) {
	b := &Bundle{
		Skills: []BundleSkill{
			{
				ID:    "nested-skill",
				Files: map[string]string{"src/main.py": "def main(): pass", "pyproject.toml": "[tool.foo]"},
			},
		},
	}
	files := buildBundleConfigFiles(b)
	if len(files) != 2 {
		t.Fatalf("expected 2 files, got %d", len(files))
	}
	if _, ok := files["skills/nested-skill/src/main.py"]; !ok {
		t.Error("missing skills/nested-skill/src/main.py")
	}
	if _, ok := files["skills/nested-skill/pyproject.toml"]; !ok {
		t.Error("missing skills/nested-skill/pyproject.toml")
	}
}
func TestBuildBundleConfigFiles_skipsEmptyPrompts(t *testing.T) {
b := &Bundle{Prompts: map[string]string{}}
files := buildBundleConfigFiles(b)
if len(files) != 0 {
t.Errorf("expected 0 files for empty prompts map, got %d", len(files))
}
}
// Only the "config.yaml" key of Prompts is written through; other prompt
// keys are ignored, so here only system-prompt.md appears.
func TestBuildBundleConfigFiles_skipsMissingConfigYaml(t *testing.T) {
	b := &Bundle{
		SystemPrompt: "# My Prompt",
		Prompts:      map[string]string{"other.yaml": "something: else"},
	}
	files := buildBundleConfigFiles(b)
	if len(files) != 1 {
		t.Fatalf("expected 1 file (system-prompt only), got %d", len(files))
	}
	if _, ok := files["config.yaml"]; ok {
		t.Error("config.yaml should not be written when not in Prompts")
	}
}
// nilIfEmpty maps the empty string to nil.
func TestNilIfEmpty_emptyString(t *testing.T) {
	if got := nilIfEmpty(""); got != nil {
		t.Errorf("expected nil for empty string, got %v", got)
	}
}
// A non-empty string passes through unchanged (compared via interface
// equality against the original string).
func TestNilIfEmpty_nonEmptyString(t *testing.T) {
	got := nilIfEmpty("hello")
	if got == nil {
		t.Fatal("expected non-nil result for non-empty string")
	}
	if got != "hello" {
		t.Errorf("expected hello, got %q", got)
	}
}
// nilIfEmpty only checks for zero length, so whitespace survives as-is.
func TestNilIfEmpty_whitespaceString(t *testing.T) {
	got := nilIfEmpty(" ")
	if got == nil {
		t.Error("expected non-nil for whitespace string")
	} else if got != " " {
		t.Errorf("expected ' ', got %q", got)
	}
}
// Uppercase-suffix twin of TestNilIfEmpty_emptyString: "" → nil.
func TestNilIfEmpty_EmptyString(t *testing.T) {
	got := nilIfEmpty("")
	if got != nil {
		t.Errorf("nilIfEmpty(\"\"): want nil, got %v", got)
	}
}
// Stronger variant of the non-empty case: also type-asserts the result
// back to string before comparing.
func TestNilIfEmpty_NonEmptyString(t *testing.T) {
	got := nilIfEmpty("hello")
	if got == nil {
		t.Fatal("nilIfEmpty(\"hello\"): want \"hello\", got nil")
	}
	if s, ok := got.(string); !ok || s != "hello" {
		t.Errorf("nilIfEmpty(\"hello\"): got %v (%T)", got, got)
	}
}
// Whitespace-only input is non-empty and must be returned unchanged,
// checked via a string type assertion.
func TestNilIfEmpty_Whitespace(t *testing.T) {
	got := nilIfEmpty(" ")
	if got == nil {
		t.Fatal("nilIfEmpty(\" \"): want \" \", got nil (whitespace is not empty)")
	}
	if s, ok := got.(string); !ok || s != " " {
		t.Errorf("nilIfEmpty(\" \"): got %v (%T)", got, got)
	}
}

View File

@ -97,28 +97,28 @@ const maxProxyResponseBody = 10 << 20
//
// Timeout model — three independent budgets, none of which gets in each other's way:
//
// 1. Client.Timeout — DELIBERATELY UNSET. Client.Timeout is a hard wall on
// the entire request including streamed body reads, and would pre-empt
// legitimate slow cold-start flows (Claude Code first-token over OAuth
// can take 30-60s on boot; long-running agent synthesis can stream
// tokens for minutes). Total-request budget is enforced per-request
// via context deadline (canvas = idle-only, agent-to-agent = 30 min ceiling).
// 1. Client.Timeout — DELIBERATELY UNSET. Client.Timeout is a hard wall on
// the entire request including streamed body reads, and would pre-empt
// legitimate slow cold-start flows (Claude Code first-token over OAuth
// can take 30-60s on boot; long-running agent synthesis can stream
// tokens for minutes). Total-request budget is enforced per-request
// via context deadline (canvas = idle-only, agent-to-agent = 30 min ceiling).
//
// 2. Transport.DialContext — 10s connect timeout. When a workspace's EC2
// black-holes TCP connects (instance terminated mid-flight, security group
// flipped, NACL bug), the OS default is 75s on Linux / 21s on macOS — long
// enough that Cloudflare's ~100s edge timeout can fire first and surface
// a generic 502 page to canvas. 10s is well above realistic intra-region
// latencies and well below CF's edge timeout.
// 2. Transport.DialContext — 10s connect timeout. When a workspace's EC2
// black-holes TCP connects (instance terminated mid-flight, security group
// flipped, NACL bug), the OS default is 75s on Linux / 21s on macOS — long
// enough that Cloudflare's ~100s edge timeout can fire first and surface
// a generic 502 page to canvas. 10s is well above realistic intra-region
// latencies and well below CF's edge timeout.
//
// 3. Transport.ResponseHeaderTimeout — 180s default. From request-body-end
// to response-headers-start. Configurable via
// A2A_PROXY_RESPONSE_HEADER_TIMEOUT (envx.Duration). Covers cold-start
// first-byte (30-60s OAuth flow above) with enough room for Opus agent
// turns (big context + internal delegate_task round-trips routinely exceed
// the old 60s ceiling). Body streaming after headers is governed by the
// per-request context deadline, NOT this timeout — so multi-minute agent
// responses still work fine.
// 3. Transport.ResponseHeaderTimeout — 180s default. From request-body-end
// to response-headers-start. Configurable via
// A2A_PROXY_RESPONSE_HEADER_TIMEOUT (envx.Duration). Covers cold-start
// first-byte (30-60s OAuth flow above) with enough room for Opus agent
// turns (big context + internal delegate_task round-trips routinely exceed
// the old 60s ceiling). Body streaming after headers is governed by the
// per-request context deadline, NOT this timeout — so multi-minute agent
// responses still work fine.
//
// The point of (2) and (3) is to surface a *structured* 503 from
// handleA2ADispatchError when the workspace agent is unreachable, so canvas
@ -645,7 +645,7 @@ func (h *WorkspaceHandler) resolveAgentURL(ctx context.Context, workspaceID stri
// the caller can retry once the workspace is back online (~10s).
if status == "hibernated" {
log.Printf("ProxyA2A: waking hibernated workspace %s", workspaceID)
h.goAsync(func() { h.RestartByID(workspaceID) })
go h.RestartByID(workspaceID)
return "", &proxyA2AError{
Status: http.StatusServiceUnavailable,
Headers: map[string]string{"Retry-After": "15"},

View File

@ -194,7 +194,7 @@ func (h *WorkspaceHandler) maybeMarkContainerDead(ctx context.Context, workspace
}
db.ClearWorkspaceKeys(ctx, workspaceID)
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOffline), workspaceID, map[string]interface{}{})
h.goAsync(func() { h.RestartByID(workspaceID) })
go h.RestartByID(workspaceID)
return true
}
@ -241,7 +241,7 @@ func (h *WorkspaceHandler) preflightContainerHealth(ctx context.Context, workspa
}
db.ClearWorkspaceKeys(ctx, workspaceID)
h.broadcaster.RecordAndBroadcast(ctx, string(events.EventWorkspaceOffline), workspaceID, map[string]interface{}{})
h.goAsync(func() { h.RestartByID(workspaceID) })
go h.RestartByID(workspaceID)
return &proxyA2AError{
Status: http.StatusServiceUnavailable,
Response: gin.H{
@ -262,8 +262,8 @@ func (h *WorkspaceHandler) logA2AFailure(ctx context.Context, workspaceID, calle
errWsName = workspaceID
}
summary := "A2A request to " + errWsName + " failed: " + errMsg
h.goAsync(func() {
logCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 30*time.Second)
go func(parent context.Context) {
logCtx, cancel := context.WithTimeout(context.WithoutCancel(parent), 30*time.Second)
defer cancel()
LogActivity(logCtx, h.broadcaster, ActivityParams{
WorkspaceID: workspaceID,
@ -277,7 +277,7 @@ func (h *WorkspaceHandler) logA2AFailure(ctx context.Context, workspaceID, calle
Status: "error",
ErrorDetail: &errMsg,
})
})
}(ctx)
}
// logA2ASuccess records a successful A2A round-trip and (for canvas-initiated
@ -298,19 +298,19 @@ func (h *WorkspaceHandler) logA2ASuccess(ctx context.Context, workspaceID, calle
// silent workspaces. Only update when callerID is a real workspace (not
// canvas, not a system caller) and the target returned 2xx/3xx.
if callerID != "" && !isSystemCaller(callerID) && statusCode < 400 {
h.goAsync(func() {
go func() {
bgCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if _, err := db.DB.ExecContext(bgCtx,
`UPDATE workspaces SET last_outbound_at = NOW() WHERE id = $1`, callerID); err != nil {
log.Printf("last_outbound_at update failed for %s: %v", callerID, err)
}
})
}()
}
summary := a2aMethod + " → " + wsNameForLog
toolTrace := extractToolTrace(respBody)
h.goAsync(func() {
logCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 30*time.Second)
go func(parent context.Context) {
logCtx, cancel := context.WithTimeout(context.WithoutCancel(parent), 30*time.Second)
defer cancel()
LogActivity(logCtx, h.broadcaster, ActivityParams{
WorkspaceID: workspaceID,
@ -325,7 +325,7 @@ func (h *WorkspaceHandler) logA2ASuccess(ctx context.Context, workspaceID, calle
DurationMs: &durationMs,
Status: logStatus,
})
})
}(ctx)
if callerID == "" && statusCode < 400 {
h.broadcaster.BroadcastOnly(workspaceID, string(events.EventA2AResponse), map[string]interface{}{
@ -510,8 +510,8 @@ func (h *WorkspaceHandler) logA2AReceiveQueued(ctx context.Context, workspaceID,
wsName = workspaceID
}
summary := a2aMethod + " → " + wsName + " (queued for poll)"
h.goAsync(func() {
logCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 30*time.Second)
go func(parent context.Context) {
logCtx, cancel := context.WithTimeout(context.WithoutCancel(parent), 30*time.Second)
defer cancel()
LogActivity(logCtx, h.broadcaster, ActivityParams{
WorkspaceID: workspaceID,
@ -523,7 +523,7 @@ func (h *WorkspaceHandler) logA2AReceiveQueued(ctx context.Context, workspaceID,
RequestBody: json.RawMessage(body),
Status: "ok",
})
})
}(ctx)
}
// readUsageMap extracts input_tokens / output_tokens from the "usage" key of m.

View File

@ -54,7 +54,6 @@ func TestPreflight_ContainerRunning_ReturnsNil(t *testing.T) {
_ = setupTestDB(t)
stub := &preflightLocalProv{running: true, err: nil}
h := NewWorkspaceHandler(newTestBroadcaster(), nil, "http://localhost:8080", t.TempDir())
waitForHandlerAsyncBeforeDBCleanup(t, h)
h.provisioner = stub
if err := h.preflightContainerHealth(context.Background(), "ws-running-123"); err != nil {
@ -187,8 +186,8 @@ func TestProxyA2A_Preflight_RoutesThroughProvisionerSSOT(t *testing.T) {
}
var (
callsIsRunning bool
callsContainerInspectRaw bool
callsIsRunning bool
callsContainerInspectRaw bool
callsRunningContainerNameDirect bool
)
ast.Inspect(fn.Body, func(n ast.Node) bool {

View File

@ -262,7 +262,6 @@ func TestProxyA2A_Upstream502_TriggersContainerDeadCheck(t *testing.T) {
allowLoopbackForTest(t)
broadcaster := newTestBroadcaster()
handler := NewWorkspaceHandler(broadcaster, nil, "http://localhost:8080", t.TempDir())
waitForHandlerAsyncBeforeDBCleanup(t, handler)
cp := &fakeCPProv{running: false}
handler.SetCPProvisioner(cp)
@ -325,7 +324,6 @@ func TestProxyA2A_Upstream502_AliveAgent_PropagatesAsIs(t *testing.T) {
allowLoopbackForTest(t)
broadcaster := newTestBroadcaster()
handler := NewWorkspaceHandler(broadcaster, nil, "http://localhost:8080", t.TempDir())
waitForHandlerAsyncBeforeDBCleanup(t, handler)
cp := &fakeCPProv{running: true}
handler.SetCPProvisioner(cp)
@ -515,7 +513,6 @@ func TestProxyA2A_AllowedSelf_SkipsAccessCheck(t *testing.T) {
allowLoopbackForTest(t)
broadcaster := newTestBroadcaster()
handler := NewWorkspaceHandler(broadcaster, nil, "http://localhost:8080", t.TempDir())
waitForHandlerAsyncBeforeDBCleanup(t, handler)
agentServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
@ -664,18 +661,18 @@ func TestProxyA2A_CallerIDDerivedFromBearer(t *testing.T) {
// (column order: workspace_id, activity_type, source_id, target_id, ...)
mock.ExpectExec("INSERT INTO activity_logs").
WithArgs(
"ws-target", // $1 workspace_id
"a2a_receive", // $2 activity_type
sqlmock.AnyArg(), // $3 source_id — *string("ws-caller"), checked below
sqlmock.AnyArg(), // $4 target_id
sqlmock.AnyArg(), // $5 method
sqlmock.AnyArg(), // $6 summary
sqlmock.AnyArg(), // $7 request_body
sqlmock.AnyArg(), // $8 response_body
sqlmock.AnyArg(), // $9 tool_trace
sqlmock.AnyArg(), // $10 duration_ms
sqlmock.AnyArg(), // $11 status
sqlmock.AnyArg(), // $12 error_detail
"ws-target", // $1 workspace_id
"a2a_receive", // $2 activity_type
sqlmock.AnyArg(), // $3 source_id — *string("ws-caller"), checked below
sqlmock.AnyArg(), // $4 target_id
sqlmock.AnyArg(), // $5 method
sqlmock.AnyArg(), // $6 summary
sqlmock.AnyArg(), // $7 request_body
sqlmock.AnyArg(), // $8 response_body
sqlmock.AnyArg(), // $9 tool_trace
sqlmock.AnyArg(), // $10 duration_ms
sqlmock.AnyArg(), // $11 status
sqlmock.AnyArg(), // $12 error_detail
).
WillReturnResult(sqlmock.NewResult(0, 1))
@ -1719,6 +1716,7 @@ func TestDispatchA2A_RejectsUnsafeURL(t *testing.T) {
}
}
// --- handleA2ADispatchError ---
func TestHandleA2ADispatchError_ContextDeadline(t *testing.T) {
@ -1805,7 +1803,6 @@ func TestMaybeMarkContainerDead_CPOnly_NotRunning(t *testing.T) {
mock := setupTestDB(t)
setupTestRedis(t)
handler := NewWorkspaceHandler(newTestBroadcaster(), nil, "http://localhost:8080", t.TempDir())
waitForHandlerAsyncBeforeDBCleanup(t, handler)
cp := &fakeCPProv{running: false}
handler.SetCPProvisioner(cp)
@ -1958,7 +1955,6 @@ func TestLogA2AFailure_Smoke(t *testing.T) {
mock := setupTestDB(t)
setupTestRedis(t)
handler := NewWorkspaceHandler(newTestBroadcaster(), nil, "http://localhost:8080", t.TempDir())
waitForHandlerAsyncBeforeDBCleanup(t, handler)
// Sync workspace-name lookup (called in the caller goroutine).
mock.ExpectQuery(`SELECT name FROM workspaces WHERE id =`).
@ -1977,7 +1973,6 @@ func TestLogA2AFailure_EmptyNameFallback(t *testing.T) {
mock := setupTestDB(t)
setupTestRedis(t)
handler := NewWorkspaceHandler(newTestBroadcaster(), nil, "http://localhost:8080", t.TempDir())
waitForHandlerAsyncBeforeDBCleanup(t, handler)
// Empty name from DB → summary uses the workspaceID as the name.
mock.ExpectQuery(`SELECT name FROM workspaces WHERE id =`).
@ -1994,7 +1989,6 @@ func TestLogA2ASuccess_Smoke(t *testing.T) {
mock := setupTestDB(t)
setupTestRedis(t)
handler := NewWorkspaceHandler(newTestBroadcaster(), nil, "http://localhost:8080", t.TempDir())
waitForHandlerAsyncBeforeDBCleanup(t, handler)
mock.ExpectQuery(`SELECT name FROM workspaces WHERE id =`).
WithArgs("ws-ok").
@ -2011,7 +2005,6 @@ func TestLogA2ASuccess_ErrorStatus(t *testing.T) {
mock := setupTestDB(t)
setupTestRedis(t)
handler := NewWorkspaceHandler(newTestBroadcaster(), nil, "http://localhost:8080", t.TempDir())
waitForHandlerAsyncBeforeDBCleanup(t, handler)
mock.ExpectQuery(`SELECT name FROM workspaces WHERE id =`).
WithArgs("ws-err").

View File

@ -26,19 +26,14 @@ import (
// setupTestDBForQueueTests creates a sqlmock DB using QueryMatcherEqual (exact
// string matching) so that ExpectQuery/ExpectExec patterns are compared verbatim.
// Uses the same global db.DB as setupTestDB so the handler can use it.
//
// IMPORTANT: db.DB is saved before assignment and restored via t.Cleanup so
// that tests running after this one are not polluted by a closed mock.
// Same fix as setupTestDB (handlers_test.go); same root cause as mc#975.
func setupTestDBForQueueTests(t *testing.T) sqlmock.Sqlmock {
t.Helper()
mockDB, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
if err != nil {
t.Fatalf("failed to create sqlmock: %v", err)
}
prevDB := db.DB
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
t.Cleanup(func() { mockDB.Close() })
return mock
}

View File

@ -482,13 +482,6 @@ func (h *ActivityHandler) Notify(c *gin.Context) {
c.JSON(http.StatusNotFound, gin.H{"error": "workspace not found"})
return
}
if errors.Is(err, ErrTalkToUserDisabled) {
c.JSON(http.StatusForbidden, gin.H{
"error": "talk_to_user_disabled",
"hint": "This workspace is not allowed to send messages directly to the user. Forward your update to a parent workspace using delegate_task — they may be able to reach the user.",
})
return
}
c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
return
}

View File

@ -388,13 +388,9 @@ func TestActivityList_BeforeTSRejectsInvalidFormat(t *testing.T) {
// ---------- Activity type allowlist (#125: memory_write added) ----------
func TestActivityReport_AcceptsMemoryWriteType(t *testing.T) {
mockDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("failed to create sqlmock: %v", err)
}
prevDB := db.DB
mockDB, mock, _ := sqlmock.New()
defer mockDB.Close()
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
mock.ExpectExec(`INSERT INTO activity_logs`).
WillReturnResult(sqlmock.NewResult(1, 1))
@ -417,13 +413,9 @@ func TestActivityReport_AcceptsMemoryWriteType(t *testing.T) {
}
func TestActivityReport_RejectsUnknownType(t *testing.T) {
mockDB, _, err := sqlmock.New()
if err != nil {
t.Fatalf("failed to create sqlmock: %v", err)
}
prevDB := db.DB
mockDB, _, _ := sqlmock.New()
defer mockDB.Close()
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
broadcaster := newTestBroadcaster()
handler := NewActivityHandler(broadcaster)
@ -455,18 +447,14 @@ func TestNotify_PersistsToActivityLogsForReloadRecovery(t *testing.T) {
// - Have source_id NULL (canvas-source filter)
// - Carry the message text in response_body so extractResponseText
// can reconstruct the agent reply on reload
mockDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("failed to create sqlmock: %v", err)
}
prevDB := db.DB
mockDB, mock, _ := sqlmock.New()
defer mockDB.Close()
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
// Workspace existence check
mock.ExpectQuery(`SELECT name, talk_to_user_enabled FROM workspaces`).
mock.ExpectQuery(`SELECT name FROM workspaces`).
WithArgs("ws-notify").
WillReturnRows(sqlmock.NewRows([]string{"name", "talk_to_user_enabled"}).AddRow("DD", true))
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow("DD"))
// Persistence INSERT — verify shape
mock.ExpectExec(`INSERT INTO activity_logs`).
@ -503,17 +491,13 @@ func TestNotify_WithAttachments_PersistsFilePartsForReload(t *testing.T) {
// download chips after a page reload. Without `parts`, the bubble
// shows up but the attachment chip is silently dropped on every
// refresh.
mockDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("failed to create sqlmock: %v", err)
}
prevDB := db.DB
mockDB, mock, _ := sqlmock.New()
defer mockDB.Close()
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
mock.ExpectQuery(`SELECT name, talk_to_user_enabled FROM workspaces`).
mock.ExpectQuery(`SELECT name FROM workspaces`).
WithArgs("ws-attach").
WillReturnRows(sqlmock.NewRows([]string{"name", "talk_to_user_enabled"}).AddRow("DD", true))
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow("DD"))
// Capture the JSONB arg so we can assert on the persisted shape
// AFTER the call (must include parts[].kind=file so reload
@ -581,13 +565,9 @@ func TestNotify_RejectsAttachmentWithEmptyURIOrName(t *testing.T) {
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
mockDB, _, err := sqlmock.New()
if err != nil {
t.Fatalf("failed to create sqlmock: %v", err)
}
prevDB := db.DB
mockDB, _, _ := sqlmock.New()
defer mockDB.Close()
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
// No DB expectations — handler must reject with 400 BEFORE
// reaching SELECT/INSERT. sqlmock will fail "expectations not met"
// only if the handler unexpectedly queries.
@ -632,17 +612,13 @@ func TestNotify_DBFailure_StillBroadcastsAnd200(t *testing.T) {
// WebSocket push (which the user is already seeing in their open
// canvas). Pre-fix the WS push always succeeded; we don't want
// the new persistence step to regress that path.
mockDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("failed to create sqlmock: %v", err)
}
prevDB := db.DB
mockDB, mock, _ := sqlmock.New()
defer mockDB.Close()
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
mock.ExpectQuery(`SELECT name, talk_to_user_enabled FROM workspaces`).
mock.ExpectQuery(`SELECT name FROM workspaces`).
WithArgs("ws-x").
WillReturnRows(sqlmock.NewRows([]string{"name", "talk_to_user_enabled"}).AddRow("DD", true))
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow("DD"))
mock.ExpectExec(`INSERT INTO activity_logs`).
WillReturnError(fmt.Errorf("simulated db hiccup"))

View File

@ -54,11 +54,6 @@ import (
// timeout) surface as wrapped errors and should be treated as 503.
var ErrWorkspaceNotFound = errors.New("agent_message: workspace not found")
// ErrTalkToUserDisabled is returned when the workspace has
// talk_to_user_enabled=false. Callers surface HTTP 403 so the Python tool
// can detect it and suggest forwarding to a parent workspace.
var ErrTalkToUserDisabled = errors.New("agent_message: talk_to_user disabled")
// AgentMessageAttachment is one file attached to an agent → user
// message. Identical to handlers.NotifyAttachment in field set; kept
// distinct so the writer's API doesn't import a handler type with HTTP
@ -112,20 +107,16 @@ func (w *AgentMessageWriter) Send(
// notify call surfaced as "workspace not found" and masked real
// incidents in the alert path.
var wsName string
var talkToUserEnabled bool
err := w.db.QueryRowContext(ctx,
`SELECT name, talk_to_user_enabled FROM workspaces WHERE id = $1 AND status != 'removed'`,
`SELECT name FROM workspaces WHERE id = $1 AND status != 'removed'`,
workspaceID,
).Scan(&wsName, &talkToUserEnabled)
).Scan(&wsName)
if errors.Is(err, sql.ErrNoRows) {
return ErrWorkspaceNotFound
}
if err != nil {
return fmt.Errorf("agent_message: workspace lookup: %w", err)
}
if !talkToUserEnabled {
return ErrTalkToUserDisabled
}
// 2. Build broadcast payload + WS-emit. Same shape that ChatTab's
// AGENT_MESSAGE handler in canvas/src/store/canvas-events.ts has

View File

@ -88,9 +88,9 @@ func TestAgentMessageWriter_Send_Success_NoAttachments(t *testing.T) {
mock := setupTestDB(t)
w := NewAgentMessageWriter(db.DB, newTestBroadcaster())
mock.ExpectQuery("SELECT name, talk_to_user_enabled FROM workspaces").
mock.ExpectQuery("SELECT name FROM workspaces").
WithArgs("ws-1").
WillReturnRows(sqlmock.NewRows([]string{"name", "talk_to_user_enabled"}).AddRow("CEO Ryan PC", true))
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow("CEO Ryan PC"))
mock.ExpectExec(`INSERT INTO activity_logs.*'a2a_receive'.*'notify'`).
WithArgs(
@ -116,9 +116,9 @@ func TestAgentMessageWriter_Send_Success_WithAttachments(t *testing.T) {
mock := setupTestDB(t)
w := NewAgentMessageWriter(db.DB, newTestBroadcaster())
mock.ExpectQuery("SELECT name, talk_to_user_enabled FROM workspaces").
mock.ExpectQuery("SELECT name FROM workspaces").
WithArgs("ws-att").
WillReturnRows(sqlmock.NewRows([]string{"name", "talk_to_user_enabled"}).AddRow("Ryan", true))
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow("Ryan"))
mock.ExpectExec(`INSERT INTO activity_logs.*'a2a_receive'.*'notify'`).
WithArgs(
@ -173,9 +173,9 @@ func TestAgentMessageWriter_Send_WorkspaceNotFound(t *testing.T) {
emitter := &capturingEmitter{}
w := NewAgentMessageWriter(db.DB, emitter)
mock.ExpectQuery("SELECT name, talk_to_user_enabled FROM workspaces").
mock.ExpectQuery("SELECT name FROM workspaces").
WithArgs("ws-missing").
WillReturnRows(sqlmock.NewRows([]string{"name", "talk_to_user_enabled"}))
WillReturnRows(sqlmock.NewRows([]string{"name"}))
err := w.Send(context.Background(), "ws-missing", "lost in the void", nil)
if !errors.Is(err, ErrWorkspaceNotFound) {
@ -202,9 +202,9 @@ func TestAgentMessageWriter_Send_DBInsertFailureStillReturnsNil(t *testing.T) {
mock := setupTestDB(t)
w := NewAgentMessageWriter(db.DB, newTestBroadcaster())
mock.ExpectQuery("SELECT name, talk_to_user_enabled FROM workspaces").
mock.ExpectQuery("SELECT name FROM workspaces").
WithArgs("ws-dbfail").
WillReturnRows(sqlmock.NewRows([]string{"name", "talk_to_user_enabled"}).AddRow("CEO Ryan PC", true))
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow("CEO Ryan PC"))
mock.ExpectExec(`INSERT INTO activity_logs`).
WillReturnError(errors.New("transient db error"))
@ -223,9 +223,9 @@ func TestAgentMessageWriter_Send_PreviewTruncation(t *testing.T) {
mock := setupTestDB(t)
w := NewAgentMessageWriter(db.DB, newTestBroadcaster())
mock.ExpectQuery("SELECT name, talk_to_user_enabled FROM workspaces").
mock.ExpectQuery("SELECT name FROM workspaces").
WithArgs("ws-trunc").
WillReturnRows(sqlmock.NewRows([]string{"name", "talk_to_user_enabled"}).AddRow("Ryan", true))
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow("Ryan"))
longMsg := strings.Repeat("x", 200)
mock.ExpectExec(`INSERT INTO activity_logs`).
@ -263,9 +263,9 @@ func TestAgentMessageWriter_Send_BroadcastsAgentMessageEvent(t *testing.T) {
emitter := &capturingEmitter{}
w := NewAgentMessageWriter(db.DB, emitter)
mock.ExpectQuery("SELECT name, talk_to_user_enabled FROM workspaces").
mock.ExpectQuery("SELECT name FROM workspaces").
WithArgs("ws-bc").
WillReturnRows(sqlmock.NewRows([]string{"name", "talk_to_user_enabled"}).AddRow("Workspace Name", true))
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow("Workspace Name"))
mock.ExpectExec(`INSERT INTO activity_logs`).
WillReturnResult(sqlmock.NewResult(1, 1))
@ -315,7 +315,7 @@ func TestAgentMessageWriter_Send_DBErrorOnLookupReturnsWrapped(t *testing.T) {
w := NewAgentMessageWriter(db.DB, newTestBroadcaster())
transientErr := errors.New("connection refused")
mock.ExpectQuery("SELECT name, talk_to_user_enabled FROM workspaces").
mock.ExpectQuery("SELECT name FROM workspaces").
WithArgs("ws-dbdown").
WillReturnError(transientErr)
@ -350,9 +350,9 @@ func TestAgentMessageWriter_Send_NonASCIIMessagePersists(t *testing.T) {
// the byte-slice bug.
msg := strings.Repeat("你", 200)
mock.ExpectQuery("SELECT name, talk_to_user_enabled FROM workspaces").
mock.ExpectQuery("SELECT name FROM workspaces").
WithArgs("ws-cjk").
WillReturnRows(sqlmock.NewRows([]string{"name", "talk_to_user_enabled"}).AddRow("CEO Ryan PC", true))
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow("CEO Ryan PC"))
mock.ExpectExec(`INSERT INTO activity_logs`).
WithArgs(
@ -395,9 +395,9 @@ func TestAgentMessageWriter_Send_OmitsAttachmentsKeyWhenEmpty(t *testing.T) {
emitter := &capturingEmitter{}
w := NewAgentMessageWriter(db.DB, emitter)
mock.ExpectQuery("SELECT name, talk_to_user_enabled FROM workspaces").
mock.ExpectQuery("SELECT name FROM workspaces").
WithArgs("ws-noatt").
WillReturnRows(sqlmock.NewRows([]string{"name", "talk_to_user_enabled"}).AddRow("X", true))
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow("X"))
mock.ExpectExec(`INSERT INTO activity_logs`).
WillReturnResult(sqlmock.NewResult(1, 1))

View File

@ -15,7 +15,6 @@ import (
sqlmock "github.com/DATA-DOG/go-sqlmock"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/channels"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
"github.com/gin-gonic/gin"
)
@ -365,20 +364,6 @@ func TestChannelHandler_Discover_MissingToken(t *testing.T) {
}
func TestChannelHandler_Discover_UnsupportedType(t *testing.T) {
// Set up db.DB so PausePollersForToken (called inside Discover) doesn't panic.
mockDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("sqlmock: %v", err)
}
t.Cleanup(func() { mockDB.Close() })
prevDB := db.DB
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB })
mock.ExpectQuery(`SELECT id, channel_config FROM workspace_channels WHERE enabled = true AND workspace_id`).
WithArgs("ws-test").
WillReturnRows(sqlmock.NewRows([]string{"id", "channel_config"}))
handler := NewChannelHandler(newTestChannelManager())
// #329: workspace_id required — include so we actually reach the
@ -402,20 +387,6 @@ func TestChannelHandler_Discover_UnsupportedType(t *testing.T) {
}
func TestChannelHandler_Discover_InvalidBotToken(t *testing.T) {
// Set up db.DB so PausePollersForToken (called inside Discover) doesn't panic.
mockDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("sqlmock: %v", err)
}
t.Cleanup(func() { mockDB.Close() })
prevDB := db.DB
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB })
mock.ExpectQuery(`SELECT id, channel_config FROM workspace_channels WHERE enabled = true AND workspace_id`).
WithArgs("ws-test").
WillReturnRows(sqlmock.NewRows([]string{"id", "channel_config"}))
handler := NewChannelHandler(newTestChannelManager())
body, _ := json.Marshal(map[string]interface{}{

View File

@ -2,7 +2,6 @@ package handlers
import (
"context"
"database/sql"
"encoding/json"
"log"
"net/http"
@ -263,20 +262,14 @@ func insertDelegationRow(ctx context.Context, c *gin.Context, sourceID string, b
"task": body.Task,
"delegation_id": delegationID,
})
// Store delegation_id in response_body so agent check_delegation_status
// (which reads response_body->>delegation_id) can locate this row even
// when request_body hasn't propagated yet. Fixes mc#984.
respJSON, _ := json.Marshal(map[string]interface{}{
"delegation_id": delegationID,
})
var idemArg interface{}
if body.IdempotencyKey != "" {
idemArg = body.IdempotencyKey
}
_, err := db.DB.ExecContext(ctx, `
INSERT INTO activity_logs (workspace_id, activity_type, method, source_id, target_id, summary, request_body, response_body, status, idempotency_key)
VALUES ($1, 'delegation', 'delegate', $2, $3, $4, $5::jsonb, $6::jsonb, 'pending', $7)
`, sourceID, sourceID, body.TargetID, "Delegating to "+body.TargetID, string(taskJSON), string(respJSON), idemArg)
INSERT INTO activity_logs (workspace_id, activity_type, method, source_id, target_id, summary, request_body, status, idempotency_key)
VALUES ($1, 'delegation', 'delegate', $2, $3, $4, $5::jsonb, 'pending', $6)
`, sourceID, sourceID, body.TargetID, "Delegating to "+body.TargetID, string(taskJSON), idemArg)
if err == nil {
// RFC #2829 #318 — mirror to the durable delegations ledger
// (gated by DELEGATION_LEDGER_WRITE; default off → no-op).
@ -551,15 +544,10 @@ func (h *DelegationHandler) Record(c *gin.Context) {
"task": body.Task,
"delegation_id": body.DelegationID,
})
// Store delegation_id in response_body so agent check_delegation_status
// can locate this row. Fixes mc#984.
respJSON, _ := json.Marshal(map[string]interface{}{
"delegation_id": body.DelegationID,
})
if _, err := db.DB.ExecContext(ctx, `
INSERT INTO activity_logs (workspace_id, activity_type, method, source_id, target_id, summary, request_body, response_body, status)
VALUES ($1, 'delegation', 'delegate', $2, $3, $4, $5::jsonb, $6::jsonb, 'dispatched')
`, sourceID, sourceID, body.TargetID, "Delegating to "+body.TargetID, string(taskJSON), string(respJSON)); err != nil {
INSERT INTO activity_logs (workspace_id, activity_type, method, source_id, target_id, summary, request_body, status)
VALUES ($1, 'delegation', 'delegate', $2, $3, $4, $5::jsonb, 'dispatched')
`, sourceID, sourceID, body.TargetID, "Delegating to "+body.TargetID, string(taskJSON)); err != nil {
log.Printf("Delegation Record: insert failed for %s: %v", body.DelegationID, err)
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to record delegation"})
return
@ -699,8 +687,7 @@ func (h *DelegationHandler) listDelegationsFromLedger(ctx context.Context, works
var result []map[string]interface{}
for rows.Next() {
var delegationID, callerID, calleeID, taskPreview, status string
var resultPreview, errorDetail sql.NullString
var delegationID, callerID, calleeID, taskPreview, status, resultPreview, errorDetail string
var lastHeartbeat, deadline, createdAt, updatedAt *time.Time
if err := rows.Scan(
&delegationID, &callerID, &calleeID, &taskPreview,
@ -719,11 +706,11 @@ func (h *DelegationHandler) listDelegationsFromLedger(ctx context.Context, works
"updated_at": updatedAt,
"_ledger": true, // marker so callers know this row is from the ledger
}
if resultPreview.Valid && resultPreview.String != "" {
entry["response_preview"] = textutil.TruncateBytes(resultPreview.String, 300)
if resultPreview != "" {
entry["response_preview"] = textutil.TruncateBytes(resultPreview, 300)
}
if errorDetail.Valid && errorDetail.String != "" {
entry["error"] = errorDetail.String
if errorDetail != "" {
entry["error"] = errorDetail
}
if lastHeartbeat != nil {
entry["last_heartbeat"] = lastHeartbeat

View File

@ -1,224 +0,0 @@
package handlers
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
)
// extractResponseText tests — walks A2A JSON-RPC response bodies and
// returns the first text part, falling back to raw body on parse failures.
func TestExtractResponseText_PartsWithTextKind(t *testing.T) {
resp := map[string]interface{}{
"result": map[string]interface{}{
"parts": []interface{}{
map[string]interface{}{"kind": "text", "text": "hello world"},
map[string]interface{}{"kind": "text", "text": "second part"},
},
},
}
body, _ := json.Marshal(resp)
assert.Equal(t, "hello world", extractResponseText(body))
}
func TestExtractResponseText_PartNotTextKind(t *testing.T) {
resp := map[string]interface{}{
"result": map[string]interface{}{
"parts": []interface{}{
map[string]interface{}{"kind": "image", "data": "base64..."},
map[string]interface{}{"kind": "text", "text": "visible"},
},
},
}
body, _ := json.Marshal(resp)
assert.Equal(t, "visible", extractResponseText(body))
}
func TestExtractResponseText_PartsEmpty(t *testing.T) {
// Empty parts array — falls through to artifacts, then raw body
resp := map[string]interface{}{
"result": map[string]interface{}{
"parts": []interface{}{},
"artifacts": []interface{}{},
},
}
body, _ := json.Marshal(resp)
// Falls through to raw body (which is the JSON string)
result := extractResponseText(body)
assert.NotEmpty(t, result)
}
func TestExtractResponseText_ArtifactPartsWithText(t *testing.T) {
resp := map[string]interface{}{
"result": map[string]interface{}{
"parts": []interface{}{},
"artifacts": []interface{}{
map[string]interface{}{
"kind": "file",
"parts": []interface{}{
map[string]interface{}{"kind": "text", "text": "artifact text"},
},
},
},
},
}
body, _ := json.Marshal(resp)
assert.Equal(t, "artifact text", extractResponseText(body))
}
func TestExtractResponseText_ArtifactPartNotTextKind(t *testing.T) {
resp := map[string]interface{}{
"result": map[string]interface{}{
"parts": []interface{}{},
"artifacts": []interface{}{
map[string]interface{}{
"kind": "code",
"parts": []interface{}{
map[string]interface{}{"kind": "image", "data": "..."},
map[string]interface{}{"kind": "text", "text": "code comment"},
},
},
},
},
}
body, _ := json.Marshal(resp)
assert.Equal(t, "code comment", extractResponseText(body))
}
func TestExtractResponseText_ArtifactsEmpty(t *testing.T) {
resp := map[string]interface{}{
"result": map[string]interface{}{
"parts": []interface{}{},
"artifacts": []interface{}{},
},
}
body, _ := json.Marshal(resp)
result := extractResponseText(body)
// Falls back to raw body
assert.Equal(t, string(body), result)
}
func TestExtractResponseText_NoResult(t *testing.T) {
// No "result" key at all — falls back to raw body
body := []byte(`{"error": {"code": -32600, "message": "Invalid Request"}}`)
result := extractResponseText(body)
assert.Equal(t, string(body), result)
}
func TestExtractResponseText_ResultNotMap(t *testing.T) {
// result is a string, not a map — falls back to raw body
body := []byte(`{"result": "just a string"}`)
result := extractResponseText(body)
assert.Equal(t, string(body), result)
}
func TestExtractResponseText_NonJSONBody(t *testing.T) {
// Non-JSON bytes — returns the raw string
body := []byte("plain text response, not JSON at all")
result := extractResponseText(body)
assert.Equal(t, "plain text response, not JSON at all", result)
}
func TestExtractResponseText_PartWithNilText(t *testing.T) {
// Text field is nil — kind is "text" but text is nil, should skip
resp := map[string]interface{}{
"result": map[string]interface{}{
"parts": []interface{}{
map[string]interface{}{"kind": "text", "text": nil},
map[string]interface{}{"kind": "text", "text": "found"},
},
},
}
body, _ := json.Marshal(resp)
assert.Equal(t, "found", extractResponseText(body))
}
func TestExtractResponseText_ArtifactPartWithNilText(t *testing.T) {
resp := map[string]interface{}{
"result": map[string]interface{}{
"parts": []interface{}{},
"artifacts": []interface{}{
map[string]interface{}{
"parts": []interface{}{
map[string]interface{}{"kind": "text", "text": nil},
map[string]interface{}{"kind": "text", "text": "artifact-found"},
},
},
},
},
}
body, _ := json.Marshal(resp)
assert.Equal(t, "artifact-found", extractResponseText(body))
}
func TestExtractResponseText_PartsWithNonMapElement(t *testing.T) {
// parts contains a non-map element — should be skipped gracefully
resp := map[string]interface{}{
"result": map[string]interface{}{
"parts": []interface{}{
"not a map",
123,
nil,
map[string]interface{}{"kind": "text", "text": "parsed"},
},
},
}
body, _ := json.Marshal(resp)
assert.Equal(t, "parsed", extractResponseText(body))
}
func TestExtractResponseText_ArtifactWithNonMapElement(t *testing.T) {
resp := map[string]interface{}{
"result": map[string]interface{}{
"parts": []interface{}{},
"artifacts": []interface{}{
"not a map",
nil,
map[string]interface{}{
"parts": []interface{}{
"not a map",
map[string]interface{}{"kind": "text", "text": "safe"},
},
},
},
},
}
body, _ := json.Marshal(resp)
assert.Equal(t, "safe", extractResponseText(body))
}
func TestExtractResponseText_PartKindNotString(t *testing.T) {
// kind is an integer, not a string — should be skipped
resp := map[string]interface{}{
"result": map[string]interface{}{
"parts": []interface{}{
map[string]interface{}{"kind": 123, "text": "ignored"},
map[string]interface{}{"kind": "text", "text": "found"},
},
},
}
body, _ := json.Marshal(resp)
assert.Equal(t, "found", extractResponseText(body))
}
func TestExtractResponseText_EmptyResponse(t *testing.T) {
body := []byte("{}")
result := extractResponseText(body)
// Falls back to raw "{}"
assert.Equal(t, "{}", result)
}
func TestExtractResponseText_NilBody(t *testing.T) {
// nil byte slice — string(nil) = ""
result := extractResponseText(nil)
assert.Equal(t, "", result)
}
func TestExtractResponseText_WhitespaceBody(t *testing.T) {
body := []byte(" \n\t ")
result := extractResponseText(body)
// Unmarshals to empty map, no result, returns raw string
assert.Equal(t, " \n\t ", result)
}

View File

@ -1,488 +0,0 @@
package handlers
// delegation_list_test.go — unit tests for listDelegationsFromLedger and
// listDelegationsFromActivityLogs. Both methods are the data-backend of the
// ListDelegations handler; coverage was missing (cf. infra-sre review of PR #942).
import (
"context"
"testing"
"time"
"github.com/DATA-DOG/go-sqlmock"
"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
)
// ---------- listDelegationsFromLedger ----------
func TestListDelegationsFromLedger_EmptyResult(t *testing.T) {
mockDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("failed to create sqlmock: %v", err)
}
prevDB := db.DB
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
rows := sqlmock.NewRows([]string{
"delegation_id", "caller_id", "callee_id", "task_preview",
"status", "result_preview", "error_detail",
"last_heartbeat", "deadline", "created_at", "updated_at",
})
mock.ExpectQuery("SELECT .+ FROM delegations").
WithArgs("ws-1").
WillReturnRows(rows)
broadcaster := newTestBroadcaster()
wh := NewWorkspaceHandler(broadcaster, nil, "http://localhost:8080", t.TempDir())
dh := NewDelegationHandler(wh, broadcaster)
got := dh.listDelegationsFromLedger(context.Background(), "ws-1")
if got != nil {
t.Errorf("empty result: expected nil, got %v", got)
}
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("sqlmock expectations: %v", err)
}
}
func TestListDelegationsFromLedger_SingleRow(t *testing.T) {
mockDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("failed to create sqlmock: %v", err)
}
prevDB := db.DB
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
now := time.Now()
// Use time.Time{} for nullable *time.Time columns — sqlmock passes the
// zero value to the handler's scan destination. The handler checks Valid
// before using each nullable field, so zero values are safe.
rows := sqlmock.NewRows([]string{
"delegation_id", "caller_id", "callee_id", "task_preview",
"status", "result_preview", "error_detail",
"last_heartbeat", "deadline", "created_at", "updated_at",
}).AddRow(
"del-1", "ws-1", "ws-2", "summarise the report",
"completed", "the report is about Q1",
"", now, now, now, now,
)
mock.ExpectQuery("SELECT .+ FROM delegations").
WithArgs("ws-1").
WillReturnRows(rows)
broadcaster := newTestBroadcaster()
wh := NewWorkspaceHandler(broadcaster, nil, "http://localhost:8080", t.TempDir())
dh := NewDelegationHandler(wh, broadcaster)
got := dh.listDelegationsFromLedger(context.Background(), "ws-1")
if len(got) != 1 {
t.Fatalf("expected 1 entry, got %d", len(got))
}
e := got[0]
if e["delegation_id"] != "del-1" {
t.Errorf("delegation_id: got %v, want del-1", e["delegation_id"])
}
if e["source_id"] != "ws-1" {
t.Errorf("source_id: got %v, want ws-1", e["source_id"])
}
if e["target_id"] != "ws-2" {
t.Errorf("target_id: got %v, want ws-2", e["target_id"])
}
if e["status"] != "completed" {
t.Errorf("status: got %v, want completed", e["status"])
}
if e["response_preview"] != "the report is about Q1" {
t.Errorf("response_preview: got %v", e["response_preview"])
}
if _, ok := e["error"]; ok {
t.Errorf("error should be absent when empty, got %v", e["error"])
}
if e["_ledger"] != true {
t.Errorf("_ledger marker: got %v, want true", e["_ledger"])
}
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("sqlmock expectations: %v", err)
}
}
func TestListDelegationsFromLedger_MultipleRows(t *testing.T) {
mockDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("failed to create sqlmock: %v", err)
}
prevDB := db.DB
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
now := time.Now()
rows := sqlmock.NewRows([]string{
"delegation_id", "caller_id", "callee_id", "task_preview",
"status", "result_preview", "error_detail",
"last_heartbeat", "deadline", "created_at", "updated_at",
}).
AddRow("del-a", "ws-1", "ws-2", "task a", "in_progress", "", "", now, now, now, now).
AddRow("del-b", "ws-1", "ws-3", "task b", "failed", "", "timeout", now, now, now, now).
AddRow("del-c", "ws-1", "ws-4", "task c", "completed", "result c", "", now, now, now, now)
mock.ExpectQuery("SELECT .+ FROM delegations").
WithArgs("ws-1").
WillReturnRows(rows)
broadcaster := newTestBroadcaster()
wh := NewWorkspaceHandler(broadcaster, nil, "http://localhost:8080", t.TempDir())
dh := NewDelegationHandler(wh, broadcaster)
got := dh.listDelegationsFromLedger(context.Background(), "ws-1")
if len(got) != 3 {
t.Fatalf("expected 3 entries, got %d", len(got))
}
if got[0]["delegation_id"] != "del-a" || got[1]["delegation_id"] != "del-b" || got[2]["delegation_id"] != "del-c" {
t.Errorf("unexpected order: %v", got)
}
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("sqlmock expectations: %v", err)
}
}
func TestListDelegationsFromLedger_NullsOmitted(t *testing.T) {
// last_heartbeat, deadline, result_preview, error_detail are all NULL.
// Handler must not panic and must omit those keys from the map.
mockDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("failed to create sqlmock: %v", err)
}
prevDB := db.DB
db.DB = mockDB
t.Cleanup(func() { mockDB.Close(); db.DB = prevDB })
now := time.Now()
rows := sqlmock.NewRows([]string{
"delegation_id", "caller_id", "callee_id", "task_preview",
"status", "result_preview", "error_detail",
"last_heartbeat", "deadline", "created_at", "updated_at",
}).
AddRow("del-1", "ws-1", "ws-2", "task", "queued", nil, nil, nil, nil, now, now)
mock.ExpectQuery("SELECT .+ FROM delegations").
WithArgs("ws-1").
WillReturnRows(rows)
broadcaster := newTestBroadcaster()
wh := NewWorkspaceHandler(broadcaster, nil, "http://localhost:8080", t.TempDir())
dh := NewDelegationHandler(wh, broadcaster)
got := dh.listDelegationsFromLedger(context.Background(), "ws-1")
if len(got) != 1 {
t.Fatalf("expected 1 entry, got %d", len(got))
}
e := got[0]
if _, ok := e["last_heartbeat"]; ok {
t.Error("last_heartbeat should be absent when NULL")
}
if _, ok := e["deadline"]; ok {
t.Error("deadline should be absent when NULL")
}
if _, ok := e["response_preview"]; ok {
t.Error("response_preview should be absent when NULL result_preview")
}
if _, ok := e["error"]; ok {
t.Error("error should be absent when NULL error_detail")
}
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("sqlmock expectations: %v", err)
}
}
func TestListDelegationsFromLedger_QueryError(t *testing.T) {
// Query failure returns nil — graceful fallback, no panic.
mockDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("failed to create sqlmock: %v", err)
}
prevDB := db.DB
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
mock.ExpectQuery("SELECT .+ FROM delegations").
WithArgs("ws-1").
WillReturnError(context.DeadlineExceeded)
broadcaster := newTestBroadcaster()
wh := NewWorkspaceHandler(broadcaster, nil, "http://localhost:8080", t.TempDir())
dh := NewDelegationHandler(wh, broadcaster)
got := dh.listDelegationsFromLedger(context.Background(), "ws-1")
if got != nil {
t.Errorf("query error: expected nil, got %v", got)
}
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("sqlmock expectations: %v", err)
}
}
func TestListDelegationsFromLedger_RowsErr(t *testing.T) {
// rows.Err() mid-stream: handler collects partial results and returns them.
mockDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("failed to create sqlmock: %v", err)
}
prevDB := db.DB
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
now := time.Now()
// RowError(0) before AddRow(0): row 0 is "bad", rows.Next() returns false
// on first call — the row never scans, result stays nil. To get partial
// results (row 0 scanned) with rows.Err() non-nil, we use 2 rows and put
// RowError(1) after AddRow(1): row 0 scans normally, row 1 is bad,
// rows.Err() is error, handler returns partial result.
rows := sqlmock.NewRows([]string{
"delegation_id", "caller_id", "callee_id", "task_preview",
"status", "result_preview", "error_detail",
"last_heartbeat", "deadline", "created_at", "updated_at",
}).
AddRow("del-1", "ws-1", "ws-2", "task", "queued", "", "", now, now, now, now).
AddRow("del-2", "ws-1", "ws-3", "another task", "queued", "", "", now, now, now, now).
RowError(1, context.DeadlineExceeded)
mock.ExpectQuery("SELECT .+ FROM delegations").
WithArgs("ws-1").
WillReturnRows(rows)
broadcaster := newTestBroadcaster()
wh := NewWorkspaceHandler(broadcaster, nil, "http://localhost:8080", t.TempDir())
dh := NewDelegationHandler(wh, broadcaster)
got := dh.listDelegationsFromLedger(context.Background(), "ws-1")
// Row 0 scanned and appended; row 1 is bad; rows.Err() is non-nil.
// Handler logs the error but returns result (partial results because result != nil).
if got == nil || len(got) != 1 {
t.Errorf("rows.Err path: expected 1 partial result, got %v", got)
}
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("sqlmock expectations: %v", err)
}
}
// TestListDelegationsFromLedger_ScanError is removed.
//
// In Go 1.25 sqlmock.NewRows validates column count at AddRow() time and
// panics when len(values) != len(columns). The old pattern
// sqlmock.NewRows([]string{}).AddRow("only-one-col")
// therefore panics in test SETUP, not inside the handler. The handler has no
// recover(), so a scan panic would propagate out of listDelegationsFromLedger
// and crash the process — this is the correct behaviour (not silently skipping
// a row). The correct way to cover this path is a real-DB integration test.
//
// ---------- listDelegationsFromActivityLogs ----------
// TestListDelegationsFromActivityLogs_EmptyResult verifies that a query that
// matches no activity_logs rows yields an empty (zero-length) result and
// that every sqlmock expectation was consumed.
func TestListDelegationsFromActivityLogs_EmptyResult(t *testing.T) {
	mockDB, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("failed to create sqlmock: %v", err)
	}
	// Swap in the mock for the global DB handle; restore on cleanup so later
	// tests never see a closed mock.
	prevDB := db.DB
	db.DB = mockDB
	t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })

	cols := []string{
		"id", "activity_type", "source_id", "target_id",
		"summary", "status", "error_detail",
		"response_preview", "delegation_id", "created_at",
	}
	mock.ExpectQuery("SELECT .+ FROM activity_logs").
		WithArgs("ws-1").
		WillReturnRows(sqlmock.NewRows(cols))

	bc := newTestBroadcaster()
	handler := NewDelegationHandler(
		NewWorkspaceHandler(bc, nil, "http://localhost:8080", t.TempDir()), bc)

	if got := handler.listDelegationsFromActivityLogs(context.Background(), "ws-1"); len(got) != 0 {
		t.Errorf("empty result: expected empty slice, got %v", got)
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("sqlmock expectations: %v", err)
	}
}
// TestListDelegationsFromActivityLogs_SingleDelegateRow checks the field
// mapping for a single "delegate" activity row: id, type, source_id,
// target_id, summary and status must all be copied through to the entry.
func TestListDelegationsFromActivityLogs_SingleDelegateRow(t *testing.T) {
	mockDB, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("failed to create sqlmock: %v", err)
	}
	prevDB := db.DB
	db.DB = mockDB
	t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })

	now := time.Now()
	cols := []string{
		"id", "activity_type", "source_id", "target_id",
		"summary", "status", "error_detail",
		"response_preview", "delegation_id", "created_at",
	}
	mock.ExpectQuery("SELECT .+ FROM activity_logs").
		WithArgs("ws-1").
		WillReturnRows(sqlmock.NewRows(cols).AddRow(
			"act-1", "delegate", "ws-1", "ws-2",
			"analyse Q1 numbers", "in_progress", "", "", "", now))

	bc := newTestBroadcaster()
	handler := NewDelegationHandler(
		NewWorkspaceHandler(bc, nil, "http://localhost:8080", t.TempDir()), bc)

	entries := handler.listDelegationsFromActivityLogs(context.Background(), "ws-1")
	if len(entries) != 1 {
		t.Fatalf("expected 1 entry, got %d", len(entries))
	}
	e := entries[0]
	if e["id"] != "act-1" {
		t.Errorf("id: got %v, want act-1", e["id"])
	}
	if e["type"] != "delegate" {
		t.Errorf("type: got %v, want delegate", e["type"])
	}
	if e["source_id"] != "ws-1" {
		t.Errorf("source_id: got %v, want ws-1", e["source_id"])
	}
	if e["target_id"] != "ws-2" {
		t.Errorf("target_id: got %v, want ws-2", e["target_id"])
	}
	if e["summary"] != "analyse Q1 numbers" {
		t.Errorf("summary: got %v", e["summary"])
	}
	if e["status"] != "in_progress" {
		t.Errorf("status: got %v", e["status"])
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("sqlmock expectations: %v", err)
	}
}
// TestListDelegationsFromActivityLogs_DelegateResultWithError verifies the
// mapping for a "delegate_result" activity row carrying an error: the test
// asserts that error_detail surfaces under "error" and that
// response_preview and delegation_id pass through unchanged.
func TestListDelegationsFromActivityLogs_DelegateResultWithError(t *testing.T) {
	mockDB, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("failed to create sqlmock: %v", err)
	}
	// Swap the global DB handle for the mock; restore the previous handle on
	// cleanup so tests running afterwards don't see a closed mock.
	prevDB := db.DB
	db.DB = mockDB
	t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
	now := time.Now()
	rows := sqlmock.NewRows([]string{
		"id", "activity_type", "source_id", "target_id",
		"summary", "status", "error_detail",
		"response_preview", "delegation_id", "created_at",
	}).AddRow(
		"act-2", "delegate_result",
		"ws-1", "ws-2",
		"result summary",
		"failed",
		"Callee workspace not reachable",
		`{"text":"the result body text"}`,
		"del-abc",
		now,
	)
	mock.ExpectQuery("SELECT .+ FROM activity_logs").
		WithArgs("ws-1").
		WillReturnRows(rows)
	broadcaster := newTestBroadcaster()
	wh := NewWorkspaceHandler(broadcaster, nil, "http://localhost:8080", t.TempDir())
	dh := NewDelegationHandler(wh, broadcaster)
	got := dh.listDelegationsFromActivityLogs(context.Background(), "ws-1")
	if len(got) != 1 {
		t.Fatalf("expected 1 entry, got %d", len(got))
	}
	e := got[0]
	// activity_type is exposed as "type"; error_detail as "error".
	if e["type"] != "delegate_result" {
		t.Errorf("type: got %v", e["type"])
	}
	if e["error"] != "Callee workspace not reachable" {
		t.Errorf("error: got %v", e["error"])
	}
	if e["response_preview"] != `{"text":"the result body text"}` {
		t.Errorf("response_preview: got %v", e["response_preview"])
	}
	if e["delegation_id"] != "del-abc" {
		t.Errorf("delegation_id: got %v", e["delegation_id"])
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("sqlmock expectations: %v", err)
	}
}
// TestListDelegationsFromActivityLogs_QueryError: a failing query must be
// reported as an empty slice, not propagated to the caller.
func TestListDelegationsFromActivityLogs_QueryError(t *testing.T) {
	mockDB, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("failed to create sqlmock: %v", err)
	}
	prevDB := db.DB
	db.DB = mockDB
	t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })

	mock.ExpectQuery("SELECT .+ FROM activity_logs").
		WithArgs("ws-1").
		WillReturnError(context.DeadlineExceeded)

	bc := newTestBroadcaster()
	handler := NewDelegationHandler(
		NewWorkspaceHandler(bc, nil, "http://localhost:8080", t.TempDir()), bc)

	// Error → returns empty slice, not nil.
	if got := handler.listDelegationsFromActivityLogs(context.Background(), "ws-1"); len(got) != 0 {
		t.Errorf("query error: expected empty slice, got %v", got)
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("sqlmock expectations: %v", err)
	}
}
// TestListDelegationsFromActivityLogs_RowsErr exercises the partial-result
// path: one good row followed by a row error mid-iteration.
func TestListDelegationsFromActivityLogs_RowsErr(t *testing.T) {
	mockDB, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("failed to create sqlmock: %v", err)
	}
	prevDB := db.DB
	db.DB = mockDB
	t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
	now := time.Now()
	// RowError(0) before AddRow(0): row 0 is "bad", rows.Next() returns false
	// on first call — the row never scans, result stays nil. To get partial
	// results (row 0 scanned) with rows.Err() non-nil, we use 2 rows and put
	// RowError(1) after AddRow(1): row 0 scans normally, row 1 is bad,
	// rows.Err() is error, handler returns partial result.
	rows := sqlmock.NewRows([]string{
		"id", "activity_type", "source_id", "target_id",
		"summary", "status", "error_detail",
		"response_preview", "delegation_id", "created_at",
	}).
		AddRow("act-1", "delegate", "ws-1", "ws-2", "task", "queued", "", "", "", now).
		AddRow("act-2", "delegate", "ws-1", "ws-3", "another task", "queued", "", "", "", now).
		RowError(1, context.DeadlineExceeded)
	mock.ExpectQuery("SELECT .+ FROM activity_logs").
		WithArgs("ws-1").
		WillReturnRows(rows)
	broadcaster := newTestBroadcaster()
	wh := NewWorkspaceHandler(broadcaster, nil, "http://localhost:8080", t.TempDir())
	dh := NewDelegationHandler(wh, broadcaster)
	got := dh.listDelegationsFromActivityLogs(context.Background(), "ws-1")
	// Row 0 scanned and appended; row 1 is bad; rows.Err() is non-nil.
	// Handler logs the error but returns result (partial results because result != nil).
	if got == nil || len(got) != 1 {
		t.Errorf("rows.Err path: expected 1 partial result, got %v", got)
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("sqlmock expectations: %v", err)
	}
}

View File

@ -133,9 +133,9 @@ func TestDelegate_Success(t *testing.T) {
targetID := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
// Expect INSERT into activity_logs for delegation tracking
// (6th arg is response_body, 7th is idempotency_key — nil here since the request omits it)
// (6th arg is idempotency_key — nil here since the request omits it)
mock.ExpectExec("INSERT INTO activity_logs").
WithArgs("ws-source", "ws-source", targetID, "Delegating to "+targetID, sqlmock.AnyArg(), sqlmock.AnyArg(), nil).
WithArgs("ws-source", "ws-source", targetID, "Delegating to "+targetID, sqlmock.AnyArg(), nil).
WillReturnResult(sqlmock.NewResult(0, 1))
// Expect RecordAndBroadcast INSERT into structure_events
@ -189,9 +189,9 @@ func TestDelegate_DBInsertFails_Still202WithWarning(t *testing.T) {
targetID := "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
// DB insert fails (6th arg = response_body, 7th = idempotency_key, nil for this test)
// DB insert fails (6th arg = idempotency_key, nil for this test)
mock.ExpectExec("INSERT INTO activity_logs").
WithArgs("ws-source", "ws-source", targetID, "Delegating to "+targetID, sqlmock.AnyArg(), sqlmock.AnyArg(), nil).
WithArgs("ws-source", "ws-source", targetID, "Delegating to "+targetID, sqlmock.AnyArg(), nil).
WillReturnError(fmt.Errorf("database connection lost"))
// RecordAndBroadcast still fires
@ -491,7 +491,6 @@ func TestDelegationRecord_InsertsActivityLogRow(t *testing.T) {
"550e8400-e29b-41d4-a716-446655440001", // target_id
"Delegating to 550e8400-e29b-41d4-a716-446655440001", // summary
sqlmock.AnyArg(), // request_body (jsonb)
sqlmock.AnyArg(), // response_body (jsonb) — mc#984 fix
).
WillReturnResult(sqlmock.NewResult(0, 1))
// RecordAndBroadcast INSERT for DELEGATION_SENT
@ -700,9 +699,9 @@ func TestDelegate_IdempotentFailedRowIsReleasedAndReplaced(t *testing.T) {
mock.ExpectExec("DELETE FROM activity_logs").
WithArgs("ws-source", "retry-key").
WillReturnResult(sqlmock.NewResult(0, 1))
// Fresh insert with the same idempotency key (response_body added as mc#984 fix).
// Fresh insert with the same idempotency key.
mock.ExpectExec("INSERT INTO activity_logs").
WithArgs("ws-source", "ws-source", targetID, "Delegating to "+targetID, sqlmock.AnyArg(), sqlmock.AnyArg(), "retry-key").
WithArgs("ws-source", "ws-source", targetID, "Delegating to "+targetID, sqlmock.AnyArg(), "retry-key").
WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectExec("INSERT INTO structure_events").
WillReturnResult(sqlmock.NewResult(0, 1))
@ -746,9 +745,9 @@ func TestDelegate_IdempotentRaceUniqueViolationReturnsExisting(t *testing.T) {
mock.ExpectQuery("SELECT request_body->>'delegation_id', status, target_id").
WithArgs("ws-source", "race-key").
WillReturnError(fmt.Errorf("sql: no rows in result set"))
// Insert loses the race against a concurrent caller (response_body added as mc#984 fix).
// Insert loses the race against a concurrent caller.
mock.ExpectExec("INSERT INTO activity_logs").
WithArgs("ws-source", "ws-source", targetID, "Delegating to "+targetID, sqlmock.AnyArg(), sqlmock.AnyArg(), "race-key").
WithArgs("ws-source", "ws-source", targetID, "Delegating to "+targetID, sqlmock.AnyArg(), "race-key").
WillReturnError(fmt.Errorf("pq: duplicate key value violates unique constraint \"activity_logs_idempotency_uniq\""))
// Re-query returns the winner.
mock.ExpectQuery("SELECT request_body->>'delegation_id', status").

View File

@ -1,160 +0,0 @@
package handlers
import (
"testing"
)
// filterPeersByQuery tests — nil-safe role/name filtering for peer discovery.
// An empty query is a no-op filter: all peers come back.
func TestFilterPeersByQuery_EmptyQueryNoOp(t *testing.T) {
	input := []map[string]interface{}{
		{"name": "foo", "role": "bar"},
		{"name": "baz", "role": "qux"},
	}
	got := filterPeersByQuery(input, "")
	if n := len(got); n != 2 {
		t.Errorf("empty query: expected 2, got %d", n)
	}
}
// A query made up only of whitespace is treated like an empty query.
func TestFilterPeersByQuery_WhitespaceQueryNoOp(t *testing.T) {
	input := []map[string]interface{}{{"name": "foo", "role": "bar"}}
	got := filterPeersByQuery(input, " ")
	if n := len(got); n != 1 {
		t.Errorf("whitespace-only query: expected 1, got %d", n)
	}
}
// A query that substring-matches one peer's name selects only that peer.
func TestFilterPeersByQuery_MatchName(t *testing.T) {
	input := []map[string]interface{}{
		{"name": "backend-agent", "role": "sre"},
		{"name": "frontend-agent", "role": "ui"},
	}
	got := filterPeersByQuery(input, "backend")
	if len(got) != 1 || got[0]["name"] != "backend-agent" {
		t.Errorf("expected backend-agent, got %v", got)
	}
}
// A query that substring-matches one peer's role selects only that peer.
func TestFilterPeersByQuery_MatchRole(t *testing.T) {
	input := []map[string]interface{}{
		{"name": "agent-alpha", "role": "security engineer"},
		{"name": "agent-beta", "role": "devops"},
	}
	got := filterPeersByQuery(input, "engineer")
	if len(got) != 1 || got[0]["name"] != "agent-alpha" {
		t.Errorf("expected agent-alpha, got %v", got)
	}
}
// Matching is case-insensitive on both the query and the peer fields.
func TestFilterPeersByQuery_CaseInsensitive(t *testing.T) {
	input := []map[string]interface{}{{"name": "AgentX", "role": "SRE"}}
	if got := filterPeersByQuery(input, "AGENTx"); len(got) != 1 {
		t.Errorf("expected 1 match (case-insensitive), got %d", len(got))
	}
}
// Regression test for #730: queryPeerMaps explicitly sets peer["role"] = nil
// when the DB role is an empty string. Before the fix, p["role"].(string)
// panicked on nil; after the fix it yields "" and no role match occurs —
// matching by name must still work.
func TestFilterPeersByQuery_NilRoleNoPanic(t *testing.T) {
	defer func() {
		if rec := recover(); rec != nil {
			t.Errorf("filterPeersByQuery panicked on nil role: %v", rec)
		}
	}()
	input := []map[string]interface{}{{"name": "some-agent", "role": nil}}
	if got := filterPeersByQuery(input, "some-agent"); len(got) != 1 {
		t.Errorf("expected 1 match by name, got %d", len(got))
	}
}
// With a nil role and a query that does not match the name, the peer is
// filtered out — and the nil role must not trigger a panic.
func TestFilterPeersByQuery_NilRoleQueryNoMatch(t *testing.T) {
	defer func() {
		if rec := recover(); rec != nil {
			t.Errorf("filterPeersByQuery panicked on nil role: %v", rec)
		}
	}()
	input := []map[string]interface{}{{"name": "agent-alpha", "role": nil}}
	if got := filterPeersByQuery(input, "no-match"); len(got) != 0 {
		t.Errorf("expected 0 matches, got %d", len(got))
	}
}
// Defensive check: a nil name must not panic either; matching by role
// still works.
func TestFilterPeersByQuery_NilNameNoPanic(t *testing.T) {
	defer func() {
		if rec := recover(); rec != nil {
			t.Errorf("filterPeersByQuery panicked on nil name: %v", rec)
		}
	}()
	input := []map[string]interface{}{{"name": nil, "role": "sre"}}
	if got := filterPeersByQuery(input, "sre"); len(got) != 1 {
		t.Errorf("expected 1 match by role, got %d", len(got))
	}
}
// Both fields nil: the empty query still passes the peer through, while any
// non-empty query matches nothing — and neither case may panic.
func TestFilterPeersByQuery_BothNilNoPanic(t *testing.T) {
	defer func() {
		if rec := recover(); rec != nil {
			t.Errorf("filterPeersByQuery panicked on nil name+role: %v", rec)
		}
	}()
	input := []map[string]interface{}{{"name": nil, "role": nil}}
	if got := filterPeersByQuery(input, ""); len(got) != 1 {
		t.Errorf("empty query with nil name/role: expected 1, got %d", len(got))
	}
	if got := filterPeersByQuery(input, "anything"); len(got) != 0 {
		t.Errorf("non-empty query with nil name/role: expected 0, got %d", len(got))
	}
}
// A query matching neither names nor roles returns an empty result.
func TestFilterPeersByQuery_NoMatches(t *testing.T) {
	input := []map[string]interface{}{
		{"name": "alpha", "role": "beta"},
		{"name": "gamma", "role": "delta"},
	}
	if got := filterPeersByQuery(input, "zzz"); len(got) != 0 {
		t.Errorf("expected 0, got %d", len(got))
	}
}
// Filtering an empty peer list yields an empty result for any query.
func TestFilterPeersByQuery_EmptyPeers(t *testing.T) {
	if got := filterPeersByQuery([]map[string]interface{}{}, "query"); len(got) != 0 {
		t.Errorf("empty peers: expected 0, got %d", len(got))
	}
}
// Several peers may match the same query; all of them are returned.
func TestFilterPeersByQuery_MultipleMatches(t *testing.T) {
	input := []map[string]interface{}{
		{"name": "backend-alpha", "role": "eng"},
		{"name": "backend-beta", "role": "eng"},
		{"name": "frontend", "role": "ui"},
	}
	if got := filterPeersByQuery(input, "backend"); len(got) != 2 {
		t.Errorf("expected 2 backend matches, got %d", len(got))
	}
}

View File

@ -646,12 +646,8 @@ const externalOpenClawTemplate = `# OpenClaw MCP config — outbound tool path.
# external machine today, pair with the Python SDK tab.
# 1. Install openclaw CLI + the workspace runtime wheel:
# The version pin (>=0.1.999) ensures the "molecule-mcp" console
# script is present it is what keeps the workspace ALIVE on canvas
# (register-on-startup + 20s heartbeat). Older versions only ship
# a2a_mcp_server which does not heartbeat.
npm install -g openclaw@latest
pip install "molecule-ai-workspace-runtime>=0.1.999"
pip install molecule-ai-workspace-runtime
# 2. Onboard openclaw against your model provider (one-time setup).
# --non-interactive needs an explicit --provider + --model so it

View File

@ -230,21 +230,20 @@ func TestWorkspaceList_WithData(t *testing.T) {
broadcaster := newTestBroadcaster()
handler := NewWorkspaceHandler(broadcaster, nil, "http://localhost:8080", t.TempDir())
// 23 cols — broadcast_enabled + talk_to_user_enabled added after monthly_spend
// (migration 20260514). Column order must match scanWorkspaceRow exactly.
// 21 cols — see scanWorkspaceRow for order (max_concurrent_tasks
// lands between active_tasks and last_error_rate).
columns := []string{
"id", "name", "role", "tier", "status", "agent_card", "url",
"parent_id", "active_tasks", "max_concurrent_tasks",
"last_error_rate", "last_sample_error",
"uptime_seconds", "current_task", "runtime", "workspace_dir", "x", "y", "collapsed",
"budget_limit", "monthly_spend",
"broadcast_enabled", "talk_to_user_enabled",
}
rows := sqlmock.NewRows(columns).
AddRow("ws-1", "Agent One", "worker", 1, "online", []byte(`{"name":"agent1"}`), "http://localhost:8001",
nil, 3, 1, 0.02, "", 7200, "processing", "langgraph", "", 10.0, 20.0, false, nil, int64(0), false, true).
nil, 3, 1, 0.02, "", 7200, "processing", "langgraph", "", 10.0, 20.0, false, nil, int64(0)).
AddRow("ws-2", "Agent Two", "", 2, "degraded", []byte("null"), "",
nil, 0, 1, 0.6, "timeout", 100, "", "claude-code", "", 50.0, 60.0, true, nil, int64(0), false, true)
nil, 0, 1, 0.6, "timeout", 100, "", "claude-code", "", 50.0, 60.0, true, nil, int64(0))
mock.ExpectQuery("SELECT w.id, w.name").
WillReturnRows(rows)

View File

@ -29,20 +29,14 @@ func init() {
// setupTestDB creates a sqlmock DB and assigns it to the global db.DB.
// It also disables the SSRF URL check so that httptest.NewServer loopback
// URLs and fake hostnames (*.example) used in tests don't trigger rejections.
//
// IMPORTANT: db.DB is saved before assignment and restored via t.Cleanup so
// that tests running after this one are not polluted by a closed mock.
// This is the single root cause of the systemic CI/Platform (Go) failures on
// main HEAD 8026f020 (mc#975).
func setupTestDB(t *testing.T) sqlmock.Sqlmock {
t.Helper()
mockDB, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("failed to create sqlmock: %v", err)
}
prevDB := db.DB
db.DB = mockDB
t.Cleanup(func() { db.DB = prevDB; mockDB.Close() })
t.Cleanup(func() { mockDB.Close() })
// Disable SSRF checks for the duration of this test only. Restore
// the previous state via t.Cleanup so that TestIsSafeURL_* tests
@ -62,11 +56,6 @@ func setupTestDB(t *testing.T) sqlmock.Sqlmock {
return mock
}
func waitForHandlerAsyncBeforeDBCleanup(t *testing.T, h *WorkspaceHandler) {
t.Helper()
t.Cleanup(h.waitAsyncForTest)
}
// setupTestRedis creates a miniredis instance and assigns it to the global db.RDB.
func setupTestRedis(t *testing.T) *miniredis.Miniredis {
t.Helper()
@ -366,11 +355,6 @@ func TestWorkspaceCreate(t *testing.T) {
}
func TestBuildProvisionerConfig_IncludesAwarenessSettings(t *testing.T) {
mock := setupTestDB(t)
mock.ExpectQuery(`SELECT digest FROM runtime_image_pins`).
WithArgs("claude-code").
WillReturnError(sql.ErrNoRows)
broadcaster := newTestBroadcaster()
handler := NewWorkspaceHandler(broadcaster, nil, "http://localhost:8080", "/tmp/configs")
@ -382,7 +366,7 @@ func TestBuildProvisionerConfig_IncludesAwarenessSettings(t *testing.T) {
"ws-123",
"/tmp/configs/template",
map[string][]byte{"config.yaml": []byte("name: test")},
models.CreateWorkspacePayload{Tier: 2, Runtime: "claude-code", WorkspaceDir: "/tmp/workspace", WorkspaceAccess: "read_write"},
models.CreateWorkspacePayload{Tier: 2, Runtime: "claude-code"},
map[string]string{"OPENAI_API_KEY": "sk-test"},
"/tmp/plugins",
"workspace:ws-123",
@ -407,21 +391,21 @@ func TestWorkspaceList(t *testing.T) {
broadcaster := newTestBroadcaster()
handler := NewWorkspaceHandler(broadcaster, nil, "http://localhost:8080", "/tmp/configs")
// 23 cols: broadcast_enabled + talk_to_user_enabled added after monthly_spend
// (migration 20260514). Column order must match scanWorkspaceRow exactly.
// 21 cols: `max_concurrent_tasks` added between active_tasks and
// last_error_rate (see scanWorkspaceRow + COALESCE(w.max_concurrent_tasks, 1)
// in workspace.go). Column order must match that scan exactly.
columns := []string{
"id", "name", "role", "tier", "status", "agent_card", "url",
"parent_id", "active_tasks", "max_concurrent_tasks",
"last_error_rate", "last_sample_error",
"uptime_seconds", "current_task", "runtime", "workspace_dir", "x", "y", "collapsed",
"budget_limit", "monthly_spend",
"broadcast_enabled", "talk_to_user_enabled",
}
rows := sqlmock.NewRows(columns).
AddRow("ws-1", "Agent One", "worker", 1, "online", []byte("null"), "http://localhost:8001",
nil, 0, 1, 0.0, "", 100, "", "claude-code", "", 10.0, 20.0, false, nil, int64(0), false, true).
nil, 0, 1, 0.0, "", 100, "", "claude-code", "", 10.0, 20.0, false, nil, int64(0)).
AddRow("ws-2", "Agent Two", "manager", 2, "provisioning", []byte("null"), "",
nil, 0, 1, 0.0, "", 0, "", "langgraph", "", 50.0, 60.0, false, nil, int64(0), false, true)
nil, 0, 1, 0.0, "", 0, "", "langgraph", "", 50.0, 60.0, false, nil, int64(0))
mock.ExpectQuery("SELECT w.id, w.name").
WillReturnRows(rows)
@ -1135,14 +1119,13 @@ func TestWorkspaceGet_CurrentTask(t *testing.T) {
"parent_id", "active_tasks", "max_concurrent_tasks", "last_error_rate", "last_sample_error",
"uptime_seconds", "current_task", "runtime", "workspace_dir", "x", "y", "collapsed",
"budget_limit", "monthly_spend",
"broadcast_enabled", "talk_to_user_enabled",
}
mock.ExpectQuery("SELECT w.id, w.name").
WithArgs("dddddddd-0004-0000-0000-000000000000").
WillReturnRows(sqlmock.NewRows(columns).AddRow(
"dddddddd-0004-0000-0000-000000000000", "Task Worker", "worker", 1, "online", []byte("null"), "http://localhost:9000",
nil, 2, 1, 0.0, "", 300, "Analyzing document", "langgraph", "", 10.0, 20.0, false,
nil, int64(0), false, true,
nil, int64(0),
))
w := httptest.NewRecorder()

File diff suppressed because it is too large Load Diff

View File

@ -751,9 +751,9 @@ func TestMCPHandler_SendMessageToUser_DBErrorLogsAndStill200s(t *testing.T) {
t.Setenv("MOLECULE_MCP_ALLOW_SEND_MESSAGE", "true")
h, mock := newMCPHandler(t)
mock.ExpectQuery("SELECT name, talk_to_user_enabled FROM workspaces").
mock.ExpectQuery("SELECT name FROM workspaces").
WithArgs("ws-err").
WillReturnRows(sqlmock.NewRows([]string{"name", "talk_to_user_enabled"}).AddRow("CEO Ryan PC", true))
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow("CEO Ryan PC"))
// INSERT fails — must NOT abort the tool response.
mock.ExpectExec(`INSERT INTO activity_logs.*'a2a_receive'.*'notify'`).
@ -802,9 +802,9 @@ func TestMCPHandler_SendMessageToUser_ResponseBodyShape(t *testing.T) {
const userMessage = "Hi there from the agent"
mock.ExpectQuery("SELECT name, talk_to_user_enabled FROM workspaces").
mock.ExpectQuery("SELECT name FROM workspaces").
WithArgs("ws-shape").
WillReturnRows(sqlmock.NewRows([]string{"name", "talk_to_user_enabled"}).AddRow("CEO Ryan PC", true))
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow("CEO Ryan PC"))
// Capture the response_body argument and assert its exact shape.
mock.ExpectExec(`INSERT INTO activity_logs.*'a2a_receive'.*'notify'`).
@ -861,9 +861,9 @@ func TestMCPHandler_SendMessageToUser_PersistsToActivityLog(t *testing.T) {
// before it does anything else. Returning a name lets the
// broadcast payload populate; the test doesn't assert on the
// broadcast (no observable WS in this fake), only on the DB.
mock.ExpectQuery("SELECT name, talk_to_user_enabled FROM workspaces").
mock.ExpectQuery("SELECT name FROM workspaces").
WithArgs("ws-msg").
WillReturnRows(sqlmock.NewRows([]string{"name", "talk_to_user_enabled"}).AddRow("CEO Ryan PC", true))
WillReturnRows(sqlmock.NewRows([]string{"name"}).AddRow("CEO Ryan PC"))
// The persistence INSERT — pin the exact shape so a future
// refactor that switches columns or drops `method='notify'`

View File

@ -15,7 +15,6 @@ import (
"gopkg.in/yaml.v3"
)
// resolvePromptRef reads a prompt body from either an inline string or a
// file ref relative to the workspace's files_dir. Inline always wins when
// both are non-empty (caller-provided inline is more authoritative than a
@ -79,105 +78,17 @@ func hasUnresolvedVarRef(original, expanded string) bool {
}
// expandWithEnv expands ${VAR} and $VAR references in s using the env map.
// Falls back to the platform process env only when the whole value is a
// single variable reference; embedded process-env expansion is too broad for
// imported org YAML because host variables such as HOME are not template data.
// Falls back to the platform process env if a var isn't in the map.
func expandWithEnv(s string, env map[string]string) string {
if s == "" {
return ""
}
var b strings.Builder
for i := 0; i < len(s); {
if s[i] != '$' {
b.WriteByte(s[i])
i++
continue
return os.Expand(s, func(key string) string {
if v, ok := env[key]; ok {
return v
}
if i+1 >= len(s) {
b.WriteByte('$')
i++
continue
}
if s[i+1] == '{' {
end := strings.IndexByte(s[i+2:], '}')
if end < 0 {
b.WriteByte('$')
i++
continue
}
end += i + 2
key := s[i+2 : end]
ref := s[i : end+1]
b.WriteString(expandEnvRef(key, ref, s, env))
i = end + 1
continue
}
if !isEnvIdentStart(s[i+1]) {
b.WriteByte('$')
i++
continue
}
j := i + 2
for j < len(s) && isEnvIdentPart(s[j]) {
j++
}
key := s[i+1 : j]
ref := s[i:j]
b.WriteString(expandEnvRef(key, ref, s, env))
i = j
}
return b.String()
}
func isEnvIdentStart(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_'
}
func isEnvIdentPart(c byte) bool {
return isEnvIdentStart(c) || (c >= '0' && c <= '9')
}
// expandEnvRef resolves a single variable reference extracted from s.
//
// Guards:
// - Empty key → "$$" escape, return "$"
// - key[0] not POSIX ident start → "$" + partial chars, return "$<chars>"
// - Key in env map → return the mapped value (template override wins)
// - Otherwise → only fall back to os.Getenv if the whole input string IS the
// variable reference (ref == whole).
//
// Bare $VAR format:
// $HOME (alone) → ref==whole → os.Getenv ✓ (host HOME is org-template HOME)
// $HOME/path (partial) → ref!=whole → literal "$HOME" ✓ (CWE-78: prevents host leak)
//
// Braced ${VAR} format:
// ${HOME} (alone) → ref==whole → os.Getenv ✓
// ${ROLE}/admin (partial) → ref!=whole → literal ✓
// "yes and ${NOT_SET}" (embedded) → ref!=whole → literal ✓
//
// This is the CWE-78 fix from commit a3a358f9.
func expandEnvRef(key, ref, whole string, env map[string]string) string {
if key == "" {
return "$"
}
if !isEnvIdentStart(key[0]) {
return "$" + key
}
if v, ok := env[key]; ok {
return v
}
if ref == whole {
return os.Getenv(key)
}
return ref
})
}
// loadWorkspaceEnv reads the org root .env and the workspace-specific .env
// (workspace overrides org root). Used by both secret injection and channel
// config expansion.
//

View File

@ -1,126 +0,0 @@
package handlers
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// setupOrgEnv creates a temp dir, optionally seeds it with an org-root .env
// file, and returns the dir path.
func setupOrgEnv(t *testing.T, orgEnvContent string) string {
	t.Helper()
	dir := t.TempDir()
	if orgEnvContent == "" {
		return dir
	}
	envPath := filepath.Join(dir, ".env")
	require.NoError(t, os.WriteFile(envPath, []byte(orgEnvContent), 0o600))
	return dir
}
// Org-root .env alone: every variable loads verbatim.
func Test_loadWorkspaceEnv_orgRootOnly(t *testing.T) {
	root := setupOrgEnv(t, "ORG_VAR=orgval\nORG_DEBUG=true")
	env := loadWorkspaceEnv(root, "")
	assert.Equal(t, "orgval", env["ORG_VAR"])
	assert.Equal(t, "true", env["ORG_DEBUG"])
}
func Test_loadWorkspaceEnv_orgRootMissing(t *testing.T) {
// No .env at org root — should return empty map without error.
dir := t.TempDir()
vars := loadWorkspaceEnv(dir, "")
assertEmpty(t, vars)
}
// Workspace .env merges over the org-root .env: the workspace wins on shared
// keys while org-only keys are preserved.
func Test_loadWorkspaceEnv_workspaceEnvMerges(t *testing.T) {
	root := setupOrgEnv(t, "SHARED=sharedval\nORG_ONLY=orgonly")
	wsDir := filepath.Join(root, "myworkspace")
	require.NoError(t, os.MkdirAll(wsDir, 0o700))
	wsEnv := []byte("WS_VAR=wsval\nSHARED=overridden")
	require.NoError(t, os.WriteFile(filepath.Join(wsDir, ".env"), wsEnv, 0o600))
	env := loadWorkspaceEnv(root, "myworkspace")
	assert.Equal(t, "wsval", env["WS_VAR"])
	assert.Equal(t, "overridden", env["SHARED"]) // workspace overrides org
	assert.Equal(t, "orgonly", env["ORG_ONLY"])  // org vars preserved
}
// An empty filesDir means "org root only"; org vars still load.
func Test_loadWorkspaceEnv_emptyFilesDir(t *testing.T) {
	root := setupOrgEnv(t, "VAR=val")
	env := loadWorkspaceEnv(root, "")
	assert.Equal(t, "val", env["VAR"])
}
// Test_loadWorkspaceEnv_traversalRejects — regression for #321 / CWE-22.
//
// filesDir "../../../etc" must not escape the org root. resolveInsideRoot
// rejects the traversal so the workspace .env is skipped; the org root .env
// is still loaded (it's read before the guard).
func Test_loadWorkspaceEnv_traversalRejects(t *testing.T) {
	org := setupOrgEnv(t, "INNOCENT=val\nSAFE_WS=wsval")
	parent := filepath.Dir(org)
	// A hostile .env one level above the org root — must never be read.
	require.NoError(t, os.WriteFile(filepath.Join(parent, ".env"), []byte("MALICIOUS=evil"), 0o600))
	// Also create a workspace dir inside org to prove it IS accessible normally.
	wsDir := filepath.Join(org, "legit-workspace")
	require.NoError(t, os.MkdirAll(wsDir, 0o700))
	require.NoError(t, os.WriteFile(filepath.Join(wsDir, ".env"), []byte("WS_SECRET=ssh-key-123"), 0o600))
	// Traversal is blocked.
	vars := loadWorkspaceEnv(org, "../../../etc")
	// Org root vars present; workspace vars blocked.
	assert.Equal(t, "val", vars["INNOCENT"])
	assert.Equal(t, "wsval", vars["SAFE_WS"]) // from org root .env
	assert.Empty(t, vars["WS_SECRET"])        // workspace .env blocked by traversal guard
	_, hasEvil := vars["MALICIOUS"]
	assert.False(t, hasEvil, "MALICIOUS from escaped path must not appear")
}
// Test_loadWorkspaceEnv_traversalWithDots: a sibling-traversal attempt — go
// up one level then into a sibling dir. The sibling dir is NOT inside org,
// so it must be rejected.
func Test_loadWorkspaceEnv_traversalWithDots(t *testing.T) {
	org := setupOrgEnv(t, "INNOCENT=val")
	parent := filepath.Dir(org)
	require.NoError(t, os.MkdirAll(filepath.Join(parent, "sibling"), 0o700))
	require.NoError(t, os.WriteFile(filepath.Join(parent, "sibling/.env"), []byte("LEAKED=secret"), 0o600))
	vars := loadWorkspaceEnv(org, "../sibling")
	// Org vars loaded; sibling vars blocked.
	assert.Equal(t, "val", vars["INNOCENT"])
	assert.Empty(t, vars["LEAKED"], "sibling traversal must be rejected")
}
// Absolute filesDir paths are rejected outright by resolveInsideRoot; only
// the org-root .env is loaded.
func Test_loadWorkspaceEnv_absolutePathRejected(t *testing.T) {
	root := setupOrgEnv(t, "INNOCENT=val")
	env := loadWorkspaceEnv(root, "/etc")
	assert.Equal(t, "val", env["INNOCENT"]) // org root still loaded
	assert.Empty(t, env["SAFE_WS"])
}
// Test_loadWorkspaceEnv_dotPathRejected: "." resolves to the org root itself
// — this is NOT a traversal, but it would make the "workspace" .env be the
// org root .env, which is already loaded. resolveInsideRoot accepts ".", so
// the correct result is just the org vars (same as org root, no change).
func Test_loadWorkspaceEnv_dotPathRejected(t *testing.T) {
	org := setupOrgEnv(t, "INNOCENT=val")
	vars := loadWorkspaceEnv(org, ".")
	// "." passes resolveInsideRoot (resolves to org root, which is valid).
	// But workspace path org/.env is the same as org/.env already loaded.
	assert.Equal(t, "val", vars["INNOCENT"])
}
func Test_loadWorkspaceEnv_emptyOrgRootReturnsEmpty(t *testing.T) {
vars := loadWorkspaceEnv("", "some/dir")
assertEmpty(t, vars)
}
// A nonexistent workspace dir is harmless: org vars still load.
func Test_loadWorkspaceEnv_missingWorkspaceDir(t *testing.T) {
	root := setupOrgEnv(t, "ORG=val")
	env := loadWorkspaceEnv(root, "nonexistent")
	assert.Equal(t, "val", env["ORG"])
}
// assertEmpty asserts that m has no entries; the map contents are included
// in the failure message for easier debugging.
func assertEmpty(t *testing.T, m map[string]string) {
	t.Helper()
	assert.Equal(t, 0, len(m), "expected empty map, got %v", m)
}

View File

@ -1,759 +0,0 @@
package handlers
import (
"testing"
"github.com/stretchr/testify/assert"
)
// ── isSafeRoleName ────────────────────────────────────────────────────────────
// Role names made of letters, digits, '-' and '_' are accepted.
func TestIsSafeRoleName_Valid(t *testing.T) {
	valid := []string{
		"backend",
		"frontend",
		"backend-engineer",
		"Frontend_Engineer",
		"DevOps123",
		"sre-team",
		"a",
		"ABC",
		"Role_With_Underscores_And-Numbers123",
	}
	for _, role := range valid {
		role := role
		t.Run(role, func(t *testing.T) {
			if !isSafeRoleName(role) {
				t.Errorf("isSafeRoleName(%q): expected true, got false", role)
			}
		})
	}
}
// TestIsSafeRoleName_Invalid: table of role names that must be rejected —
// empty string, dot components, path separators, whitespace, and
// shell/regex metacharacters.
func TestIsSafeRoleName_Invalid(t *testing.T) {
	cases := []struct {
		name string // subtest label
		role string // candidate role expected to be rejected
	}{
		{"empty", ""},
		{"dot", "."},
		{"double dot", ".."},
		{"path separator", "backend/engineer"},
		{"space", "backend engineer"},
		{"special char", "backend@engineer"},
		{"at sign", "role@team"},
		{"colon", "role:admin"},
		{"hash", "role#1"},
		{"percent", "role%20"},
		{"quote", `role"name`},
		{"backslash", `role\name`},
		{"tilde", "role~test"},
		{"backtick", "`role"},
		{"bracket open", "[role]"},
		{"bracket close", "role]"},
		{"plus", "role+admin"},
		{"equals", "role=admin"},
		{"caret", "role^admin"},
		{"question mark", "role?"},
		{"pipe at end", "role|"},
		{"greater than", "role>"},
		{"asterisk", "role*"},
		{"ampersand", "role&"},
		{"exclamation at end", "role!"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if isSafeRoleName(tc.role) {
				t.Errorf("isSafeRoleName(%q): expected false, got true", tc.role)
			}
		})
	}
}
// ── hasUnresolvedVarRef ───────────────────────────────────────────────────────
// TestHasUnresolvedVarRef_NoVars: inputs without a variable reference are never
// reported as unresolved, even when the string contains stray '$' characters.
func TestHasUnresolvedVarRef_NoVars(t *testing.T) {
	inputs := []string{
		"",
		"plain text",
		"no variables here",
		"123 numeric",
		"$",
		"${}",
		"$5",
		"$$$$",
	}
	for _, in := range inputs {
		in := in
		t.Run(in, func(t *testing.T) {
			if hasUnresolvedVarRef(in, in) {
				t.Errorf("hasUnresolvedVarRef(%q, %q): expected false, got true", in, in)
			}
		})
	}
}

// TestHasUnresolvedVarRef_Resolved: expansion consumed the var refs ("consumed"
// meaning the output no longer contains the original reference syntax).
func TestHasUnresolvedVarRef_Resolved(t *testing.T) {
	table := []struct {
		in         string
		out        string
		unresolved bool // true = function reports unresolved
	}{
		// Empty output is conservatively treated as unresolved — the function
		// cannot distinguish "var set to empty" from "var stripped because it
		// was not found". Documented design choice; callers that want
		// empty=resolved must pre-process the output first.
		{"${VAR}", "", true},
		{"${VAR}", "value", false}, // var replaced
		{"$VAR", "value", false},   // bare var replaced
		{"prefix${VAR}suffix", "prefixvaluesuffix", false},
		{"${A}${B}", "ab", false},
		// FOO=FOO and BAR=BAR — both replaced; no ${...} syntax remains in the
		// expanded output, so the function reports resolved.
		{"${FOO} and ${BAR}", "FOO and BAR", false},
	}
	for _, c := range table {
		c := c
		t.Run(c.in, func(t *testing.T) {
			if got := hasUnresolvedVarRef(c.in, c.out); got != c.unresolved {
				t.Errorf("hasUnresolvedVarRef(%q, %q): got %v, want %v", c.in, c.out, got, c.unresolved)
			}
		})
	}
}

// TestHasUnresolvedVarRef_Unresolved: expansion left the refs intact → unresolved.
func TestHasUnresolvedVarRef_Unresolved(t *testing.T) {
	table := []struct {
		in  string
		out string
	}{
		{"${VAR}", "${VAR}"}, // untouched
		{"$VAR", "$VAR"},     // bare untouched
		{"prefix${VAR}suffix", "prefix${VAR}suffix"},
		{"${A}${B}", "${A}${B}"}, // both unresolved
		{"${FOO}", ""},           // empty result with var ref in original
	}
	for _, c := range table {
		c := c
		t.Run(c.in, func(t *testing.T) {
			if !hasUnresolvedVarRef(c.in, c.out) {
				t.Errorf("hasUnresolvedVarRef(%q, %q): expected true, got false", c.in, c.out)
			}
		})
	}
}
// ── expandWithEnv ─────────────────────────────────────────────────────────────
// TestExpandWithEnv_Basic: table-driven expansion of $VAR / ${VAR} against a
// supplied env map; keys absent from both the map and the process env expand
// to the empty string.
func TestExpandWithEnv_Basic(t *testing.T) {
	env := map[string]string{"FOO": "bar", "BAZ": "qux"}
	table := []struct {
		in   string
		want string
	}{
		{"", ""},
		{"no vars", "no vars"},
		{"${FOO}", "bar"},
		{"$FOO", "bar"},
		{"prefix${FOO}suffix", "prefixbarsuffix"},
		{"${FOO}${BAZ}", "barqux"},
		{"${MISSING}", ""}, // not in env, not in os env → empty
	}
	for _, c := range table {
		c := c
		t.Run(c.in, func(t *testing.T) {
			if got := expandWithEnv(c.in, env); got != c.want {
				t.Errorf("expandWithEnv(%q, %v) = %q, want %q", c.in, env, got, c.want)
			}
		})
	}
}
// ── mergeCategoryRouting ─────────────────────────────────────────────────────
// TestMergeCategoryRouting_EmptyInputs: nil or empty maps merge to empty.
func TestMergeCategoryRouting_EmptyInputs(t *testing.T) {
	if got := mergeCategoryRouting(nil, nil); len(got) != 0 {
		t.Errorf("mergeCategoryRouting(nil, nil): got %v, want empty", got)
	}
	if got := mergeCategoryRouting(map[string][]string{}, map[string][]string{}); len(got) != 0 {
		t.Errorf("mergeCategoryRouting({}, {}): got %v, want empty", got)
	}
}

// TestMergeCategoryRouting_DefaultsOnly: with no workspace overrides the
// defaults pass through untouched.
func TestMergeCategoryRouting_DefaultsOnly(t *testing.T) {
	defaults := map[string][]string{
		"security": {"Backend Engineer", "DevOps"},
		"ui":       {"Frontend Engineer"},
		"data":     {"Data Engineer"},
	}
	got := mergeCategoryRouting(defaults, nil)
	if len(got) != 3 {
		t.Errorf("got %d keys, want 3", len(got))
	}
	if len(got["security"]) != 2 {
		t.Errorf("security roles: got %v, want 2", got["security"])
	}
}

// TestMergeCategoryRouting_WorkspaceOverrides: a workspace entry can narrow,
// drop (empty list), or add a category.
func TestMergeCategoryRouting_WorkspaceOverrides(t *testing.T) {
	defaults := map[string][]string{
		"security": {"Backend Engineer", "DevOps"},
		"ui":       {"Frontend Engineer"},
	}
	ws := map[string][]string{
		"security": {"SRE Team"},      // narrows
		"ui":       {},                // drops
		"infra":    {"Platform Team"}, // adds
	}
	got := mergeCategoryRouting(defaults, ws)
	if len(got["security"]) != 1 || got["security"][0] != "SRE Team" {
		t.Errorf("security: got %v, want [SRE Team]", got["security"])
	}
	if _, present := got["ui"]; present {
		t.Errorf("ui should be dropped, got %v", got["ui"])
	}
	if len(got["infra"]) != 1 || got["infra"][0] != "Platform Team" {
		t.Errorf("infra: got %v, want [Platform Team]", got["infra"])
	}
}

// TestMergeCategoryRouting_EmptyListDrops: an empty workspace list deletes the
// category from the merged result.
func TestMergeCategoryRouting_EmptyListDrops(t *testing.T) {
	got := mergeCategoryRouting(
		map[string][]string{"foo": {"A", "B"}},
		map[string][]string{"foo": {}},
	)
	if _, present := got["foo"]; present {
		t.Errorf("foo with empty ws list: should be dropped, got %v", got["foo"])
	}
}

// TestMergeCategoryRouting_EmptyKeySkipped: the empty category name is ignored.
func TestMergeCategoryRouting_EmptyKeySkipped(t *testing.T) {
	got := mergeCategoryRouting(
		map[string][]string{"": {"Role"}},
		map[string][]string{"": {}},
	)
	if _, present := got[""]; present {
		t.Errorf("empty key should be skipped, got %v", got[""])
	}
}
// ── renderCategoryRoutingYAML ────────────────────────────────────────────────
// TestRenderCategoryRoutingYAML_Empty: nil and empty maps both render to "".
func TestRenderCategoryRoutingYAML_Empty(t *testing.T) {
	for _, routing := range []map[string][]string{nil, {}} {
		out, err := renderCategoryRoutingYAML(routing)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if out != "" {
			t.Errorf("got %q, want empty string", out)
		}
	}
}
// TestRenderCategoryRoutingYAML_StableOrdering: keys are sorted so output is
// deterministic regardless of map iteration order.
func TestRenderCategoryRoutingYAML_StableOrdering(t *testing.T) {
	m := map[string][]string{
		"zebra":  {"A"},
		"alpha":  {"B"},
		"middle": {"C"},
	}
	out, err := renderCategoryRoutingYAML(m)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Locate the first occurrence of each key. The previous version used 0 as
	// the "not found" sentinel, which is indistinguishable from a match at
	// offset 0 — and "alpha", the first sorted key, can legitimately sit at
	// offset 0. It also bounded the scan with i < len(out)-N, missing a match
	// flush against the end of the string. Use -1 and an inclusive bound.
	find := func(sub string) int {
		for i := 0; i+len(sub) <= len(out); i++ {
			if out[i:i+len(sub)] == sub {
				return i
			}
		}
		return -1
	}
	ai, mi, zi := find("alpha"), find("middle"), find("zebra")
	if ai < 0 || mi < 0 || zi < 0 {
		t.Fatalf("could not locate all keys in output: %s", out)
	}
	// alpha must come before middle, which must come before zebra.
	if ai >= mi || mi >= zi {
		t.Errorf("keys not sorted: alpha=%d middle=%d zebra=%d, output:\n%s", ai, mi, zi, out)
	}
}
// TestRenderCategoryRoutingYAML_SpecialCharsEscaped: keys/values needing YAML
// quoting must still marshal without error into non-empty output.
func TestRenderCategoryRoutingYAML_SpecialCharsEscaped(t *testing.T) {
	routing := map[string][]string{
		"key:with:colons": {"Role: Admin"},
		"key with space":  {"Role"},
	}
	out, err := renderCategoryRoutingYAML(routing)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// The output must be valid YAML (yaml.Marshal handles quoting); here we
	// only assert it is non-empty.
	if out == "" {
		t.Error("output is empty")
	}
}
// ── appendYAMLBlock ───────────────────────────────────────────────────────────
// TestAppendYAMLBlock_NoExisting: appending to nil yields just the block.
func TestAppendYAMLBlock_NoExisting(t *testing.T) {
	if out := string(appendYAMLBlock(nil, "key: value")); out != "key: value" {
		t.Errorf("got %q, want 'key: value'", out)
	}
}

// TestAppendYAMLBlock_EmptyBlock: when existing lacks a trailing \n, one is
// added before the (empty) block — the result always ends cleanly.
func TestAppendYAMLBlock_EmptyBlock(t *testing.T) {
	out := string(appendYAMLBlock([]byte("existing: data"), ""))
	if want := "existing: data\n"; out != want {
		t.Errorf("got %q, want %q", out, want)
	}
}

// TestAppendYAMLBlock_AppendsWithNewline: a newline is inserted between the
// existing content and the block when missing.
func TestAppendYAMLBlock_AppendsWithNewline(t *testing.T) {
	out := string(appendYAMLBlock([]byte("key: value"), "new: entry"))
	if want := "key: value\nnew: entry"; out != want {
		t.Errorf("got %q, want %q", out, want)
	}
}

// TestAppendYAMLBlock_AlreadyEndsWithNewline: no duplicate newline is added.
func TestAppendYAMLBlock_AlreadyEndsWithNewline(t *testing.T) {
	out := string(appendYAMLBlock([]byte("key: value\n"), "new: entry"))
	if want := "key: value\nnew: entry"; out != want {
		t.Errorf("got %q, want %q", out, want)
	}
}
// ── mergePlugins ─────────────────────────────────────────────────────────────
// TestMergePlugins_EmptyInputs: nil or empty slices merge to empty.
func TestMergePlugins_EmptyInputs(t *testing.T) {
	if got := mergePlugins(nil, nil); len(got) != 0 {
		t.Errorf("got %v, want []", got)
	}
	if got := mergePlugins([]string{}, []string{}); len(got) != 0 {
		t.Errorf("got %v, want []", got)
	}
}

// TestMergePlugins_BasicMerge: defaults first, workspace appended, dups removed.
func TestMergePlugins_BasicMerge(t *testing.T) {
	got := mergePlugins([]string{"plugin-a", "plugin-b"}, []string{"plugin-b", "plugin-c"})
	// defaults first, ws appended, b deduplicated
	if len(got) != 3 {
		t.Errorf("got %v, want 3 items", got)
	}
	if got[0] != "plugin-a" || got[1] != "plugin-b" || got[2] != "plugin-c" {
		t.Errorf("got %v, want [a, b, c]", got)
	}
}

// TestMergePlugins_ExcludeWithBang: a "!name" entry removes name from defaults.
func TestMergePlugins_ExcludeWithBang(t *testing.T) {
	got := mergePlugins([]string{"plugin-a", "plugin-b", "plugin-c"}, []string{"!plugin-b"})
	if len(got) != 2 {
		t.Errorf("got %v, want 2 items", got)
	}
	if got[0] != "plugin-a" || got[1] != "plugin-c" {
		t.Errorf("got %v, want [a, c]", got)
	}
}

// TestMergePlugins_ExcludeWithDash: "-name" is an equivalent exclusion form.
func TestMergePlugins_ExcludeWithDash(t *testing.T) {
	got := mergePlugins([]string{"plugin-a", "plugin-b", "plugin-c"}, []string{"-plugin-b"})
	if len(got) != 2 || got[0] != "plugin-a" || got[1] != "plugin-c" {
		t.Errorf("got %v, want [a, c]", got)
	}
}

// TestMergePlugins_ExcludeNonexistent: excluding an absent plugin is a no-op.
func TestMergePlugins_ExcludeNonexistent(t *testing.T) {
	got := mergePlugins([]string{"plugin-a", "plugin-b"}, []string{"!plugin-c"})
	if len(got) != 2 {
		t.Errorf("got %v, want 2 items", got)
	}
}

// TestMergePlugins_ExcludeEmptyTarget: a bare "!" excludes nothing.
func TestMergePlugins_ExcludeEmptyTarget(t *testing.T) {
	got := mergePlugins([]string{"plugin-a", "plugin-b"}, []string{"!"})
	if len(got) != 2 {
		t.Errorf("got %v, want 2 items", got)
	}
}

// TestMergePlugins_EmptyPlugin: empty-string entries are dropped on both sides.
func TestMergePlugins_EmptyPlugin(t *testing.T) {
	got := mergePlugins([]string{"", "plugin-a", ""}, []string{"plugin-b", ""})
	if len(got) != 2 {
		t.Errorf("got %v, want 2 items", got)
	}
}
// ── Additional coverage: expandWithEnv ──────────────────────────────
// TestExpandWithEnv_BracedVar: ${VAR} expands from the supplied map.
func TestExpandWithEnv_BracedVar(t *testing.T) {
	got := expandWithEnv("value is ${FOO}", map[string]string{"FOO": "bar", "BAZ": "qux"})
	assert.Equal(t, "value is bar", got)
}

// TestExpandWithEnv_DollarVar: bare $VAR form also expands.
func TestExpandWithEnv_DollarVar(t *testing.T) {
	got := expandWithEnv("$X + $Y = 3", map[string]string{"X": "1", "Y": "2"})
	assert.Equal(t, "1 + 2 = 3", got)
}

// TestExpandWithEnv_Mixed: adjacent braced refs expand independently.
func TestExpandWithEnv_Mixed(t *testing.T) {
	got := expandWithEnv("${A}_${B}", map[string]string{"A": "alpha", "B": "beta"})
	assert.Equal(t, "alpha_beta", got)
}

// TestExpandWithEnv_MissingVar: a whole-string missing var falls back to
// os.Getenv, which returns "" for unset vars.
func TestExpandWithEnv_MissingVar(t *testing.T) {
	got := expandWithEnv("${UNSET}", map[string]string{})
	assert.Equal(t, "", got)
}

// TestExpandWithEnv_EmptyMap: input without refs passes through untouched.
func TestExpandWithEnv_EmptyMap(t *testing.T) {
	got := expandWithEnv("no vars here", map[string]string{})
	assert.Equal(t, "no vars here", got)
}

// TestExpandWithEnv_LiteralDollar: '$' not followed by an identifier stays put.
func TestExpandWithEnv_LiteralDollar(t *testing.T) {
	got := expandWithEnv("cost $100", map[string]string{})
	assert.Equal(t, "cost $100", got)
}

// TestExpandWithEnv_PartiallyPresent: known vars expand; embedded unknown
// braced refs remain literal.
func TestExpandWithEnv_PartiallyPresent(t *testing.T) {
	got := expandWithEnv("${SET} and ${NOT_SET}", map[string]string{"SET": "yes"})
	assert.Equal(t, "yes and ${NOT_SET}", got)
}

// TestExpandWithEnv_EmbeddedMissingProcessEnvStaysLiteral: an embedded ref to
// an empty process-env var is left as the literal reference.
func TestExpandWithEnv_EmbeddedMissingProcessEnvStaysLiteral(t *testing.T) {
	t.Setenv("MOL_TEST_EMBEDDED_MISSING", "")
	got := expandWithEnv("prefix/${MOL_TEST_EMBEDDED_MISSING}/suffix", map[string]string{})
	assert.Equal(t, "prefix/${MOL_TEST_EMBEDDED_MISSING}/suffix", got)
}
// POSIX identifier guard regression tests (CWE-78 fix).
// Keys not starting with [a-zA-Z_] must not be looked up in env or os.Getenv.
func TestExpandWithEnv_DigitPrefix_NotExpanded(t *testing.T) {
// ${0}, ${5}, ${1VAR} — numeric prefix → not a valid shell identifier.
// Guard must return "$0", "$5", "$1VAR" literally; no env lookup.
cases := []struct {
input string
want string
}{
{"${0}", "$0"},
{"${5}", "$5"},
{"${1VAR}", "$1VAR"},
{"prefix ${0} suffix", "prefix $0 suffix"},
{"$0", "$0"},
{"$5", "$5"},
{"HOME=${HOME}", "HOME=${HOME}"}, // HOME is valid but embedded in larger string
}
for _, tc := range cases {
t.Run(tc.input, func(t *testing.T) {
got := expandWithEnv(tc.input, map[string]string{})
assert.Equal(t, tc.want, got)
})
}
}
func TestExpandWithEnv_EmptyKey_ReturnsDollar(t *testing.T) {
// ${} → "$" (empty key, guard returns "$")
result := expandWithEnv("value=${}", map[string]string{})
assert.Equal(t, "value=$", result)
}
// mergeCategoryRouting tests — unions defaults with per-workspace routing.
// ── Additional coverage: mergeCategoryRouting ──────────────────────
// TestMergeCategoryRouting_WorkspaceAddsCategory: workspace-only categories
// are unioned with the defaults.
func TestMergeCategoryRouting_WorkspaceAddsCategory(t *testing.T) {
	got := mergeCategoryRouting(
		map[string][]string{"security": {"Backend Engineer"}},
		map[string][]string{"ui": {"Frontend Engineer"}},
	)
	assert.Equal(t, []string{"Backend Engineer"}, got["security"])
	assert.Equal(t, []string{"Frontend Engineer"}, got["ui"])
}

// TestMergeCategoryRouting_EmptyListDropsCategory: an empty workspace list is
// an explicit opt-out; untouched categories survive.
func TestMergeCategoryRouting_EmptyListDropsCategory(t *testing.T) {
	got := mergeCategoryRouting(
		map[string][]string{
			"security": {"Backend Engineer"},
			"infra":    {"SRE"},
		},
		map[string][]string{
			"security": {}, // empty list = explicit drop
		},
	)
	_, hasSecurity := got["security"]
	assert.False(t, hasSecurity)
	assert.Equal(t, []string{"SRE"}, got["infra"])
}

// TestMergeCategoryRouting_EmptyDefaultKeySkipped: "" as a default key is ignored.
func TestMergeCategoryRouting_EmptyDefaultKeySkipped(t *testing.T) {
	got := mergeCategoryRouting(map[string][]string{"": {"Backend Engineer"}}, nil)
	_, has := got[""]
	assert.False(t, has)
}

// TestMergeCategoryRouting_EmptyWorkspaceKeySkipped: "" as a workspace key is
// ignored and does not disturb other categories.
func TestMergeCategoryRouting_EmptyWorkspaceKeySkipped(t *testing.T) {
	got := mergeCategoryRouting(
		map[string][]string{"security": {"Backend Engineer"}},
		map[string][]string{"": {"Some Role"}},
	)
	_, has := got[""]
	assert.False(t, has)
	assert.Equal(t, []string{"Backend Engineer"}, got["security"])
}

// TestMergeCategoryRouting_DoesNotMutateInputs: merging leaves both inputs intact.
func TestMergeCategoryRouting_DoesNotMutateInputs(t *testing.T) {
	defaults := map[string][]string{"security": {"Backend Engineer"}}
	ws := map[string][]string{"security": {"DevOps"}}
	before := defaults["security"][0]
	_ = mergeCategoryRouting(defaults, ws)
	assert.Equal(t, before, defaults["security"][0])
}
// renderCategoryRoutingYAML tests — deterministic YAML emission.
// ── Additional coverage: renderCategoryRoutingYAML ────────────────
// TestRenderCategoryRoutingYAML_SingleCategory: one category renders its key
// and every role.
func TestRenderCategoryRoutingYAML_SingleCategory(t *testing.T) {
	out, err := renderCategoryRoutingYAML(map[string][]string{
		"security": {"Backend Engineer", "DevOps"},
	})
	assert.NoError(t, err)
	for _, want := range []string{"security:", "Backend Engineer", "DevOps"} {
		assert.Contains(t, out, want)
	}
}
// TestRenderCategoryRoutingYAML_MultipleCategoriesSorted: keys are emitted in
// sorted order regardless of map iteration order.
func TestRenderCategoryRoutingYAML_MultipleCategoriesSorted(t *testing.T) {
	routing := map[string][]string{
		"zebra":      {"RoleZ"},
		"alpha":      {"RoleA"},
		"middleware": {"RoleM"},
	}
	result, err := renderCategoryRoutingYAML(routing)
	assert.NoError(t, err)
	// Keys are sorted alphabetically.
	idxAlpha := assertFind(t, result, "alpha:")
	idxZebra := assertFind(t, result, "zebra:")
	idxMid := assertFind(t, result, "middleware:")
	// Previously the ordering comparisons were guarded by `idx > -1`, so a
	// missing key made the test silently pass. Require all three keys first,
	// then the ordering assertions are unconditional.
	assert.True(t, idxAlpha > -1, "alpha: not found in output:\n%s", result)
	assert.True(t, idxMid > -1, "middleware: not found in output:\n%s", result)
	assert.True(t, idxZebra > -1, "zebra: not found in output:\n%s", result)
	assert.True(t, idxAlpha < idxZebra, "alpha should appear before zebra")
	assert.True(t, idxMid < idxZebra, "middleware should appear before zebra")
}
// TestRenderCategoryRoutingYAML_EmptyListCategory: an empty-list category
// still renders its key. (mergeCategoryRouting drops such entries before they
// reach this function; here the renderer is exercised in isolation.)
func TestRenderCategoryRoutingYAML_EmptyListCategory(t *testing.T) {
	out, err := renderCategoryRoutingYAML(map[string][]string{"security": {}})
	assert.NoError(t, err)
	assert.Contains(t, out, "security:")
}

// TestRenderCategoryRoutingYAML_SpecialCharactersEscaped: values with colons,
// quotes and non-ASCII must marshal without panicking into valid YAML.
func TestRenderCategoryRoutingYAML_SpecialCharactersEscaped(t *testing.T) {
	out, err := renderCategoryRoutingYAML(map[string][]string{
		"notes": {`has: colon`, `and "quotes"`, "emoji: 🚀"},
	})
	assert.NoError(t, err)
	assert.Contains(t, out, "notes:")
}
// appendYAMLBlock tests — safe concatenation with newline boundary.
// ── Additional coverage: appendYAMLBlock ───────────────────────────
func TestAppendYAMLBlock_BothEmpty(t *testing.T) {
result := appendYAMLBlock(nil, "")
assert.Nil(t, result)
}
func TestAppendYAMLBlock_ExistingHasNewline(t *testing.T) {
existing := []byte("existing:\n")
block := "key: value\n"
result := appendYAMLBlock(existing, block)
assert.Equal(t, "existing:\nkey: value\n", string(result))
}
func TestAppendYAMLBlock_ExistingNoNewline(t *testing.T) {
existing := []byte("existing:")
block := "key: value\n"
result := appendYAMLBlock(existing, block)
assert.Equal(t, "existing:\nkey: value\n", string(result))
}
func TestAppendYAMLBlock_ExistingEmpty(t *testing.T) {
existing := []byte("")
block := "key: value\n"
result := appendYAMLBlock(existing, block)
assert.Equal(t, "key: value\n", string(result))
}
func TestAppendYAMLBlock_NilExisting(t *testing.T) {
block := "key: value\n"
result := appendYAMLBlock(nil, block)
assert.Equal(t, "key: value\n", string(result))
}
// mergePlugins tests — union with exclusion prefix (!/-).
// ── Additional coverage: mergePlugins (additional cases) ───────────
// TestMergePlugins_DefaultsOnly: with no workspace list the defaults pass through.
func TestMergePlugins_DefaultsOnly(t *testing.T) {
	got := mergePlugins([]string{"plugin-a", "plugin-b"}, nil)
	assert.Equal(t, []string{"plugin-a", "plugin-b"}, got)
}

// TestMergePlugins_WorkspaceAdds: workspace entries append; duplicates of
// defaults are dropped.
func TestMergePlugins_WorkspaceAdds(t *testing.T) {
	got := mergePlugins([]string{"plugin-a"}, []string{"plugin-b", "plugin-a"}) // duplicate of default
	assert.Equal(t, []string{"plugin-a", "plugin-b"}, got)
}

// TestMergePlugins_ExclusionWithBang: "!name" removes name from the result.
func TestMergePlugins_ExclusionWithBang(t *testing.T) {
	got := mergePlugins([]string{"plugin-a", "plugin-b", "plugin-c"}, []string{"!plugin-b"})
	assert.Equal(t, []string{"plugin-a", "plugin-c"}, got)
}

// TestMergePlugins_ExclusionWithDash: "-name" is the equivalent exclusion form.
func TestMergePlugins_ExclusionWithDash(t *testing.T) {
	got := mergePlugins([]string{"plugin-a", "plugin-b", "plugin-c"}, []string{"-plugin-b"})
	assert.Equal(t, []string{"plugin-a", "plugin-c"}, got)
}

// TestMergePlugins_ExclusionEmptyTarget: bare "!" and "-" are no-ops.
func TestMergePlugins_ExclusionEmptyTarget(t *testing.T) {
	got := mergePlugins([]string{"plugin-a", "plugin-b"}, []string{"!", "-"}) // no-op exclusions
	assert.Equal(t, []string{"plugin-a", "plugin-b"}, got)
}

// TestMergePlugins_ExclusionNotInDefaults: excluding an absent plugin is a no-op.
func TestMergePlugins_ExclusionNotInDefaults(t *testing.T) {
	got := mergePlugins([]string{"plugin-a"}, []string{"!plugin-b"})
	assert.Equal(t, []string{"plugin-a"}, got)
}

// TestMergePlugins_WorkspaceAddsNew: a new workspace plugin is appended.
func TestMergePlugins_WorkspaceAddsNew(t *testing.T) {
	got := mergePlugins([]string{"plugin-a"}, []string{"plugin-b"})
	assert.Equal(t, []string{"plugin-a", "plugin-b"}, got)
}

// TestMergePlugins_DeduplicationOrder: defaults come first and every duplicate
// — on either side — collapses to the first occurrence.
func TestMergePlugins_DeduplicationOrder(t *testing.T) {
	got := mergePlugins(
		[]string{"plugin-a", "plugin-a", "plugin-b"},
		[]string{"plugin-b", "plugin-c", "plugin-c"},
	)
	assert.Equal(t, []string{"plugin-a", "plugin-b", "plugin-c"}, got)
}

// TestMergePlugins_ExclusionThenAddSameName: remove then re-add — order matters.
func TestMergePlugins_ExclusionThenAddSameName(t *testing.T) {
	got := mergePlugins([]string{"plugin-a", "plugin-b"}, []string{"!plugin-a", "plugin-a"})
	assert.Equal(t, []string{"plugin-b", "plugin-a"}, got)
}
// isSafeRoleName tests — alphanumeric + hyphen/underscore, no path separators.
// ── Additional coverage: isSafeRoleName ───────────────────────────
// TestIsSafeRoleName_SpecialCharsRejected: common shell metacharacters embedded
// in a role name must be rejected.
func TestIsSafeRoleName_SpecialCharsRejected(t *testing.T) {
	for _, role := range []string{
		"role@name",
		"role#name",
		"role$name",
		"role%name",
		"role&name",
		"role*name",
		"role?name",
		"role=name",
	} {
		if isSafeRoleName(role) {
			t.Errorf("isSafeRoleName(%q) expected false, got true", role)
		}
	}
}
// assertFind returns the index of the first occurrence of substr in s, or -1.
// Despite the name it never fails the test — callers must check the result.
func assertFind(t *testing.T, s, substr string) int {
	t.Helper()
	// strings.Index is the stdlib equivalent of the previous hand-rolled
	// byte-scan loop (including the empty-substr → 0 case).
	return strings.Index(s, substr)
}

View File

@ -1,432 +0,0 @@
package handlers
import (
"path/filepath"
"strings"
"testing"
)
// org_helpers_security_test.go — security-critical path sanitization + role-name
// validation for org template processing. Covers OFFSEC-006-class attacks:
// path traversal via user-controlled files_dir / prompt_file refs, and role-name
// injection via the persona env loader.
// ── resolveInsideRoot ──────────────────────────────────────────────────────────
// TestResolveInsideRoot_EmptyUserPath: the empty path is rejected outright.
func TestResolveInsideRoot_EmptyUserPath(t *testing.T) {
	_, err := resolveInsideRoot("/safe/root", "")
	if err == nil {
		t.Fatalf("empty userPath: expected error, got nil")
	}
	if msg := err.Error(); msg != "path is empty" {
		t.Errorf("empty userPath: got %q, want %q", msg, "path is empty")
	}
}

// TestResolveInsideRoot_AbsolutePathRejected: absolute user paths are refused.
func TestResolveInsideRoot_AbsolutePathRejected(t *testing.T) {
	_, err := resolveInsideRoot("/safe/root", "/etc/passwd")
	if err == nil {
		t.Fatalf("absolute userPath: expected error, got nil")
	}
	if msg := err.Error(); msg != "absolute paths are not allowed" {
		t.Errorf("absolute userPath: got %q, want %q", msg, "absolute paths are not allowed")
	}
}

// TestResolveInsideRoot_DotDotTraversal: ../../etc/passwd escapes and must fail.
func TestResolveInsideRoot_DotDotTraversal(t *testing.T) {
	got, err := resolveInsideRoot("/safe/root", "../../etc/passwd")
	if err == nil {
		t.Fatalf("dotdot traversal: expected error, got %q", got)
	}
	if msg := err.Error(); msg != "path escapes root" {
		t.Errorf("dotdot traversal: got %q, want %q", msg, "path escapes root")
	}
}
// TestResolveInsideRoot_DotDotWithIntermediate: a/b/../../c does NOT escape —
// after normalization a/b/../.. = ., so the path is just "c", a valid
// descendant of the root. The original test expected an error; the correct
// behaviour is success. The OFFSEC-006 escape case is covered separately by
// ../../etc/passwd, which DOES leave the root.
func TestResolveInsideRoot_DotDotWithIntermediate(t *testing.T) {
	// Must use t.TempDir() for a real filesystem path so filepath.Abs resolves.
	root := t.TempDir()
	got, err := resolveInsideRoot(root, "a/b/../../c")
	if err != nil {
		t.Fatalf("a/b/../../c should resolve within root: %v", err)
	}
	// The result must live inside root and name the file "c".
	if !strings.HasPrefix(got, root+string(filepath.Separator)) {
		t.Errorf("result should be inside root %q, got %q", root, got)
	}
	if got[len(got)-1:] != "c" {
		t.Errorf("resolved path should end in 'c', got %q", got)
	}
}
// TestResolveInsideRoot_ValidRelativePath: a plain relative path resolves to a
// descendant of the root. Uses the real filesystem since resolveInsideRoot
// calls filepath.Abs; t.TempDir() provides a real root.
func TestResolveInsideRoot_ValidRelativePath(t *testing.T) {
	root := t.TempDir()
	got, err := resolveInsideRoot(root, "subdir/file.txt")
	if err != nil {
		t.Fatalf("valid relative: unexpected error: %v", err)
	}
	// Separator-guarded prefix check, matching the sibling tests: the previous
	// got[:len(root)] slice could panic on a result shorter than root and
	// would also accept a sibling such as root+"-other".
	if !strings.HasPrefix(got, root+string(filepath.Separator)) {
		t.Errorf("result should start with root %q, got %q", root, got)
	}
}
// TestResolveInsideRoot_ExactRootMatch: "." resolves to the root itself.
func TestResolveInsideRoot_ExactRootMatch(t *testing.T) {
	root := t.TempDir()
	got, err := resolveInsideRoot(root, ".")
	if err != nil {
		t.Fatalf("exact root: unexpected error: %v", err)
	}
	if got != root {
		t.Errorf("exact root match: got %q, want %q", got, root)
	}
}

// TestResolveInsideRoot_DotPathComponent: "./" segments are normalised away —
// ./subdir/./file.txt resolves to root/subdir/file.txt.
func TestResolveInsideRoot_DotPathComponent(t *testing.T) {
	root := t.TempDir()
	got, err := resolveInsideRoot(root, "./subdir/./file.txt")
	if err != nil {
		t.Fatalf("dot path component: unexpected error: %v", err)
	}
	// Check the trailing components regardless of the root's length.
	want := string(filepath.Separator) + "subdir" + string(filepath.Separator) + "file.txt"
	if !strings.HasSuffix(got, want) {
		t.Errorf("dot path component: got %q, want suffix %q", got, want)
	}
}
// TestResolveInsideRoot_NestedDotDotEscapes: a/../../b normalises to ../b,
// which leaves the root and must be rejected.
func TestResolveInsideRoot_NestedDotDotEscapes(t *testing.T) {
	root := t.TempDir()
	got, err := resolveInsideRoot(root, "a/../../b")
	if err == nil {
		t.Fatalf("nested dotdot: expected error, got %q", got)
	}
	if msg := err.Error(); msg != "path escapes root" {
		t.Errorf("nested dotdot: got %q, want %q", msg, "path escapes root")
	}
}

// TestResolveInsideRoot_DotdotAtStart: a leading ".." escapes immediately.
func TestResolveInsideRoot_DotdotAtStart(t *testing.T) {
	root := t.TempDir()
	got, err := resolveInsideRoot(root, "../sibling")
	if err == nil {
		t.Fatalf("../sibling: expected error, got %q", got)
	}
	if msg := err.Error(); msg != "path escapes root" {
		t.Errorf("../sibling: got %q, want %q", msg, "path escapes root")
	}
}
// TestResolveInsideRoot_SiblingNotEscaped: the containment check must be
// separator-guarded — /foo/bar/child is inside /foo/bar, while a bare
// strings.HasPrefix would also (wrongly) match the sibling /foo/baz.
func TestResolveInsideRoot_SiblingNotEscaped(t *testing.T) {
	root := t.TempDir()
	got, err := resolveInsideRoot(root, "valid-subdir/file.txt")
	if err != nil {
		t.Fatalf("sibling not escaped: unexpected error: %v", err)
	}
	if !strings.HasPrefix(got, root+string(filepath.Separator)) {
		t.Errorf("result should be inside root %q, got %q", root, got)
	}
}
// ── isSafeRoleName ────────────────────────────────────────────────────────────
// TestIsSafeRoleName_Empty: the empty name is unsafe.
func TestIsSafeRoleName_Empty(t *testing.T) {
	if isSafeRoleName("") {
		t.Error("isSafeRoleName(\"\"): expected false, got true")
	}
}

// TestIsSafeRoleName_Dot: "." is unsafe.
func TestIsSafeRoleName_Dot(t *testing.T) {
	if isSafeRoleName(".") {
		t.Error("isSafeRoleName(\".\"): expected false, got true")
	}
}

// TestIsSafeRoleName_DotDot: ".." is unsafe.
func TestIsSafeRoleName_DotDot(t *testing.T) {
	if isSafeRoleName("..") {
		t.Error("isSafeRoleName(\"..\"): expected false, got true")
	}
}

// TestIsSafeRoleName_PathTraversal: any name containing traversal segments fails.
func TestIsSafeRoleName_PathTraversal(t *testing.T) {
	for _, name := range []string{
		"../etc",
		"foo/../../../etc",
		"foo/../../bar",
	} {
		if isSafeRoleName(name) {
			t.Errorf("isSafeRoleName(%q): expected false (path traversal), got true", name)
		}
	}
}
// TestIsSafeRoleName_SpecialChars: separators, whitespace, control bytes and
// shell metacharacters are all rejected.
func TestIsSafeRoleName_SpecialChars(t *testing.T) {
	for _, name := range []string{
		"foo:bar",
		"foo bar",
		"foo\tbar",
		"foo\nbar",
		"foo\x00bar",
		"foo@bar",
		"foo#bar",
		"foo$bar",
	} {
		if isSafeRoleName(name) {
			t.Errorf("isSafeRoleName(%q): expected false (special char), got true", name)
		}
	}
}
// ── mergeCategoryRouting ──────────────────────────────────────────────────────
// Duplicate mergeCategoryRouting tests removed to avoid redeclaration with
// org_helpers_pure_test.go. Only security-specific behaviour lives here.
// TestSecureRouting_BothNil: two nil maps merge to empty.
func TestSecureRouting_BothNil(t *testing.T) {
	if got := mergeCategoryRouting(nil, nil); len(got) != 0 {
		t.Errorf("both nil: got %v, want empty", got)
	}
}

// TestSecureRouting_DefaultOnly: defaults pass through with no workspace input.
func TestSecureRouting_DefaultOnly(t *testing.T) {
	got := mergeCategoryRouting(map[string][]string{
		"security": {"Backend Engineer", "DevOps"},
	}, nil)
	if len(got) != 1 {
		t.Fatalf("default only: got %d entries, want 1", len(got))
	}
	if len(got["security"]) != 2 {
		t.Errorf("security roles: got %v, want [Backend Engineer, DevOps]", got["security"])
	}
}

// TestSecureRouting_WorkspaceOnly: workspace categories appear even with nil defaults.
func TestSecureRouting_WorkspaceOnly(t *testing.T) {
	got := mergeCategoryRouting(nil, map[string][]string{
		"ui": {"Frontend Engineer"},
	})
	if len(got) != 1 {
		t.Fatalf("ws only: got %d entries, want 1", len(got))
	}
	if got["ui"][0] != "Frontend Engineer" {
		t.Errorf("ui roles: got %v, want [Frontend Engineer]", got["ui"])
	}
}

// TestSecureRouting_MergeNoOverlap: disjoint categories union cleanly.
func TestSecureRouting_MergeNoOverlap(t *testing.T) {
	got := mergeCategoryRouting(
		map[string][]string{"security": {"Backend Engineer"}},
		map[string][]string{"ui": {"Frontend Engineer"}},
	)
	if len(got) != 2 {
		t.Errorf("merge no overlap: got %d entries, want 2", len(got))
	}
}
// TestSecureRouting_WsOverrideDropsDefault: a workspace list replaces the
// default roles for the same category wholesale.
func TestSecureRouting_WsOverrideDropsDefault(t *testing.T) {
	got := mergeCategoryRouting(
		map[string][]string{"security": {"Backend Engineer", "DevOps"}},
		map[string][]string{"security": {"Security Engineer"}},
	)
	roles := got["security"]
	if len(roles) != 1 {
		t.Errorf("ws override: got %v, want [Security Engineer]", roles)
	}
	if roles[0] != "Security Engineer" {
		t.Errorf("ws override: got %v, want [Security Engineer]", roles)
	}
}

// TestSecureRouting_EmptyListDropsCategory: an empty workspace list opts the
// category out entirely; untouched categories survive.
func TestSecureRouting_EmptyListDropsCategory(t *testing.T) {
	got := mergeCategoryRouting(
		map[string][]string{
			"security": {"Backend Engineer"},
			"ui":       {"Frontend Engineer"},
		},
		map[string][]string{
			"security": {}, // empty list = opt out
		},
	)
	if _, present := got["security"]; present {
		t.Error("empty ws list should delete the category from output")
	}
	if len(got["ui"]) != 1 {
		t.Errorf("ui should still exist: got %v", got["ui"])
	}
}

// TestSecureRouting_EmptyKeySkipped: "" as a category key is ignored.
func TestSecureRouting_EmptyKeySkipped(t *testing.T) {
	got := mergeCategoryRouting(map[string][]string{"": {"Backend Engineer"}}, nil)
	if _, present := got[""]; present {
		t.Error("empty key should be skipped")
	}
}

// TestSecureRouting_EmptyRolesInDefaultSkipped: defaults with no roles are dropped.
func TestSecureRouting_EmptyRolesInDefaultSkipped(t *testing.T) {
	got := mergeCategoryRouting(map[string][]string{"security": {}}, nil)
	if len(got) != 0 {
		t.Errorf("empty roles in default should be skipped, got %v", got)
	}
}
// TestSecureRouting_OriginalMapsUnmodified: merging must not mutate either input.
func TestSecureRouting_OriginalMapsUnmodified(t *testing.T) {
	defaults := map[string][]string{"security": {"Backend Engineer"}}
	ws := map[string][]string{"ui": {"Frontend Engineer"}}
	mergeCategoryRouting(defaults, ws)
	if len(defaults) != 1 || len(defaults["security"]) != 1 {
		t.Error("default routing should be unmodified after merge")
	}
	if len(ws) != 1 {
		t.Error("ws routing should be unmodified after merge")
	}
}
// ── expandWithEnv ─────────────────────────────────────────────────────────────
//
// CWE-78 regression tests. The original fix (a3a358f9) ensures that partial
// variable references like $HOME/path are NOT resolved via os.Getenv — the
// host HOME env var must not leak into org template values. Only whole-string
// references ($VAR or ${VAR}) may fall back to the host process environment.
// TestExpandWithEnv_PartialRefDollarHomePath: $HOME/path must NOT resolve via
// the host's HOME env var — the literal $HOME is returned as-is (CWE-78 fix).
func TestExpandWithEnv_PartialRefDollarHomePath(t *testing.T) {
	if got := expandWithEnv("$HOME/path", nil); got != "$HOME/path" {
		t.Errorf("$HOME/path: got %q, want literal $HOME/path", got)
	}
}

// TestExpandWithEnv_PartialRefBracedRoleAdmin: ${ROLE}/admin with ROLE unset
// expands to the literal ${ROLE}/admin.
func TestExpandWithEnv_PartialRefBracedRoleAdmin(t *testing.T) {
	if got := expandWithEnv("${ROLE}/admin", nil); got != "${ROLE}/admin" {
		t.Errorf("${ROLE}/admin: got %q, want literal ${ROLE}/admin", got)
	}
}

// TestExpandWithEnv_PartialRefMiddleOfString: an embedded $ROLE stays literal
// rather than falling back to os.Getenv.
func TestExpandWithEnv_PartialRefMiddleOfString(t *testing.T) {
	if got := expandWithEnv("prefix/$ROLE/suffix", nil); got != "prefix/$ROLE/suffix" {
		t.Errorf("prefix/$ROLE/suffix: got %q, want literal", got)
	}
}
// TestExpandWithEnv_WholeVarInEnv: a whole-string $VAR present in the map
// expands to the map value.
func TestExpandWithEnv_WholeVarInEnv(t *testing.T) {
	if got := expandWithEnv("$FOO", map[string]string{"FOO": "barvalue"}); got != "barvalue" {
		t.Errorf("$FOO with FOO=barvalue: got %q, want barvalue", got)
	}
}

// TestExpandWithEnv_WholeVarBracedInEnv: same for the ${VAR} form.
func TestExpandWithEnv_WholeVarBracedInEnv(t *testing.T) {
	if got := expandWithEnv("${FOO}", map[string]string{"FOO": "barvalue"}); got != "barvalue" {
		t.Errorf("${FOO} with FOO=barvalue: got %q, want barvalue", got)
	}
}
// TestExpandWithEnv_WholeVarNotInEnvBare: a whole-string $VAR missing from the
// map falls back to os.Getenv. The host may or may not define it, so the only
// stable assertion is that the literal reference does not survive.
func TestExpandWithEnv_WholeVarNotInEnvBare(t *testing.T) {
	if got := expandWithEnv("$UNDEFINED_VAR_9Z", nil); got == "$UNDEFINED_VAR_9Z" {
		t.Errorf("$UNDEFINED_VAR_9Z: should expand (whole-string fallback to os.Getenv), got literal")
	}
}

// TestExpandWithEnv_WholeVarNotInEnvBraced: same fallback for ${VAR}.
func TestExpandWithEnv_WholeVarNotInEnvBraced(t *testing.T) {
	if got := expandWithEnv("${UNDEFINED_VAR_9Z}", nil); got == "${UNDEFINED_VAR_9Z}" {
		t.Errorf("${UNDEFINED_VAR_9Z}: should expand (whole-string fallback to os.Getenv), got literal")
	}
}
func TestExpandWithEnv_EmptyString(t *testing.T) {
got := expandWithEnv("", map[string]string{"FOO": "bar"})
if got != "" {
t.Errorf("empty string: got %q, want empty", got)
}
}
func TestExpandWithEnv_NoVarRefs(t *testing.T) {
got := expandWithEnv("plain string with no vars", map[string]string{"FOO": "bar"})
if got != "plain string with no vars" {
t.Errorf("plain string: got %q, want unchanged", got)
}
}
func TestExpandWithEnv_MultipleVarRefs(t *testing.T) {
// Two vars, both whole — both expand from env.
env := map[string]string{"A": "alpha", "B": "beta"}
got := expandWithEnv("$A and $B and more", env)
if got != "alpha and beta and more" {
t.Errorf("multiple vars: got %q, want alpha and beta and more", got)
}
}
func TestExpandWithEnv_NumericVarRef(t *testing.T) {
// $5 — starts with digit, not a valid identifier start.
// Must return the literal "$5", not expand via os.Getenv.
got := expandWithEnv("$5", map[string]string{"5": "five"})
if got != "$5" {
t.Errorf("$5: got %q, want literal $5", got)
}
}
func TestExpandWithEnv_DollarEscape(t *testing.T) {
// $$ → both $ written literally (each $ is not followed by an identifier char,
// so it is written as-is). No special escape sequence for $$.
got := expandWithEnv("$$", nil)
if got != "$$" {
t.Errorf("$$: got %q, want literal $$", got)
}
}
func TestExpandWithEnv_MixedPartialAndWhole(t *testing.T) {
// $A is in env (whole), $HOME is partial — only $A expands.
env := map[string]string{"A": "alpha"}
got := expandWithEnv("$A at $HOME", env)
if got != "alpha at $HOME" {
t.Errorf("$A at $HOME: got %q, want alpha at $HOME", got)
}
}

View File

@ -1,191 +0,0 @@
package handlers
import (
"errors"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// walkOrgWorkspaceNames tests — the walker collects every non-empty workspace
// name depth-first, visiting each parent before its children.

func TestWalkOrgWorkspaceNames_EmptySlice(t *testing.T) {
	var collected []string
	walkOrgWorkspaceNames([]OrgWorkspace{}, &collected)
	assert.Empty(t, collected)
}

func TestWalkOrgWorkspaceNames_SingleNode(t *testing.T) {
	var collected []string
	walkOrgWorkspaceNames([]OrgWorkspace{{Name: "my-workspace"}}, &collected)
	assert.Equal(t, []string{"my-workspace"}, collected)
}

func TestWalkOrgWorkspaceNames_SingleNodeEmptyName(t *testing.T) {
	var collected []string
	walkOrgWorkspaceNames([]OrgWorkspace{{Name: ""}}, &collected)
	assert.Empty(t, collected)
}

func TestWalkOrgWorkspaceNames_NestedChildren(t *testing.T) {
	tree := []OrgWorkspace{{
		Name:     "parent",
		Children: []OrgWorkspace{{Name: "child-a"}, {Name: "child-b"}},
	}}
	var collected []string
	walkOrgWorkspaceNames(tree, &collected)
	assert.Equal(t, []string{"parent", "child-a", "child-b"}, collected)
}

func TestWalkOrgWorkspaceNames_DeeplyNested(t *testing.T) {
	// Four levels deep, built bottom-up; each level contributes its name
	// in walk order.
	leaf := []OrgWorkspace{{Name: "level3"}}
	lvl2 := []OrgWorkspace{{Name: "level2", Children: leaf}}
	lvl1 := []OrgWorkspace{{Name: "level1", Children: lvl2}}
	tree := []OrgWorkspace{{Name: "level0", Children: lvl1}}
	var collected []string
	walkOrgWorkspaceNames(tree, &collected)
	assert.Equal(t, []string{"level0", "level1", "level2", "level3"}, collected)
}

func TestWalkOrgWorkspaceNames_SkipsEmptyNames(t *testing.T) {
	var collected []string
	walkOrgWorkspaceNames([]OrgWorkspace{{Name: "a"}, {Name: ""}, {Name: "b"}}, &collected)
	assert.Equal(t, []string{"a", "b"}, collected)
}

func TestWalkOrgWorkspaceNames_Siblings(t *testing.T) {
	var collected []string
	walkOrgWorkspaceNames([]OrgWorkspace{{Name: "team"}, {Name: "alpha"}, {Name: "beta"}}, &collected)
	assert.Equal(t, []string{"team", "alpha", "beta"}, collected)
}

func TestWalkOrgWorkspaceNames_MultipleRoots(t *testing.T) {
	tree := []OrgWorkspace{
		{Name: "root-a", Children: []OrgWorkspace{{Name: "child-a"}}},
		{Name: "root-b", Children: []OrgWorkspace{{Name: "child-b"}}},
	}
	var collected []string
	walkOrgWorkspaceNames(tree, &collected)
	assert.Equal(t, []string{"root-a", "child-a", "root-b", "child-b"}, collected)
}

func TestWalkOrgWorkspaceNames_SpawningFalseStillWalks(t *testing.T) {
	// Subtrees with Spawning == false are still visited; empty names
	// inside them are still skipped.
	yes, no := true, false
	tree := []OrgWorkspace{{
		Name: "parent",
		Children: []OrgWorkspace{
			{Name: "spawning-child", Spawning: &yes},
			{Name: "non-spawning-child", Spawning: &no},
			{Name: ""},
		},
	}}
	var collected []string
	walkOrgWorkspaceNames(tree, &collected)
	assert.Equal(t, []string{"parent", "spawning-child", "non-spawning-child"}, collected)
}
// resolveProvisionConcurrency tests — env-var parsing with sensible fallback.
func TestResolveProvisionConcurrency_Default(t *testing.T) {
os.Unsetenv("MOLECULE_PROVISION_CONCURRENCY")
defer os.Unsetenv("MOLECULE_PROVISION_CONCURRENCY")
val := resolveProvisionConcurrency()
assert.Equal(t, defaultProvisionConcurrency, val)
}
func TestResolveProvisionConcurrency_ValidPositiveInt(t *testing.T) {
os.Setenv("MOLECULE_PROVISION_CONCURRENCY", "5")
defer os.Unsetenv("MOLECULE_PROVISION_CONCURRENCY")
val := resolveProvisionConcurrency()
assert.Equal(t, 5, val)
}
func TestResolveProvisionConcurrency_ZeroUnlimited(t *testing.T) {
os.Setenv("MOLECULE_PROVISION_CONCURRENCY", "0")
defer os.Unsetenv("MOLECULE_PROVISION_CONCURRENCY")
val := resolveProvisionConcurrency()
// Zero is mapped to 1<<20 (unlimited semantics with finite cap)
assert.Equal(t, 1<<20, val)
}
func TestResolveProvisionConcurrency_NegativeFallsBack(t *testing.T) {
os.Setenv("MOLECULE_PROVISION_CONCURRENCY", "-1")
defer os.Unsetenv("MOLECULE_PROVISION_CONCURRENCY")
val := resolveProvisionConcurrency()
assert.Equal(t, defaultProvisionConcurrency, val)
}
func TestResolveProvisionConcurrency_NonIntegerFallsBack(t *testing.T) {
os.Setenv("MOLECULE_PROVISION_CONCURRENCY", "not-a-number")
defer os.Unsetenv("MOLECULE_PROVISION_CONCURRENCY")
val := resolveProvisionConcurrency()
assert.Equal(t, defaultProvisionConcurrency, val)
}
func TestResolveProvisionConcurrency_WhitespaceOnly(t *testing.T) {
os.Setenv("MOLECULE_PROVISION_CONCURRENCY", " ")
defer os.Unsetenv("MOLECULE_PROVISION_CONCURRENCY")
val := resolveProvisionConcurrency()
assert.Equal(t, defaultProvisionConcurrency, val)
}
func TestResolveProvisionConcurrency_LargeValue(t *testing.T) {
os.Setenv("MOLECULE_PROVISION_CONCURRENCY", "10000")
defer os.Unsetenv("MOLECULE_PROVISION_CONCURRENCY")
val := resolveProvisionConcurrency()
assert.Equal(t, 10000, val)
}
// errString tests — nil-safe error-to-string wrapper.
func TestErrString_NilError(t *testing.T) {
result := errString(nil)
assert.Equal(t, "", result)
}
func TestErrString_WithError(t *testing.T) {
err := errors.New("something went wrong")
result := errString(err)
assert.Equal(t, "something went wrong", result)
}
func TestErrString_EmptyError(t *testing.T) {
err := errors.New("")
result := errString(err)
assert.Equal(t, "", result)
}

View File

@ -1,8 +1,6 @@
package handlers
import (
"errors"
"fmt"
"sort"
"strings"
"testing"
@ -356,6 +354,12 @@ func TestExpandWithEnv_UnsetVar(t *testing.T) {
}
}
func TestHasUnresolvedVarRef_NoVars(t *testing.T) {
if hasUnresolvedVarRef("plain text", "plain text") {
t.Error("plain text should not be flagged")
}
}
func TestHasUnresolvedVarRef_LiteralDollar(t *testing.T) {
// "$5" is a literal price, not a var ref — should NOT be flagged
if hasUnresolvedVarRef("price: $5", "price: $5") {
@ -363,6 +367,20 @@ func TestHasUnresolvedVarRef_LiteralDollar(t *testing.T) {
}
}
func TestHasUnresolvedVarRef_Resolved(t *testing.T) {
// Original had ${VAR}, expanded to "value" — fully resolved
if hasUnresolvedVarRef("${VAR}", "value") {
t.Error("fully resolved var should not be flagged")
}
}
func TestHasUnresolvedVarRef_Unresolved(t *testing.T) {
// Original had ${VAR}, expanded to "" — unresolved
if !hasUnresolvedVarRef("${VAR}", "") {
t.Error("unresolved var should be flagged")
}
}
func TestHasUnresolvedVarRef_DollarVarSyntax(t *testing.T) {
// $VAR syntax (no braces) — also a real ref
if !hasUnresolvedVarRef("$MISSING_VAR", "") {
@ -1058,71 +1076,3 @@ func TestCollectOrgEnv_AnyOfWithInvalidMemberKeepsValidOnes(t *testing.T) {
t.Errorf("expected VALID_ONE to survive, got %v", reqNames(req))
}
}
func TestResolveProvisionConcurrency_ValidPositive(t *testing.T) {
t.Setenv("MOLECULE_PROVISION_CONCURRENCY", "8")
got := resolveProvisionConcurrency()
if got != 8 {
t.Errorf("valid positive: got %d, want 8", got)
}
}
func TestResolveProvisionConcurrency_Zero(t *testing.T) {
t.Setenv("MOLECULE_PROVISION_CONCURRENCY", "0")
got := resolveProvisionConcurrency()
if got != 1<<20 {
t.Errorf("zero (unlimited): got %d, want %d", got, 1<<20)
}
}
func TestResolveProvisionConcurrency_Negative(t *testing.T) {
t.Setenv("MOLECULE_PROVISION_CONCURRENCY", "-5")
got := resolveProvisionConcurrency()
if got != defaultProvisionConcurrency {
t.Errorf("negative: got %d, want default %d", got, defaultProvisionConcurrency)
}
}
func TestResolveProvisionConcurrency_NonInteger(t *testing.T) {
t.Setenv("MOLECULE_PROVISION_CONCURRENCY", "abc")
got := resolveProvisionConcurrency()
if got != defaultProvisionConcurrency {
t.Errorf("non-integer: got %d, want default %d", got, defaultProvisionConcurrency)
}
}
func TestResolveProvisionConcurrency_Whitespace(t *testing.T) {
t.Setenv("MOLECULE_PROVISION_CONCURRENCY", " 7 ")
got := resolveProvisionConcurrency()
if got != 7 {
t.Errorf("whitespace: got %d, want 7", got)
}
}
// ─────────────────────────────────────────────────────────────────────────────
// errString tests
// ─────────────────────────────────────────────────────────────────────────────
func TestErrString_Nil(t *testing.T) {
got := errString(nil)
if got != "" {
t.Errorf("nil error: got %q, want empty string", got)
}
}
func TestErrString_NonNil(t *testing.T) {
err := fmt.Errorf("something went wrong")
got := errString(err)
if got != "something went wrong" {
t.Errorf("non-nil error: got %q, want %q", got, "something went wrong")
}
}
func TestErrString_Wrapped(t *testing.T) {
inner := errors.New("inner")
err := fmt.Errorf("outer: %w", inner)
got := errString(err)
if !strings.Contains(got, "outer") {
t.Errorf("wrapped error: got %q, want containing 'outer'", got)
}
}

View File

@ -1,310 +0,0 @@
package handlers
// plugins_atomic_tar_test.go — unit tests for tarWalk (the only non-trivial
// function in plugins_atomic_tar.go). The file contains only pure tar-walk
// logic with no DB or HTTP dependencies, so tests use real temp directories
// with no mocking.
import (
"archive/tar"
"bytes"
"io"
"os"
"path/filepath"
"strings"
"testing"
)
// ─── newTarWriter ─────────────────────────────────────────────────────────────
// newTarWriter must hand back a usable *tar.Writer: a header plus payload can
// be written and the archive closed without error.
func TestNewTarWriter_Basic(t *testing.T) {
	var sink bytes.Buffer
	tw := newTarWriter(&sink)
	if tw == nil {
		t.Fatal("newTarWriter returned nil")
	}
	// Prove the writer works end-to-end: header, body, close.
	header := &tar.Header{Name: "test.txt", Mode: 0644, Size: 5}
	if err := tw.WriteHeader(header); err != nil {
		t.Fatalf("WriteHeader failed: %v", err)
	}
	if _, err := tw.Write([]byte("hello")); err != nil {
		t.Fatalf("Write failed: %v", err)
	}
	if err := tw.Close(); err != nil {
		t.Fatalf("Close failed: %v", err)
	}
}
// ─── tarWalk: empty directory ─────────────────────────────────────────────────
// An empty source directory still yields exactly one archive entry — the
// directory itself — whose name must carry the trailing '/'.
func TestTarWalk_EmptyDir(t *testing.T) {
	root := t.TempDir()
	var archive bytes.Buffer
	tw := tar.NewWriter(&archive)
	if err := tarWalk(root, "prefix", tw); err != nil {
		t.Fatalf("tarWalk error: %v", err)
	}
	if err := tw.Close(); err != nil {
		t.Fatalf("tw.Close error: %v", err)
	}
	tr := tar.NewReader(&archive)
	first, err := tr.Next()
	if err != nil {
		t.Fatalf("expected at least the dir header, got error: %v", err)
	}
	if !strings.HasSuffix(first.Name, "/") {
		t.Errorf("expected directory name ending in '/', got %q", first.Name)
	}
	// Exactly one entry: the next read must be EOF.
	if _, err := tr.Next(); err != io.EOF {
		t.Errorf("expected only one header, got more: %v", err)
	}
}
// ─── tarWalk: single file ─────────────────────────────────────────────────────
// A directory with one file must yield exactly two entries — the prefixed dir
// header and the file — and the file's bytes must round-trip intact.
func TestTarWalk_SingleFile(t *testing.T) {
	tmp := t.TempDir()
	if err := os.WriteFile(filepath.Join(tmp, "hello.txt"), []byte("world"), 0644); err != nil {
		t.Fatal(err)
	}
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tarWalk(tmp, "mydir", tw); err != nil {
		t.Fatalf("tarWalk error: %v", err)
	}
	if err := tw.Close(); err != nil {
		t.Fatal(err)
	}
	// Should have 2 entries: the dir prefix, then hello.txt.
	entries := 0
	names := []string{}
	rdr := tar.NewReader(&buf)
	for {
		hdr, err := rdr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatalf("unexpected error reading tar: %v", err)
		}
		entries++
		names = append(names, hdr.Name)
		if hdr.Name == "mydir/hello.txt" {
			if hdr.Size != 5 {
				t.Errorf("expected size 5, got %d", hdr.Size)
			}
			content := make([]byte, 5)
			// io.ReadFull: a bare rdr.Read may legally return fewer than
			// 5 bytes without error, making the comparison below flaky.
			if _, err := io.ReadFull(rdr, content); err != nil {
				t.Fatalf("read error: %v", err)
			}
			if string(content) != "world" {
				t.Errorf("expected 'world', got %q", string(content))
			}
		}
	}
	if entries != 2 {
		t.Errorf("expected 2 entries, got %d: %v", entries, names)
	}
}
// ─── tarWalk: nested directories ───────────────────────────────────────────────
// Deeply nested directories: the leaf file must survive with its full
// prefixed path and exact content. Fix: the original read each entry with an
// unchecked rdr.Read, which both ignores errors and may short-read; use
// io.ReadFull with an explicit error check instead.
func TestTarWalk_NestedDirs(t *testing.T) {
	tmp := t.TempDir()
	subdir := filepath.Join(tmp, "a", "b", "c")
	if err := os.MkdirAll(subdir, 0755); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(subdir, "deep.txt"), []byte("nested"), 0644); err != nil {
		t.Fatal(err)
	}
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tarWalk(tmp, "root", tw); err != nil {
		t.Fatalf("tarWalk error: %v", err)
	}
	if err := tw.Close(); err != nil {
		t.Fatal(err)
	}
	// Collect all file paths (not dirs) with content.
	files := map[string]string{}
	rdr := tar.NewReader(&buf)
	for {
		hdr, err := rdr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		if !strings.HasSuffix(hdr.Name, "/") && hdr.Size > 0 {
			content := make([]byte, hdr.Size)
			// Checked full read — a silent short read here would corrupt
			// the content comparison below.
			if _, err := io.ReadFull(rdr, content); err != nil {
				t.Fatalf("reading %q: %v", hdr.Name, err)
			}
			files[hdr.Name] = string(content)
		}
	}
	expected := "root/a/b/c/deep.txt"
	if _, ok := files[expected]; !ok {
		t.Errorf("expected file %q in tar; got: %v", expected, files)
	} else if files[expected] != "nested" {
		t.Errorf("expected content 'nested', got %q", files[expected])
	}
}
// ─── tarWalk: symlinks are skipped ────────────────────────────────────────────
// Symlinks in the source tree are skipped entirely — only the real file
// should appear in the archive.
func TestTarWalk_SymlinksSkipped(t *testing.T) {
	tmp := t.TempDir()
	realPath := filepath.Join(tmp, "real.txt")
	if err := os.WriteFile(realPath, []byte("real content"), 0644); err != nil {
		t.Fatal(err)
	}
	if err := os.Symlink(realPath, filepath.Join(tmp, "link.txt")); err != nil {
		t.Fatal(err)
	}
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tarWalk(tmp, "prefix", tw); err != nil {
		t.Fatalf("tarWalk error: %v", err)
	}
	if err := tw.Close(); err != nil {
		t.Fatal(err)
	}
	// Gather every entry name, then verify none of them mention the link.
	var names []string
	tr := tar.NewReader(&buf)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		names = append(names, hdr.Name)
	}
	for _, name := range names {
		if strings.Contains(name, "link") {
			t.Errorf("symlink should be skipped; got names: %v", names)
			break
		}
	}
}
// ─── tarWalk: prefix trailing slash is normalized ─────────────────────────────
// A prefix passed WITH a trailing slash must produce the same paths as one
// without: "foo/f.txt", never "foo//f.txt".
func TestTarWalk_PrefixTrailingSlashNormalized(t *testing.T) {
	tmp := t.TempDir()
	if err := os.WriteFile(filepath.Join(tmp, "f.txt"), []byte("x"), 0644); err != nil {
		t.Fatal(err)
	}
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tarWalk(tmp, "foo/", tw); err != nil {
		t.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		t.Fatal(err)
	}
	tr := tar.NewReader(&buf)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		// Only the file entry matters; directory entries end in '/'.
		if strings.HasSuffix(hdr.Name, "/") || !strings.Contains(hdr.Name, "f.txt") {
			continue
		}
		if strings.Contains(hdr.Name, "//") {
			t.Errorf("double slash found in path %q — trailing slash not normalized", hdr.Name)
		}
		if !strings.HasPrefix(hdr.Name, "foo/") {
			t.Errorf("expected path to start with 'foo/', got %q", hdr.Name)
		}
	}
}
// ─── tarWalk: prefix = "." emits flat paths ───────────────────────────────────
// With prefix ".", filepath.Clean drops the "./" — emitted file paths must
// not start with "./".
func TestTarWalk_PrefixDotEmitsFlatPaths(t *testing.T) {
	tmp := t.TempDir()
	subdir := filepath.Join(tmp, "sub")
	if err := os.MkdirAll(subdir, 0755); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(subdir, "file.txt"), []byte("data"), 0644); err != nil {
		t.Fatal(err)
	}
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tarWalk(tmp, ".", tw); err != nil {
		t.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		t.Fatal(err)
	}
	tr := tar.NewReader(&buf)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		if strings.HasSuffix(hdr.Name, "/") || !strings.Contains(hdr.Name, "file.txt") {
			continue
		}
		if strings.HasPrefix(hdr.Name, "./") {
			t.Errorf("prefix '.' should not emit './' prefix; got %q", hdr.Name)
		}
	}
}
// ─── tarWalk: walk error propagates ───────────────────────────────────────────
// tarWalk must surface the filesystem error when the root does not exist.
func TestTarWalk_NonexistentDir(t *testing.T) {
	missing := filepath.Join(t.TempDir(), "does-not-exist")
	var sink bytes.Buffer
	if err := tarWalk(missing, "x", tar.NewWriter(&sink)); err == nil {
		t.Error("expected error for nonexistent directory, got nil")
	}
}

View File

@ -215,6 +215,51 @@ func TestTarWalk_EmptyDirectory(t *testing.T) {
}
}
// TestTarWalk_NestedDirs: deeply nested directories produce all intermediate
// dir entries plus leaf entries. This exercises the recursive walk.
// Membership is checked via a set of emitted names instead of the original
// nested loops — one O(entries) pass rather than O(expected × entries),
// with identical failure messages.
func TestTarWalk_NestedDirs(t *testing.T) {
	hostDir := t.TempDir()
	deep := filepath.Join(hostDir, "a", "b", "c")
	if err := os.MkdirAll(deep, 0o755); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(deep, "leaf.txt"), []byte("content"), 0o644); err != nil {
		t.Fatal(err)
	}
	var buf bytes.Buffer
	tw := newTarWriter(&buf)
	if err := tarWalk(hostDir, "configs/plugins/.staging", tw); err != nil {
		t.Fatalf("tarWalk: %v", err)
	}
	if err := tw.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
	entries := readTarNames(&buf)
	// Must include: prefix/, prefix/a/, prefix/a/b/, prefix/a/b/c/, prefix/a/b/c/leaf.txt
	expected := []string{
		"configs/plugins/.staging/",
		"configs/plugins/.staging/a/",
		"configs/plugins/.staging/a/b/",
		"configs/plugins/.staging/a/b/c/",
		"configs/plugins/.staging/a/b/c/leaf.txt",
	}
	if len(entries) != len(expected) {
		t.Errorf("nested dirs: got %d entries; want %d: %v", len(entries), len(expected), entries)
	}
	seen := make(map[string]bool, len(entries))
	for _, g := range entries {
		seen[g] = true
	}
	for _, e := range expected {
		if !seen[e] {
			t.Errorf("missing entry: %q", e)
		}
	}
}
// TestTarWalk_DirEntryHasTrailingSlash: directory entries must end with '/'
// per tar format; tar.Header.Typeflag '5' (dir) must produce "name/" not "name".
func TestTarWalk_DirEntryHasTrailingSlash(t *testing.T) {

View File

@ -1,80 +0,0 @@
package handlers
import (
"testing"
"github.com/stretchr/testify/assert"
)
// supportsRuntime tests — plugin runtime compatibility. Matching is equality
// after hyphen/underscore normalization; an empty (or nil) Runtimes list
// means "unspecified: try the plugin on any runtime".

func TestSupportsRuntime_EmptyRuntimes(t *testing.T) {
	// Nil Runtimes = unspecified → compatible with everything.
	p := pluginInfo{Name: "test", Runtimes: nil}
	assert.True(t, p.supportsRuntime("claude_code"))
	assert.True(t, p.supportsRuntime("any_runtime"))
}

func TestSupportsRuntime_ExactMatch(t *testing.T) {
	p := pluginInfo{Name: "test", Runtimes: []string{"claude_code", "anthropic"}}
	assert.True(t, p.supportsRuntime("claude_code"))
	assert.True(t, p.supportsRuntime("anthropic"))
}

func TestSupportsRuntime_NoMatch(t *testing.T) {
	p := pluginInfo{Name: "test", Runtimes: []string{"claude_code"}}
	assert.False(t, p.supportsRuntime("openai"))
}

func TestSupportsRuntime_HyphenUnderscoreNormalized(t *testing.T) {
	// "claude-code" and "claude_code" compare equal after normalization.
	p := pluginInfo{Name: "test", Runtimes: []string{"claude-code"}}
	assert.True(t, p.supportsRuntime("claude_code"))
	assert.True(t, p.supportsRuntime("claude-code")) // symmetric hyphen form
}

func TestSupportsRuntime_HyphenVsUnderscoreReverse(t *testing.T) {
	// Plugin declares the underscore form; runtime asks with the hyphen form.
	p := pluginInfo{Name: "test", Runtimes: []string{"claude_code"}}
	assert.True(t, p.supportsRuntime("claude-code"))
}

func TestSupportsRuntime_EmptyStringRuntime(t *testing.T) {
	// An empty runtime string matches no declared runtime.
	p := pluginInfo{Name: "test", Runtimes: []string{"claude_code"}}
	assert.False(t, p.supportsRuntime(""))
}

func TestSupportsRuntime_SingleRuntimeMatch(t *testing.T) {
	// With several declared runtimes, one match is sufficient.
	p := pluginInfo{Name: "test", Runtimes: []string{"python", "nodejs", "claude_code"}}
	assert.True(t, p.supportsRuntime("claude_code"))
	assert.False(t, p.supportsRuntime("ruby"))
}

func TestSupportsRuntime_AllHyphenForms(t *testing.T) {
	// Hyphen form on both sides.
	p := pluginInfo{Name: "test", Runtimes: []string{"claude-code"}}
	assert.True(t, p.supportsRuntime("claude-code"))
}

func TestSupportsRuntime_MultipleHyphenNormalization(t *testing.T) {
	// Every hyphen/underscore position normalizes, not just the first.
	p := pluginInfo{Name: "test", Runtimes: []string{"some-runtime-name"}}
	assert.True(t, p.supportsRuntime("some_runtime_name"))
	assert.True(t, p.supportsRuntime("some-runtime-name"))
}

func TestSupportsRuntime_EmptyPluginRuntimesWithAnyInput(t *testing.T) {
	// An explicitly empty slice behaves like nil: try it regardless.
	p := pluginInfo{Name: "test", Runtimes: []string{}}
	assert.True(t, p.supportsRuntime(""))
	assert.True(t, p.supportsRuntime("any"))
	assert.True(t, p.supportsRuntime("unknown"))
}

func TestSupportsRuntime_ZeroLengthRuntimes(t *testing.T) {
	// Zero-value struct (nil Runtimes) is also "unspecified".
	p := pluginInfo{Name: "test"}
	assert.True(t, p.supportsRuntime("anything"))
}

View File

@ -342,11 +342,6 @@ func TestPluginInstall_InstanceLookupError_Returns503(t *testing.T) {
// ---------- dispatch: uninstall ----------
func TestPluginUninstall_SaaS_DispatchesToEIC(t *testing.T) {
mock := setupTestDB(t)
mock.ExpectExec("DELETE FROM workspace_plugins WHERE workspace_id").
WithArgs("ws-1", "browser-automation").
WillReturnResult(sqlmock.NewResult(0, 1))
stubReadPluginManifestViaEIC(t, func(ctx context.Context, instanceID, runtime, pluginName string) ([]byte, error) {
return []byte("name: browser-automation\nskills:\n - browse\n"), nil
})

View File

@ -629,9 +629,6 @@ func TestPluginInstall_RejectsUnknownScheme(t *testing.T) {
}
func TestPluginInstall_LocalSourceReachesContainerLookup(t *testing.T) {
mock := setupTestDB(t)
expectAllowlistAllowAll(mock)
base := t.TempDir()
pluginDir := filepath.Join(base, "demo")
_ = os.MkdirAll(pluginDir, 0o755)
@ -958,14 +955,14 @@ func TestLogInstallLimitsOnce(t *testing.T) {
func TestRegexpEscapeForAwk(t *testing.T) {
cases := map[string]string{
"my-plugin": `my-plugin`,
"# Plugin: foo /": `# Plugin: foo \/`,
"# Plugin: a.b /": `# Plugin: a\.b \/`,
"foo[bar]": `foo\[bar\]`,
"a*b+c?": `a\*b\+c\?`,
"path|with|pipes": `path\|with\|pipes`,
`back\slash`: `back\\slash`,
"": ``,
"my-plugin": `my-plugin`,
"# Plugin: foo /": `# Plugin: foo \/`,
"# Plugin: a.b /": `# Plugin: a\.b \/`,
"foo[bar]": `foo\[bar\]`,
"a*b+c?": `a\*b\+c\?`,
"path|with|pipes": `path\|with\|pipes`,
`back\slash`: `back\\slash`,
"": ``,
}
for in, want := range cases {
got := regexpEscapeForAwk(in)
@ -1250,7 +1247,7 @@ func TestPluginDownload_GithubSchemeStreamsTarball(t *testing.T) {
scheme: "github",
fetchFn: func(_ context.Context, _ string, dst string) (string, error) {
files := map[string]string{
"plugin.yaml": "name: remote-plugin\nversion: 1.0.0\n",
"plugin.yaml": "name: remote-plugin\nversion: 1.0.0\n",
"skills/x/SKILL.md": "---\nname: x\n---\n",
"adapters/claude_code.py": "from plugins_registry.builtins import AgentskillsAdaptor as Adaptor\n",
}

View File

@ -58,7 +58,7 @@ func (h *WorkspaceHandler) gracefulPreRestart(ctx context.Context, workspaceID s
// Non-blocking send — don't stall the restart cycle.
// Run in a detached goroutine so the caller (runRestartCycle) can
// proceed to stopForRestart without waiting.
h.goAsync(func() {
go func() {
signalCtx, cancel := context.WithTimeout(context.Background(), restartSignalTimeout)
defer cancel()
@ -109,7 +109,7 @@ func (h *WorkspaceHandler) gracefulPreRestart(ctx context.Context, workspaceID s
} else {
log.Printf("A2AGracefulRestart: %s returned status %d — proceeding with stop", workspaceID, resp.StatusCode)
}
})
}()
}
// resolveAgentURLForRestartSignal returns the routable URL for the workspace

View File

@ -271,7 +271,6 @@ func TestGracefulPreRestart_URLResolutionError(t *testing.T) {
WorkspaceHandler: newHandlerWithTestDeps(t),
errToReturn: context.DeadlineExceeded,
}
waitForHandlerAsyncBeforeDBCleanup(t, hWrapper.WorkspaceHandler)
hWrapper.gracefulPreRestart(context.Background(), "ws-url-err-111")
time.Sleep(200 * time.Millisecond)

View File

@ -63,9 +63,6 @@ func (h *SecretsHandler) List(c *gin.Context) {
"updated_at": updatedAt,
})
}
if err := rows.Err(); err != nil {
log.Printf("List secrets rows.Err: %v", err)
}
// 2. Global secrets not overridden at workspace level
globalRows, err := db.DB.QueryContext(ctx,
@ -94,9 +91,6 @@ func (h *SecretsHandler) List(c *gin.Context) {
"updated_at": updatedAt,
})
}
if err := globalRows.Err(); err != nil {
log.Printf("List secrets (global) rows.Err: %v", err)
}
c.JSON(http.StatusOK, secrets)
}
@ -180,9 +174,6 @@ func (h *SecretsHandler) Values(c *gin.Context) {
out[k] = string(decrypted)
}
}
if err := globalRows.Err(); err != nil {
log.Printf("secrets.Values globalRows.Err: %v", err)
}
}
wsRows, wErr := db.DB.QueryContext(ctx,
@ -204,9 +195,6 @@ func (h *SecretsHandler) Values(c *gin.Context) {
out[k] = string(decrypted) // workspace override wins over global
}
}
if err := wsRows.Err(); err != nil {
log.Printf("secrets.Values wsRows.Err: %v", err)
}
}
if len(failedKeys) > 0 {
@ -336,9 +324,6 @@ func (h *SecretsHandler) ListGlobal(c *gin.Context) {
"scope": "global",
})
}
if err := rows.Err(); err != nil {
log.Printf("ListGlobal rows.Err: %v", err)
}
c.JSON(http.StatusOK, secrets)
}
@ -415,9 +400,6 @@ func (h *SecretsHandler) restartAllAffectedByGlobalKey(key string) {
ids = append(ids, id)
}
}
if err := rows.Err(); err != nil {
log.Printf("restartAllAffectedByGlobalKey rows.Err: %v", err)
}
if len(ids) == 0 {
return
}

Some files were not shown because too many files have changed in this diff Show More