Compare commits

..

No commits in common. "main" and "test/canvas-workspacenode-coverage" have entirely different histories.

192 changed files with 1165 additions and 25306 deletions

View File

@ -49,11 +49,6 @@ if [ "$MERGED" != "true" ]; then
exit 0
fi
# NOTE: no || true — with set -euo pipefail, jq parse failures (e.g. field
# missing from API response) propagate as hard errors. Use jq's // operator
# for graceful defaults instead of bash || true guards. This was re-added by
# 8c343e3a ("fix(gitea): add || true guards to jq pipelines") — reverted
# here because the guards mask silent failures that hide malformed API responses.
MERGE_SHA=$(echo "$PR" | jq -r '.merge_commit_sha // empty')
MERGED_BY=$(echo "$PR" | jq -r '.merged_by.login // "unknown"')
TITLE=$(echo "$PR" | jq -r '.title // ""')
@ -102,9 +97,6 @@ fi
# 5. Emit structured audit event.
NOW=$(date -u +%Y-%m-%dT%H:%M:%SZ)
# jq -R (raw input) converts each line to a JSON string; jq -s wraps into array.
# If FAILED_CHECKS is unexpectedly empty (shouldn't happen — we exit above),
# this produces []. No || true needed.
FAILED_JSON=$(printf '%s\n' "${FAILED_CHECKS[@]}" | jq -R . | jq -s .)
# Print as a single-line JSON so Vector's parse_json transform can pick

View File

@ -301,19 +301,7 @@ def expected_context(job_key: str, workflow_name: str = "ci") -> str:
# Drift detection
# --------------------------------------------------------------------------
def detect_drift(branch: str) -> tuple[list[str], dict]:
"""Returns (findings, debug). Empty findings == no drift.
Raises:
ApiError: propagated from the protection fetch only when the
failure is likely a transient Gitea outage (5xx).
403/404 from the protection endpoint is treated as
"cannot determine drift for this branch" — a token-
scope issue (missing repo-admin on DRIFT_BOT_TOKEN) or
a repo with no protection set should not turn the
hourly cron red. The workflow continues to the next
branch; no [ci-drift] issue is filed for a branch
whose protection cannot be read.
"""
"""Returns (findings, debug). Empty findings == no drift."""
findings: list[str] = []
ci_doc = load_yaml(CI_WORKFLOW_PATH)
@ -325,50 +313,9 @@ def detect_drift(branch: str) -> tuple[list[str], dict]:
env_set = required_checks_env(audit_doc)
# Protection
# api() raises ApiError on non-2xx. Transient 5xx should fail loud.
# 403/404 means the token lacks repo-admin scope (Gitea 1.22.6's
# branch_protections endpoint requires it — see DRIFT_BOT_TOKEN
# provisioning trail in ci-required-drift.yml). Treat as
# "cannot determine drift for this branch" — skip without turning
# the workflow red. Surface a clear diagnostic so the operator
# knows what to fix.
contexts: set[str] = set()
protection_path = f"/repos/{OWNER}/{NAME}/branch_protections/{branch}"
try:
_, protection = api("GET", protection_path)
except ApiError as e:
# Isolate the HTTP status from the error message.
http_status: int | None = None
msg = str(e)
# ApiError message format: "{method} {path} → HTTP {status}: {body}"
import re as _re
m = _re.search(r"HTTP (\d{3})", msg)
if m:
http_status = int(m.group(1))
if http_status in (403, 404):
# Token lacks scope OR branch has no protection. Cannot
# determine drift — skip this branch. Do NOT exit non-zero;
# the issue IS the alarm, not a red workflow.
sys.stderr.write(
f"::error::GET {protection_path} returned HTTP {http_status}"
f"DRIFT_BOT_TOKEN lacks repo-admin scope (Gitea 1.22.6 "
f"requires it for this endpoint) OR branch has no protection "
f"configured. Cannot determine drift for {branch}; "
f"skipping. Fix: grant repo-admin to mc-drift-bot or "
f"configure protection on {branch}.\n"
)
debug = {
"branch": branch,
"ci_jobs": sorted(jobs),
"sentinel_needs": sorted(needs),
"protection_contexts_skipped": True,
"protection_http_status": http_status,
"audit_env_checks": sorted(env_set),
}
return [], debug
# 5xx — propagate (transient outage, fail loud per design).
raise
# api() raises ApiError on non-2xx; let it propagate so a transient
# 500 fails the run loudly rather than producing a "no drift" lie.
_, protection = api("GET", f"/repos/{OWNER}/{NAME}/branch_protections/{branch}")
if not isinstance(protection, dict):
sys.stderr.write(
f"::error::protection response for {branch} not a JSON object\n"

View File

@ -1,369 +0,0 @@
#!/usr/bin/env python3
"""gitea-merge-queue — conservative serialized merge bot for Gitea.
Gitea 1.22.6 has auto-merge (`pull_auto_merge`) but no GitHub-style merge
queue. This script provides the missing serialized policy in user space:
1. Pick the oldest open PR carrying QUEUE_LABEL.
2. Refuse to act unless main is green.
3. Refuse fork PRs; the queue may only mutate same-repo branches.
4. If the PR branch does not contain current main, call Gitea's
/pulls/{n}/update endpoint and stop. CI must rerun on the updated head.
5. If the updated PR head has all required contexts green, merge with the
non-bypass merge actor token.
The script is intentionally one-PR-per-run. Workflow/cron concurrency should
serialize invocations so two green PRs cannot merge against the same main.
"""
from __future__ import annotations
import argparse
import dataclasses
import json
import os
import sys
import urllib.error
import urllib.parse
import urllib.request
from typing import Any
def _env(key: str, *, default: str = "") -> str:
return os.environ.get(key, default)
# Runtime configuration — all sourced from the environment so the same
# script works across repos/instances without code changes.
GITEA_TOKEN = _env("GITEA_TOKEN")  # merge-actor token (non-bypass, per module docstring)
GITEA_HOST = _env("GITEA_HOST")    # bare host name, no scheme
REPO = _env("REPO")                # "owner/name"
WATCH_BRANCH = _env("WATCH_BRANCH", default="main")
QUEUE_LABEL = _env("QUEUE_LABEL", default="merge-queue")
HOLD_LABEL = _env("HOLD_LABEL", default="merge-queue-hold")
# Branch-update strategy for /pulls/{n}/update: "merge" or "rebase"
# (validated in _require_runtime_env).
UPDATE_STYLE = _env("UPDATE_STYLE", default="merge")
# Comma-separated commit-status contexts that must all be green to merge.
REQUIRED_CONTEXTS_RAW = _env(
    "REQUIRED_CONTEXTS",
    default=(
        "CI / all-required (pull_request),"
        "sop-checklist / all-items-acked (pull_request)"
    ),
)
# Split "owner/name"; pad with "" so a malformed REPO cannot raise at import.
OWNER, NAME = (REPO.split("/", 1) + [""])[:2] if REPO else ("", "")
API = f"https://{GITEA_HOST}/api/v1" if GITEA_HOST else ""
class ApiError(RuntimeError):
    """Raised by `api` on a non-2xx response, or on a non-JSON body when
    JSON was expected."""
    pass
@dataclasses.dataclass(frozen=True)
class MergeDecision:
    """Outcome of `evaluate_merge_readiness` for a single candidate PR."""
    ready: bool  # True only when the PR may be merged this tick
    action: str  # "pause" | "update" | "wait" | "merge"
    reason: str  # human-readable explanation, logged by the caller
def _require_runtime_env() -> None:
    """Exit 2 with a ::error:: annotation unless the env contract holds."""
    required = ("GITEA_TOKEN", "GITEA_HOST", "REPO", "WATCH_BRANCH", "QUEUE_LABEL")
    for key in required:
        if os.environ.get(key):
            continue
        sys.stderr.write(f"::error::missing required env var: {key}\n")
        sys.exit(2)
    if UPDATE_STYLE in {"merge", "rebase"}:
        return
    sys.stderr.write("::error::UPDATE_STYLE must be merge or rebase\n")
    sys.exit(2)
def api(
    method: str,
    path: str,
    *,
    body: dict | None = None,
    query: dict[str, str] | None = None,
    expect_json: bool = True,
) -> tuple[int, Any]:
    """Call the Gitea REST API and return (status, decoded_body).

    Args:
        method: HTTP verb ("GET", "POST", ...).
        path: API path appended to the /api/v1 base (leading slash).
        body: optional JSON request body.
        query: optional query-string parameters.
        expect_json: when True, a non-JSON response body raises ApiError;
            when False the body is wrapped as {"_raw": <text>}.

    Raises:
        ApiError: on any non-2xx status, and on JSON decode failure when
            expect_json is True. Transport-level failures (DNS, refused
            connection) propagate as urllib exceptions.
    """
    url = f"{API}{path}"
    if query:
        url = f"{url}?{urllib.parse.urlencode(query)}"
    data = None
    headers = {
        "Authorization": f"token {GITEA_TOKEN}",
        "Accept": "application/json",
    }
    if body is not None:
        data = json.dumps(body).encode("utf-8")
        headers["Content-Type"] = "application/json"
    req = urllib.request.Request(url, method=method, data=data, headers=headers)
    try:
        with urllib.request.urlopen(req, timeout=30) as exc_resp:
            raw = exc_resp.read()
            status = exc_resp.status
    except urllib.error.HTTPError as exc:
        # HTTPError still carries a readable body; keep it so the
        # ApiError snippet below shows what the server actually said.
        raw = exc.read()
        status = exc.code
    if not (200 <= status < 300):
        # Truncate so a huge HTML error page doesn't flood the log.
        snippet = raw[:500].decode("utf-8", errors="replace") if raw else ""
        raise ApiError(f"{method} {path} -> HTTP {status}: {snippet}")
    if not raw:
        return status, None  # e.g. 204 No Content
    try:
        return status, json.loads(raw)
    except json.JSONDecodeError as exc:
        if expect_json:
            raise ApiError(f"{method} {path} -> HTTP {status} non-JSON: {exc}") from exc
        return status, {"_raw": raw.decode("utf-8", errors="replace")}
def required_contexts(raw: str) -> list[str]:
    """Split a comma-separated context list, trimming whitespace and
    dropping empty entries."""
    contexts: list[str] = []
    for piece in raw.split(","):
        piece = piece.strip()
        if piece:
            contexts.append(piece)
    return contexts
def status_state(status: dict) -> str:
    """Normalized state of a commit status: prefer `status`, fall back to
    `state`, lower-cased; "" when neither is present."""
    value = status.get("status") or status.get("state") or ""
    return str(value).lower()
def latest_statuses_by_context(statuses: list[dict]) -> dict[str, dict]:
    """Map each context name to its FIRST occurrence in *statuses*.

    NOTE(review): this assumes the API lists statuses newest-first, so the
    first hit per context is the latest one — confirm against Gitea.
    """
    by_context: dict[str, dict] = {}
    for entry in statuses:
        ctx = entry.get("context")
        if not isinstance(ctx, str):
            continue  # ignore malformed entries without a string context
        by_context.setdefault(ctx, entry)
    return by_context
def required_contexts_green(
    latest_statuses: dict[str, dict],
    contexts: list[str],
) -> tuple[bool, list[str]]:
    """Check that every required context is `success`.

    Returns (all_green, problems) where each problem is formatted
    "context=state" with 'missing' for absent contexts.
    """
    problems: list[str] = []
    for ctx in contexts:
        state = status_state(latest_statuses.get(ctx) or {})
        if state != "success":
            problems.append(f"{ctx}={state or 'missing'}")
    return not problems, problems
def label_names(issue: dict) -> set[str]:
    """Set of string label names on *issue*; malformed entries are skipped."""
    names: set[str] = set()
    for label in issue.get("labels", []):
        if not isinstance(label, dict):
            continue
        name = label.get("name")
        if isinstance(name, str):
            names.add(name)
    return names
def choose_next_queued_issue(
    issues: list[dict],
    *,
    queue_label: str,
    hold_label: str = "",
) -> dict | None:
    """Pick the oldest eligible queued PR, or None when the queue is empty.

    Eligible = carries *queue_label*, does not carry *hold_label* (when one
    is configured), and is actually a pull request. Oldest = smallest
    (created_at, number) pair.
    """
    def _eligible(entry: dict) -> bool:
        labels = label_names(entry)
        if queue_label not in labels:
            return False
        if hold_label and hold_label in labels:
            return False
        return "pull_request" in entry

    candidates = [entry for entry in issues if _eligible(entry)]
    if not candidates:
        return None
    return min(candidates, key=lambda entry: (entry.get("created_at") or "", int(entry["number"])))
def pr_contains_base_sha(commits: list[dict], base_sha: str) -> bool:
    """True if any commit in *commits* has sha (or id) equal to *base_sha*."""
    return any(
        (commit.get("sha") or commit.get("id")) == base_sha
        for commit in commits
    )
def pr_has_current_base(pr: dict, commits: list[dict], main_sha: str) -> bool:
    """True when the PR head already contains current main.

    Fast path: the PR's reported merge_base equals main's head sha.
    Fallback: scan the PR's commit list for main's head sha.
    """
    if pr.get("merge_base") == main_sha:
        return True
    return pr_contains_base_sha(commits, main_sha)
def evaluate_merge_readiness(
    *,
    main_status: dict,
    pr_status: dict,
    required_contexts: list[str],
    pr_has_current_base: bool,
) -> MergeDecision:
    """Decide what the queue should do with the candidate PR.

    Check order matters: a non-green main pauses the queue before the PR
    is examined, and a stale base forces an update before CI state is
    considered (green CI on a stale head proves nothing about the merge).
    """
    main_state = str(main_status.get("state") or "").lower()
    if main_state != "success":
        return MergeDecision(False, "pause", f"main status is {main_state or 'missing'}")
    if not pr_has_current_base:
        return MergeDecision(False, "update", "PR head does not contain current main")
    pr_state = str(pr_status.get("state") or "").lower()
    if pr_state != "success":
        return MergeDecision(False, "wait", f"PR combined status is {pr_state or 'missing'}")
    # Combined state alone is not enough: each *required* context must
    # individually be success — required_contexts_green also flags
    # contexts missing entirely from the status list.
    latest = latest_statuses_by_context(pr_status.get("statuses") or [])
    ok, missing_or_bad = required_contexts_green(latest, required_contexts)
    if not ok:
        return MergeDecision(False, "wait", "required contexts not green: " + ", ".join(missing_or_bad))
    return MergeDecision(True, "merge", "ready")
def get_branch_head(branch: str) -> str:
    """Return the head commit sha of *branch*; raise ApiError on bad shape."""
    _, payload = api("GET", f"/repos/{OWNER}/{NAME}/branches/{branch}")
    commit = payload.get("commit") if isinstance(payload, dict) else None
    sha = commit.get("id") if isinstance(commit, dict) else None
    # A real sha is at least the 7-char short form; anything else is junk.
    if isinstance(sha, str) and len(sha) >= 7:
        return sha
    raise ApiError(f"branch {branch} response missing commit id")
def get_combined_status(sha: str) -> dict:
    """Fetch the combined commit status for *sha*; raise on non-dict body."""
    _, payload = api("GET", f"/repos/{OWNER}/{NAME}/commits/{sha}/status")
    if isinstance(payload, dict):
        return payload
    raise ApiError(f"status for {sha} response not object")
def list_queued_issues() -> list[dict]:
    """List up to 50 open PR-issues carrying QUEUE_LABEL."""
    params = {
        "state": "open",
        "type": "pulls",
        "labels": QUEUE_LABEL,
        "limit": "50",
    }
    _, payload = api("GET", f"/repos/{OWNER}/{NAME}/issues", query=params)
    if isinstance(payload, list):
        return payload
    raise ApiError("queued issues response not list")
def get_pull(pr_number: int) -> dict:
    """Fetch PR *pr_number*; raise ApiError when the body is not an object."""
    _, payload = api("GET", f"/repos/{OWNER}/{NAME}/pulls/{pr_number}")
    if isinstance(payload, dict):
        return payload
    raise ApiError(f"PR #{pr_number} response not object")
def get_pull_commits(pr_number: int) -> list[dict]:
    """Fetch the PR's commit list; raise ApiError when the body is not a list."""
    _, payload = api("GET", f"/repos/{OWNER}/{NAME}/pulls/{pr_number}/commits")
    if isinstance(payload, list):
        return payload
    raise ApiError(f"PR #{pr_number} commits response not list")
def post_comment(pr_number: int, body: str, *, dry_run: bool) -> None:
    """Post *body* as an issue comment on the PR (no-op when *dry_run*).

    Logs the comment's first line (truncated to 160 chars) as a
    ::notice:: annotation before posting.
    """
    # BUG FIX: body.splitlines()[0] raised IndexError when body was the
    # empty string; guard so an empty comment logs an empty preview
    # instead of crashing the queue tick.
    preview = body.splitlines()[0][:160] if body else ""
    print(f"::notice::comment PR #{pr_number}: {preview}")
    if dry_run:
        return
    api("POST", f"/repos/{OWNER}/{NAME}/issues/{pr_number}/comments", body={"body": body})
def update_pull(pr_number: int, *, dry_run: bool) -> None:
    """Ask Gitea to refresh the PR branch with its base (no-op on dry run)."""
    print(f"::notice::updating PR #{pr_number} with base branch via style={UPDATE_STYLE}")
    if dry_run:
        return
    # The update endpoint returns no JSON body; expect_json=False tolerates that.
    endpoint = f"/repos/{OWNER}/{NAME}/pulls/{pr_number}/update"
    api("POST", endpoint, query={"style": UPDATE_STYLE}, expect_json=False)
def merge_pull(pr_number: int, *, dry_run: bool) -> None:
    """Merge the PR via Gitea's merge endpoint (no-op when *dry_run*)."""
    print(f"::notice::merging PR #{pr_number}")
    if dry_run:
        return
    payload = {
        "Do": "merge",
        "MergeTitleField": f"Merge PR #{pr_number} via Gitea merge queue",
        "MergeMessageField": (
            "Serialized merge by gitea-merge-queue after current-main, "
            "SOP, and required CI checks were green."
        ),
    }
    api("POST", f"/repos/{OWNER}/{NAME}/pulls/{pr_number}/merge", body=payload, expect_json=False)
def process_once(*, dry_run: bool = False) -> int:
    """Run one queue tick: pick at most one PR and update or merge it.

    Always returns 0; every non-actionable situation is logged and
    skipped rather than failing the cron invocation.
    """
    contexts = required_contexts(REQUIRED_CONTEXTS_RAW)
    # Gate 1: a non-green main pauses the whole queue.
    main_sha = get_branch_head(WATCH_BRANCH)
    main_status = get_combined_status(main_sha)
    if str(main_status.get("state") or "").lower() != "success":
        print(f"::notice::queue paused: {WATCH_BRANCH}@{main_sha[:8]} is not green")
        return 0
    # Gate 2: oldest queued PR, excluding held ones.
    issue = choose_next_queued_issue(
        list_queued_issues(),
        queue_label=QUEUE_LABEL,
        hold_label=HOLD_LABEL,
    )
    if not issue:
        print("::notice::merge queue empty")
        return 0
    pr_number = int(issue["number"])
    pr = get_pull(pr_number)
    if pr.get("state") != "open":
        print(f"::notice::PR #{pr_number} is not open; skipping")
        return 0
    # Gate 3: the queue only serves PRs targeting the watched branch.
    if pr.get("base", {}).get("ref") != WATCH_BRANCH:
        post_comment(pr_number, f"merge-queue: skipped; base branch is not `{WATCH_BRANCH}`.", dry_run=dry_run)
        return 0
    # Gate 4: refuse fork PRs — the queue may only mutate same-repo branches.
    if pr.get("head", {}).get("repo_id") != pr.get("base", {}).get("repo_id"):
        post_comment(pr_number, "merge-queue: skipped; fork PRs are not supported by the serialized queue.", dry_run=dry_run)
        return 0
    head_sha = pr.get("head", {}).get("sha")
    if not isinstance(head_sha, str) or len(head_sha) < 7:
        raise ApiError(f"PR #{pr_number} missing head sha")
    commits = get_pull_commits(pr_number)
    current_base = pr_has_current_base(pr, commits, main_sha)
    pr_status = get_combined_status(head_sha)
    decision = evaluate_merge_readiness(
        main_status=main_status,
        pr_status=pr_status,
        required_contexts=contexts,
        pr_has_current_base=current_base,
    )
    print(f"::notice::PR #{pr_number} decision={decision.action}: {decision.reason}")
    if decision.action == "update":
        # Stale base: refresh the branch and stop — CI must rerun on the
        # updated head before a merge can be considered.
        update_pull(pr_number, dry_run=dry_run)
        post_comment(
            pr_number,
            (
                f"merge-queue: updated this branch with `{WATCH_BRANCH}` at "
                f"`{main_sha[:12]}`. Waiting for CI on the refreshed head."
            ),
            dry_run=dry_run,
        )
        return 0
    if decision.ready:
        # Re-check main just before merging: if it moved during this
        # tick, defer rather than merge against a stale baseline.
        latest_main_sha = get_branch_head(WATCH_BRANCH)
        if latest_main_sha != main_sha:
            print(
                f"::notice::main moved {main_sha[:8]} -> {latest_main_sha[:8]}; "
                "deferring to next tick"
            )
            return 0
        merge_pull(pr_number, dry_run=dry_run)
        return 0
    return 0
def main() -> int:
    """CLI entry point: parse --dry-run, validate env, run one queue tick."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--dry-run", action="store_true")
    options = parser.parse_args()
    _require_runtime_env()
    return process_once(dry_run=options.dry_run)


if __name__ == "__main__":
    sys.exit(main())

View File

@ -1,113 +0,0 @@
#!/usr/bin/env python3
"""Lint workflow bash for curl status-code capture pollution.
The bad shape is:
HTTP_CODE=$(curl ... -w '%{http_code}' ... || echo "000")
`curl -w` writes the HTTP code to stdout before returning non-zero, so
fallback output inside the same command substitution appends another code.
"""
from __future__ import annotations
import argparse
import glob
import re
import sys
from pathlib import Path
from typing import NamedTuple
# Path of this lint's own workflow file — excluded from scanning (see
# scan_paths) so the lint does not flag text embedded in itself.
SELF = ".gitea/workflows/lint-curl-status-capture.yml"
class Finding(NamedTuple):
    """One offending curl-status-capture site."""
    path: str     # workflow file containing the match
    snippet: str  # whitespace-collapsed match text, truncated to 160 chars
BAD_STATUS_CAPTURE = re.compile(
r"""
\$\(\s*
curl\b
[^)]*
-w\s*['"]%\{http_code\}['"]
[^)]*
\|\|\s*
(?:
echo\s+['"]?000['"]?
|
printf\s+['"]000['"]
)
\s*\)
""",
re.DOTALL | re.VERBOSE,
)
def _logical_shell(content: str) -> str:
"""Collapse bash line continuations so one curl command is one string."""
return re.sub(r"\\\s*\n\s*", " ", content)
def scan_content(path: str, content: str) -> list[Finding]:
    """Scan one file's text; return a Finding per bad capture site."""
    flattened = _logical_shell(content)
    findings: list[Finding] = []
    for match in BAD_STATUS_CAPTURE.finditer(flattened):
        snippet = re.sub(r"\s+", " ", match.group(0)).strip()[:160]
        findings.append(Finding(path=path, snippet=snippet))
    return findings
def scan_paths(paths: list[str]) -> list[Finding]:
    """Scan every file in *paths*, skipping this lint's own workflow."""
    results: list[Finding] = []
    for candidate in paths:
        if candidate == SELF:
            continue  # never lint ourselves
        text = Path(candidate).read_text(encoding="utf-8")
        results.extend(scan_content(candidate, text))
    return results
def default_paths() -> list[str]:
    """All Gitea workflow YAMLs, sorted for deterministic reporting."""
    matches = glob.glob(".gitea/workflows/*.yml")
    matches.sort()
    return matches
def print_report(findings: list[Finding]) -> None:
    """Print the lint verdict, plus a fix template when findings exist.

    Uses Gitea/GitHub Actions `::error::` annotations (with file=) so
    matches can surface per-file in the PR UI.
    """
    if not findings:
        print("OK No curl-status-capture pollution patterns detected")
        return
    print(f"::error::Found {len(findings)} curl-status-capture pollution site(s):")
    for finding in findings:
        # Adjacent string literals: this is ONE long annotation line.
        print(
            f"::error file={finding.path}::Curl status-capture pollution: "
            "'|| echo/printf 000' inside a $(curl ... -w '%{http_code}' ...) "
            "subshell. On non-2xx or connection failure, curl's -w writes a "
            "status, then exits non-zero, then the fallback appends another "
            "status. Fix: route -w into a tempfile so the exit code cannot "
            "pollute stdout."
        )
        print(f" matched: {finding.snippet}...")
    print()
    print("Fix template:")
    print(" set +e")
    print(" curl ... -w '%{http_code}' >code.txt 2>/dev/null")
    print(" set -e")
    print(' HTTP_CODE=$(cat code.txt 2>/dev/null)')
    print(' [ -z "$HTTP_CODE" ] && HTTP_CODE="000"')
def main(argv: list[str] | None = None) -> int:
    """Lint entry point: returns 1 when findings exist, else 0."""
    parser = argparse.ArgumentParser()
    parser.add_argument("paths", nargs="*", help="workflow files to scan")
    ns = parser.parse_args(argv)
    targets = ns.paths or default_paths()
    findings = scan_paths(targets)
    print_report(findings)
    return 1 if findings else 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@ -1,404 +0,0 @@
#!/usr/bin/env python3
"""lint-required-no-paths — structural enforcement of
`feedback_path_filtered_workflow_cant_be_required`.
For every workflow whose status-check context appears in
`branch_protections/<branch>.status_check_contexts`, assert that the
workflow's `on:` block has NO `paths:` and NO `paths-ignore:` filter.
A required-check workflow with a paths filter silently degrades the
merge gate:
- If the PR's diff doesn't match the `paths:` glob, the workflow
never fires.
- Gitea (1.22.6) reports the required context as `pending` (never as
`skipped == success`), so the PR cannot merge.
- For a docs-only PR against `paths: ['**.go']`, the PR is
blocked forever — no human action can produce a green.
The class was previously prevented only by reviewer vigilance + the
saved memory `feedback_path_filtered_workflow_cant_be_required`. This
script makes it a hard CI gate so a future PR adding `paths:` to a
required workflow fails fast at PR time, not after merge when the next
docs PR wedges main.
The lint runs as `.gitea/workflows/lint-required-no-paths.yml` on every
PR. The lint workflow ITSELF must not have a paths-filter (otherwise it
could be circumvented by a paths-non-matching PR) — that's enforced by
self-reference and by the workflow's own `on:` block deliberately
omitting filters.
Sources of truth:
- `branch_protections/<branch>` `status_check_contexts` (the merge gate)
- `.gitea/workflows/*.yml` `name:` + `on:` (the workflow set)
Context-format note (Gitea 1.22.6):
Status-check contexts are formatted `{workflow_name} / {job_name_or_key} ({event})`.
We parse the workflow_name prefix and walk `.gitea/workflows/*.yml` for
a file whose `name:` attr matches. (The filename is NOT the source of
truth; `name:` is, because Gitea formats the context from `name:`.)
Exit codes:
0 no required workflow has a paths/paths-ignore filter (clean) OR
branch_protections endpoint returned 403/404 (token-scope issue;
surfaced via ::error:: but non-fatal so a missing scope doesn't
red-X every PR — fix the token, not the lint).
1 at least one required workflow has a paths/paths-ignore filter
(the gate-degrading defect class).
2 env contract violation (missing GITEA_TOKEN/HOST/REPO/BRANCH).
3 workflows directory missing or workflow YAML unparseable.
4 protection response shape unexpected (non-dict body on 2xx).
Auth note: `GET /repos/.../branch_protections/{branch}` requires
repo-admin role in Gitea 1.22.6. The workflow-default `GITHUB_TOKEN`
is non-admin; we re-use `DRIFT_BOT_TOKEN` (same persona that powers
ci-required-drift.yml). If `DRIFT_BOT_TOKEN` is unavailable in a future
context, the script falls through gracefully (exit 0 + ::error::).
"""
from __future__ import annotations
import json
import os
import re
import sys
import urllib.error
import urllib.parse
import urllib.request
from pathlib import Path
from typing import Any
import yaml # PyYAML 6.0.2 — installed by the workflow before this runs.
# --------------------------------------------------------------------------
# Environment
# --------------------------------------------------------------------------
def _env(key: str, *, required: bool = True, default: str | None = None) -> str:
val = os.environ.get(key, default)
if required and not val:
sys.stderr.write(f"::error::missing required env var: {key}\n")
sys.exit(2)
return val or ""
# All inputs arrive via env (set by the workflow step). `required=False`
# here because tests import this module without the full contract;
# `_require_runtime_env` enforces it at run() time instead.
GITEA_TOKEN = _env("GITEA_TOKEN", required=False)
GITEA_HOST = _env("GITEA_HOST", required=False)
REPO = _env("REPO", required=False)
BRANCH = _env("BRANCH", required=False, default="main")
WORKFLOWS_DIR = _env(
    "WORKFLOWS_DIR", required=False, default=".gitea/workflows"
)
# Split "owner/name"; pad with "" so a malformed REPO cannot raise at import.
OWNER, NAME = (REPO.split("/", 1) + [""])[:2] if REPO else ("", "")
API = f"https://{GITEA_HOST}/api/v1" if GITEA_HOST else ""
def _require_runtime_env() -> None:
"""Enforce env contract — called from `run()` only. Tests import
individual functions without setting the full env contract."""
for key in ("GITEA_TOKEN", "GITEA_HOST", "REPO", "BRANCH"):
if not os.environ.get(key):
sys.stderr.write(f"::error::missing required env var: {key}\n")
sys.exit(2)
# --------------------------------------------------------------------------
# Tiny HTTP helper (mirrors ci-required-drift.py contract:
# raise on non-2xx and on JSON-decode-fail when JSON expected, per
# `feedback_api_helper_must_raise_not_return_dict`).
# --------------------------------------------------------------------------
class ApiError(RuntimeError):
    """Raised when a Gitea API call cannot be trusted to have succeeded:
    non-2xx status, or a non-JSON body where JSON was expected."""
def api(
    method: str,
    path: str,
    *,
    body: dict | None = None,
    query: dict[str, str] | None = None,
    expect_json: bool = True,
) -> tuple[int, Any]:
    """Call the Gitea REST API; return (status, decoded_body).

    Raises ApiError on any non-2xx response, and on a JSON-decode
    failure when *expect_json* is True (raise-don't-return contract —
    see the comment block above this helper). Transport-level failures
    (DNS, refused connection) propagate as urllib exceptions.
    """
    url = f"{API}{path}"
    if query:
        url = f"{url}?{urllib.parse.urlencode(query)}"
    data = None
    headers = {
        "Authorization": f"token {GITEA_TOKEN}",
        "Accept": "application/json",
    }
    if body is not None:
        data = json.dumps(body).encode("utf-8")
        headers["Content-Type"] = "application/json"
    req = urllib.request.Request(url, method=method, data=data, headers=headers)
    try:
        with urllib.request.urlopen(req, timeout=30) as resp:
            raw = resp.read()
            status = resp.status
    except urllib.error.HTTPError as e:
        # HTTPError still carries a readable body; keep it so the
        # ApiError snippet below shows what the server said.
        raw = e.read()
        status = e.code
    if not (200 <= status < 300):
        # Truncate so a huge HTML error page doesn't flood the log.
        snippet = raw[:500].decode("utf-8", errors="replace") if raw else ""
        raise ApiError(f"{method} {path} → HTTP {status}: {snippet}")
    if not raw:
        return status, None  # e.g. 204 No Content
    try:
        return status, json.loads(raw)
    except json.JSONDecodeError as e:
        if expect_json:
            raise ApiError(
                f"{method} {path} → HTTP {status} but body is not JSON: {e}"
            ) from e
        return status, {"_raw": raw.decode("utf-8", errors="replace")}
# --------------------------------------------------------------------------
# Status-check context parser
# --------------------------------------------------------------------------
# Format: "<workflow_name> / <job_name_or_key> (<event>)"
# Examples observed on molecule-core/main:
# "Secret scan / Scan diff for credential-shaped strings (pull_request)"
# "sop-tier-check / tier-check (pull_request)"
#
# Split strategy: peel off the trailing ` (<event>)` first, then split
# the leading `<workflow> / <rest>` on the FIRST ` / ` (workflow names
# come from `name:` attrs which conventionally don't embed ' / '; job
# names CAN, so we keep the rest of the slash-divided text as the job
# name). This matches Gitea's `name: ` semantics.
_CONTEXT_RE = re.compile(r"^(?P<workflow>.+?) / (?P<job>.+) \((?P<event>[^)]+)\)$")
def parse_context(ctx: str) -> tuple[str, str, str] | None:
"""Parse `<workflow> / <job> (<event>)` → (workflow, job, event) or None."""
if not ctx:
return None
m = _CONTEXT_RE.match(ctx)
if not m:
return None
return m.group("workflow"), m.group("job"), m.group("event")
# --------------------------------------------------------------------------
# workflow-name → file resolution
# --------------------------------------------------------------------------
def _iter_workflow_files() -> list[Path]:
    """Sorted *.yml and *.yaml files under WORKFLOWS_DIR; exit 3 if absent."""
    root = Path(WORKFLOWS_DIR)
    if not root.is_dir():
        sys.stderr.write(f"::error::workflows directory not found: {root}\n")
        sys.exit(3)
    # Gitea accepts both extensions — don't silently miss a future `.yaml`.
    return sorted([*root.glob("*.yml"), *root.glob("*.yaml")])
def resolve_workflow_file(workflow_name: str) -> Path | None:
    """Find the workflow YAML whose `name:` attribute equals *workflow_name*.

    Returns None when nothing matches. The filename is deliberately NOT
    used as a fallback — Gitea builds status contexts from `name:`, so a
    file whose `name:` doesn't match can't be the source of the required
    context (a `name:`-less workflow wouldn't appear in the protection
    list under this format at all).

    Exits 3 on unparseable workflow YAML.
    """
    for candidate in _iter_workflow_files():
        try:
            parsed = yaml.safe_load(candidate.read_text(encoding="utf-8"))
        except yaml.YAMLError as exc:
            sys.stderr.write(f"::error::YAML parse error in {candidate}: {exc}\n")
            sys.exit(3)
        if isinstance(parsed, dict) and parsed.get("name") == workflow_name:
            return candidate
    return None
# --------------------------------------------------------------------------
# paths-filter detection
# --------------------------------------------------------------------------
# Triggers that accept `paths:` / `paths-ignore:` (per GitHub Actions /
# Gitea Actions docs): pull_request, pull_request_target, push.
# We don't enumerate — any sub-key named `paths` or `paths-ignore`
# inside an event mapping is flagged.
# The two trigger-level filter keys this lint bans on required workflows.
_PATHS_KEYS = ("paths", "paths-ignore")
def detect_paths_filters(workflow_path: Path) -> list[str]:
    """Walk the workflow's `on:` block and return a list of findings, one
    per offending `paths`/`paths-ignore` key.

    Returns:
        Empty list if the workflow has no paths/paths-ignore filter
        anywhere in its `on:` block. Otherwise, a list of human-readable
        strings naming the event and filter key + the filter contents.

    Raises:
        SystemExit: code 3 when the workflow YAML cannot be parsed.
    """
    try:
        doc = yaml.safe_load(workflow_path.read_text(encoding="utf-8"))
    except yaml.YAMLError as e:
        sys.stderr.write(f"::error::YAML parse error in {workflow_path}: {e}\n")
        sys.exit(3)
    if not isinstance(doc, dict):
        return []
    # PyYAML 6 quirk: under the default constructor `on:` sometimes
    # becomes the bool key `True`, because YAML 1.1 treats `on` as a
    # boolean. Tolerate both spellings of the key.
    on_block = doc.get("on") or doc.get(True)
    if on_block is None:
        return []
    findings: list[str] = []
    # Shape A: `on: pull_request` (string shorthand) — cannot carry filters.
    if isinstance(on_block, str):
        return []
    # Shape B: `on: [pull_request, push]` (list shorthand) — cannot carry filters.
    if isinstance(on_block, list):
        return []
    # Shape C: `on: { event: { ... } }` — the standard mapping case.
    if isinstance(on_block, dict):
        # Defensive: top-level malformed `on.paths` (someone wrote
        # `on: { paths: ['x'] }` thinking it's a workflow-level filter).
        # This is invalid syntax, but if present, flag it — it might
        # not block the workflow from registering (Gitea may ignore the
        # unknown key) and would create a false sense of "filter exists"
        # the lint should still surface.
        for k in _PATHS_KEYS:
            if k in on_block:
                v = on_block[k]
                findings.append(
                    f"top-level `on.{k}` filter (malformed but present): {v!r}"
                )
        for event, event_body in on_block.items():
            if event in _PATHS_KEYS:
                continue  # already handled above
            if not isinstance(event_body, dict):
                # `pull_request: null` / `pull_request: [opened]` shapes —
                # no place for a paths filter to live; skip.
                continue
            for k in _PATHS_KEYS:
                if k in event_body:
                    v = event_body[k]
                    findings.append(
                        f"`on.{event}.{k}` filter present: {v!r}"
                    )
    return findings
# --------------------------------------------------------------------------
# Driver
# --------------------------------------------------------------------------
def run() -> int:
    """Main lint entrypoint. Returns the process exit code.

    Exit semantics (see module docstring for full table):
      0  clean (no offending paths-filter on any required workflow),
         OR protection unreadable (403/404) surfaced as ::error::
         but treated as non-fatal so token-scope issues don't red-X
         every PR.
      1  at least one required workflow carries a paths/paths-ignore
         filter — the regression class this lint exists to prevent.
      4  protection response was 2xx but not a JSON object.
    """
    _require_runtime_env()
    protection_path = f"/repos/{OWNER}/{NAME}/branch_protections/{BRANCH}"
    try:
        _, protection = api("GET", protection_path)
    except ApiError as e:
        # Recover the HTTP status from the ApiError message
        # ("{method} {path} → HTTP {status}: {body}").
        msg = str(e)
        m = re.search(r"HTTP (\d{3})", msg)
        http_status = int(m.group(1)) if m else None
        if http_status in (403, 404):
            # BUG FIX: the first two f-string fragments were concatenated
            # with no separator, logging "...HTTP 403DRIFT_BOT_TOKEN...".
            # A ". " now separates the status from the diagnosis.
            sys.stderr.write(
                f"::error::GET {protection_path} returned HTTP {http_status}. "
                f"DRIFT_BOT_TOKEN lacks repo-admin scope (Gitea 1.22.6 "
                f"requires it for this endpoint) OR branch '{BRANCH}' has "
                f"no protection configured. Cannot enumerate required "
                f"checks; skipping lint with exit 0 to avoid red-X on "
                f"every PR. Fix: grant repo-admin to mc-drift-bot.\n"
            )
            return 0
        # Anything else (e.g. transient 5xx) is a real failure — fail loud.
        raise
    if not isinstance(protection, dict):
        sys.stderr.write(
            f"::error::protection response for {BRANCH} not a JSON object\n"
        )
        return 4
    contexts: list[str] = list(protection.get("status_check_contexts") or [])
    if not contexts:
        print(
            f"::notice::branch_protections/{BRANCH} has 0 required "
            f"status_check_contexts; nothing to lint. (no required contexts)"
        )
        return 0
    print(f"::notice::Linting {len(contexts)} required context(s) for paths-filter regressions:")
    for c in contexts:
        print(f" - {c}")
    offenders: list[tuple[str, Path, list[str]]] = []
    unresolved: list[str] = []
    for ctx in contexts:
        parsed = parse_context(ctx)
        if parsed is None:
            print(
                f"::warning::could not parse context '{ctx}' "
                f"(expected `<workflow> / <job> (<event>)`); skipping"
            )
            unresolved.append(ctx)
            continue
        workflow_name, _job, _event = parsed
        wf_path = resolve_workflow_file(workflow_name)
        if wf_path is None:
            print(
                f"::warning::no workflow file in {WORKFLOWS_DIR} has "
                f"`name: {workflow_name}` (required context '{ctx}'); "
                f"skipping paths-filter check. "
                f"(orphaned-context detection is ci-required-drift's job.)"
            )
            unresolved.append(ctx)
            continue
        findings = detect_paths_filters(wf_path)
        if findings:
            offenders.append((workflow_name, wf_path, findings))
        else:
            print(f"::notice::OK {wf_path.name} ({workflow_name}) — no paths filter")
    if offenders:
        print("")
        print(f"::error::Found {len(offenders)} required workflow(s) with paths/paths-ignore filters:")
        for workflow_name, wf_path, findings in offenders:
            for finding in findings:
                # ::error file=... lets Gitea Actions surface a per-file
                # annotation in the PR UI (when annotations are wired).
                print(
                    f"::error file={wf_path}::Required workflow "
                    f"'{workflow_name}' ({wf_path.name}) has a paths "
                    f"filter that would degrade the merge gate to a "
                    f"silent indefinite pending: {finding}. "
                    f"See feedback_path_filtered_workflow_cant_be_required. "
                    f"Fix: remove the filter and instead gate per-step "
                    f"inside the job with `if: contains(steps.changed.outputs.files, ...)` "
                    f"or refactor to a single-job-with-per-step-if shape."
                )
        return 1
    print("")
    print(
        f"::notice::OK — all {len(contexts) - len(unresolved)} resolvable "
        f"required workflow(s) clean (no paths/paths-ignore filters)."
    )
    if unresolved:
        print(
            f"::notice::{len(unresolved)} required context(s) were not "
            f"resolved to a workflow file (warn-not-fail); see warnings above."
        )
    return 0
if __name__ == "__main__":
    # The process exit code IS the lint verdict (see module docstring).
    sys.exit(run())

View File

@ -1,369 +0,0 @@
#!/usr/bin/env python3
"""lint-workflow-yaml — catch Gitea-1.22.6-hostile workflow YAML shapes.
This script enforces six structural rules that have historically caused
silent CI failures on Gitea Actions (1.22.6) workflows that the server's
YAML parser rejects with `[W] ignore invalid workflow ...` and registers
for zero events, or shape conventions that produce ambiguous status
contexts. Each rule maps to a documented incident in saved memory.
Rules (4 fatal + 1 fatal cross-file + 1 heuristic-warn):
1. `workflow_dispatch.inputs:` block Gitea 1.22.6 mis-parses the
`inputs` keys as sibling event types and rejects the whole file.
Memory: feedback_gitea_workflow_dispatch_inputs_unsupported.
Origin: 2026-05-11 PyPI freeze (publish-runtime).
2. `on: workflow_run:` event not enumerated in Gitea 1.22.6's
supported event list (verified via modules/actions/workflows.go
enumeration; task #81). Workflow registers, fires for 0 events.
3. `name:` containing `/` breaks the
`<workflow> / <job> (<event>)` commit-status context convention;
downstream parsers (sop-tier-check, status-reaper) tokenize on `/`.
4. `name:` collision across files Gitea routes commit-status updates
by `name` and behavior on collision is undefined (status-reaper
rev1 fail-loud).
5. Cross-repo `uses: org/repo/path@ref` blocked while
`[actions].DEFAULT_ACTIONS_URL=github` is the server default;
resolves to github.com/<org-suspended>/... and 404s.
Memory: feedback_gitea_cross_repo_uses_blocked. Cross-link: task #109.
6. (HEURISTIC, warn-not-fail) Steps reference `https://api.github.com`
or `https://github.com/.../releases/download` without a
workflow-level `env.GITHUB_SERVER_URL` set to the Gitea instance.
Memory: feedback_act_runner_github_server_url.
Per `feedback_smoke_test_vendor_truth_not_shape_match`: fixtures used to
validate this lint must mirror real Gitea 1.22.6 YAML semantics, not
Python yaml-parser quirks. The test suite at tests/test_lint_workflow_yaml.py
includes a vendor-truth fixture (the exact publish-runtime regression).
Usage:
python3 .gitea/scripts/lint-workflow-yaml.py
Lint every `*.yml` in `.gitea/workflows/`.
python3 .gitea/scripts/lint-workflow-yaml.py --workflow-dir <path>
Lint a custom directory (used by tests/test_lint_workflow_yaml.py).
Exit codes:
0 clean OR only heuristic-warnings emitted.
1 at least one fatal rule (1-5) violated.
2 YAML parse error or argv usage error.
"""
from __future__ import annotations
import argparse
import collections
import glob
import os
import re
import sys
from pathlib import Path
from typing import Any, Iterable
try:
import yaml
except ImportError:
print("::error::PyYAML is required. Install with: pip install PyYAML", file=sys.stderr)
sys.exit(2)
# YAML quirk: bare `on:` at the top level parses to the Python `True`
# (because `on` is a YAML 1.1 boolean alias). Handle both keys.
def _get_on(d: dict) -> Any:
if not isinstance(d, dict):
return None
if "on" in d:
return d["on"]
if True in d:
return d[True]
return None
# ---------------------------------------------------------------------------
# Rule 1 — workflow_dispatch.inputs block (Gitea 1.22.6 parser rejects)
# ---------------------------------------------------------------------------
def check_workflow_dispatch_inputs(filename: str, doc: Any) -> list[str]:
    """Return per-violation error lines if `workflow_dispatch.inputs` is set.

    Fix: the annotation previously hardcoded the literal text
    `file=(unknown)`, so the per-file PR annotation never attached to the
    offending workflow; interpolate the `filename` parameter instead.
    """
    errors: list[str] = []
    on = _get_on(doc)
    if not isinstance(on, dict):
        return errors
    wd = on.get("workflow_dispatch")
    # Any truthy `inputs:` mapping trips the rule; an empty block is harmless.
    if isinstance(wd, dict) and wd.get("inputs"):
        errors.append(
            f"::error file={filename}::Rule 1 (FATAL): "
            f"`on.workflow_dispatch.inputs:` block detected. Gitea 1.22.6 "
            f"silently rejects the entire workflow with `[W] ignore invalid "
            f"workflow: unknown on type: map[...]`. Drop the `inputs:` block "
            f"and derive parameters from tag name / env / external query. "
            f"Memory: feedback_gitea_workflow_dispatch_inputs_unsupported."
        )
    return errors
# ---------------------------------------------------------------------------
# Rule 2 — on: workflow_run (not supported on Gitea 1.22.6)
# ---------------------------------------------------------------------------
def check_workflow_run_event(filename: str, doc: Any) -> list[str]:
    """Return per-violation error lines if `on: workflow_run:` is used.

    Fix: interpolate `filename` into the `file=` annotation field (it was
    previously the literal text `(unknown)`, so the annotation never
    attached to the offending file).
    """
    errors: list[str] = []
    on = _get_on(doc)
    # `on:` can be a mapping ({workflow_run: {...}}) or a bare event list.
    if isinstance(on, dict) and "workflow_run" in on:
        errors.append(
            f"::error file={filename}::Rule 2 (FATAL): `on: workflow_run:` "
            f"event used. Gitea 1.22.6 does NOT support `workflow_run` "
            f"(verified via modules/actions/workflows.go enumeration; "
            f"task #81). Workflow will fire for zero events. Use a "
            f"`schedule:` cron OR a `push:` trigger with `paths:` filter "
            f"on the upstream workflow file as the cross-workflow gate."
        )
    elif isinstance(on, list) and "workflow_run" in on:
        errors.append(
            f"::error file={filename}::Rule 2 (FATAL): `on: workflow_run` "
            f"in event list. Not supported on Gitea 1.22.6 — task #81."
        )
    return errors
# ---------------------------------------------------------------------------
# Rule 3 — name: contains "/" (breaks status-context tokenization)
# ---------------------------------------------------------------------------
def check_name_with_slash(filename: str, doc: Any) -> list[str]:
    """Return per-violation error lines if workflow `name:` contains a slash.

    Fix: interpolate `filename` into the `file=` annotation field (it was
    previously the literal text `(unknown)`, so the annotation never
    attached to the offending file).
    """
    errors: list[str] = []
    if not isinstance(doc, dict):
        return errors
    name = doc.get("name")
    if isinstance(name, str) and "/" in name:
        errors.append(
            f"::error file={filename}::Rule 3 (FATAL): workflow `name: "
            f"{name!r}` contains `/`. The commit-status context convention "
            f"is `<workflow> / <job> (<event>)`; embedding `/` in the "
            f"workflow name makes downstream parsers (sop-tier-check, "
            f"status-reaper) tokenize ambiguously. Rename to use `-` or "
            f"` ` instead."
        )
    return errors
# ---------------------------------------------------------------------------
# Rule 4 — cross-file name collision
# ---------------------------------------------------------------------------
def check_name_collision_across_files(
    docs_by_file: dict[str, Any],
) -> list[str]:
    """Return per-collision error lines if two files share the same `name:`."""
    # Group filenames by their declared workflow name; non-dict docs and
    # missing/empty names simply don't participate.
    by_name: dict[str, list[str]] = collections.defaultdict(list)
    for filename, doc in docs_by_file.items():
        name = doc.get("name") if isinstance(doc, dict) else None
        if isinstance(name, str) and name:
            by_name[name].append(filename)
    return [
        f"::error::Rule 4 (FATAL): workflow `name: {n!r}` collision "
        f"across {len(files)} files: {files}. Gitea routes "
        f"commit-status updates by `name`; collision yields "
        f"undefined behavior. Give each workflow a unique `name:`."
        for n, files in sorted(by_name.items())
        if len(files) > 1
    ]
# ---------------------------------------------------------------------------
# Rule 5 — cross-repo `uses: org/repo/path@ref`
# ---------------------------------------------------------------------------
# `uses: <foo>@<ref>` — match the value form Gitea/act actually parse.
# We need to distinguish:
# - `actions/checkout@<sha>` OK (bare org/repo@ref, no subpath)
# - `./.gitea/actions/foo` OK (local path)
# - `docker://image:tag` OK (docker-image form)
# - `molecule-ai/molecule-ci/.gitea/actions/audit-force-merge@main` BAD
USES_CROSS_REPO_RE = re.compile(
r"""^
(?P<owner>[A-Za-z0-9_.\-]+)
/
(?P<repo>[A-Za-z0-9_.\-]+)
/ # mandatory subpath separator => cross-repo composite/reusable
(?P<path>[^@\s]+)
@
(?P<ref>\S+)
$""",
re.VERBOSE,
)
def _iter_uses(doc: Any) -> Iterable[str]:
"""Yield every `uses:` string from job steps in a workflow document."""
if not isinstance(doc, dict):
return
jobs = doc.get("jobs")
if not isinstance(jobs, dict):
return
for job in jobs.values():
if not isinstance(job, dict):
continue
# reusable workflow: `uses:` at the job level
if isinstance(job.get("uses"), str):
yield job["uses"]
steps = job.get("steps")
if not isinstance(steps, list):
continue
for step in steps:
if isinstance(step, dict) and isinstance(step.get("uses"), str):
yield step["uses"]
def check_cross_repo_uses(filename: str, doc: Any) -> list[str]:
    """Return per-violation error lines for cross-repo `uses:` references.

    Fix: interpolate `filename` into the `file=` annotation field (it was
    previously the literal text `(unknown)`, so the annotation never
    attached to the offending file).
    """
    errors: list[str] = []
    for uses in _iter_uses(doc):
        # Skip docker:// and local ./ and ../ forms — those never resolve
        # through DEFAULT_ACTIONS_URL.
        if uses.startswith(("docker://", "./", "../")):
            continue
        m = USES_CROSS_REPO_RE.match(uses.strip())
        if m:
            errors.append(
                f"::error file={filename}::Rule 5 (FATAL): cross-repo "
                f"`uses: {uses}` detected. Gitea 1.22.6 with "
                f"`[actions].DEFAULT_ACTIONS_URL=github` resolves this to "
                f"github.com/{m.group('owner')}/{m.group('repo')} which "
                f"404s (org suspended 2026-05-06). Inline the shared bash "
                f"into `.gitea/scripts/` until task #109 (actions mirror) "
                f"ships. Memory: feedback_gitea_cross_repo_uses_blocked."
            )
    return errors
# ---------------------------------------------------------------------------
# Rule 6 — heuristic: github.com/api refs without workflow-level
# GITHUB_SERVER_URL (WARN-not-FAIL per halt-condition 3)
# ---------------------------------------------------------------------------
# Match `https://api.github.com/...` (API call) — that's the actionable
# pattern. We intentionally do NOT match `https://github.com/.../releases/
# download/...` (jq-release pin) nor `https://github.com/${{ github.repository
# }}` (OCI label) because those are documented benign references on current
# main and would 100% false-positive (3 hits, per Phase 1 audit).
# IGNORECASE tolerates hand-typed casing variants of the host.
GITHUB_API_REF_RE = re.compile(
    r"https://api\.github\.com\b|https://github\.com/api/",
    re.IGNORECASE,
)
def _has_workflow_level_server_url(doc: Any) -> bool:
if not isinstance(doc, dict):
return False
env = doc.get("env")
if isinstance(env, dict) and "GITHUB_SERVER_URL" in env:
return True
return False
def check_github_server_url_missing(filename: str, doc: Any, raw: str) -> list[str]:
    """Return warn-lines (NOT errors) if api.github.com is referenced without
    workflow-level GITHUB_SERVER_URL. Heuristic — false-positives possible.

    Fix: interpolate `filename` into the `file=` annotation field (it was
    previously the literal text `(unknown)`).
    """
    warns: list[str] = []
    # Scan the raw text, not the parsed doc: the URL can appear anywhere
    # (run: scripts, with: args, comments).
    if not GITHUB_API_REF_RE.search(raw):
        return warns
    if _has_workflow_level_server_url(doc):
        return warns
    warns.append(
        f"::warning file={filename}::Rule 6 (WARN, heuristic): file "
        f"references `https://api.github.com` without a workflow-level "
        f"`env.GITHUB_SERVER_URL: https://git.moleculesai.app`. The "
        f"act_runner default for `${{{{ github.server_url }}}}` is "
        f"github.com, which can break actions that auth-condition on "
        f"server_url (e.g. actions/setup-go). If this curl is "
        f"intentionally hitting GitHub (e.g. public release pin), ignore. "
        f"Memory: feedback_act_runner_github_server_url."
    )
    return warns
# ---------------------------------------------------------------------------
# Driver
# ---------------------------------------------------------------------------
def main(argv: list[str] | None = None) -> int:
    """Lint every workflow under --workflow-dir.

    Returns 0 when clean (heuristic warnings allowed), 1 on any fatal
    rule violation or YAML parse failure.
    """
    parser = argparse.ArgumentParser(
        description="Lint Gitea Actions workflow YAML for 1.22.6-hostile shapes."
    )
    parser.add_argument(
        "--workflow-dir",
        default=".gitea/workflows",
        help="Directory of workflow *.yml files (default: .gitea/workflows).",
    )
    opts = parser.parse_args(argv)
    wf_dir = Path(opts.workflow_dir)
    if not wf_dir.exists():
        # Empty / missing dir = nothing to lint, not a failure.
        print(f"::notice::No workflow directory at {wf_dir}; skipping.")
        return 0
    yml_paths = sorted(
        glob.glob(str(wf_dir / "*.yml")) + glob.glob(str(wf_dir / "*.yaml"))
    )
    if not yml_paths:
        print(f"::notice::No workflow files in {wf_dir}; nothing to lint.")
        return 0
    fatal_errors: list[str] = []
    warnings: list[str] = []
    docs_by_file: dict[str, Any] = {}
    # Per-file fatal checks, applied in rule order (1, 2, 3, 5).
    per_file_checks = (
        check_workflow_dispatch_inputs,
        check_workflow_run_event,
        check_name_with_slash,
        check_cross_repo_uses,
    )
    for path in yml_paths:
        rel = os.path.relpath(path)
        try:
            raw = Path(path).read_text()
            doc = yaml.safe_load(raw)
        except yaml.YAMLError as e:
            fatal_errors.append(
                f"::error file={rel}::YAML parse error: {e}. Cannot lint "
                f"a file the parser rejects."
            )
            continue
        docs_by_file[rel] = doc
        for check in per_file_checks:
            fatal_errors.extend(check(rel, doc))
        # Rule 6 is heuristic and needs the raw text, not just the AST.
        warnings.extend(check_github_server_url_missing(rel, doc, raw))
    # Cross-file check (rule 4) runs over everything that parsed.
    fatal_errors.extend(check_name_collision_across_files(docs_by_file))
    # Warnings are non-blocking; emit them first.
    for w in warnings:
        print(w)
    if fatal_errors:
        print(
            f"::error::lint-workflow-yaml: {len(fatal_errors)} fatal violation(s) "
            f"across {len(yml_paths)} workflow file(s). See rule documentation "
            f"in .gitea/scripts/lint-workflow-yaml.py docstring."
        )
        for e in fatal_errors:
            print(e)
        return 1
    print(
        f"::notice::lint-workflow-yaml: {len(yml_paths)} workflow file(s) checked, "
        f"no fatal Gitea-1.22.6-hostile shapes. "
        f"({len(warnings)} heuristic warning(s) emitted.)"
    )
    return 0


if __name__ == "__main__":
    sys.exit(main())

View File

@ -1,509 +0,0 @@
#!/usr/bin/env python3
"""lint_bp_context_emit_match — Tier 2f per internal#350.
Rule
----
For a given protected branch, every context in
`branch_protections/<branch>.status_check_contexts` MUST be emitted
by at least one workflow in `.gitea/workflows/*.yml`. Two contexts
match when:
1. The workflow's `name:` equals the context's workflow-part (the
prefix before ` / `).
2. Some job in that workflow has a `name:` (or default-fallback
job-key) equal to the context's job-part (between ` / ` and
` (`).
3. The workflow's `on:` block includes the context's event-part
(in parens at the end), with Gitea's event-name mapping:
- `pull_request` and `pull_request_target` BOTH emit
`(pull_request)` contexts (verified empirically on
molecule-core/main).
- `push` emits `(push)`.
A BP context with no emitter blocks merges forever Gitea treats
absent-as-`pending`, NOT absent-as-`skipped`-as-`success`. This is
the phantom-required-check class
(`feedback_phantom_required_check_after_gitea_migration`).
The inverse direction (emitter without BP context) is INFORMATIONAL
only Tier 2g handles that direction at PR-time. Flagging it here
on a daily schedule would falsely surface every transitional state
during a BP rollout.
How the gate works
------------------
Daily scheduled run + workflow_dispatch:
1. GET `branch_protections/{BRANCH}` (needs DRIFT_BOT_TOKEN with
repo-admin scope; same persona as ci-required-drift.yml).
Graceful-degrade on 403/404 per Tier 2a contract.
2. Walk `.gitea/workflows/*.yml` via PyYAML AST. For each workflow,
enumerate its emitted contexts: `{workflow.name} / {job.name or
job-key} ({event})` for each event in `on:` that emits a status.
3. For each BP context, look for an emitter match. Aggregate
orphans.
4. If orphans exist:
- File or PATCH a `[ci-bp-drift]` issue (idempotency contract:
search for exact title prefix, edit existing if open).
- Apply labels `tier:high` + `ci-bp-drift` (lookup IDs per
repo; per `feedback_tier_label_ids_are_per_repo`).
- Exit 1.
5. If no orphans:
- Close any existing `[ci-bp-drift]` issue with a clean-state
comment.
- Exit 0.
Exit codes
----------
0 clean OR API 403/404 (graceful-degrade, surfaces ::error::).
1 at least one BP context has no emitter.
2 env contract violation, workflows-dir missing, or YAML parse
error.
Env
---
GITEA_TOKEN DRIFT_BOT_TOKEN (repo-admin for branch_protections)
GITEA_HOST e.g. git.moleculesai.app
REPO owner/name
BRANCH defaults to `main`
WORKFLOWS_DIR defaults to `.gitea/workflows`
DRIFT_LABEL defaults to `ci-bp-drift`
Memory cross-links
------------------
- internal#350 (the RFC that specs this lint)
- feedback_phantom_required_check_after_gitea_migration
- feedback_tier_label_ids_are_per_repo
- reference_post_suspension_pipeline
"""
from __future__ import annotations
import json
import os
import re
import sys
import urllib.error
import urllib.parse
import urllib.request
from pathlib import Path
from typing import Any
try:
import yaml
except ImportError:
sys.stderr.write(
"::error::PyYAML is required. Install with: pip install PyYAML\n"
)
sys.exit(2)
# Status-check context regex (mirrors lint-required-no-paths.py).
# Shape: `<workflow> / <job> (<event>)` — the workflow part is non-greedy
# so the FIRST ` / ` splits workflow from job; the event part excludes `)`.
_CONTEXT_RE = re.compile(
    r"^(?P<workflow>.+?) / (?P<job>.+) \((?P<event>[^)]+)\)$"
)
# Map a workflow `on:` event-key to the context's event-part. Gitea's
# emitter convention (verified on molecule-core):
#   - pull_request        → `(pull_request)`
#   - pull_request_target → `(pull_request)` (same surface)
#   - push                → `(push)`
#   - schedule            → no PR status; scheduled runs don't post
#     commit-statuses unless the workflow itself does so explicitly.
#   - workflow_dispatch   → manually dispatched runs may or may not
#     emit; safest to treat as "no PR status" (informational notice
#     only).
_EVENT_MAP = {
    "pull_request": "pull_request",
    "pull_request_target": "pull_request",
    "push": "push",
}
# ---------------------------------------------------------------------------
# Env
# ---------------------------------------------------------------------------
def _env(key: str, default: str | None = None) -> str:
v = os.environ.get(key, default)
return v if v is not None else ""
def _require_env(key: str) -> str:
v = os.environ.get(key)
if not v:
sys.stderr.write(f"::error::missing required env var: {key}\n")
sys.exit(2)
return v
# ---------------------------------------------------------------------------
# API helper. Mirrors lint-required-no-paths.py's contract: returns
# (status, payload) tuple with status ∈ {"ok", "not_found", "forbidden",
# "error"}.
# ---------------------------------------------------------------------------
def api(
    method: str,
    path: str,
    *,
    body: dict | None = None,
    query: dict[str, str] | None = None,
) -> tuple[str, Any]:
    """Call the Gitea REST API under /api/v1; never raises.

    Returns (status, payload): payload is decoded JSON on "ok" (None for
    an empty response body), None for every failure status.
    """
    url = f"https://{_env('GITEA_HOST')}/api/v1{path}"
    if query:
        url = f"{url}?{urllib.parse.urlencode(query)}"
    headers = {
        "Authorization": f"token {_env('GITEA_TOKEN')}",
        "Accept": "application/json",
    }
    payload = None
    if body is not None:
        payload = json.dumps(body).encode("utf-8")
        headers["Content-Type"] = "application/json"
    request = urllib.request.Request(
        url, method=method, data=payload, headers=headers
    )
    try:
        with urllib.request.urlopen(request, timeout=30) as resp:
            raw = resp.read()
        # Some endpoints (e.g. PATCH) legitimately return an empty body.
        return ("ok", json.loads(raw)) if raw else ("ok", None)
    except urllib.error.HTTPError as exc:
        if exc.code == 404:
            return ("not_found", None)
        if exc.code in (401, 403):
            return ("forbidden", None)
        return ("error", None)
    except (urllib.error.URLError, TimeoutError, json.JSONDecodeError):
        return ("error", None)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _get_on(d: Any) -> Any:
"""YAML 1.1 boolean quirk: bare `on:` may parse to True. Handle both."""
if not isinstance(d, dict):
return None
if "on" in d:
return d["on"]
if True in d:
return d[True]
return None
def _on_events(doc: Any) -> set[str]:
    """Return the set of event keys in a workflow's `on:` block.

    Accepts all three shapes (string / list / mapping). String/list
    shapes can't carry filters but they DO emit. Returns the
    Gitea-mapped event names per `_EVENT_MAP`.
    """
    on = _get_on(doc)
    if isinstance(on, str):
        raw_events = {on}
    elif isinstance(on, (list, dict)):
        # Iterating a mapping yields its keys; a list yields its items.
        raw_events = {e for e in on if isinstance(e, str)}
    else:
        raw_events = set()
    return {_EVENT_MAP[e] for e in raw_events if e in _EVENT_MAP}
def _job_display(jbody: dict, jkey: str) -> str:
"""Return job's `name:` if set, else fall back to the job-key.
Gitea formats status contexts with the job's `name:` when set;
when unset it uses the job key. Matches lint-required-no-paths
convention.
"""
n = jbody.get("name") if isinstance(jbody, dict) else None
if isinstance(n, str) and n:
return n
return jkey
def workflow_contexts(doc: Any) -> set[str]:
    """Return the set of status contexts a workflow emits.

    A context is `{workflow-name} / {job display} ({event})` for every
    job × every status-emitting event. Unnamed workflows, workflows with
    no emitting events, and malformed `jobs:` blocks emit nothing.
    """
    if not isinstance(doc, dict):
        return set()
    wf_name = doc.get("name")
    if not (isinstance(wf_name, str) and wf_name):
        return set()  # no name => no addressable context
    events = _on_events(doc)
    jobs = doc.get("jobs")
    if not events or not isinstance(jobs, dict):
        return set()
    return {
        f"{wf_name} / {_job_display(jbody, jkey)} ({ev})"
        for jkey, jbody in jobs.items()
        # "__lines__" tolerates line-tracking annotations added by loaders.
        if jkey != "__lines__" and isinstance(jbody, dict)
        for ev in events
    }
def parse_context(ctx: str) -> tuple[str, str, str] | None:
m = _CONTEXT_RE.match(ctx)
if not m:
return None
return (m.group("workflow"), m.group("job"), m.group("event"))
def _iter_workflow_files(wf_dir: Path) -> list[Path]:
return sorted(list(wf_dir.glob("*.yml")) + list(wf_dir.glob("*.yaml")))
# ---------------------------------------------------------------------------
# Issue idempotency — search for an open issue with the canonical
# title prefix; PATCH if found, POST if not. Mirrors ci-required-drift.
# ---------------------------------------------------------------------------
def _canonical_title(repo: str, branch: str) -> str:
return f"[ci-bp-drift] {repo}/{branch}: BP→emitter mismatch"
def _ensure_labels(repo: str, names: list[str]) -> list[int]:
    """Map label *names* to their IDs in *repo*; unknown names are dropped.

    Label IDs are per-repo (feedback_tier_label_ids_are_per_repo), so
    they must be looked up each time, never hardcoded. Returns [] when
    the label listing fails.
    """
    status, labels = api("GET", f"/repos/{repo}/labels", query={"limit": "50"})
    if status != "ok" or not isinstance(labels, list):
        return []
    id_by_name = {
        entry["name"]: entry["id"] for entry in labels if isinstance(entry, dict)
    }
    return [id_by_name[n] for n in names if n in id_by_name]
def file_or_update_issue(
    repo: str, branch: str, orphans: list[str], emitter_orphans: list[str]
) -> None:
    """File (or update in place) the canonical `[ci-bp-drift]` tracking issue.

    Idempotency contract: search open issues for the canonical title and
    PATCH the first prefix-match; only POST a fresh issue when none
    exists. Labels are looked up per-repo and applied best-effort.
    """
    title = _canonical_title(repo, branch)
    # Markdown body: the orphan-BP section is always present; the
    # emitter-orphan section is informational and only added when non-empty.
    body_lines = [
        f"BP→emitter drift detected on `{branch}` at "
        f"{os.environ.get('GITHUB_RUN_URL', '(run url unavailable)')}.",
        "",
        f"## Orphan BP contexts ({len(orphans)})",
        "",
        "These contexts are required by branch protection but NO workflow "
        "emits them. PRs merging into this branch will wait forever for a "
        "status that never arrives (Gitea treats absent-as-`pending`, NOT "
        "absent-as-`skipped`). See "
        "`feedback_phantom_required_check_after_gitea_migration`.",
        "",
    ]
    for o in orphans:
        body_lines.append(f"- `{o}`")
    if emitter_orphans:
        body_lines += [
            "",
            f"## Workflows emitting contexts NOT in BP ({len(emitter_orphans)})",
            "",
            "Informational — Tier 2g handles this direction at PR-time. "
            "Listed here for completeness.",
            "",
        ]
        for o in emitter_orphans:
            body_lines.append(f"- `{o}`")
    body_lines += [
        "",
        "Fix options:",
        " 1. PATCH `branch_protections/{branch}.status_check_contexts` "
        " to remove the orphan.",
        " 2. Restore the emitting workflow (if it was deleted/renamed).",
        "",
        "Linted by `.gitea/workflows/lint-bp-context-emit-match.yml` "
        "(Tier 2f, internal#350).",
    ]
    body = "\n".join(body_lines)
    # Idempotency search — find an open issue with the canonical title.
    status, hits = api(
        "GET",
        f"/repos/{repo}/issues",
        query={
            "type": "issues",
            "state": "open",
            "q": title,
        },
    )
    existing = None
    if status == "ok" and isinstance(hits, list):
        for h in hits:
            # `q` is a fuzzy search; require an exact title-prefix match.
            if (
                isinstance(h, dict)
                and h.get("state") == "open"
                and isinstance(h.get("title"), str)
                and h["title"].startswith(title)
            ):
                existing = h
                break
    label_ids = _ensure_labels(repo, ["ci-bp-drift", "tier:high"])
    if existing:
        # Omitting `labels` from the PATCH body leaves existing labels
        # untouched when the label lookup failed.
        api(
            "PATCH",
            f"/repos/{repo}/issues/{existing['number']}",
            body={"body": body, "labels": label_ids} if label_ids else {"body": body},
        )
        print(
            f"::notice::Updated existing drift issue "
            f"#{existing['number']}: {existing.get('html_url', '')}"
        )
    else:
        status, posted = api(
            "POST",
            f"/repos/{repo}/issues",
            body={"title": title, "body": body, "labels": label_ids},
        )
        if status == "ok" and isinstance(posted, dict):
            print(
                f"::notice::Filed new drift issue "
                f"#{posted.get('number')}: {posted.get('html_url', '')}"
            )
# ---------------------------------------------------------------------------
# Driver
# ---------------------------------------------------------------------------
def run() -> int:
    """Lint BP-required contexts against workflow-emitted contexts.

    Returns 0 when clean or on graceful-degrade (403/404/transient API
    failure), 1 when at least one BP context has no emitter, 2 on an
    env/workflows-dir contract violation.
    """
    _require_env("GITEA_TOKEN")
    _require_env("GITEA_HOST")
    repo = _require_env("REPO")
    branch = _env("BRANCH", "main")
    wf_dir = Path(_env("WORKFLOWS_DIR", ".gitea/workflows"))
    if not wf_dir.is_dir():
        sys.stderr.write(f"::error::workflows directory not found: {wf_dir}\n")
        return 2
    # 1. Pull BP.
    status, bp = api("GET", f"/repos/{repo}/branch_protections/{branch}")
    if status == "forbidden":
        # Token-scope problem, not drift — surface loudly but don't red-X.
        sys.stderr.write(
            f"::error::GET branch_protections/{branch} returned HTTP 403 — "
            f"DRIFT_BOT_TOKEN lacks repo-admin scope (Gitea 1.22.6 requires "
            f"it for this endpoint). Skipping lint with exit 0 to avoid "
            f"red-X on every run. Fix: grant repo-admin to mc-drift-bot. "
            f"Per Tier 2a contract.\n"
        )
        return 0
    if status == "not_found":
        print(
            f"::notice::branch '{branch}' has no protection configured; "
            f"nothing to lint."
        )
        return 0
    if status != "ok" or not isinstance(bp, dict):
        sys.stderr.write(
            f"::error::branch_protections/{branch} response unexpected; "
            f"status={status}. Treating as transient; exit 0.\n"
        )
        return 0
    bp_contexts: list[str] = list(bp.get("status_check_contexts") or [])
    if not bp_contexts:
        print(
            f"::notice::branch_protections/{branch} has 0 required "
            f"status_check_contexts; nothing to lint."
        )
        return 0
    # 2. Enumerate emitter contexts from all workflows.
    all_emitter: set[str] = set()
    for path in _iter_workflow_files(wf_dir):
        try:
            doc = yaml.safe_load(path.read_text(encoding="utf-8"))
        except yaml.YAMLError as e:
            # An unparseable workflow can't emit anything enumerable.
            sys.stderr.write(
                f"::error file={path}::YAML parse error: {e}; skipping.\n"
            )
            continue
        all_emitter |= workflow_contexts(doc)
    print(
        f"::notice::Linting {len(bp_contexts)} BP context(s) for {branch} "
        f"against {len(all_emitter)} workflow-emitted context(s)."
    )
    bp_set = set(bp_contexts)
    # 3. Find orphans (BP-side: required but no emitter).
    bp_orphans = sorted(bp_set - all_emitter)
    # Informational: workflow emits but BP doesn't list. Tier 2g
    # territory at PR-time. We list these as NOTICE only.
    emitter_orphans = sorted(all_emitter - bp_set)
    if bp_orphans:
        print(
            f"::error::Found {len(bp_orphans)} BP context(s) with no "
            f"emitter — these would block merges forever (Gitea treats "
            f"absent-as-pending, not skipped):"
        )
        for o in bp_orphans:
            # Closest-match hint: name a workflow whose name-part is a
            # near-match (lev-1 typo, or same workflow with a different
            # event).
            parsed = parse_context(o)
            hint = ""
            if parsed:
                wf, _job, _ev = parsed
                candidates = sorted(
                    {c for c in all_emitter if c.startswith(wf + " / ")}
                )
                if candidates:
                    hint = (
                        f" — closest emitter(s): {', '.join(candidates[:3])}"
                    )
            print(f"::error:: - {o}{hint}")
        if emitter_orphans:
            print(
                f"::notice::Also: {len(emitter_orphans)} workflow-emitted "
                f"context(s) not in BP (informational; Tier 2g handles at "
                f"PR-time):"
            )
            for o in emitter_orphans:
                print(f"::notice:: - {o}")
        # File / patch tracking issue.
        try:
            file_or_update_issue(repo, branch, bp_orphans, emitter_orphans)
        except Exception as e:
            # Best-effort: drift is already on the log; a failed issue
            # write must not mask the exit-1 signal below.
            sys.stderr.write(
                f"::error::failed to file drift issue: {e}\n"
            )
        return 1
    if emitter_orphans:
        print(
            f"::notice::{len(emitter_orphans)} workflow-emitted context(s) "
            f"not in BP (informational; Tier 2g handles at PR-time):"
        )
        for o in emitter_orphans:
            print(f"::notice:: - {o}")
    print(
        f"::notice::BP/emitter match clean: all {len(bp_contexts)} required "
        f"context(s) have an emitter."
    )
    return 0


if __name__ == "__main__":
    sys.exit(run())

View File

@ -1,438 +0,0 @@
#!/usr/bin/env python3
"""lint_continue_on_error_tracking — Tier 2e per internal#350.
Rule
----
Every `continue-on-error: true` directive in `.gitea/workflows/*.yml`
must be accompanied by a tracker reference comment within 2 lines
(above OR below the directive's line). The reference is one of:
* `# mc#NNNN` — molecule-core issue
* `# internal#NNNN` — molecule-ai/internal issue
The referenced issue must satisfy ALL of:
1. Exists (HTTP 200 on `/repos/{owner}/{name}/issues/{num}`)
2. `state == "open"`
3. `created_at` is MAX_AGE_DAYS days ago (default 14)
A passing reference establishes an audit trail and a forced renewal
cadence after 14 days the issue must either be CLOSED (the masked
defect was fixed) or the comment must point at a NEW tracker
(deliberate decision to keep masking, requires a paper-trail).
The class this prevents
-----------------------
Phase-3-masked failures. `continue-on-error: true` on `platform-build`
had been hiding mc#664-class regressions for ~3 weeks before #656
surfaced them on 2026-05-12. A 14-day cap forces a tracker review
cycle and surfaces mask-drift within at most 14 days of the original
defect.
Behaviour-based gate
--------------------
We parse via PyYAML AST (per `feedback_behavior_based_ast_gates`) to
detect `continue-on-error: <truthy>` at job-key level, then map each
location back to its source line via PyYAML's line-tracking loader.
Comments are scanned from the raw text within a 2-line window of
that source line. Reformatting (block-scalar vs flow-style) does not
break the rule because the source-line anchor is the directive's
own line.
Exit codes
----------
0 every `continue-on-error: true` has a passing tracker, OR
the issue-API endpoint returned 403/404 (token-scope; graceful
degrade per Tier 2a contract surface via ::error:: on stderr
but don't red-X every PR over auth).
1 at least one violation (missing/closed/too-old/non-existent
tracker).
2 env contract violation, YAML parse error, or workflows-dir
missing.
Env
---
GITEA_TOKEN read scope on the configured repos.
Auto-injected `GITHUB_TOKEN` works for same-repo
issue reads; for `internal#NNN` we need a token
with `molecule-ai/internal` read scope. Use
DRIFT_BOT_TOKEN (same persona as other Tier 2
lints).
GITEA_HOST e.g. git.moleculesai.app
REPO `owner/name` for `mc#NNNN` lookups
INTERNAL_REPO `owner/name` for `internal#NNNN` lookups
(defaults to derived `molecule-ai/internal`)
WORKFLOWS_DIR defaults to `.gitea/workflows`
MAX_AGE_DAYS defaults to 14
Memory cross-links
------------------
- internal#350 (the RFC that specs this lint)
- mc#664 (the masked-3-weeks empirical case)
- feedback_chained_defects_in_never_tested_workflows
- feedback_behavior_based_ast_gates
- feedback_strict_root_only_after_class_a
"""
from __future__ import annotations
import json
import os
import re
import sys
import urllib.error
import urllib.parse
import urllib.request
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any
try:
import yaml
except ImportError:
sys.stderr.write(
"::error::PyYAML is required. Install with: pip install PyYAML\n"
)
sys.exit(2)
# ---------------------------------------------------------------------------
# Tracker comment regex.
# Matches: `# mc#1234`, `# internal#42`, `# mc#1234 - description`
# Also matches trackers embedded mid-sentence: `# see mc#1234 for details`
# Does NOT match: `# mc1234` (missing inner #), `mc#1234` (no leading
# comment `#`), `# MC#1234` (case-sensitive). The search is line-wide,
# not just at the comment-marker prefix — fixes false-negative when
# the tracker appears mid-sentence (e.g. `internal#350` after prose).
# NOTE: the pattern itself does not require a `#` comment marker; the
# caller is expected to apply it only to comment text.
TRACKER_RE = re.compile(
    r"(?P<slug>mc|internal)#(?P<num>\d+)\b"
)
# Truthy continue-on-error values we treat as "true". PyYAML decodes
# `continue-on-error: true` to Python `True`. `continue-on-error: "true"`
# decodes to the string "true" — Gitea's evaluator coerces strings,
# so we treat string-`"true"` (case-insensitive) as truthy too.
def _is_truthy_coe(v: Any) -> bool:
if v is True:
return True
if isinstance(v, str) and v.strip().lower() == "true":
return True
return False
# ---------------------------------------------------------------------------
# Env contract
# ---------------------------------------------------------------------------
def _env(key: str, default: str | None = None) -> str:
v = os.environ.get(key, default)
return v if v is not None else ""
def _require_env(key: str) -> str:
v = os.environ.get(key)
if not v:
sys.stderr.write(f"::error::missing required env var: {key}\n")
sys.exit(2)
return v
# ---------------------------------------------------------------------------
# PyYAML line-tracking loader. yaml.SafeLoader nodes carry
# `start_mark.line` (0-based); using construct_mapping with `deep=True`
# preserves that on every node. We need the line of each
# `continue-on-error` key so we can scan the source for comments
# near it.
# ---------------------------------------------------------------------------
class _LineLoader(yaml.SafeLoader):
    """SafeLoader whose mappings carry per-key source lines.

    The mapping constructor registered for this loader annotates every
    parsed dict with a reserved `__line__`-style key (`__lines__`)
    mapping each plain key to its 1-based source line, letting callers
    locate `continue-on-error` in the raw workflow text.
    """
def _construct_mapping(loader: yaml.SafeLoader, node: yaml.MappingNode) -> dict:
    """Mapping constructor for `_LineLoader`.

    Builds the mapping through the stock SafeLoader path, then records
    the 1-based source line of every plain (str/int/bool) key under the
    reserved `__lines__` key so later passes can locate
    `continue-on-error` in the raw text.
    """
    mapping = loader.construct_mapping(node, deep=True)
    # Annotate per-key source lines so we can locate `continue-on-error`.
    lines: dict[str, int] = {}
    for k_node, _v_node in node.value:
        try:
            key = loader.construct_object(k_node, deep=True)
        except Exception:
            # Unconstructable key (exotic tag) — skip it; losing its line
            # is harmless because we only ever look up plain string keys.
            continue
        if isinstance(key, (str, int, bool)):
            # str() the key: YAML 1.1 parses bare `on`/`yes` as bools and
            # numeric keys can appear; normalise all of them to strings.
            lines[str(key)] = k_node.start_mark.line + 1  # 1-based
    if isinstance(mapping, dict):
        mapping["__lines__"] = lines
    return mapping
# Register for the default mapping tag so EVERY dict parsed via
# `_LineLoader` gets the `__lines__` annotation.
_LineLoader.add_constructor(
    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _construct_mapping
)
# ---------------------------------------------------------------------------
# Issue lookup
# ---------------------------------------------------------------------------
def fetch_issue(slug_kind: str, num: int) -> tuple[str, dict | None]:
    """Fetch issue *num* from the repo that *slug_kind* maps to.

    `slug_kind` is "mc" (REPO) or "internal" (INTERNAL_REPO).
    Returns `(status, payload_or_none)` with status in
    {"ok", "not_found", "forbidden", "error"}.
    """
    repo = _env("REPO") if slug_kind == "mc" else _env("INTERNAL_REPO")
    if not repo:
        # No repo configured — degrade exactly like a 403 (token-scope
        # problem); the caller treats "forbidden" as a non-fatal skip.
        return ("forbidden", None)
    url = f"https://{_env('GITEA_HOST')}/api/v1/repos/{repo}/issues/{num}"
    request = urllib.request.Request(
        url,
        headers={
            "Authorization": f"token {_env('GITEA_TOKEN')}",
            "Accept": "application/json",
        },
    )
    try:
        with urllib.request.urlopen(request, timeout=20) as resp:
            return ("ok", json.loads(resp.read()))
    except urllib.error.HTTPError as exc:
        if exc.code == 404:
            return ("not_found", None)
        if exc.code in (401, 403):
            return ("forbidden", None)
        return ("error", None)
    except (urllib.error.URLError, TimeoutError, json.JSONDecodeError):
        return ("error", None)
# ---------------------------------------------------------------------------
# Locate every continue-on-error: <truthy> in a workflow doc, with line.
# ---------------------------------------------------------------------------
def find_coe_truthies(
    doc: Any, raw_lines: list[str]
) -> list[tuple[str, int]]:
    """Return (job_key, source_line_1based) for every truthy job-level CoE.

    `doc` is the LineLoader-parsed mapping; only `jobs.<key>` entries
    whose value is truthy per `_is_truthy_coe` are returned.

    Step-level continue-on-error is intentionally ignored: it only
    suppresses step-level failure rollup, not the job-level status —
    and the masking class this lint targets is the job-level rollup.
    """
    results: list[tuple[str, int]] = []
    jobs = doc.get("jobs") if isinstance(doc, dict) else None
    if not isinstance(jobs, dict):
        return results
    for job_key, body in jobs.items():
        # `__lines__` is the loader's bookkeeping key, not a real job.
        if job_key == "__lines__" or not isinstance(body, dict):
            continue
        if not _is_truthy_coe(body.get("continue-on-error")):
            continue
        line_no = body.get("__lines__", {}).get("continue-on-error")
        if not line_no:
            # The line-tracking loader should never miss, but fall back
            # to grepping the raw text just in case.
            line_no = _grep_first_coe_line(raw_lines, job_key) or 1
        results.append((str(job_key), int(line_no)))
    return results
def _grep_first_coe_line(raw_lines: list[str], jkey: str) -> int | None:
"""Fallback: find the first `continue-on-error:` line after a `jkey:` line."""
saw_job = False
for i, line in enumerate(raw_lines, start=1):
if re.match(rf"^\s*{re.escape(jkey)}\s*:", line):
saw_job = True
continue
if saw_job and "continue-on-error" in line:
return i
return None
# ---------------------------------------------------------------------------
# Scan window for tracker comment
# ---------------------------------------------------------------------------
WINDOW = 2  # lines above OR below the directive's line (inclusive)
def find_tracker_in_window(
    raw_lines: list[str], line_1based: int
) -> tuple[str, int] | None:
    """Return (slug, num) if a `# mc#NNN`/`# internal#NNN` appears
    in raw_lines within ±WINDOW lines of `line_1based`. None otherwise.
    We scan the directive's own line (it may carry an inline comment
    like `continue-on-error: true # mc#3`) plus ±WINDOW.
    """
    lo = max(1, line_1based - WINDOW)
    hi = min(len(raw_lines), line_1based + WINDOW)
    for i in range(lo, hi + 1):
        line = raw_lines[i - 1]
        # NOTE: TRACKER_RE is searched against the WHOLE line, not just
        # the comment portion after `#` — so trailing-inline comments on
        # the directive line match, and so does a tracker embedded
        # mid-sentence (or even outside a comment; see TRACKER_RE note).
        m = TRACKER_RE.search(line)
        if m:
            return (m.group("slug"), int(m.group("num")))
    return None
# ---------------------------------------------------------------------------
# Tracker validation
# ---------------------------------------------------------------------------
def validate_tracker(
    slug: str, num: int, max_age_days: int
) -> tuple[bool, str]:
    """Check that tracker `slug#num` exists, is open, and is recent enough.

    Returns (ok?, reason). A 403 or a transport error degrades
    gracefully — ok=True with an explanatory reason — so a broken token
    never paints every PR red (same contract as Tier 2a).
    """
    status, payload = fetch_issue(slug, num)
    if status == "forbidden":
        sys.stderr.write(
            f"::error::issue {slug}#{num} unreadable (HTTP 403 — token "
            f"scope). Cannot validate; skipping this check to avoid "
            f"red-X on every PR. Fix the token, not the lint.\n"
        )
        return (True, "forbidden — skipped")
    if status == "not_found":
        return (False, f"{slug}#{num} does not exist (404)")
    if status == "error":
        sys.stderr.write(
            f"::error::issue {slug}#{num} fetch errored — treating as "
            f"unverified, skipping this check.\n"
        )
        return (True, "fetch-error — skipped")
    assert payload is not None
    state = payload.get("state", "")
    if state != "open":
        return (False, f"{slug}#{num} state={state!r} (must be open)")
    created = payload.get("created_at", "")
    try:
        # Gitea emits ISO-8601 with a timezone. Python ≥3.11 parses the
        # `Z` suffix natively; the replace() keeps older runtimes working.
        created_dt = datetime.fromisoformat(created.replace("Z", "+00:00"))
    except ValueError:
        return (False, f"{slug}#{num} created_at unparseable: {created!r}")
    age_days = (datetime.now(timezone.utc) - created_dt).days
    # Inclusive boundary at max_age_days: `.days` floors to whole days,
    # so 14d 0h 5m → 14 (passes) while 15d 0h 0m → 15 (fails). This is
    # the internal#350 "≤14 days old" convention.
    if age_days > max_age_days:
        return (
            False,
            f"{slug}#{num} is {age_days} days old (>{max_age_days}d cap). "
            f"Close-or-renew the tracker.",
        )
    return (True, f"{slug}#{num} open, {age_days}d old, ≤{max_age_days}d")
# ---------------------------------------------------------------------------
# Driver
# ---------------------------------------------------------------------------
def _iter_workflow_files(wf_dir: Path) -> list[Path]:
return sorted(list(wf_dir.glob("*.yml")) + list(wf_dir.glob("*.yaml")))
def run() -> int:
    """Lint driver: require a valid tracker for every job-level
    `continue-on-error: true` across all workflow files.

    Returns 0 (clean or nothing to lint), 1 (violations found),
    2 (workflows directory missing).
    """
    wf_dir = Path(_env("WORKFLOWS_DIR", ".gitea/workflows"))
    max_age = int(_env("MAX_AGE_DAYS", "14"))
    # Defaults for INTERNAL_REPO when unset (best-effort guess based on
    # the convention `mc#` = same repo, `internal#` = molecule-ai/internal).
    if not os.environ.get("INTERNAL_REPO"):
        os.environ["INTERNAL_REPO"] = "molecule-ai/internal"
    if not wf_dir.is_dir():
        sys.stderr.write(
            f"::error::workflows directory not found: {wf_dir}\n"
        )
        return 2
    yml_files = _iter_workflow_files(wf_dir)
    if not yml_files:
        print(f"::notice::no workflow files under {wf_dir}; nothing to lint.")
        return 0
    violations: list[str] = []
    notices: list[str] = []
    total_coe_true = 0  # count of truthy directives seen, for the summary
    for path in yml_files:
        raw = path.read_text(encoding="utf-8")
        raw_lines = raw.splitlines()
        try:
            # _LineLoader annotates every mapping with per-key source lines.
            doc = yaml.load(raw, Loader=_LineLoader)
        except yaml.YAMLError as e:
            # Invalid YAML is a different lint's problem; warn and move on.
            sys.stderr.write(
                f"::error file={path}::YAML parse error: {e}. Skipping "
                f"this file (lint-workflow-yaml will catch separately).\n"
            )
            continue
        coe_locs = find_coe_truthies(doc, raw_lines)
        for jkey, line in coe_locs:
            total_coe_true += 1
            tracker = find_tracker_in_window(raw_lines, line)
            if tracker is None:
                # No `# mc#NNN` / `# internal#NNN` near the directive.
                violations.append(
                    f"::error file={path},line={line}::lint-continue-on-error-"
                    f"tracking (Tier 2e): job '{jkey}' has "
                    f"`continue-on-error: true` at line {line} with no "
                    f"`# mc#NNNN` or `# internal#NNNN` tracker comment "
                    f"within {WINDOW} lines. Add a tracker reference so "
                    f"this mask has a forced 14-day renewal cycle. "
                    f"Memory: feedback_chained_defects_in_never_tested_workflows."
                )
                continue
            slug, num = tracker
            # Tracker found — verify it is open and within the age cap.
            ok, reason = validate_tracker(slug, num, max_age)
            if ok:
                notices.append(
                    f"::notice::{path.name} job '{jkey}' (line {line}): "
                    f"{reason}"
                )
            else:
                violations.append(
                    f"::error file={path},line={line}::lint-continue-on-error-"
                    f"tracking (Tier 2e): job '{jkey}' "
                    f"`continue-on-error: true` references {slug}#{num}, "
                    f"but {reason}. FIX: close/fix the underlying defect "
                    f"and flip continue-on-error: false, OR file a fresh "
                    f"tracker and update the comment."
                )
    for n in notices:
        print(n)
    if violations:
        print(
            f"::error::lint-continue-on-error-tracking: "
            f"{len(violations)} violation(s) across {len(yml_files)} "
            f"workflow file(s) (of {total_coe_true} `continue-on-error: "
            f"true` directives in total)."
        )
        for v in violations:
            print(v)
        return 1
    print(
        f"::notice::lint-continue-on-error-tracking: "
        f"all {total_coe_true} `continue-on-error: true` directive(s) "
        f"have valid trackers (open, ≤{max_age}d old)."
    )
    return 0
if __name__ == "__main__":
    sys.exit(run())

View File

@ -1,361 +0,0 @@
#!/usr/bin/env python3
"""lint_mask_pr_atomicity — Tier 2d structural enforcement per internal#350.
Rule
----
A PR whose diff touches `.gitea/workflows/ci.yml` AND modifies EITHER:
- any `continue-on-error:` value, OR
- the `all-required` sentinel job's `needs:` block
must EITHER:
- Touch BOTH atomically in the same PR (preferred), OR
- Cross-link the paired PR via a literal `Paired: #NNN` reference in
the PR body OR in any commit message between BASE_SHA and HEAD_SHA.
The class this prevents
-----------------------
PR#665 (interim `continue-on-error: true` on `platform-build`) and
PR#668 (sentinel-`needs` demotion of the same job) were designed as a
pair but merged solo #665 landed at 04:47Z 2026-05-12, #668 was still
open at 05:07Z when the main-red watchdog (#674) fired. Result: ~20
minutes of `main` red and a cascade of false-positives on unrelated PRs.
The lint operates on the YAML AST (PyYAML), not grep, per
`feedback_behavior_based_ast_gates`: a refactor that moves `continue-on-error`
between job keys, or renames the `all-required` job, would still be
detected because we walk the parsed structure.
Why this works on Gitea 1.22.6
------------------------------
We don't use any 1.22.6-missing endpoints (no `/actions/runs/*`, no
`branch_protections/*` Tier 2f/g need those; Tier 2d does not). All
required inputs come from the workflow `pull_request` event payload
(BASE_SHA, HEAD_SHA, PR_BODY) and from local git via `git show`/`git log`.
The auto-injected `GITHUB_TOKEN` is enough; we don't need
DRIFT_BOT_TOKEN.
Exit codes
----------
0 ci.yml not in diff, OR diff is no-op for the rule predicates,
OR atomicity satisfied (both touched), OR a valid `Paired: #NNN`
reference is present.
1 exactly ONE of {coe, sentinel-needs} touched AND no valid
`Paired: #NNN` reference. The split-pair regression class.
2 env contract violation (BASE_SHA / HEAD_SHA missing) or YAML
parse error on either side.
Env
---
BASE_SHA PR base (pull_request.base.sha)
HEAD_SHA PR head (pull_request.head.sha)
PR_BODY pull_request.body (may be empty)
CI_WORKFLOW_PATH defaults to `.gitea/workflows/ci.yml`
SENTINEL_JOB_KEY defaults to `all-required`
Memory cross-links
------------------
- internal#350 (the RFC that specs this lint)
- PR#665 / PR#668 (the empirical split-pair)
- mc#664 (the main-red incident)
- feedback_strict_root_only_after_class_a
- feedback_behavior_based_ast_gates
"""
from __future__ import annotations
import os
import re
import subprocess
import sys
from typing import Any
try:
import yaml
except ImportError:
sys.stderr.write(
"::error::PyYAML is required. Install with: pip install PyYAML\n"
)
sys.exit(2)
# ---------------------------------------------------------------------------
# YAML quirk: bare `on:` at the top level becomes Python `True` because
# `on` is a YAML 1.1 boolean. Not used here but documented for future
# editors who copy from this module.
# ---------------------------------------------------------------------------
# `Paired: #NNN` reference. `#` is mandatory, NNN must be digits. Any
# surrounding markdown/whitespace is fine. The match is case-sensitive
# on `Paired:` because lower-case `paired:` collides with conversational
# prose ("paired: see comment above") and the convention is the exact
# capitalisation.
# Matches:       "Paired: #668", "Paired:#668 (sentinel demotion)"
# Does NOT match: "paired: #668" (case), "Paired: 668" (missing `#`).
PAIRED_RE = re.compile(r"\bPaired:\s*#(?P<num>\d+)\b")
# ---------------------------------------------------------------------------
# Env contract
# ---------------------------------------------------------------------------
def _env(key: str, default: str | None = None) -> str:
v = os.environ.get(key, default)
return v if v is not None else ""
def _require_env(key: str) -> str:
v = os.environ.get(key)
if not v:
sys.stderr.write(f"::error::missing required env var: {key}\n")
sys.exit(2)
return v
# ---------------------------------------------------------------------------
# git-show helper. Returns None when the path doesn't exist on that side
# (new file, deleted file, or rename — git returns exit 128 with "fatal:
# path not in tree"). We treat None as "no rule predicate triggered on
# that side".
# ---------------------------------------------------------------------------
def git_show(sha: str, path: str) -> str | None:
    """Content of `path` at `sha` via `git show`, or None on any git failure."""
    proc = subprocess.run(
        ["git", "show", f"{sha}:{path}"],
        capture_output=True,
        text=True,
    )
    return proc.stdout if proc.returncode == 0 else None
def git_log_messages(base_sha: str, head_sha: str) -> str:
    """Concatenated commit messages in `base..head` ("" on git failure)."""
    proc = subprocess.run(
        ["git", "log", "--format=%B", f"{base_sha}..{head_sha}"],
        capture_output=True,
        text=True,
    )
    return proc.stdout if proc.returncode == 0 else ""
def git_diff_paths(base_sha: str, head_sha: str) -> list[str]:
    """Paths changed between the two SHAs ([] on git failure)."""
    proc = subprocess.run(
        ["git", "diff", "--name-only", f"{base_sha}..{head_sha}"],
        capture_output=True,
        text=True,
    )
    if proc.returncode != 0:
        return []
    return [line for line in proc.stdout.splitlines() if line.strip()]
# ---------------------------------------------------------------------------
# Predicate 1 — any `continue-on-error` value changed between base and head
# ---------------------------------------------------------------------------
def _collect_coe(doc: Any) -> dict[str, Any]:
"""Walk every job in `jobs.*` and collect its continue-on-error value.
Returns a dict {job_key: coe_value}. Missing keys are absent from
the dict (NOT `False` distinguishes "added the key" from
"unchanged absent"). Job-step `continue-on-error` is NOT considered
only job-level, because that's the value that masks job status
rollup, which is the class this lint targets.
"""
out: dict[str, Any] = {}
if not isinstance(doc, dict):
return out
jobs = doc.get("jobs")
if not isinstance(jobs, dict):
return out
for k, j in jobs.items():
if not isinstance(j, dict):
continue
if "continue-on-error" in j:
out[k] = j["continue-on-error"]
return out
def coe_changed(base_doc: Any, head_doc: Any) -> tuple[bool, list[str]]:
    """Return (changed?, [reasons]) describing per-job coe diffs."""
    base = _collect_coe(base_doc)
    head = _collect_coe(head_doc)
    reasons: list[str] = []
    # Union of job keys from both sides catches additions, removals, and
    # edits of `continue-on-error` alike; "<absent>" marks a missing key.
    all_keys = set(base) | set(head)
    for k in sorted(all_keys):
        b = base.get(k, "<absent>")
        h = head.get(k, "<absent>")
        if b != h:
            reasons.append(f"job '{k}' continue-on-error: {b!r}{h!r}")
    return (bool(reasons), reasons)
# ---------------------------------------------------------------------------
# Predicate 2 — sentinel job's `needs:` changed
# ---------------------------------------------------------------------------
def _collect_needs(doc: Any, sentinel_key: str) -> list[str] | None:
"""Return the sentinel job's needs list (sorted) or None if absent."""
if not isinstance(doc, dict):
return None
jobs = doc.get("jobs")
if not isinstance(jobs, dict):
return None
j = jobs.get(sentinel_key)
if not isinstance(j, dict):
return None
needs = j.get("needs")
if needs is None:
return []
if isinstance(needs, str):
return [needs]
if isinstance(needs, list):
# Sort because `needs:` is order-insensitive at the engine
# level; a reorder is not a semantic change and shouldn't
# trip the lint.
return sorted(str(x) for x in needs)
return None
def sentinel_needs_changed(
    base_doc: Any, head_doc: Any, sentinel_key: str
) -> tuple[bool, str]:
    """Return (changed?, reason).

    Compares the sorted `needs:` list of the sentinel job on both sides;
    `None` (job absent) vs a list also counts as a change.
    """
    base = _collect_needs(base_doc, sentinel_key)
    head = _collect_needs(head_doc, sentinel_key)
    if base == head:
        return (False, "")
    return (
        True,
        f"sentinel '{sentinel_key}'.needs: {base!r}{head!r}",
    )
# ---------------------------------------------------------------------------
# Predicate 3 — `Paired: #NNN` present in body or any commit message
# ---------------------------------------------------------------------------
def find_paired_refs(pr_body: str, commit_log: str) -> list[str]:
    """Return list of `#NNN` strings found (deduped, sorted)."""
    hits = {
        match.group("num")
        for text in (pr_body, commit_log)
        for match in PAIRED_RE.finditer(text or "")
    }
    return sorted(hits)
# ---------------------------------------------------------------------------
# Driver
# ---------------------------------------------------------------------------
def _parse(content: str | None, label: str) -> Any:
    """yaml.safe_load wrapper: None passes through; parse errors exit 2.

    *label* identifies which side (path@sha) failed in the ::error::
    annotation.
    """
    if content is None:
        return None
    try:
        return yaml.safe_load(content)
    except yaml.YAMLError as e:
        sys.stderr.write(f"::error::YAML parse error on {label}: {e}\n")
        sys.exit(2)
def run() -> int:
    """Tier 2d driver: enforce coe/sentinel-needs atomicity on ci.yml PRs.

    Returns 0 (rule not applicable, atomic change, or valid `Paired:`
    reference), 1 (split-pair regression class), 2 via _require_env/_parse
    on env-contract or YAML-parse failure.
    """
    base_sha = _require_env("BASE_SHA")
    head_sha = _require_env("HEAD_SHA")
    pr_body = _env("PR_BODY", "")
    ci_path = _env("CI_WORKFLOW_PATH", ".gitea/workflows/ci.yml")
    sentinel_key = _env("SENTINEL_JOB_KEY", "all-required")
    # Step 0 — is ci.yml even in the diff? If not, the lint doesn't apply.
    changed_paths = git_diff_paths(base_sha, head_sha)
    if ci_path not in changed_paths:
        print(
            f"::notice::{ci_path} not in PR diff; lint-mask-pr-atomicity "
            f"skipped (no atomicity risk)."
        )
        return 0
    base_yml = git_show(base_sha, ci_path)
    head_yml = git_show(head_sha, ci_path)
    base_doc = _parse(base_yml, f"{ci_path}@{base_sha}")
    head_doc = _parse(head_yml, f"{ci_path}@{head_sha}")
    # If the file is newly added (no base), no flip is possible — every
    # value is "newly introduced", not "changed". Tier 2e covers the
    # tracking-issue check for new continue-on-error: true. Exit 0.
    if base_doc is None:
        print(
            f"::notice::{ci_path} newly added in this PR; no flip to "
            f"analyse — lint-mask-pr-atomicity skipped."
        )
        return 0
    # If the file is deleted on head, ditto — no atomicity question.
    if head_doc is None:
        print(
            f"::notice::{ci_path} deleted in this PR; "
            f"lint-mask-pr-atomicity skipped."
        )
        return 0
    coe_yes, coe_reasons = coe_changed(base_doc, head_doc)
    needs_yes, needs_reason = sentinel_needs_changed(
        base_doc, head_doc, sentinel_key
    )
    if not coe_yes and not needs_yes:
        # ci.yml changed, but neither rule predicate did — no-op for us.
        print(
            f"::notice::{ci_path} touched but neither continue-on-error "
            f"nor sentinel '{sentinel_key}'.needs changed — no atomicity "
            f"risk. OK."
        )
        return 0
    if coe_yes and needs_yes:
        # Preferred outcome: both halves of the pair landed atomically.
        print(
            f"::notice::Atomic change detected: both continue-on-error "
            f"AND sentinel '{sentinel_key}'.needs touched in same PR. OK."
        )
        for r in coe_reasons:
            print(f" - {r}")
        print(f" - {needs_reason}")
        return 0
    # Exactly one side touched — require Paired: #NNN reference.
    commit_log = git_log_messages(base_sha, head_sha)
    paired = find_paired_refs(pr_body, commit_log)
    one_side = "continue-on-error" if coe_yes else f"sentinel '{sentinel_key}'.needs"
    other_side = (
        f"sentinel '{sentinel_key}'.needs" if coe_yes else "continue-on-error"
    )
    if paired:
        print(
            f"::notice::Split-pair detected ({one_side} changed without "
            f"{other_side}), but Paired reference(s) present: "
            f"{', '.join('#' + n for n in paired)}. OK."
        )
        for r in coe_reasons:
            print(f" - {r}")
        if needs_reason:
            print(f" - {needs_reason}")
        return 0
    # The failure mode this lint exists to prevent.
    print(
        f"::error file={ci_path}::lint-mask-pr-atomicity (Tier 2d): "
        f"PR touches {one_side} in {ci_path} but NOT {other_side}, "
        f"and no `Paired: #NNN` reference was found in the PR body or "
        f"in commit messages between {base_sha[:8]}..{head_sha[:8]}. "
        f"This is the PR#665+#668 split-pair regression class "
        f"(see internal#350, mc#664). FIX: either (a) include the "
        f"matching {other_side} change in the same PR (preferred), or "
        f"(b) add `Paired: #NNN` (literal, capital P, with `#`) to the "
        f"PR body or a commit message referencing the paired PR."
    )
    for r in coe_reasons:
        print(f" - {r}")
    if needs_reason:
        print(f" - {needs_reason}")
    return 1
if __name__ == "__main__":
    sys.exit(run())

View File

@ -1,681 +0,0 @@
#!/usr/bin/env python3
"""lint-pre-flip-continue-on-error — block a PR that flips a job from
``continue-on-error: true`` to ``continue-on-error: false`` (or removes
the key while the base had it ``true``) without proof that the job's
recent runs on the target branch are actually green.
Empirical class PR #656 / mc#664:
PR #656 (RFC internal#219 Phase 4) flipped 5 ``platform-build``-class
jobs ``continue-on-error: true false`` on the basis of a
"verified green on main via combined-status check". But that "green"
was the LIE produced by the prior ``continue-on-error: true``:
Gitea Quirk #10 (internal#342 + dup #287) — when a step inside a
job marked ``continue-on-error: true`` fails, the job-level status
is still rolled up as ``success``. So the precondition the PR
claimed to verify was structurally fooled by the bug being
flipped.
mc#664 then captured the surfaced defects (2 unrelated, mutually-
masked regressions):
Class 1: sqlmock helper drift since 2f36bb9a (24 days old)
Class 2: OFFSEC-001 contract collision since 7d1a189f (1 day old)
Codified 04:35Z as hongming-pc2 charter §SOP-N rule (e)
"run-log-grep-before-flip": pull the actual run log + grep for
``--- FAIL`` / ``FAIL\\s`` BEFORE flipping; don't trust the masked
combined-status.
This script structurally enforces that rule at PR time.
How it works (one PR tick):
1. Parse the diff: compare ``.gitea/workflows/*.yml`` at PR base
vs PR head. For each file present in both, parse the YAML AST
and walk ``jobs.<key>.continue-on-error`` on each side. A
"flip" is base {true} AND head {false, None/absent}. We
coerce truthy/falsy per YAML semantics (PyYAML normalizes
``true``/``True``/``yes`` to ``True``).
2. For each flipped job, derive its commit-status context name as
``"{workflow.name} / {job.name or job.key} (push)"`` that's
how Gitea Actions emits the context for runs on
``main``/``staging`` (push event, see also expected_context()
in ci-required-drift.py).
3. Pull the last N commits of the target branch (PR base), fetch
combined commit-status per commit, scan ``statuses[]`` for
contexts matching ANY of the flipped jobs. For each match,
fetch the actual run log via the web-UI route
``{server_url}/{repo}/actions/runs/{run_id}/jobs/{job_idx}/logs``
(per memory ``reference_gitea_actions_log_fetch`` Gitea 1.22.6
lacks REST ``/actions/runs/*`` endpoints; the web-UI route is the
only working path; see ``reference_gitea_1_22_6_lacks_rest_rerun_endpoints``).
4. Grep each log for the Go-test failure markers ``--- FAIL`` /
``FAIL\\s+<package>`` AND the bash-step error sentinel
``::error::``. If ANY recent log shows any of these AND the
status itself reads ``success``, the job was masked. ``::error::``
the flip with the offending test name + offending run URL +
the regression commit (HEAD of the run).
5. Exit 1 if any flips have at least one masked run; exit 0
otherwise.
Halt-on-noise contract:
- If a recent log fetch 404s (already-pruned-via-act_runner-gc,
transient gitea-web outage): emit ``::warning::`` and treat the
run as "log unavailable" does NOT block the flip; logged so
a curious reviewer can re-run.
- If a flipped job has ZERO recent runs on the target branch (newly
added workflow): emit ``::warning::`` "no run history to verify"
and allow the flip. This is the only way a NEW workflow can ever
ship with ``continue-on-error: false``; otherwise we'd have a
chicken-and-egg.
Behavior-based AST gate per ``feedback_behavior_based_ast_gates``:
- YAML parsed via PyYAML safe_load on BOTH sides of the diff
- No grep-by-line formatting changes (comment churn, key order)
don't false-positive a flip
- Job-key match so a rename ``platform-build core-be-build``
appears as a DELETE + an ADD, not a flip (the delete side has no
new value to compare against; the add side has no base side).
Run locally (works against this repo, requires PyYAML + Gitea token
that can read combined-commit-status):
GITEA_TOKEN=... GITEA_HOST=git.moleculesai.app \\
REPO=molecule-ai/molecule-core BASE_REF=main \\
BASE_SHA=$(git rev-parse origin/main) \\
HEAD_SHA=$(git rev-parse HEAD) \\
python3 .gitea/scripts/lint_pre_flip_continue_on_error.py \\
--dry-run
Cross-links: PR#656, mc#664, PR#665 (the interim re-mask),
Quirk #10 (internal#342 + dup #287), hongming-pc2 charter §SOP-N
rule (e), feedback_strict_root_only_after_class_a,
feedback_no_shared_persona_token_use.
"""
from __future__ import annotations
import argparse
import json
import os
import subprocess
import sys
import urllib.error
import urllib.parse
import urllib.request
from typing import Any
import yaml # PyYAML 6.0.2 — installed by the workflow before this runs.
# --------------------------------------------------------------------------
# Environment (read at module-import; runtime contract enforced in main())
# --------------------------------------------------------------------------
def _env(key: str, *, default: str = "") -> str:
return os.environ.get(key, default)
# Module-level config snapshot. These are read once at import time; the
# hard runtime contract (non-empty values) is enforced later by
# _require_runtime_env(), so importing with a bare environment is safe.
GITEA_TOKEN = _env("GITEA_TOKEN")
GITEA_HOST = _env("GITEA_HOST")
REPO = _env("REPO")
BASE_REF = _env("BASE_REF", default="main")
BASE_SHA = _env("BASE_SHA")
HEAD_SHA = _env("HEAD_SHA")
# How many recent commits to scan on the target branch. 5 by default;
# enough to catch a job that only fails intermittently, not so many
# that the script paginates needlessly. Per spec.
RECENT_COMMITS_N = int(_env("RECENT_COMMITS_N", default="5"))
# Split "owner/name" once; the `+ [""]` pad keeps the 2-tuple unpack safe
# even for a slash-less REPO value.
OWNER, NAME = (REPO.split("/", 1) + [""])[:2] if REPO else ("", "")
API = f"https://{GITEA_HOST}/api/v1" if GITEA_HOST else ""
WEB = f"https://{GITEA_HOST}" if GITEA_HOST else ""
# Failure markers we grep for in the run log.
# --- FAIL — Go test failure marker
# FAIL\s — `FAIL github.com/x/y` package-level rollup
# (covered by both the tab and single-space variants below)
# ::error:: — bash-step `::error::` lines (the lint-curl-status-capture
# pattern: a `python3 <<PY` block writing `::error::` then
# sys.exit(1); also any shell `echo "::error::..."` from
# jobs that wrap pytest/eslint/etc. and convert
# non-zero exits into masked-by-CoE status)
FAIL_PATTERNS = (
    "--- FAIL",
    "FAIL\t",
    "FAIL ",
    "::error::",
)
def _require_runtime_env() -> None:
for key in ("GITEA_TOKEN", "GITEA_HOST", "REPO", "BASE_REF", "BASE_SHA", "HEAD_SHA"):
if not os.environ.get(key):
sys.stderr.write(f"::error::missing required env var: {key}\n")
sys.exit(2)
# --------------------------------------------------------------------------
# Tiny HTTP helper (no requests dependency)
# Mirrors the api()/ApiError contract in ci-required-drift.py +
# main-red-watchdog.py per feedback_api_helper_must_raise_not_return_dict.
# --------------------------------------------------------------------------
class ApiError(RuntimeError):
    """A Gitea API/web call cannot be trusted to have succeeded.

    Raised instead of soft-failing: returning an error-shaped dict on a
    non-2xx was the duplicate-write bug factory in find-or-create flows
    (PR #112 Five-Axis). Here a swallowed transient gitea-web 502 would
    silently allow a flip whose recent runs were never actually
    verified — exactly the regression class this lint exists to close.
    """
def http(
    method: str,
    url: str,
    *,
    body: dict | None = None,
    headers: dict[str, str] | None = None,
    expect_json: bool = True,
    timeout: int = 30,
) -> tuple[int, Any, bytes]:
    """Tiny urllib wrapper returning (status, parsed_or_None, raw_bytes).

    Raises ApiError on any non-2xx response, and on a 2xx body that
    fails to parse when ``expect_json=True``. With ``expect_json=False``
    the raw bytes are returned in the parsed slot (web-UI log fetches
    return text/plain).
    """
    request_headers = {
        "Authorization": f"token {GITEA_TOKEN}",
        "Accept": "application/json" if expect_json else "text/plain",
    }
    if headers:
        request_headers.update(headers)
    payload = None
    if body is not None:
        payload = json.dumps(body).encode("utf-8")
        request_headers["Content-Type"] = "application/json"
    req = urllib.request.Request(url, method=method, data=payload, headers=request_headers)
    try:
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            raw, status = resp.read(), resp.status
    except urllib.error.HTTPError as e:
        # HTTPError still carries a (possibly empty) body worth surfacing.
        raw, status = e.read() or b"", e.code
    if not 200 <= status < 300:
        snippet = raw[:500].decode("utf-8", errors="replace") if raw else ""
        raise ApiError(f"{method} {url} → HTTP {status}: {snippet}")
    if not expect_json:
        return status, raw, raw
    if not raw:
        return status, None, raw
    try:
        return status, json.loads(raw), raw
    except json.JSONDecodeError as e:
        raise ApiError(f"{method} {url} → HTTP {status} but body is not JSON: {e}") from e
def api(method: str, path: str, *, body: dict | None = None, query: dict[str, str] | None = None) -> tuple[int, Any]:
    """Read-shaped Gitea REST helper. Path is API-relative (``/repos/...``)."""
    if query:
        url = f"{API}{path}?{urllib.parse.urlencode(query)}"
    else:
        url = f"{API}{path}"
    status, parsed, _raw = http(method, url, body=body, expect_json=True)
    return status, parsed
# --------------------------------------------------------------------------
# YAML parsing — coerce truthy/falsy for continue-on-error
# --------------------------------------------------------------------------
def _coerce_coe(val: Any) -> bool:
"""Coerce a continue-on-error YAML value to bool.
PyYAML safe_load normalizes ``true``/``True``/``yes``/``on`` to
Python ``True`` and ``false``/``False``/``no``/``off`` / absence
to ``False`` (we treat absence/None as False here too that's the
GitHub Actions default semantics).
Edge cases:
- String ``"true"`` (quoted in YAML) kept as the string
``"true"``, falsy under bool() but a flip we DO care about
catching. Normalize string forms case-insensitively to bool
so the diff is consistent with the runtime behavior of
Gitea Actions, which YAML-parses the same way.
"""
if isinstance(val, bool):
return val
if val is None:
return False
if isinstance(val, str):
return val.strip().lower() in ("true", "yes", "on", "1")
return bool(val)
def jobs_coe_map(workflow_doc: dict) -> dict[str, bool]:
    """Map job_key → coerced job-level continue-on-error for every job.

    Job-level only: per-step ``continue-on-error`` masking is a separate
    class and is handled by the test suite + reviewer, not by this gate
    (see Future Work in the workflow YAML).
    """
    jobs = workflow_doc.get("jobs")
    if not isinstance(jobs, dict):
        return {}
    return {
        key: _coerce_coe(job.get("continue-on-error"))
        for key, job in jobs.items()
        if isinstance(job, dict)
    }
def workflow_name(workflow_doc: dict, *, fallback: str = "") -> str:
    """Top-level ``name:`` of the workflow, else *fallback* (by convention
    the filename stem, per Gitea Actions semantics)."""
    candidate = workflow_doc.get("name")
    if not isinstance(candidate, str):
        return fallback
    stripped = candidate.strip()
    return stripped if stripped else fallback
def job_display_name(workflow_doc: dict, job_key: str) -> str:
    """``jobs.<key>.name`` when present and non-blank, else the key itself.

    Mirrors expected_context() in ci-required-drift.py.
    """
    job = workflow_doc.get("jobs", {}).get(job_key)
    if isinstance(job, dict):
        name = job.get("name")
        if isinstance(name, str) and name.strip():
            return name.strip()
    return job_key
def context_name(workflow_name_str: str, job_name_str: str, event: str = "push") -> str:
    """Commit-status context exactly as Gitea Actions emits it.

    Defaults to ``event="push"`` because recent runs on main are push
    events; pass ``"pull_request"`` for PR-context lookups.
    """
    return "{} / {} ({})".format(workflow_name_str, job_name_str, event)
# --------------------------------------------------------------------------
# Diff detection — flips, not arbitrary changes
# --------------------------------------------------------------------------
def detect_flips(
    base_workflows: dict[str, str],
    head_workflows: dict[str, str],
) -> list[dict]:
    """Diff per-file continue-on-error maps; return true→false flips.

    Both arguments map ``path -> yaml_text``. Each returned record has
    the shape::

        {
            "workflow_path": ".gitea/workflows/ci.yml",
            "workflow_name": "CI",
            "job_key": "platform-build",
            "job_name": "Platform (Go)",
            "context": "CI / Platform (Go) (push)",
        }

    A flip means the job had ``continue-on-error: true`` on base and
    ``false`` on head. Files present on only one side are skipped —
    a new workflow with CoE false has no history to mask, and removing
    a workflow can't possibly flip anything. Files that fail to parse
    are warned about and skipped; the YAML-lint workflows own invalid
    YAML as a separate failure mode.
    """
    flips: list[dict] = []
    for path, base_text in base_workflows.items():
        head_text = head_workflows.get(path)
        if head_text is None:
            continue
        try:
            base_doc = yaml.safe_load(base_text) or {}
            head_doc = yaml.safe_load(head_text) or {}
        except yaml.YAMLError as e:
            sys.stderr.write(f"::warning file={path}::YAML parse error: {e}\n")
            continue
        if not (isinstance(base_doc, dict) and isinstance(head_doc, dict)):
            continue
        head_map = jobs_coe_map(head_doc)
        wf_name = workflow_name(
            head_doc, fallback=os.path.basename(path).rsplit(".", 1)[0]
        )
        for job_key, was_coe in jobs_coe_map(base_doc).items():
            now_coe = head_map.get(job_key)
            if now_coe is None:
                continue  # job removed on head — not a flip
            if was_coe and not now_coe:
                job_name = job_display_name(head_doc, job_key)
                flips.append({
                    "workflow_path": path,
                    "workflow_name": wf_name,
                    "job_key": job_key,
                    "job_name": job_name,
                    "context": context_name(wf_name, job_name, "push"),
                })
    return flips
# --------------------------------------------------------------------------
# Git: snapshot every .gitea/workflows/*.yml at a SHA (no checkout)
# --------------------------------------------------------------------------
def _git(*args: str, cwd: str | None = None) -> str:
    """Run ``git`` with *args* and return its stdout as text.

    Raises RuntimeError (carrying git's stderr) on a non-zero exit so
    callers decide whether a failure is fatal or skippable.
    """
    result = subprocess.run(
        ["git", *args], capture_output=True, text=True, check=False, cwd=cwd
    )
    if result.returncode == 0:
        return result.stdout
    raise RuntimeError(f"git {args!r} failed: {result.stderr.strip()}")
def workflows_at_sha(sha: str, *, repo_dir: str | None = None) -> dict[str, str]:
    """Snapshot every ``.gitea/workflows/*.yml``/``*.yaml`` blob at *sha*.

    Uses ``git ls-tree`` + ``git show`` so the SHA never has to be
    checked out (the workflow runs on the PR head; the base SHA is
    merely fetched). Non-blob entries (e.g. symlinks) are skipped.
    """
    snapshot: dict[str, str] = {}
    listing = _git("ls-tree", "-r", "--name-only", sha, ".gitea/workflows/", cwd=repo_dir)
    for raw in listing.splitlines():
        path = raw.strip()
        if not path.endswith((".yml", ".yaml")):
            continue
        try:
            snapshot[path] = _git("show", f"{sha}:{path}", cwd=repo_dir)
        except RuntimeError:
            continue  # symlink or other non-blob; skip
    return snapshot
# --------------------------------------------------------------------------
# Gitea: recent commits + per-commit combined status + log fetch
# --------------------------------------------------------------------------
def recent_commits_on_branch(branch: str, n: int) -> list[str]:
    """Return the SHAs of the last *n* commits on *branch*.

    Ordering is irrelevant to callers (the SHAs are treated as a set).
    Backed by the REST ``/commits`` endpoint with ``sha=branch`` and
    ``limit=n``; raises ApiError when the endpoint returns a non-list.
    Entries without a plausible (>= 7 char) SHA are dropped.
    """
    _, body = api(
        "GET",
        f"/repos/{OWNER}/{NAME}/commits",
        query={"sha": branch, "limit": str(n)},
    )
    if not isinstance(body, list):
        raise ApiError(f"/commits for {branch} returned non-list: {type(body).__name__}")
    shas: list[str] = []
    for commit in body:
        if not isinstance(commit, dict):
            continue
        sha = commit.get("sha") or (commit.get("commit", {}) or {}).get("id")
        if isinstance(sha, str) and len(sha) >= 7:
            shas.append(sha)
    return shas
def combined_status(sha: str) -> dict:
    """Fetch the combined commit status for *sha*.

    Same response shape as ``main-red-watchdog.get_combined_status``.
    Raises ApiError when the endpoint returns a non-dict body.
    """
    _, body = api("GET", f"/repos/{OWNER}/{NAME}/commits/{sha}/status")
    if isinstance(body, dict):
        return body
    raise ApiError(f"combined-status for {sha} not a dict")
def _entry_state(s: dict) -> str:
"""Per-entry state — Gitea 1.22.6 schema asymmetry: top-level
uses ``state``, per-entry uses ``status``. Defensive fallback per
main-red-watchdog.py line 233."""
return s.get("status") or s.get("state") or ""
def fetch_log(target_url: str) -> str | None:
    """Fetch a job log given its web-UI *target_url* (e.g.
    ``/molecule-ai/molecule-core/actions/runs/13494/jobs/0``).

    Per ``reference_gitea_actions_log_fetch`` the log lives at the job
    route + ``/logs``; per
    ``reference_gitea_1_22_6_lacks_rest_rerun_endpoints`` Gitea 1.22.6
    lacks the REST ``/api/v1/.../actions/runs/*`` path, so the web-UI
    route is the only working endpoint until 1.24+.

    Returns the decoded log text, or ``None`` on empty URL / fetch
    failure / non-bytes body — callers treat ``None`` as "log
    unavailable: warn, don't fail".
    """
    if not target_url:
        return None
    # target_url may be relative ("/owner/repo/...") or absolute; in
    # both cases "/logs" must be appended to the job sub-path.
    url = f"{WEB}{target_url}" if target_url.startswith("/") else target_url
    if not url.endswith("/logs"):
        url += "/logs"
    try:
        _, body, _ = http("GET", url, expect_json=False, timeout=60)
    except ApiError as e:
        sys.stderr.write(f"::warning::log fetch failed for {url}: {e}\n")
        return None
    if isinstance(body, bytes):
        return body.decode("utf-8", errors="replace")
    return None
def grep_fail_markers(log_text: str) -> list[str]:
    """Scan *log_text* for FAIL_PATTERNS hits.

    Returns at most 5 sample lines, each truncated to 240 chars to
    keep error output bounded. An empty list means a clean log.
    """
    samples: list[str] = []
    for raw_line in log_text.splitlines():
        if any(marker in raw_line for marker in FAIL_PATTERNS):
            samples.append(raw_line.strip()[:240])
            if len(samples) == 5:
                break
    return samples
# --------------------------------------------------------------------------
# Verification: for one flip, scan recent runs on BASE_REF
# --------------------------------------------------------------------------
def verify_flip(flip: dict, branch: str, n: int) -> dict:
    """Scan the last ``n`` commits on ``branch``. For each commit whose
    combined status contains a context matching ``flip["context"]``,
    fetch the run log and grep for FAIL markers.
    Returns::
        {
            "flip": flip,
            "checked_commits": int,  # how many commits had a matching context
            "masked_runs": [         # runs where log shows FAIL despite status==success
                {"sha": "...", "status": "success", "target_url": "...", "samples": [...]},
                ...
            ],
            "fail_runs": [           # runs where status itself is failure/error
                {"sha": "...", "status": "failure", "target_url": "...", "samples": [...]},
                ...
            ],
            "warnings": [str],       # log-unavailable warnings (not blocking)
        }
    Blocking condition: ``masked_runs`` OR ``fail_runs`` non-empty.
    A ``success`` status with a clean log is the only "OK to flip"
    outcome (per hongming-pc2 §SOP-N rule (e)).
    """
    target_context = flip["context"]
    result = {
        "flip": flip,
        "checked_commits": 0,
        "masked_runs": [],
        "fail_runs": [],
        "warnings": [],
    }
    shas = recent_commits_on_branch(branch, n)
    if not shas:
        # No history at all — record a warning; the caller treats
        # warnings as non-blocking (allow-with-warning).
        result["warnings"].append(
            f"no recent commits on {branch} (cannot verify flip)"
        )
        return result
    for sha in shas:
        try:
            status_doc = combined_status(sha)
        except ApiError as e:
            # A transient API failure for one SHA shouldn't abort the
            # whole scan — record it and move to the next commit.
            result["warnings"].append(f"combined-status for {sha}: {e}")
            continue
        statuses = status_doc.get("statuses") or []
        # First entry matching the context name. Newest SHAs come
        # first; one entry per context per SHA is the usual shape.
        for s in statuses:
            if not isinstance(s, dict):
                continue
            if s.get("context") != target_context:
                continue
            result["checked_commits"] += 1
            state = _entry_state(s)
            target_url = s.get("target_url") or ""
            log_text = fetch_log(target_url)
            if log_text is None:
                result["warnings"].append(
                    f"log unavailable for {sha} {target_context}"
                )
                # Still record the status itself if it's red — that's
                # a hard signal that doesn't need log access.
                if state in ("failure", "error"):
                    result["fail_runs"].append({
                        "sha": sha,
                        "status": state,
                        "target_url": target_url,
                        "samples": ["[log unavailable; status itself is " + state + "]"],
                    })
                break
            samples = grep_fail_markers(log_text)
            if state in ("failure", "error"):
                # Red status is blocking regardless of what the log
                # grep found.
                result["fail_runs"].append({
                    "sha": sha,
                    "status": state,
                    "target_url": target_url,
                    "samples": samples or ["[no FAIL markers found but status is " + state + "]"],
                })
            elif samples and state == "success":
                # The bug class: status==success while log shows FAIL.
                # That's exactly Quirk #10 (continue-on-error masking).
                result["masked_runs"].append({
                    "sha": sha,
                    "status": state,
                    "target_url": target_url,
                    "samples": samples,
                })
            # Either way, we matched one context entry for this SHA;
            # don't keep looping `statuses[]`.
            break
    if result["checked_commits"] == 0:
        # The flipped job never ran on the scanned window — nothing to
        # prove either way; allow the flip but leave a visible warning.
        result["warnings"].append(
            f"no runs of {target_context!r} found in the last {n} commits on "
            f"{branch} — cannot verify; allowing flip with warning"
        )
    return result
# --------------------------------------------------------------------------
# Report rendering
# --------------------------------------------------------------------------
def render_flip_report(verdict: dict) -> str:
    """Format one verify_flip() verdict as an indented human-readable
    block for the run log (emitted for both safe and unsafe flips)."""
    flip = verdict["flip"]

    def _absolute(url: str) -> str:
        # target_url may be relative; render the absolute form for
        # click-through.
        return f"{WEB}{url}" if url.startswith("/") else url

    lines = [
        f"job: {flip['job_key']} ({flip['context']})",
        f" workflow: {flip['workflow_path']}",
        f" checked_commits: {verdict['checked_commits']}",
    ]
    for run in verdict["fail_runs"]:
        lines.append(
            f" fail run {run['sha'][:10]} (status={run['status']}): {_absolute(run['target_url'])}"
        )
        lines.extend(f" | {sample}" for sample in run["samples"])
    for run in verdict["masked_runs"]:
        lines.append(
            f" MASKED run {run['sha'][:10]} (status=success, log shows FAIL): {_absolute(run['target_url'])}"
        )
        lines.extend(f" | {sample}" for sample in run["samples"])
    lines.extend(f" warning: {w}" for w in verdict["warnings"])
    return "\n".join(lines)
# --------------------------------------------------------------------------
# Main
# --------------------------------------------------------------------------
def _parse_args(argv: list[str] | None = None) -> argparse.Namespace:
p = argparse.ArgumentParser(
prog="lint-pre-flip-continue-on-error",
description="Block a PR that flips continue-on-error true→false "
"without proof recent runs are actually green.",
)
p.add_argument(
"--dry-run",
action="store_true",
help="Detect + print findings to stdout; never exit non-zero. "
"Useful for local testing.",
)
return p.parse_args(argv)
def main(argv: list[str] | None = None) -> int:
    """Entry point: detect CoE true→false flips in this PR and verify
    each one against recent runs on BASE_REF.

    Returns a process exit code: 0 when there are no flips, when every
    flip verified safe, or when --dry-run downgrades failures to
    warnings; 1 when at least one flip failed verification (blocking).
    """
    args = _parse_args(argv)
    _require_runtime_env()
    base_workflows = workflows_at_sha(BASE_SHA)
    head_workflows = workflows_at_sha(HEAD_SHA)
    flips = detect_flips(base_workflows, head_workflows)
    if not flips:
        print("::notice::no continue-on-error true→false flips in this PR")
        return 0
    print(f"::notice::detected {len(flips)} continue-on-error true→false flip(s); verifying recent runs on {BASE_REF}")
    bad_flips: list[dict] = []
    for flip in flips:
        verdict = verify_flip(flip, BASE_REF, RECENT_COMMITS_N)
        report = render_flip_report(verdict)
        if verdict["fail_runs"] or verdict["masked_runs"]:
            print(f"::error file={flip['workflow_path']}::flip of {flip['job_key']} "
                  f"({flip['context']}) blocked — recent runs on {BASE_REF} show "
                  f"FAIL markers OR are red. Pull each run log below + grep "
                  f"`--- FAIL` / `FAIL ` / `::error::` — DON'T trust the masked "
                  f"combined-status. See hongming-pc2 charter §SOP-N rule (e). "
                  f"PR#656 / mc#664 reference class.")
            bad_flips.append(verdict)
        else:
            print(f"::notice::flip of {flip['job_key']} ({flip['context']}) is safe — "
                  f"{verdict['checked_commits']} recent run(s), no FAIL markers")
        # Always print the per-flip detail block so the human-readable
        # report is in the run log for both safe and unsafe flips.
        print(f"::group::flip detail: {flip['job_key']}")
        print(report)
        print("::endgroup::")
    if bad_flips and not args.dry_run:
        print(f"::error::{len(bad_flips)}/{len(flips)} flip(s) failed pre-flip verification")
        return 1
    if bad_flips and args.dry_run:
        print(f"::warning::[dry-run] {len(bad_flips)}/{len(flips)} flip(s) WOULD fail; exit 0 forced")
        return 0
    # FIX: previously the function fell off the end (implicit None)
    # when every flip verified safe. That only exits 0 by accident
    # (sys.exit(None) == status 0) and contradicts the `-> int`
    # annotation — make the success path explicit.
    return 0
# Script entry point — propagate main()'s exit code to the shell.
if __name__ == "__main__":
    sys.exit(main())

View File

@ -1,526 +0,0 @@
#!/usr/bin/env python3
"""lint_required_context_exists_in_bp — Tier 2g per internal#350.
Rule
----
When a PR adds a NEW commit-status emission (a context that didn't
exist on the base side), the workflow file must carry one of three
directive comments adjacent to the new job:
(a) `# bp-required: yes`
The new context MUST already be in
`branch_protections/<branch>.status_check_contexts`. Verified
via Gitea API at PR time.
(b) `# bp-required: pending #NNN`
Acknowledged asymmetry; references an OPEN tracking issue that
will follow up with the BP PATCH.
(c) `# bp-exempt: <free-text reason>`
Informational job, not intended to be a required gate.
No directive on a new emitter → FAIL with a 3-option fix-hint.
The class this prevents
-----------------------
PR#656 added `CI / all-required (pull_request)` as a sentinel context
that workflows emit, but BP did NOT list it. When `platform-build`
failed, `all-required` failed, but BP let the PR merge anyway —
cascade to mc#664. With this lint, PR#656 would have been blocked
until either the BP PATCH ran alongside OR the author added a
`bp-required: pending` directive.
Why directives MUST live in the workflow YAML
---------------------------------------------
The directive comment lives with the emitter so a scheduled
audit (Tier 2f, daily) can read the same source. PR-body-only
directives invisibly evaporate on merge the asymmetry would
return to undetected. PR-body claims are advisory; workflow-file
comments are the contract.
How "new emission" is detected
------------------------------
Diff base..head over `.gitea/workflows/*.yml`. For each YAML file
that's added or modified:
- Parse both base-side and head-side via PyYAML AST.
- Enumerate emitted contexts on each side using the same rules as
Tier 2f (workflow.name + job.name|key + event-mapping).
- `new_contexts = head_contexts - base_contexts`.
If `new_contexts` is empty after de-dup, no rule applies → pass.
Per `feedback_behavior_based_ast_gates`: comment scanning uses raw
text in a small window around the job-key line, NOT regex over the
full file. This avoids matching `bp-required:` mentioned in a
comment unrelated to the new job.
Exit codes
----------
0 no new emissions, all new emissions have valid directives,
or BP read errored (graceful-degrade per Tier 2a contract).
1 at least one new emission lacks a directive, or has
`bp-required: yes` but the context is missing from BP.
2 env contract violation or YAML parse error.
Env
---
BASE_SHA PR base SHA
HEAD_SHA PR head SHA
GITEA_TOKEN DRIFT_BOT_TOKEN (repo-admin for BP read)
GITEA_HOST e.g. git.moleculesai.app
REPO owner/name
BRANCH defaults to `main`
WORKFLOWS_DIR defaults to `.gitea/workflows`
Memory cross-links
------------------
- internal#350 (the RFC that specs this lint)
- PR#656 (the empirical case that prompted Tier 2g)
- mc#664 (the surfaced cascade)
- feedback_phantom_required_check_after_gitea_migration (Tier 2f cousin)
- feedback_behavior_based_ast_gates
"""
from __future__ import annotations
import json
import os
import re
import subprocess
import sys
import urllib.error
import urllib.parse
import urllib.request
from typing import Any
try:
import yaml
except ImportError:
sys.stderr.write(
"::error::PyYAML is required. Install with: pip install PyYAML\n"
)
sys.exit(2)
# Directive comment patterns. We match `# bp-required:` OR `# bp-exempt:`,
# both with optional surrounding whitespace. NOTE: although kebab-case
# lowercase is the convention, all three patterns compile with
# re.IGNORECASE, so the match itself is case-INsensitive.
BP_REQUIRED_YES_RE = re.compile(
    r"#\s*bp-required:\s*yes\b", re.IGNORECASE
)
# `pending #NNN` — captures the tracking-issue number as group "num".
BP_REQUIRED_PENDING_RE = re.compile(
    r"#\s*bp-required:\s*pending\s*#(?P<num>\d+)\b", re.IGNORECASE
)
# Exempt requires at least one non-space character of reason text.
BP_EXEMPT_RE = re.compile(
    r"#\s*bp-exempt:\s*\S", re.IGNORECASE
)
# Gitea event-mapping (same as Tier 2f): both pull_request and
# pull_request_target render as "(pull_request)" in status contexts.
_EVENT_MAP = {
    "pull_request": "pull_request",
    "pull_request_target": "pull_request",
    "push": "push",
}
# ---------------------------------------------------------------------------
# Env
# ---------------------------------------------------------------------------
def _env(key: str, default: str | None = None) -> str:
v = os.environ.get(key, default)
return v if v is not None else ""
def _require_env(key: str) -> str:
v = os.environ.get(key)
if not v:
sys.stderr.write(f"::error::missing required env var: {key}\n")
sys.exit(2)
return v
# ---------------------------------------------------------------------------
# API helper (same contract as Tier 2f).
# ---------------------------------------------------------------------------
def api(
    method: str,
    path: str,
    *,
    body: dict | None = None,
    query: dict[str, str] | None = None,
) -> tuple[str, Any]:
    """Call the Gitea REST API; never raise.

    Returns ``(status, payload)`` where status is one of "ok",
    "not_found", "forbidden", or "error", and payload is the decoded
    JSON body (None for empty or non-ok responses). 401 and 403 both
    collapse to "forbidden"; any transport, timeout, or JSON-decode
    failure collapses to "error" so callers can graceful-degrade.
    """
    host = _env("GITEA_HOST")
    token = _env("GITEA_TOKEN")
    url = f"https://{host}/api/v1{path}"
    if query:
        url += "?" + urllib.parse.urlencode(query)
    headers = {
        "Authorization": f"token {token}",
        "Accept": "application/json",
    }
    data = None
    if body is not None:
        data = json.dumps(body).encode("utf-8")
        headers["Content-Type"] = "application/json"
    req = urllib.request.Request(url, method=method, data=data, headers=headers)
    try:
        with urllib.request.urlopen(req, timeout=30) as resp:
            raw = resp.read()
    except urllib.error.HTTPError as e:
        if e.code == 404:
            return ("not_found", None)
        if e.code in (401, 403):
            return ("forbidden", None)
        return ("error", None)
    except (urllib.error.URLError, TimeoutError):
        return ("error", None)
    if not raw:
        return ("ok", None)
    try:
        return ("ok", json.loads(raw))
    except json.JSONDecodeError:
        return ("error", None)
# ---------------------------------------------------------------------------
# git helpers
# ---------------------------------------------------------------------------
def git_show(sha: str, path: str) -> str | None:
    """Return the blob content of *path* at *sha*, or None when git
    fails (file absent on that side, unknown SHA, etc.)."""
    proc = subprocess.run(
        ["git", "show", f"{sha}:{path}"], capture_output=True, text=True
    )
    return proc.stdout if proc.returncode == 0 else None
def git_diff_paths(base: str, head: str) -> list[str]:
    """List file paths changed between *base*..*head*; [] on git failure
    or when nothing changed."""
    proc = subprocess.run(
        ["git", "diff", "--name-only", f"{base}..{head}"],
        capture_output=True,
        text=True,
    )
    if proc.returncode != 0:
        return []
    return [line for line in proc.stdout.splitlines() if line.strip()]
# ---------------------------------------------------------------------------
# Workflow context enumeration (mirror Tier 2f).
# ---------------------------------------------------------------------------
def _get_on(d: Any) -> Any:
if not isinstance(d, dict):
return None
if "on" in d:
return d["on"]
if True in d:
return d[True]
return None
def _on_events(doc: Any) -> set[str]:
    """Normalized trigger events for a workflow doc.

    Collects raw event names from the `on:` value (string, list, or
    mapping form), then maps them through _EVENT_MAP — events that
    never produce commit-status contexts are dropped.
    """
    on = _get_on(doc)
    names: set[str] = set()
    if isinstance(on, str):
        names.add(on)
    elif isinstance(on, list):
        names.update(e for e in on if isinstance(e, str))
    elif isinstance(on, dict):
        names.update(k for k in on if isinstance(k, str))
    return {_EVENT_MAP[name] for name in names if name in _EVENT_MAP}
def _job_display(jbody: dict, jkey: str) -> str:
n = jbody.get("name") if isinstance(jbody, dict) else None
if isinstance(n, str) and n:
return n
return jkey
def workflow_contexts(doc: Any) -> set[str]:
    """Enumerate every commit-status context this workflow can emit.

    A context renders as "<workflow name> / <job display> (<event>)"
    for each job × each mapped trigger event. Returns the empty set
    when the doc is not a mapping, has no usable `name:`, no mapped
    events, or no jobs table. The "__lines__" key is skipped
    (presumably a line-tracking sentinel injected elsewhere — not
    visible from this file).
    """
    if not isinstance(doc, dict):
        return set()
    wf_name = doc.get("name")
    if not (isinstance(wf_name, str) and wf_name):
        return set()
    events = _on_events(doc)
    jobs = doc.get("jobs")
    if not events or not isinstance(jobs, dict):
        return set()
    contexts: set[str] = set()
    for jkey, jbody in jobs.items():
        if jkey == "__lines__" or not isinstance(jbody, dict):
            continue
        disp = _job_display(jbody, jkey)
        contexts.update(f"{wf_name} / {disp} ({ev})" for ev in events)
    return contexts
# ---------------------------------------------------------------------------
# Find the source line of a job-key in a workflow YAML's raw text.
# Used to scan for nearby directive comments.
# ---------------------------------------------------------------------------
def _find_job_key_line(raw_lines: list[str], jkey: str) -> int | None:
"""Return 1-based line of `<jkey>:` under jobs:."""
in_jobs = False
jobs_indent = -1
for i, line in enumerate(raw_lines, start=1):
stripped = line.lstrip()
if stripped.startswith("jobs:"):
in_jobs = True
jobs_indent = len(line) - len(stripped)
continue
if in_jobs:
# Job key is the next indent level under `jobs:`.
indent = len(line) - len(stripped)
if stripped and indent <= jobs_indent:
# Left the jobs: block
in_jobs = False
continue
if re.match(rf"^\s*{re.escape(jkey)}\s*:", line):
return i
return None
# How many lines ABOVE the job-key line find_directive_for_job scans.
_DIRECTIVE_WINDOW = 3  # lines above the job-key line (inclusive)
def find_directive_for_job(
    raw_text: str, jkey: str
) -> tuple[str, str | None] | None:
    """Locate the first bp-directive comment just above the job key.

    Returns (kind, value) where kind is one of "required-yes",
    "required-pending", or "exempt"; value carries the pending
    tracking-issue number (for "required-pending") and is None
    otherwise. Returns None when the job line can't be found or no
    directive sits within _DIRECTIVE_WINDOW lines above it.

    Only lines ABOVE the job key are scanned — the convention puts
    the directive before the job (mirroring how `# mc#NNN` comments
    sit above `continue-on-error: true`). The job body is deliberately
    not scanned because step text can produce false positives.
    """
    lines = raw_text.splitlines()
    job_line = _find_job_key_line(lines, jkey)
    if job_line is None:
        return None
    start = max(1, job_line - _DIRECTIVE_WINDOW)
    for candidate in lines[start - 1 : job_line - 1]:
        pending = BP_REQUIRED_PENDING_RE.search(candidate)
        if pending:
            return ("required-pending", pending.group("num"))
        if BP_REQUIRED_YES_RE.search(candidate):
            return ("required-yes", None)
        if BP_EXEMPT_RE.search(candidate):
            return ("exempt", None)
    return None
# ---------------------------------------------------------------------------
# Map a context back to its emitting (workflow_path, job_key) pair so
# we know WHERE to look for the directive comment.
# ---------------------------------------------------------------------------
def _resolve_emitter(
    ctx: str, head_workflows: dict[str, tuple[str, Any]]
) -> tuple[str, str] | None:
    """Map a rendered context back to (workflow file path, job key).

    Parses "<wf> / <job display> (<event>)", then searches the
    head-side docs for a workflow whose `name:` matches and a job
    whose display name matches. Returns None for unparseable contexts
    or when nothing matches.
    """
    parsed = re.match(r"^(?P<wf>.+?) / (?P<job>.+) \((?P<event>[^)]+)\)$", ctx)
    if parsed is None:
        return None
    wf_wanted = parsed.group("wf")
    job_wanted = parsed.group("job")
    for path, (_raw, doc) in head_workflows.items():
        if not isinstance(doc, dict) or doc.get("name") != wf_wanted:
            continue
        jobs = doc.get("jobs") or {}
        if not isinstance(jobs, dict):
            continue
        for jkey, jbody in jobs.items():
            if jkey == "__lines__" or not isinstance(jbody, dict):
                continue
            if _job_display(jbody, jkey) == job_wanted:
                return (path, jkey)
    return None
# ---------------------------------------------------------------------------
# Driver
# ---------------------------------------------------------------------------
def run() -> int:
    """Driver for the Tier 2g lint (see module docstring).

    Steps: (1) find workflow files changed base..head, (2) compute the
    set of newly-emitted contexts, (3) fetch the branch-protection
    context list, (4) validate each new emission's directive comment.
    Returns 0 / 1 / 2 per the module's exit-code contract.
    """
    base_sha = _require_env("BASE_SHA")
    head_sha = _require_env("HEAD_SHA")
    # Token/host are read lazily by api(), but fail fast here (exit 2)
    # on a broken env contract rather than mid-run.
    _require_env("GITEA_TOKEN")
    _require_env("GITEA_HOST")
    repo = _require_env("REPO")
    branch = _env("BRANCH", "main")
    wf_dir = _env("WORKFLOWS_DIR", ".gitea/workflows")
    # Step 1 — find workflow files changed in the PR.
    changed = git_diff_paths(base_sha, head_sha)
    changed_workflows = [
        p
        for p in changed
        if p.startswith(wf_dir + "/")
        and (p.endswith(".yml") or p.endswith(".yaml"))
    ]
    if not changed_workflows:
        print(
            "::notice::no workflow file changes in this PR; "
            "lint-required-context-exists-in-bp skipped."
        )
        return 0
    # Step 2 — load base+head + compute new contexts.
    head_workflows: dict[str, tuple[str, Any]] = {}
    new_contexts: set[str] = set()
    for path in changed_workflows:
        base_raw = git_show(base_sha, path)
        head_raw = git_show(head_sha, path)
        if head_raw is None:
            # File deleted on head — no new emission contribution.
            continue
        try:
            head_doc = yaml.safe_load(head_raw)
        except yaml.YAMLError as e:
            # Head-side parse errors are hard failures (exit 2): the
            # rule cannot be evaluated on unparseable YAML.
            sys.stderr.write(
                f"::error file={path}::YAML parse error on head: {e}\n"
            )
            return 2
        head_workflows[path] = (head_raw, head_doc)
        head_ctx = workflow_contexts(head_doc)
        base_ctx: set[str] = set()
        if base_raw is not None:
            try:
                base_doc = yaml.safe_load(base_raw)
            except yaml.YAMLError:
                # Base-side parse errors are tolerated — a broken base
                # file simply contributes no pre-existing contexts.
                base_doc = None
            if base_doc is not None:
                base_ctx = workflow_contexts(base_doc)
        new_contexts |= (head_ctx - base_ctx)
    if not new_contexts:
        print(
            "::notice::no new context emissions detected in this PR; "
            "lint-required-context-exists-in-bp skipped."
        )
        return 0
    # Step 3 — fetch BP context list.
    status, bp = api("GET", f"/repos/{repo}/branch_protections/{branch}")
    bp_contexts: set[str] = set()
    if status == "forbidden":
        # Graceful-degrade per Tier 2a contract: a token-scope problem
        # must not turn the gate red — surface it loudly, exit 0.
        sys.stderr.write(
            f"::error::GET branch_protections/{branch} returned HTTP 403 — "
            f"DRIFT_BOT_TOKEN lacks repo-admin scope. Cannot verify "
            f"bp-required directives; skipping lint with exit 0 per "
            f"Tier 2a contract. Fix the token, not the lint.\n"
        )
        return 0
    elif status == "not_found":
        # Branch has no protection — nothing to verify against; the
        # bp-required: yes directive can't be satisfied. Treat as
        # graceful-skip rather than red-X.
        print(
            f"::notice::branch '{branch}' has no protection; cannot verify "
            f"bp-required directives. Skipping (exit 0)."
        )
        return 0
    elif status == "ok" and isinstance(bp, dict):
        bp_contexts = set(bp.get("status_check_contexts") or [])
    else:
        # Any other shape is treated as a transient outage — exit 0.
        sys.stderr.write(
            f"::error::branch_protections/{branch} response unexpected; "
            f"status={status}. Treating as transient; exit 0.\n"
        )
        return 0
    # Step 4 — validate each new emission's directive.
    violations: list[str] = []
    for ctx in sorted(new_contexts):
        emitter = _resolve_emitter(ctx, head_workflows)
        if emitter is None:
            # Shouldn't happen — we just derived ctx from head_workflows.
            # Belt-and-suspenders fallback.
            violations.append(
                f"::error::new emission '{ctx}' (could not resolve emitter "
                f"file/job — bug in lint?)"
            )
            continue
        file_path, jkey = emitter
        raw_text, _ = head_workflows[file_path]
        directive = find_directive_for_job(raw_text, jkey)
        if directive is None:
            violations.append(
                f"::error file={file_path}::lint-required-context-exists-in-bp "
                f"(Tier 2g): NEW emission `{ctx}` (job '{jkey}') has no "
                f"directive comment. Add ONE of these comments on the line "
                f"directly above `{jkey}:` (within {_DIRECTIVE_WINDOW} lines):\n"
                f" - `# bp-required: yes` — and ensure the context is "
                f"already in branch_protections/{branch}.status_check_contexts.\n"
                f" - `# bp-required: pending #NNN` — acknowledged asymmetry, "
                f"references the tracking issue for the BP PATCH.\n"
                f" - `# bp-exempt: <reason>` — informational job, not a gate.\n"
                f"Memory: internal#350 (PR#656 + mc#664 empirical case)."
            )
            continue
        kind, value = directive
        if kind == "exempt":
            print(f"::notice::{ctx}: bp-exempt directive present, OK.")
            continue
        if kind == "required-pending":
            # NOTE(review): these two f-string fragments concatenate with
            # no separator, so the output reads "pending #NNNacknowledged".
            # Possibly a separator character was lost upstream of this
            # copy — confirm against the original file before changing
            # the runtime string.
            print(
                f"::notice::{ctx}: bp-required: pending #{value}"
                f"acknowledged asymmetry, OK."
            )
            continue
        if kind == "required-yes":
            if ctx in bp_contexts:
                print(
                    f"::notice::{ctx}: bp-required: yes, and context is in "
                    f"BP, OK."
                )
            else:
                violations.append(
                    f"::error file={file_path}::lint-required-context-exists-in-bp "
                    f"(Tier 2g): job '{jkey}' has `bp-required: yes` "
                    f"directive but its emitted context `{ctx}` is NOT in "
                    f"`branch_protections/{branch}.status_check_contexts`. "
                    f"FIX: either (a) add `{ctx}` to BP (Owners-tier PATCH), "
                    f"or (b) downgrade the directive to "
                    f"`# bp-required: pending #NNN` referencing the tracker "
                    f"for the pending BP PATCH."
                )
    if violations:
        print(
            f"::error::lint-required-context-exists-in-bp: "
            f"{len(violations)} violation(s) across "
            f"{len(changed_workflows)} changed workflow file(s)."
        )
        for v in violations:
            print(v)
        return 1
    print(
        f"::notice::lint-required-context-exists-in-bp: "
        f"{len(new_contexts)} new emission(s) all directive-validated."
    )
    return 0
# Script entry point — propagate run()'s exit code to the shell.
if __name__ == "__main__":
    sys.exit(run())

View File

@ -222,20 +222,9 @@ def is_red(status: dict) -> tuple[bool, list[dict]]:
combined = status.get("state")
statuses = status.get("statuses") or []
red_states = {"failure", "error"}
# Schema asymmetry: top-level combined uses `state`, but per-entry
# items in `statuses[]` use `status` in Gitea 1.22.6. Prefer
# `status`; fall back to `state` defensively. Verified empirically
# 2026-05-12 03:42Z. Pre-rev4 code only read `state` from per-entry
# items → failed[] always empty → render_body always showed the
# "no per-context entries were in a red state" fallback even when
# the combined-state correctly flagged red. See
# `feedback_smoke_test_vendor_truth_not_shape_match`.
def _entry_state(s: dict) -> str:
return s.get("status") or s.get("state") or ""
failed = [
s for s in statuses
if isinstance(s, dict) and _entry_state(s) in red_states
if isinstance(s, dict) and s.get("state") in red_states
]
return (combined in red_states or bool(failed), failed)
@ -324,9 +313,7 @@ def render_body(sha: str, failed: list[dict], debug: dict) -> str:
else:
for s in failed:
ctx = s.get("context", "(no context)")
# Per-entry key is `status` in Gitea 1.22.6, not `state`
# (see _entry_state in is_red). Fallback for forward-compat.
state = s.get("status") or s.get("state") or "(no state)"
state = s.get("state", "(no state)")
url = s.get("target_url") or ""
desc = (s.get("description") or "").strip()
entry = f"- **{ctx}** — `{state}`"
@ -559,11 +546,7 @@ def run_once(*, dry_run: bool = False) -> int:
"combined_state": status.get("state"),
"failed_contexts": [s.get("context") for s in failed],
"all_contexts": [
# Per-entry key is `status` in Gitea 1.22.6, not `state`.
# Pre-rev4 debug output reported `state: None` for every
# context, making run logs useless for triage.
{"context": s.get("context"),
"state": s.get("status") or s.get("state")}
{"context": s.get("context"), "state": s.get("state")}
for s in (status.get("statuses") or [])
if isinstance(s, dict)
],

View File

@ -1,829 +0,0 @@
#!/usr/bin/env python3
# sop-checklist-gate — evaluate whether a PR has peer-acked each
# SOP-checklist item. Posts a commit-status that branch protection
# can require.
#
# RFC#351 Step 2 of 6 (implementation MVP).
#
# Invoked by .gitea/workflows/sop-checklist-gate.yml on:
# - pull_request_target: [opened, edited, synchronize, reopened]
# - issue_comment: [created, edited, deleted]
#
# Flow:
# 1. Load .gitea/sop-checklist-config.yaml (from BASE ref — trusted).
# 2. GET /repos/{R}/pulls/{N} — author, head.sha, tier label
# 3. GET /repos/{R}/issues/{N}/comments — extract /sop-ack and /sop-revoke
# 4. For each checklist item:
# a. Is the section marker present in PR body? (author answered)
# b. Is there ≥1 unrevoked /sop-ack from a non-author whose
# team-membership matches required_teams?
# 5. POST /repos/{R}/statuses/{sha} — context
# `sop-checklist / all-items-acked (pull_request)`,
# state=success | failure | pending, description=`acked: N/M …`.
#
# Trust boundary (mirrors RFC#324 §A4):
# This script is loaded from the BASE branch. The workflow's
# actions/checkout step pins ref=base.sha. PR-HEAD code is never
# executed. We only HTTP-call the Gitea API.
#
# Token scope:
# - read:repository / read:organization to enumerate PR + comments
# + team membership (Gitea 1.22.6 quirk: team-membership endpoint
# returns 403 if token owner is not in the team; see review-check.sh
# for the same gotcha — we surface the same fail-closed message).
# - write:repository for `POST /repos/{R}/statuses/{sha}`. Unlike
# RFC#324's pattern (which uses the JOB's own pass/fail as the
# status), we POST the status explicitly because the gate posts
# a single multi-item status with a richer description than a
# bare success/failure context can carry.
#
# Slug normalization rules (canonical form: kebab-case):
# - Lowercase
# - Whitespace + underscores → single dash
# - Strip non [a-z0-9-] characters
# - Collapse adjacent dashes
# - Strip leading/trailing dashes
# - If the result is a digit string (e.g. "1"), look up via
# config.items[*].numeric_alias to get the kebab-case slug.
#
# Examples:
# "Comprehensive_Testing" → "comprehensive-testing"
# "comprehensive testing" → "comprehensive-testing"
# "1" → "comprehensive-testing"
# "Five-Axis-Review" → "five-axis-review"
#
# Revoke semantics:
# /sop-revoke <slug> [reason] — most-recent comment per (slug, user)
# wins. So if Alice posts /sop-ack X then later /sop-revoke X, her ack
# for X is invalidated. Bob's prior /sop-ack X is unaffected. If Alice
# posts /sop-revoke X then later /sop-ack X again, the ack is restored.
from __future__ import annotations
import argparse
import json
import os
import re
import sys
import urllib.error
import urllib.parse
import urllib.request
from typing import Any
# ---------------------------------------------------------------------------
# Slug normalization
# ---------------------------------------------------------------------------
# Pass 1: any run of whitespace and/or underscores becomes a single dash.
_NORMALIZE_REPLACE_RE = re.compile(r"[\s_]+")
# Pass 2: after lowercasing, every char outside [a-z0-9-] is dropped.
_NORMALIZE_STRIP_RE = re.compile(r"[^a-z0-9-]")
# Pass 3: collapse dash runs produced by the two passes above.
_NORMALIZE_DASH_RE = re.compile(r"-+")
def normalize_slug(raw: str, numeric_aliases: dict[int, str] | None = None) -> str:
    """Canonicalize a user-typed slug into kebab-case.

    Rules (see module header): lowercase; whitespace/underscore runs become
    a single dash; characters outside [a-z0-9-] are discarded; adjacent
    dashes collapse; leading/trailing dashes are stripped.

    A pure digit string is resolved through *numeric_aliases* when that
    mapping is supplied; an unknown digit yields "" so the caller can flag
    the directive as unparseable.  ``None`` input also yields "".
    """
    if raw is None:
        return ""
    slug = re.sub(r"[\s_]+", "-", raw.strip().lower())
    slug = re.sub(r"[^a-z0-9-]", "", slug)
    slug = re.sub(r"-+", "-", slug).strip("-")
    if numeric_aliases is not None and slug.isdigit():
        return numeric_aliases.get(int(slug), "")
    return slug
# ---------------------------------------------------------------------------
# Comment parsing — /sop-ack and /sop-revoke
# ---------------------------------------------------------------------------
# A directive must be on its own line. Permits leading whitespace.
# Optional trailing note after the slug for /sop-ack and required reason
# for /sop-revoke (RFC#351 open question 4 — reason is captured but not
# yet validated; future iteration may require a min-length).
# Directive grammar, one directive per line, leading whitespace allowed:
#   group(1) = verb ("sop-ack" | "sop-revoke")
#   group(2) = slug text (non-greedy, so a multi-word tail spills into group 3)
#   group(3) = optional trailing free-text note
_DIRECTIVE_RE = re.compile(
    r"^[ \t]*/(sop-ack|sop-revoke)[ \t]+([A-Za-z0-9_\- ]+?)(?:[ \t]+(.*))?[ \t]*$",
    re.MULTILINE,
)
def parse_directives(
    comment_body: str,
    numeric_aliases: dict[int, str],
    known_slugs: set[str] | None = None,
) -> list[tuple[str, str, str]]:
    """Extract /sop-ack and /sop-revoke directives from a comment body.

    Returns a list of (kind, canonical_slug, note) tuples where:
        kind            "sop-ack" or "sop-revoke"
        canonical_slug  normalized kebab-case form ("" if unparseable)
        note            trailing free-text (may be "")

    Because _DIRECTIVE_RE's slug group is non-greedy, the regex alone
    splits "/sop-ack comprehensive testing" into slug="comprehensive"
    and note="testing", so the module-header example
    ("comprehensive testing" → "comprehensive-testing") could never
    resolve.  When *known_slugs* (the set of configured canonical item
    slugs) is supplied, progressively shorter prefixes of the combined
    "slug + note" text are normalized and matched against it — the
    longest match wins and the remainder becomes the note.  Without
    *known_slugs* the historical first-token behavior is preserved, so
    existing callers are unaffected.
    """
    out: list[tuple[str, str, str]] = []
    if not comment_body:
        return out
    for m in _DIRECTIVE_RE.finditer(comment_body):
        kind = m.group(1)
        raw_slug = (m.group(2) or "").strip()
        if not raw_slug.split():
            # Slug capture was pure whitespace/dashes — nothing to name.
            continue
        note = (m.group(3) or "").strip()
        if known_slugs is not None:
            # Longest-prefix disambiguation: try "slug + note" in full,
            # then drop trailing tokens until a configured slug matches.
            tokens = (raw_slug + (" " + note if note else "")).split()
            matched = False
            for k in range(len(tokens), 0, -1):
                candidate = normalize_slug(" ".join(tokens[:k]), numeric_aliases)
                if candidate in known_slugs:
                    out.append((kind, candidate, " ".join(tokens[k:])))
                    matched = True
                    break
            if matched:
                continue
        # Legacy path: normalize exactly what the regex captured as slug.
        out.append((kind, normalize_slug(raw_slug, numeric_aliases), note))
    return out
# ---------------------------------------------------------------------------
# PR body section detection
# ---------------------------------------------------------------------------
def section_marker_present(body: str, marker: str) -> bool:
    """Case-insensitively check that *marker* occurs in *body* with content.

    "Content" means that, after discarding whitespace and the checkbox
    scaffolding characters (* : - [ ]), something remains either on the
    marker's own line or on the line immediately after it (to allow
    multi-line answers).  A bare template line such as
    ``- [ ] **Comprehensive testing performed**:`` therefore does not
    count on its own.  This is an informational check — the peer-ack is
    the real gate.
    """
    if not body or not marker:
        return False
    start = body.lower().find(marker.lower())
    if start < 0:
        return False

    def _has_content(segment: str) -> bool:
        # Anything left after removing scaffolding chars counts as an answer.
        return bool(re.sub(r"[\s\*:\-\[\]]+", "", segment))

    eol = body.find("\n", start)
    if eol < 0:
        eol = len(body)
    if _has_content(body[start + len(marker):eol]):
        return True
    # Multi-line answer: inspect the very next line as well.
    next_eol = body.find("\n", eol + 1)
    if next_eol < 0:
        next_eol = len(body)
    return _has_content(body[eol + 1:next_eol])
# ---------------------------------------------------------------------------
# Ack-state computation
# ---------------------------------------------------------------------------
def compute_ack_state(
    comments: list[dict[str, Any]],
    pr_author: str,
    items_by_slug: dict[str, dict[str, Any]],
    numeric_aliases: dict[int, str],
    team_membership_probe: "callable[[str, list[str]], list[str]]",
) -> dict[str, dict[str, Any]]:
    """Compute per-item ack state.

    Comments are processed in chronological order (Gitea returns
    issues/{N}/comments oldest-first by default); the most-recent
    directive per (commenter, slug) wins, which is what gives
    /sop-revoke its "undo my own ack" semantics.

    Returns a dict keyed by canonical slug::

        {
            "comprehensive-testing": {
                "ackers": ["bob"],           # non-author, team-verified
                "rejected": {                # diagnostics only
                    "self_ack": ["alice"],   # author acking own PR
                    "not_in_team": ["eve"],  # failed the team probe
                },
            },
            ...
        }

    Side effect: each item dict that received candidate acks gains a
    "_required_resolved" key holding its required_teams list (kept for
    description rendering).

    Note: this revision drops two dead locals from the original
    (``unparseable_per_user`` was written but never read;
    ``rejected_unknown`` was created but never populated or returned —
    the old docstring promised an "unknown_slug" key that never
    appeared).  Observable behavior is unchanged.
    """
    # Step 1: collapse directives per (commenter, slug) — most recent wins.
    latest_directive: dict[tuple[str, str], str] = {}  # (user, slug) → kind
    for c in comments:
        body = c.get("body", "") or ""
        user = (c.get("user") or {}).get("login", "")
        if not user:
            continue
        for kind, slug, _note in parse_directives(body, numeric_aliases):
            if not slug:
                continue  # unparseable directive — ignored
            latest_directive[(user, slug)] = kind
    # Step 2: build candidate ackers per slug, filtering out self-acks and
    # slugs that normalize to something outside the configured item set.
    rejected_self: dict[str, list[str]] = {s: [] for s in items_by_slug}
    pending_team_check: dict[str, list[str]] = {s: [] for s in items_by_slug}
    for (user, slug), kind in latest_directive.items():
        if kind != "sop-ack":
            continue  # a surviving revoke simply means "no ack"
        if slug not in items_by_slug:
            continue  # unknown slug — not attributable to any item
        if user == pr_author:
            rejected_self[slug].append(user)
            continue
        pending_team_check[slug].append(user)
    # Step 3: team-membership probe, batched per slug (required_teams
    # differ per item, so each (user, item) pair must be probed
    # independently even when the same user acks multiple items).
    ackers_per_slug: dict[str, list[str]] = {s: [] for s in items_by_slug}
    rejected_not_in_team: dict[str, list[str]] = {s: [] for s in items_by_slug}
    for slug, candidates in pending_team_check.items():
        if not candidates:
            continue
        required = items_by_slug[slug]["required_teams"]
        approved = team_membership_probe(slug, candidates)  # returns subset
        rejected_not_in_team[slug] = [u for u in candidates if u not in approved]
        ackers_per_slug[slug] = approved
        # Stash required teams for description rendering.
        items_by_slug[slug]["_required_resolved"] = required
    return {
        slug: {
            "ackers": ackers_per_slug[slug],
            "rejected": {
                "self_ack": rejected_self[slug],
                "not_in_team": rejected_not_in_team[slug],
            },
        }
        for slug in items_by_slug
    }
# ---------------------------------------------------------------------------
# Gitea API client
# ---------------------------------------------------------------------------
class GiteaClient:
    """Minimal Gitea REST (api/v1) client for this gate.

    Only the endpoints the script needs are wrapped.  Failed requests
    surface either as a (code, body) pair from `_req` (so callers can
    branch on status codes) or as RuntimeError with the HTTP code and
    parsed body for the job log.
    """

    def __init__(self, host: str, token: str):
        # Base URL is always HTTPS; `host` is the bare hostname.
        self.base = f"https://{host}/api/v1"
        self.token = token
        # Cache team-name → team-id resolutions per org.
        self._team_id_cache: dict[tuple[str, str], int | None] = {}

    def _req(
        self,
        method: str,
        path: str,
        body: dict[str, Any] | None = None,
        ok_codes: tuple[int, ...] = (200, 201, 204),
    ) -> tuple[int, Any]:
        """Issue one API request; return ``(http_code, parsed_body)``.

        HTTPError responses are swallowed into the returned pair rather
        than raised, so callers decide how to treat non-2xx codes.  The
        body is JSON-decoded when possible, otherwise returned as text.

        NOTE(review): `ok_codes` is accepted but never consulted here —
        every caller performs its own code check (post_status even passes
        ok_codes=(201,) and then checks manually).  Confirm it is vestigial
        before removing.
        """
        url = self.base + path
        data = None
        headers = {
            "Authorization": f"token {self.token}",
            "Accept": "application/json",
        }
        if body is not None:
            data = json.dumps(body).encode("utf-8")
            headers["Content-Type"] = "application/json"
        req = urllib.request.Request(url, method=method, data=data, headers=headers)
        try:
            with urllib.request.urlopen(req, timeout=20) as r:
                raw = r.read()
                code = r.getcode()
        except urllib.error.HTTPError as e:
            # Keep the error code/body flowing to the caller instead of raising.
            code = e.code
            raw = e.read()
        try:
            parsed = json.loads(raw.decode("utf-8")) if raw else None
        except json.JSONDecodeError:
            # Non-JSON body (e.g. an HTML error page): return it as text.
            parsed = raw.decode("utf-8", errors="replace") if raw else None
        return code, parsed

    def get_pr(self, owner: str, repo: str, pr: int) -> dict[str, Any]:
        """Fetch one pull request; raises RuntimeError on any non-200."""
        code, data = self._req("GET", f"/repos/{owner}/{repo}/pulls/{pr}")
        if code != 200:
            raise RuntimeError(f"GET pulls/{pr} → HTTP {code}: {data!r}")
        return data

    def get_issue_comments(
        self, owner: str, repo: str, issue: int
    ) -> list[dict[str, Any]]:
        """Fetch ALL issue comments, following pagination (oldest-first)."""
        # Paginate. Gitea default page size 50.
        out: list[dict[str, Any]] = []
        page = 1
        while True:
            code, data = self._req(
                "GET",
                f"/repos/{owner}/{repo}/issues/{issue}/comments?limit=50&page={page}",
            )
            if code != 200:
                raise RuntimeError(
                    f"GET issues/{issue}/comments page={page} → HTTP {code}: {data!r}"
                )
            if not data:
                break
            out.extend(data)
            # A short page means we just consumed the last one.
            if len(data) < 50:
                break
            page += 1
        return out

    def resolve_team_id(self, org: str, team_name: str) -> int | None:
        """Resolve an org team name to its numeric id (None if not found).

        Results — including misses — are cached per (org, name).  Handles
        both response shapes observed from /orgs/{org}/teams/search: a
        dict carrying a "data" list, or a bare list.
        """
        key = (org, team_name)
        if key in self._team_id_cache:
            return self._team_id_cache[key]
        code, data = self._req("GET", f"/orgs/{org}/teams/search?q={urllib.parse.quote(team_name)}")
        team_id = None
        if code == 200 and isinstance(data, dict):
            for t in data.get("data", []):
                if t.get("name") == team_name:
                    team_id = t.get("id")
                    break
        if team_id is None and code == 200 and isinstance(data, list):
            for t in data:
                if t.get("name") == team_name:
                    team_id = t.get("id")
                    break
        self._team_id_cache[key] = team_id
        return team_id

    def is_team_member(self, team_id: int, login: str) -> bool | None:
        """Return True / False / None (unknown — 403 from API)."""
        code, _ = self._req(
            "GET", f"/teams/{team_id}/members/{urllib.parse.quote(login)}"
        )
        if code in (200, 204):
            return True
        if code == 404:
            return False
        # 403 means the token owner isn't in this team, so the API
        # refuses to confirm membership. Fail-closed at the caller.
        return None

    def post_status(
        self,
        owner: str,
        repo: str,
        sha: str,
        state: str,
        context: str,
        description: str,
        target_url: str = "",
    ) -> None:
        """POST a commit status; raises RuntimeError unless HTTP 200/201."""
        body = {
            "state": state,
            "context": context,
            "description": description[:140],  # Gitea truncates to 255 but be safe
            "target_url": target_url or "",
        }
        # NOTE(review): ok_codes is ignored by _req; the explicit check
        # below is what actually enforces success.
        code, data = self._req(
            "POST",
            f"/repos/{owner}/{repo}/statuses/{sha}",
            body=body,
            ok_codes=(201,),
        )
        if code not in (200, 201):
            raise RuntimeError(
                f"POST statuses/{sha} → HTTP {code}: {data!r}"
            )
# ---------------------------------------------------------------------------
# Config loader (PyYAML-free — config file is intentionally tiny + flat)
# ---------------------------------------------------------------------------
def load_config(path: str) -> dict[str, Any]:
    """Load .gitea/sop-checklist-config.yaml.

    Prefers PyYAML when importable; otherwise falls back to the built-in
    minimal parser, which is sufficient for our deliberately constrained
    flat config shape (keeping the runner free of the extra dependency —
    PyYAML is one apt install away, but we avoid it).
    """
    try:
        import yaml  # type: ignore[import-not-found]
    except ImportError:
        return _load_config_minimal(path)
    with open(path) as fh:
        return yaml.safe_load(fh)
def _load_config_minimal(path: str) -> dict[str, Any]:
    """Parse the config file with the built-in YAML-subset parser.

    Supported subset: top-level scalar values, one level of nested maps
    (e.g. tier_failure_mode), a top-level list of maps (items:), and —
    inside an item — scalars plus lists of scalars.  No nested lists,
    anchors, multi-document streams, or flow style beyond inline lists.
    """
    with open(path) as handle:
        return _parse_minimal_yaml(handle.readlines())
def _parse_minimal_yaml(lines: list[str]) -> dict[str, Any]:  # noqa: C901
    """Hand-rolled subset parser. See _load_config_minimal docstring.

    Operates on pre-read lines in two phases: (1) strip comments and
    blank lines while recording each surviving line's indent; (2) a
    single index-driven pass over the cleaned lines that recognizes
    top-level scalars, one-level sub-maps, and a list of item maps.
    """
    # Phase 1: strip comments + blank lines but preserve indentation.
    cleaned: list[tuple[int, str]] = []
    for raw in lines:
        # Don't strip a "#" that is inside a quoted value.
        body = raw.rstrip("\n")
        # Remove trailing comment — only a "#" at column 0 or preceded by
        # whitespace counts, so "foo#bar" values survive intact.
        idx = body.find("#")
        if idx >= 0 and (idx == 0 or body[idx - 1] in " \t"):
            body = body[:idx].rstrip()
        if not body.strip():
            continue
        indent = len(body) - len(body.lstrip(" "))
        cleaned.append((indent, body.strip()))
    root: dict[str, Any] = {}
    i = 0
    n = len(cleaned)

    def parse_scalar(s: str) -> Any:
        # Quoted strings pass through verbatim; bare true/yes/false/no
        # become booleans; integers convert; everything else stays a str.
        s = s.strip()
        if s.startswith('"') and s.endswith('"'):
            return s[1:-1]
        if s.startswith("'") and s.endswith("'"):
            return s[1:-1]
        if s.lower() in ("true", "yes"):
            return True
        if s.lower() in ("false", "no"):
            return False
        try:
            return int(s)
        except ValueError:
            pass
        return s

    def parse_inline_list(s: str) -> list[Any]:
        # "[a, b]" → [a, b]; a bare scalar becomes a one-element list.
        # NOTE: commas inside quoted elements are not supported.
        s = s.strip()
        if not (s.startswith("[") and s.endswith("]")):
            return [parse_scalar(s)]
        inner = s[1:-1]
        if not inner.strip():
            return []
        return [parse_scalar(x.strip()) for x in inner.split(",")]

    # Phase 2: walk the cleaned lines.  `i` is only advanced explicitly;
    # every branch must move it forward or the loop would spin.
    while i < n:
        indent, line = cleaned[i]
        if indent != 0:
            # Indented stray line with no top-level owner — skip it.
            i += 1
            continue
        if ":" not in line:
            i += 1
            continue
        key, _, rest = line.partition(":")
        key = key.strip()
        rest = rest.strip()
        if rest == "":
            # Block — could be map or list.
            i += 1
            # Look ahead for first child.
            if i < n and cleaned[i][1].startswith("- "):
                # List of items.
                items: list[Any] = []
                while i < n and cleaned[i][0] > indent and cleaned[i][1].startswith("- "):
                    item_indent = cleaned[i][0]
                    first_kv = cleaned[i][1][2:].strip()  # strip "- "
                    item: dict[str, Any] = {}
                    if ":" in first_kv:
                        k, _, v = first_kv.partition(":")
                        k = k.strip()
                        v = v.strip()
                        if v == "":
                            item[k] = ""
                        elif v.startswith(">-") or v.startswith(">"):
                            # Folded scalar continues on subsequent indented lines
                            collected: list[str] = []
                            i += 1
                            while i < n and cleaned[i][0] > item_indent:
                                collected.append(cleaned[i][1])
                                i += 1
                            item[k] = " ".join(collected)
                            # Folded value consumes the whole item — no
                            # further k:v lines are attached to it.
                            items.append(item)
                            continue
                        elif v.startswith("["):
                            item[k] = parse_inline_list(v)
                        else:
                            item[k] = parse_scalar(v)
                    # Advance past the "- " line itself (all non-folded paths).
                    i += 1
                    # Subsequent k:v lines at deeper indent belong to this item.
                    while i < n and cleaned[i][0] > item_indent and not cleaned[i][1].startswith("- "):
                        sub_indent, sub_line = cleaned[i]
                        if ":" in sub_line:
                            k, _, v = sub_line.partition(":")
                            k = k.strip()
                            v = v.strip()
                            if v == "":
                                item[k] = ""
                                i += 1
                            elif v.startswith(">-") or v.startswith(">"):
                                # Folded scalar inside an item: gather all
                                # deeper-indented continuation lines.
                                collected = []
                                i += 1
                                while i < n and cleaned[i][0] > sub_indent:
                                    collected.append(cleaned[i][1])
                                    i += 1
                                item[k] = " ".join(collected)
                            elif v.startswith("["):
                                item[k] = parse_inline_list(v)
                                i += 1
                            else:
                                item[k] = parse_scalar(v)
                                i += 1
                        else:
                            # Line without a colon inside an item — ignore.
                            i += 1
                    items.append(item)
                root[key] = items
            else:
                # Sub-map (one level deep; values are scalars or inline lists).
                submap: dict[str, Any] = {}
                while i < n and cleaned[i][0] > indent:
                    sub_indent, sub_line = cleaned[i]
                    if ":" in sub_line:
                        k, _, v = sub_line.partition(":")
                        # Sub-map keys may be quoted (e.g. "tier:low") —
                        # strip surrounding quotes from the key.
                        k = k.strip().strip('"').strip("'")
                        v = v.strip()
                        if v.startswith("[") and v.endswith("]"):
                            submap[k] = parse_inline_list(v)
                        else:
                            submap[k] = parse_scalar(v)
                    i += 1
                root[key] = submap
        else:
            # Inline scalar or list.
            if rest.startswith("[") and rest.endswith("]"):
                root[key] = parse_inline_list(rest)
            else:
                root[key] = parse_scalar(rest)
            i += 1
    return root
# ---------------------------------------------------------------------------
# Main entry point
# ---------------------------------------------------------------------------
def render_status(
    items: list[dict[str, Any]],
    ack_state: dict[str, dict[str, Any]],
    body_state: dict[str, bool],
) -> tuple[str, str]:
    """Return (state, description) for the commit-status POST.

    state is "success" only when every item BOTH has at least one valid
    peer-ack AND has its PR-body section filled in; otherwise "failure".
    (tier:low soft-fail handling is applied by the caller, which
    overrides state to "success" and prefixes the description — this
    function itself is tier-agnostic.)

    At most 3 slugs are listed per category because post_status truncates
    the description to 140 chars.

    Bug fixed here: the description parts were previously joined with ""
    which glued them together ("acked: 1/2missing: x"); they are now
    "; "-separated.
    """
    n = len(items)
    acked = [it["slug"] for it in items if ack_state[it["slug"]]["ackers"]]
    missing = [it["slug"] for it in items if not ack_state[it["slug"]]["ackers"]]
    missing_body = [
        it["slug"] for it in items if not body_state.get(it["slug"], False)
    ]

    def _summarize(slugs: list[str]) -> str:
        # Show up to 3 slugs to stay inside the 140-char budget.
        shown = ", ".join(slugs[:3])
        if len(slugs) > 3:
            shown += f", +{len(slugs) - 3}"
        return shown

    desc_parts = [f"acked: {len(acked)}/{n}"]
    if missing:
        desc_parts.append(f"missing: {_summarize(missing)}")
    if missing_body:
        desc_parts.append(f"body-unfilled: {_summarize(missing_body)}")
    state = "success" if not missing and not missing_body else "failure"
    return state, "; ".join(desc_parts)
def get_tier_mode(pr: dict[str, Any], cfg: dict[str, Any]) -> str:
    """Map the PR's tier:* label to a failure mode ('hard' or 'soft').

    The first tier:* label (in label order) with an entry in
    cfg.tier_failure_mode wins; otherwise cfg.default_mode applies,
    falling back to 'hard' when unset.
    """
    mode_map = cfg.get("tier_failure_mode") or {}
    for label in pr.get("labels") or []:
        name = label.get("name", "") or ""
        if name.startswith("tier:") and name in mode_map:
            return mode_map[name]
    return cfg.get("default_mode", "hard")
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: compute the ack state for one PR and POST it as a
    single commit status.

    Flow: parse args → load config → fetch PR + comments → compute per-item
    ack state (with a cached team-membership probe) → render one status →
    POST it (unless --dry-run).

    Exit codes: 0 on success — and, by default, regardless of ack state,
    because the POSTed status is the gate, not the job conclusion; 1 on a
    malformed PR payload; 2 on a missing token / unusable client.
    """
    p = argparse.ArgumentParser()
    p.add_argument("--owner", required=True)
    p.add_argument("--repo", required=True)
    p.add_argument("--pr", type=int, required=True)
    p.add_argument("--config", default=".gitea/sop-checklist-config.yaml")
    p.add_argument("--gitea-host", default="git.moleculesai.app")
    p.add_argument(
        "--dry-run",
        action="store_true",
        help="Compute state but do not POST the status.",
    )
    p.add_argument(
        "--status-context",
        default="sop-checklist / all-items-acked (pull_request)",
    )
    p.add_argument(
        "--exit-on-state",
        action="store_true",
        help=(
            "If set, exit non-zero when state=failure. Default OFF so the "
            "job-level conclusion is independent of ack-state — the only "
            "thing BP sees is the POSTed status. Useful for local debugging."
        ),
    )
    args = p.parse_args(argv)
    token = os.environ.get("GITEA_TOKEN", "")
    if not token and not args.dry_run:
        print("::error::GITEA_TOKEN env required", file=sys.stderr)
        return 2
    cfg = load_config(args.config)
    items: list[dict[str, Any]] = cfg["items"]
    items_by_slug = {it["slug"]: it for it in items}
    # Numeric-alias map ("1" → kebab slug) consumed by parse_directives.
    numeric_aliases = {
        int(it["numeric_alias"]): it["slug"] for it in items if it.get("numeric_alias")
    }
    # NOTE(review): even --dry-run needs a client for the GET calls below,
    # so a token is effectively mandatory in all modes.
    client = GiteaClient(args.gitea_host, token) if token else None
    if not client:
        print("::error::No client (dry-run without token has nothing to do)", file=sys.stderr)
        return 2
    pr = client.get_pr(args.owner, args.repo, args.pr)
    if pr.get("state") != "open":
        # Closed/merged PRs: nothing to gate.
        print(f"::notice::PR #{args.pr} is {pr.get('state')} — gate is a no-op")
        return 0
    author = (pr.get("user") or {}).get("login", "")
    head_sha = (pr.get("head") or {}).get("sha", "")
    body = pr.get("body", "") or ""
    if not author or not head_sha:
        print("::error::PR payload missing user.login or head.sha", file=sys.stderr)
        return 1
    comments = client.get_issue_comments(args.owner, args.repo, args.pr)
    # Build team-membership probe closure that caches results per
    # (user, team-id) so a user acking multiple items only triggers
    # one membership lookup per team.
    team_member_cache: dict[tuple[str, int], bool | None] = {}

    def probe(slug: str, users: list[str]) -> list[str]:
        """Return the subset of `users` in at least one required team of `slug`."""
        item = items_by_slug[slug]
        team_names: list[str] = item["required_teams"]
        # Resolve names → ids. NOTE: orgs/{org}/teams/search may not be
        # available — fall back to the list endpoint.
        team_ids: list[int] = []
        for tn in team_names:
            tid = client.resolve_team_id(args.owner, tn)
            if tid is None:
                # Try the list endpoint as a fallback.
                code, data = client._req(  # noqa: SLF001
                    "GET", f"/orgs/{args.owner}/teams"
                )
                if code == 200 and isinstance(data, list):
                    for t in data:
                        if t.get("name") == tn:
                            tid = t.get("id")
                            # Prime the client's cache so later lookups hit.
                            client._team_id_cache[(args.owner, tn)] = tid  # noqa: SLF001
                            break
            if tid is not None:
                team_ids.append(tid)
            else:
                # Unresolvable team → no probes possible → fail closed.
                print(
                    f"::warning::could not resolve team-id for '{tn}' "
                    f"in org '{args.owner}' — item '{slug}' will fail closed",
                    file=sys.stderr,
                )
        approved: list[str] = []
        for u in users:
            for tid in team_ids:
                cache_key = (u, tid)
                if cache_key not in team_member_cache:
                    team_member_cache[cache_key] = client.is_team_member(tid, u)
                result = team_member_cache[cache_key]
                if result is True:
                    approved.append(u)
                    break
                if result is None:
                    print(
                        f"::warning::team-probe for {u} in team-id {tid} returned 403 "
                        "(token owner not in that team — fail-closed per RFC#324)",
                        file=sys.stderr,
                    )
                    # Treat as not-in-team for this user/team pair; loop
                    # may still find membership in another team.
        return approved

    ack_state = compute_ack_state(comments, author, items_by_slug, numeric_aliases, probe)
    body_state = {it["slug"]: section_marker_present(body, it["pr_section_marker"]) for it in items}
    state, description = render_status(items, ack_state, body_state)
    mode = get_tier_mode(pr, cfg)
    if mode == "soft":
        # tier:low: acks are informational only — post success so BP gate passes.
        # Description carries "[info tier:low]" prefix so reviewers know acks
        # were not required (vs a tier:medium+ PR that truly passed all acks).
        state = "success"
        description = f"[info tier:low] {description}"
    # Diagnostics to job log.
    print(f"::notice::PR #{args.pr} author={author} head={head_sha[:7]} mode={mode}")
    for it in items:
        slug = it["slug"]
        ackers = ack_state[slug]["ackers"]
        if ackers:
            print(f"::notice:: [PASS] {slug} — acked by {','.join(ackers)}")
        else:
            r = ack_state[slug]["rejected"]
            extras: list[str] = []
            if r["self_ack"]:
                extras.append(f"self-acks-rejected:{','.join(r['self_ack'])}")
            if r["not_in_team"]:
                extras.append(f"not-in-team:{','.join(r['not_in_team'])}")
            extra = " (" + "; ".join(extras) + ")" if extras else ""
            print(f"::notice:: [WAIT] {slug} — no valid peer-ack yet{extra}")
    print(f"::notice::posting status: state={state} desc={description!r}")
    if args.dry_run:
        print("::notice::--dry-run: not posting status")
        if args.exit_on_state:
            return 0 if state in ("success", "pending") else 1
        return 0
    target_url = f"https://{args.gitea_host}/{args.owner}/{args.repo}/pulls/{args.pr}"
    client.post_status(
        args.owner, args.repo, head_sha,
        state=state, context=args.status_context,
        description=description, target_url=target_url,
    )
    # NOTE(review): context and state are concatenated with no separator in
    # the f-string below — looks like a missing " → "; confirm intended format.
    print(f"::notice::status posted: {args.status_context}{state}")
    # By default exit 0 — the POSTed status IS the gate, NOT the job
    # conclusion. If the job exits 1 BP will see TWO failure signals
    # (one from the job's auto-status, one from our POST), making the
    # description less actionable. --exit-on-state restores the old
    # behavior for local debugging.
    if args.exit_on_state:
        return 0 if state in ("success", "pending") else 1
    return 0
# Script entry point: propagate main()'s exit code to the shell.
if __name__ == "__main__":
    sys.exit(main())

View File

@ -96,27 +96,16 @@ API="https://${GITEA_HOST}/api/v1"
AUTH="Authorization: token ${GITEA_TOKEN}"
echo "::notice::tier-check start: repo=$OWNER/$NAME pr=$PR_NUMBER author=$PR_AUTHOR"
# Sanity: token resolves to a user.
# Use || true on the jq pipeline so that set -euo pipefail (line 45) does not
# cause the script to exit prematurely when the token is empty/invalid — the
# if check below handles that case gracefully. Without || true, a 401 from an
# empty/invalid token causes jq to exit 1, triggering set -e and exiting the
# entire script before SOP_FAIL_OPEN can be evaluated (the check is in the jq-
# install block; if jq is already on PATH, that block is skipped entirely).
WHOAMI=$(curl -sS -H "$AUTH" "${API}/user" | jq -r '.login // ""') || true
# Sanity: token resolves to a user
WHOAMI=$(curl -sS -H "$AUTH" "${API}/user" | jq -r '.login // ""')
if [ -z "$WHOAMI" ]; then
echo "::error::GITEA_TOKEN cannot resolve a user via /api/v1/user — check the token scope and that the secret is wired correctly."
if [ "${SOP_FAIL_OPEN:-}" = "1" ]; then
echo "::warning::SOP_FAIL_OPEN=1 — exiting 0 so CI does not block."
exit 0
fi
exit 1
fi
echo "::notice::token resolves to user: $WHOAMI"
# 1. Read tier label. || true ensures set -euo pipefail does not abort the
# script if curl or jq fails (e.g. 401 from empty token).
LABELS=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/issues/${PR_NUMBER}/labels" | jq -r '.[].name') || true
# 1. Read tier label
LABELS=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/issues/${PR_NUMBER}/labels" | jq -r '.[].name')
TIER=""
for L in $LABELS; do
case "$L" in
@ -187,25 +176,17 @@ fi
# 4. Resolve all team names → IDs
# /orgs/{org}/teams/{slug}/... endpoints don't exist on Gitea 1.22;
# we use /teams/{id}.
# set +e prevents set -e from aborting the script if curl fails (e.g. empty token).
ORG_TEAMS_FILE=$(mktemp)
trap 'rm -f "$ORG_TEAMS_FILE"' EXIT
set +e
HTTP_CODE=$(curl -sS -o "$ORG_TEAMS_FILE" -w '%{http_code}' -H "$AUTH" \
"${API}/orgs/${OWNER}/teams")
_HTTP_EXIT=$?
set -e
debug "teams-list HTTP=$HTTP_CODE (curl exit=$_HTTP_EXIT) size=$(wc -c <"$ORG_TEAMS_FILE")"
debug "teams-list HTTP=$HTTP_CODE size=$(wc -c <"$ORG_TEAMS_FILE")"
if [ "${SOP_DEBUG:-}" = "1" ]; then
echo " [debug] teams-list body (first 300 chars):" >&2
head -c 300 "$ORG_TEAMS_FILE" >&2; echo >&2
fi
if [ "$_HTTP_EXIT" -ne 0 ] || [ "$HTTP_CODE" != "200" ]; then
echo "::error::GET /orgs/${OWNER}/teams failed (curl exit=$_HTTP_EXIT HTTP=$HTTP_CODE) — token may lack read:org scope or be invalid."
if [ "${SOP_FAIL_OPEN:-}" = "1" ]; then
echo "::warning::SOP_FAIL_OPEN=1 — exiting 0 so CI does not block."
exit 0
fi
if [ "$HTTP_CODE" != "200" ]; then
echo "::error::GET /orgs/${OWNER}/teams returned HTTP $HTTP_CODE — token likely lacks read:org scope."
exit 1
fi
@ -250,22 +231,9 @@ for _t in $_all_teams; do
debug "team-id: $_t$_id"
done
# 5. Read approving reviewers. set +e disables set -e temporarily so that curl
# failures (e.g. empty/invalid token → HTTP 401) do not abort the script before
# SOP_FAIL_OPEN is evaluated. set -e is restored immediately after.
set +e
# 5. Read approving reviewers
REVIEWS=$(curl -sS -H "$AUTH" "${API}/repos/${OWNER}/${NAME}/pulls/${PR_NUMBER}/reviews")
_REVIEWS_EXIT=$?
set -e
if [ $_REVIEWS_EXIT -ne 0 ] || [ -z "$REVIEWS" ]; then
echo "::error::Failed to fetch reviews (curl exit=$_REVIEWS_EXIT) — token may be invalid or unreachable."
if [ "${SOP_FAIL_OPEN:-}" = "1" ]; then
echo "::warning::SOP_FAIL_OPEN=1 — exiting 0 so CI does not block."
exit 0
fi
exit 1
fi
APPROVERS=$(echo "$REVIEWS" | jq -r '[.[] | select(.state=="APPROVED") | .user.login] | unique | .[]') || true
APPROVERS=$(echo "$REVIEWS" | jq -r '[.[] | select(.state=="APPROVED") | .user.login] | unique | .[]')
if [ -z "$APPROVERS" ]; then
echo "::error::No approving reviews on this PR. Set SOP_DEBUG=1 and re-run for diagnostics."
exit 1

View File

@ -19,18 +19,13 @@ What this script does, per `.gitea/workflows/status-reaper.yml` invocation:
downstream Gitea uses ` / ` as the workflow/job separator).
Classify each by whether `on:` contains a `push:` trigger.
2. List the last N (=30, rev3 widened from 10) commits on
WATCH_BRANCH via GET /repos/{o}/{r}/commits?sha={branch}&limit={N}.
rev2 sweeps N commits per tick instead of HEAD only schedule
workflows post `failure` to whatever SHA was HEAD when they
COMPLETED, so by the next */5 tick main has often moved forward
and the red gets stranded on a stale commit. rev3 widens the
window from 10 30 because schedule workflows post `failure`
RETROACTIVELY (5-15 min after their merge); a 10-commit window
is narrower than the merge-cadence during a burst, so reds land
OUTSIDE the window before reaper sees them (Phase 1+2 evidence:
rev2 run 17057 at 02:46Z saw 185/0 contexts on 10 SHAs; direct
probe ~30min later showed ~25 fails on those same 10 SHAs).
2. List the last N (=10) commits on WATCH_BRANCH via
GET /repos/{o}/{r}/commits?sha={branch}&limit={N}. rev2 sweeps
N commits per tick instead of HEAD only schedule workflows
post `failure` to whatever SHA was HEAD when they COMPLETED, so
by the next */5 tick main has often moved forward and the red
gets stranded on a stale commit (Phase 1+2 evidence: rev1 saw
`compensated:0` every tick across ~6 cycles).
3. For EACH SHA in the list:
- GET combined commit status. Per-SHA error isolation
@ -452,18 +447,7 @@ def reap(
if not isinstance(s, dict):
continue
context = s.get("context") or ""
# Schema asymmetry: Gitea 1.22.6 returns the TOP-LEVEL combined
# aggregate as `combined.state` but each per-context entry in
# `combined.statuses[]` uses the key `status`, NOT `state`.
# Prefer `status`; fall back to `state` so a future Gitea
# version (or a test fixture written against the wrong key)
# still flows through the compensation path. Verified empirically
# via direct API probe 2026-05-12 03:42Z:
# /repos/.../commits/{sha}/status entries → key is "status".
# Pre-rev4 code read "state" only → returned "" → bypassed the
# `state != "failure"` guard → compensation path unreachable.
# See `feedback_smoke_test_vendor_truth_not_shape_match`.
state = s.get("status") or s.get("state") or ""
state = s.get("state") or ""
# Only `failure` is the bug shape. `error`/`pending`/`success`
# left alone — they have other meanings.
@ -518,17 +502,7 @@ def reap(
# already stale enough that the schedule-run that posted them has long
# since been overwritten by a real push trigger. See `reference_post_
# suspension_pipeline` for the merge-cadence baseline.
#
# rev3 (2026-05-12, hongming-pc2 GO 03:25Z): widened from 10 → 30.
# rev2 (limit=10) shipped 01:48Z and ran 6/6 ticks post-merge with
# `compensated:0` despite ~25 stranded reds visible on those same 10
# SHAs ~30min later. Root cause: schedule workflows post `failure`
# RETROACTIVELY 5-15 min after their merge, so by the time reaper's
# next */5 tick lands, the stranded red is on a SHA that has already
# fallen out of a 10-commit window during a burst-merge period.
# Trades window-width-cheap for cadence-loady (per hongming-pc2):
# kept `*/5` cron unchanged; only the window-N is widened.
DEFAULT_SWEEP_LIMIT = 30
DEFAULT_SWEEP_LIMIT = 10
def list_recent_commit_shas(branch: str, limit: int) -> list[str]:

View File

@ -1,114 +0,0 @@
import importlib.util
import sys
from pathlib import Path
SCRIPT = Path(__file__).resolve().parents[1] / "gitea-merge-queue.py"
spec = importlib.util.spec_from_file_location("gitea_merge_queue", SCRIPT)
mq = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = mq
spec.loader.exec_module(mq)
def test_latest_statuses_dedupes_by_context_newest_first():
statuses = [
{"context": "CI / all-required (pull_request)", "status": "failure"},
{"context": "sop-checklist / all-items-acked (pull_request)", "state": "success"},
{"context": "CI / all-required (pull_request)", "status": "success"},
]
latest = mq.latest_statuses_by_context(statuses)
assert latest["CI / all-required (pull_request)"]["status"] == "failure"
assert latest["sop-checklist / all-items-acked (pull_request)"]["state"] == "success"
def test_required_contexts_green_rejects_missing_and_pending():
latest = mq.latest_statuses_by_context([
{"context": "CI / all-required (pull_request)", "status": "success"},
{"context": "sop-checklist / all-items-acked (pull_request)", "status": "pending"},
])
ok, missing_or_bad = mq.required_contexts_green(
latest,
[
"CI / all-required (pull_request)",
"sop-checklist / all-items-acked (pull_request)",
"qa-review / approved (pull_request)",
],
)
assert ok is False
assert missing_or_bad == [
"sop-checklist / all-items-acked (pull_request)=pending",
"qa-review / approved (pull_request)=missing",
]
def test_choose_next_pr_sorts_by_queue_label_timestamp_then_number():
    """The oldest-created queued PR (#9) wins; entry #7 carries the label
    but no pull_request key and is not expected to be selected."""
    def queued(number, created, updated=None, is_pr=True):
        # Minimal Gitea issue payload carrying the queue label.
        issue = {
            "number": number,
            "labels": [{"name": "merge-queue"}],
            "created_at": created,
        }
        if is_pr:
            issue["pull_request"] = {}
        if updated is not None:
            issue["updated_at"] = updated
        return issue

    issues = [
        queued(12, "2026-05-13T05:00:00Z", updated="2026-05-13T06:00:00Z"),
        queued(9, "2026-05-13T04:00:00Z", updated="2026-05-13T07:00:00Z"),
        queued(7, "2026-05-13T03:00:00Z", is_pr=False),
    ]
    selected = mq.choose_next_queued_issue(issues, queue_label="merge-queue")
    assert selected["number"] == 9
def test_pr_needs_update_when_base_sha_absent_from_commits():
    """The base SHA must appear somewhere in the PR's commit list."""
    commits = [{"sha": sha} for sha in ("head", "parent")]
    # Base SHA not in the list -> PR is stale and needs an update.
    assert mq.pr_contains_base_sha(commits, "mainsha") is False
    # Base SHA present (as a parent) -> PR already contains current base.
    assert mq.pr_contains_base_sha(commits, "parent") is True
def test_merge_decision_requires_main_green_pr_green_and_current_base():
    """Green main + green PR + up-to-date base => ready, action 'merge'."""
    ctx = "CI / all-required (pull_request)"
    green_pr = {
        "state": "success",
        "statuses": [{"context": ctx, "status": "success"}],
    }
    decision = mq.evaluate_merge_readiness(
        main_status={"state": "success", "statuses": []},
        pr_status=green_pr,
        required_contexts=[ctx],
        pr_has_current_base=True,
    )
    assert decision.ready is True
    assert decision.action == "merge"
def test_merge_decision_updates_stale_pr_before_merge():
    """Everything green but stale base => not ready; action is 'update'."""
    ctx = "CI / all-required (pull_request)"
    green_pr = {
        "state": "success",
        "statuses": [{"context": ctx, "status": "success"}],
    }
    decision = mq.evaluate_merge_readiness(
        main_status={"state": "success", "statuses": []},
        pr_status=green_pr,
        required_contexts=[ctx],
        pr_has_current_base=False,
    )
    assert decision.ready is False
    assert decision.action == "update"

View File

@ -1,505 +0,0 @@
"""Unit tests for .gitea/scripts/lint_pre_flip_continue_on_error.py.
These tests pin the pure-logic surface (flip detection + per-flip
verdict aggregation) without making real HTTP calls. The end-to-end
git ls-tree + Gitea API path is exercised by running the workflow
against real PRs.
Run locally::
python3 -m unittest .gitea/scripts/tests/test_lint_pre_flip_continue_on_error.py -v
Mirrors the pattern in scripts/ops/test_check_migration_collisions.py
+ scripts/test_build_runtime_package.py.
"""
from __future__ import annotations
import importlib.util
import os
import sys
import unittest
from pathlib import Path
from unittest import mock
# Load the script as a module without invoking main(). Tests must NOT
# depend on the full runtime env contract (GITEA_TOKEN etc.), so we
# import individual functions and stub the network surface explicitly.
# Load the script under test directly from its path so main() is never
# invoked and no runtime env contract (GITEA_TOKEN etc.) is required.
SCRIPT_PATH = Path(__file__).resolve().parent.parent / "lint_pre_flip_continue_on_error.py"
spec = importlib.util.spec_from_file_location("lpfc", SCRIPT_PATH)
lpfc = importlib.util.module_from_spec(spec)
spec.loader.exec_module(lpfc)
# --------------------------------------------------------------------------
# Fixtures: minimal valid workflow YAML on each side of a "diff"
# --------------------------------------------------------------------------
CI_YML_BASE = """\
name: CI
on:
push:
branches: [main]
jobs:
platform-build:
name: Platform (Go)
runs-on: ubuntu-latest
continue-on-error: true
steps:
- run: echo platform
canvas-build:
name: Canvas (Next.js)
runs-on: ubuntu-latest
continue-on-error: true
steps:
- run: echo canvas
all-required:
runs-on: ubuntu-latest
continue-on-error: true
needs: [platform-build, canvas-build]
steps:
- run: echo ok
"""
CI_YML_HEAD_FLIPPED = """\
name: CI
on:
push:
branches: [main]
jobs:
platform-build:
name: Platform (Go)
runs-on: ubuntu-latest
continue-on-error: false
steps:
- run: echo platform
canvas-build:
name: Canvas (Next.js)
runs-on: ubuntu-latest
continue-on-error: false
steps:
- run: echo canvas
all-required:
runs-on: ubuntu-latest
continue-on-error: true
needs: [platform-build, canvas-build]
steps:
- run: echo ok
"""
CI_YML_HEAD_NO_DIFF = CI_YML_BASE # identical to base, no flip
# --------------------------------------------------------------------------
# 1. CoE coercion (truthy/falsy/quoted/absent)
# --------------------------------------------------------------------------
class TestCoerceCoE(unittest.TestCase):
    """_coerce_coe normalizes YAML-ish continue-on-error values to bool."""

    def _coe(self, raw):
        # Thin call-through so each test reads as input -> expectation.
        return lpfc._coerce_coe(raw)

    def test_python_bool_true(self):
        self.assertTrue(self._coe(True))

    def test_python_bool_false(self):
        self.assertFalse(self._coe(False))

    def test_none_is_false(self):
        # GitHub Actions default: an absent continue-on-error means false.
        self.assertFalse(self._coe(None))

    def test_string_true_lowercase(self):
        # Quoted "true" in YAML — Gitea Actions normalizes to True.
        self.assertTrue(self._coe("true"))

    def test_string_True_titlecase(self):
        self.assertTrue(self._coe("True"))

    def test_string_yes(self):
        # YAML 1.1 truthy form.
        self.assertTrue(self._coe("yes"))

    def test_string_false(self):
        self.assertFalse(self._coe("false"))

    def test_string_random_falsy(self):
        # An unrecognized string is treated as falsy — safer than
        # silently coercing "maybe" to True and false-positiving a flip.
        self.assertFalse(self._coe("maybe"))
# --------------------------------------------------------------------------
# 2. Diff detection — flips, not arbitrary changes
# --------------------------------------------------------------------------
class TestDetectFlips(unittest.TestCase):
    """detect_flips compares workflow YAML on base vs head and reports
    only true->false continue-on-error transitions — never additions,
    removals, or unrelated edits.

    NOTE(review): indentation inside the inline YAML strings below was
    reconstructed (the rendered source stripped it); confirm against
    the original fixture file.
    """

    def test_no_flip_in_diff_passes(self):
        # Acceptance test #1: PR doesn't flip continue-on-error → 0 flips.
        flips = lpfc.detect_flips(
            {".gitea/workflows/ci.yml": CI_YML_BASE},
            {".gitea/workflows/ci.yml": CI_YML_HEAD_NO_DIFF},
        )
        self.assertEqual(flips, [])

    def test_flip_detected_in_one_file(self):
        flips = lpfc.detect_flips(
            {".gitea/workflows/ci.yml": CI_YML_BASE},
            {".gitea/workflows/ci.yml": CI_YML_HEAD_FLIPPED},
        )
        # Two jobs flipped: platform-build, canvas-build. all-required
        # is still true on both sides.
        self.assertEqual(len(flips), 2)
        keys = sorted(f["job_key"] for f in flips)
        self.assertEqual(keys, ["canvas-build", "platform-build"])

    def test_context_name_render(self):
        # Each flip record carries the exact status-context string.
        flips = lpfc.detect_flips(
            {".gitea/workflows/ci.yml": CI_YML_BASE},
            {".gitea/workflows/ci.yml": CI_YML_HEAD_FLIPPED},
        )
        platform = next(f for f in flips if f["job_key"] == "platform-build")
        self.assertEqual(platform["context"], "CI / Platform (Go) (push)")
        self.assertEqual(platform["workflow_name"], "CI")

    def test_context_falls_back_to_job_key_when_no_name(self):
        # Job has no `name:` — the job key is used in the context string.
        base = "name: WF\njobs:\n  foo:\n    continue-on-error: true\n    runs-on: x\n    steps: []\n"
        head = "name: WF\njobs:\n  foo:\n    continue-on-error: false\n    runs-on: x\n    steps: []\n"
        flips = lpfc.detect_flips({"a.yml": base}, {"a.yml": head})
        self.assertEqual(len(flips), 1)
        self.assertEqual(flips[0]["context"], "WF / foo (push)")

    def test_no_flip_when_only_one_side_has_file(self):
        # Newly added workflow file — head has CoE:false, base has no
        # file. Adding a new workflow with CoE:false is fine; there's
        # nothing to mask.
        flips = lpfc.detect_flips(
            {},  # base has no workflow files
            {".gitea/workflows/new.yml": CI_YML_HEAD_FLIPPED},
        )
        self.assertEqual(flips, [])

    def test_no_flip_when_job_removed(self):
        # Job exists on base, not on head — a removal, not a flip.
        head = """\
name: CI
jobs:
  canvas-build:
    name: Canvas (Next.js)
    continue-on-error: true
    runs-on: ubuntu-latest
    steps: []
"""
        flips = lpfc.detect_flips(
            {".gitea/workflows/ci.yml": CI_YML_BASE},
            {".gitea/workflows/ci.yml": head},
        )
        self.assertEqual(flips, [])

    def test_no_flip_when_job_added_with_false(self):
        # New job on head with CoE:false — no base side; not a flip.
        head_with_new = CI_YML_BASE.replace(
            "  all-required:",
            "  newjob:\n    name: New Job\n    continue-on-error: false\n"
            "    runs-on: x\n    steps: []\n"
            "  all-required:",
        )
        flips = lpfc.detect_flips(
            {".gitea/workflows/ci.yml": CI_YML_BASE},
            {".gitea/workflows/ci.yml": head_with_new},
        )
        self.assertEqual(flips, [])

    def test_yaml_parse_error_warns_not_raises(self):
        # Malformed YAML on head — should warn (stderr) and skip,
        # not raise.
        bad_head = "name: CI\njobs:\n :::\n"
        # Capture stderr so the test isn't noisy.
        with mock.patch.object(sys, "stderr"):
            flips = lpfc.detect_flips(
                {".gitea/workflows/ci.yml": CI_YML_BASE},
                {".gitea/workflows/ci.yml": bad_head},
            )
        self.assertEqual(flips, [])
# --------------------------------------------------------------------------
# 3. grep_fail_markers — the regex / substring matcher
# --------------------------------------------------------------------------
class TestGrepFailMarkers(unittest.TestCase):
    """grep_fail_markers scans a raw job log for failure fingerprints."""

    def test_clean_log_returns_empty(self):
        clean = "===== test run starting =====\nPASS\nok example.com/foo 1.234s\n"
        self.assertEqual(lpfc.grep_fail_markers(clean), [])

    def test_go_minus_minus_minus_fail_caught(self):
        log = "ok example.com/foo 1.234s\n--- FAIL: TestBar (0.01s)\n bar_test.go:42:\n"
        hits = lpfc.grep_fail_markers(log)
        self.assertEqual(len(hits), 1)
        self.assertIn("FAIL: TestBar", hits[0])

    def test_go_package_fail_caught(self):
        hits = lpfc.grep_fail_markers("FAIL\texample.com/baz\t1.234s\n")
        self.assertEqual(len(hits), 1)
        self.assertIn("FAIL", hits[0])

    def test_bash_error_directive_caught(self):
        # `lint-curl-status-capture` pattern: a python heredoc inside a
        # bash step prints `::error::` then sys.exit(1). With
        # continue-on-error:true the job rolls up as success despite
        # this line — exactly the masking we're trying to catch.
        log = "Running scan...\n::error::Found 3 curl-status-capture pollution site(s):\n"
        hits = lpfc.grep_fail_markers(log)
        self.assertEqual(len(hits), 1)
        self.assertIn("::error::", hits[0])

    def test_caps_matches_at_max_5(self):
        noisy = "\n".join("--- FAIL: T%d" % i for i in range(20))
        self.assertEqual(len(lpfc.grep_fail_markers(noisy)), 5)
# --------------------------------------------------------------------------
# 4. verify_flip — single-flip verdict assembly (network surface stubbed)
# --------------------------------------------------------------------------
def _stub_status(context: str, state: str, target_url: str = "/owner/repo/actions/runs/1/jobs/0") -> dict:
"""Build a single-context combined-status response."""
return {
"state": state,
"statuses": [
{"context": context, "status": state, "target_url": target_url, "description": ""}
],
}
# One representative flip record used by the verify_flip tests below —
# mirrors the dict shape detect_flips emits (see TestDetectFlips).
FLIP_FIXTURE = {
    "workflow_path": ".gitea/workflows/ci.yml",
    "workflow_name": "CI",
    "job_key": "platform-build",
    "job_name": "Platform (Go)",
    "context": "CI / Platform (Go) (push)",
}
class TestVerifyFlip(unittest.TestCase):
    """verify_flip inspects recent runs of a flipped job's context and
    buckets each run: fail_runs (red status), masked_runs (green status
    but FAIL markers in the log), or clean — plus warnings whenever
    history or logs are unavailable. The entire network surface
    (recent_commits_on_branch, combined_status, fetch_log) is patched.
    """

    def test_flip_with_clean_history_passes(self):
        # Acceptance test #2: flip detected, last 5 runs clean → exit 0.
        with mock.patch.object(lpfc, "recent_commits_on_branch", return_value=["sha1", "sha2", "sha3"]):
            with mock.patch.object(
                lpfc, "combined_status",
                side_effect=[_stub_status(FLIP_FIXTURE["context"], "success") for _ in range(3)],
            ):
                with mock.patch.object(lpfc, "fetch_log", return_value="ok example.com/foo 1s\nPASS\n"):
                    verdict = lpfc.verify_flip(FLIP_FIXTURE, "main", 5)
        self.assertEqual(verdict["fail_runs"], [])
        self.assertEqual(verdict["masked_runs"], [])
        self.assertEqual(verdict["checked_commits"], 3)
        self.assertEqual(verdict["warnings"], [])

    def test_flip_with_recent_fail_blocks(self):
        # Acceptance test #3: flip detected, recent run has --- FAIL → exit 1.
        # Setup: 3 commits, the most recent run's log shows --- FAIL
        # but the STATUS is success (Quirk #10 mask). That's the
        # masked_runs case.
        log_with_fail = "ok example.com/foo 1s\n--- FAIL: TestSqlmock (0.01s)\n sqlmock_test.go:42:\n"
        with mock.patch.object(lpfc, "recent_commits_on_branch", return_value=["sha1", "sha2", "sha3"]):
            with mock.patch.object(
                lpfc, "combined_status",
                side_effect=[_stub_status(FLIP_FIXTURE["context"], "success") for _ in range(3)],
            ):
                with mock.patch.object(lpfc, "fetch_log", side_effect=[log_with_fail, "PASS\n", "PASS\n"]):
                    verdict = lpfc.verify_flip(FLIP_FIXTURE, "main", 5)
        self.assertEqual(len(verdict["masked_runs"]), 1)
        self.assertEqual(verdict["masked_runs"][0]["sha"], "sha1")
        self.assertTrue(any("TestSqlmock" in s for s in verdict["masked_runs"][0]["samples"]))
        self.assertEqual(verdict["fail_runs"], [])

    def test_red_status_alone_blocks(self):
        # Status itself is `failure` — block without needing log
        # markers. (Belt-and-braces: even with a clean log, a `failure`
        # status means the job's exit code was non-zero.)
        with mock.patch.object(lpfc, "recent_commits_on_branch", return_value=["sha1"]):
            with mock.patch.object(
                lpfc, "combined_status",
                return_value=_stub_status(FLIP_FIXTURE["context"], "failure"),
            ):
                with mock.patch.object(lpfc, "fetch_log", return_value="some unrelated text\n"):
                    verdict = lpfc.verify_flip(FLIP_FIXTURE, "main", 5)
        self.assertEqual(len(verdict["fail_runs"]), 1)
        self.assertEqual(verdict["fail_runs"][0]["status"], "failure")

    def test_unreadable_log_warns_not_blocks(self):
        # Acceptance test #5: log fetch 404 (None) → warn, not block.
        # Status is `success`, log is None — we can't tell, so we warn
        # and allow.
        with mock.patch.object(lpfc, "recent_commits_on_branch", return_value=["sha1"]):
            with mock.patch.object(
                lpfc, "combined_status",
                return_value=_stub_status(FLIP_FIXTURE["context"], "success"),
            ):
                with mock.patch.object(lpfc, "fetch_log", return_value=None):
                    verdict = lpfc.verify_flip(FLIP_FIXTURE, "main", 5)
        self.assertEqual(verdict["fail_runs"], [])
        self.assertEqual(verdict["masked_runs"], [])
        self.assertTrue(any("log unavailable" in w for w in verdict["warnings"]))

    def test_unreadable_log_with_failure_status_still_blocks(self):
        # Edge case: log fetch fails BUT the status itself is `failure`.
        # We can still block — the status alone is sufficient signal,
        # we don't need the log to confirm.
        with mock.patch.object(lpfc, "recent_commits_on_branch", return_value=["sha1"]):
            with mock.patch.object(
                lpfc, "combined_status",
                return_value=_stub_status(FLIP_FIXTURE["context"], "failure"),
            ):
                with mock.patch.object(lpfc, "fetch_log", return_value=None):
                    verdict = lpfc.verify_flip(FLIP_FIXTURE, "main", 5)
        self.assertEqual(len(verdict["fail_runs"]), 1)
        self.assertIn("log unavailable", verdict["fail_runs"][0]["samples"][0])

    def test_zero_runs_history_warns_allows(self):
        # No commits with a matching context — newly added workflow.
        # Allow with warning.
        with mock.patch.object(lpfc, "recent_commits_on_branch", return_value=["sha1", "sha2"]):
            with mock.patch.object(
                lpfc, "combined_status",
                return_value={"state": "success", "statuses": []},  # no matching context
            ):
                verdict = lpfc.verify_flip(FLIP_FIXTURE, "main", 5)
        self.assertEqual(verdict["checked_commits"], 0)
        self.assertEqual(verdict["fail_runs"], [])
        self.assertEqual(verdict["masked_runs"], [])
        self.assertTrue(any("no runs of" in w for w in verdict["warnings"]))

    def test_zero_commits_warns_allows(self):
        # Empty branch (newly created repo, e.g.). Allow with warning.
        with mock.patch.object(lpfc, "recent_commits_on_branch", return_value=[]):
            verdict = lpfc.verify_flip(FLIP_FIXTURE, "main", 5)
        self.assertEqual(verdict["checked_commits"], 0)
        self.assertEqual(verdict["fail_runs"], [])
        self.assertEqual(verdict["masked_runs"], [])
        self.assertTrue(any("no recent commits" in w for w in verdict["warnings"]))
# --------------------------------------------------------------------------
# 5. Multiple-flip aggregation in main()
# --------------------------------------------------------------------------
class TestMainAggregation(unittest.TestCase):
    """Tests that `main()` aggregates multiple flips and exits 1 when
    ANY one of them has a masked or red recent run. Acceptance test #4.

    We stub at the verify_flip + workflows_at_sha + _require_runtime_env
    boundary so we don't need real git or HTTP.
    """

    def setUp(self):
        # The actual env values are irrelevant — _require_runtime_env
        # is stubbed out — but the module reads OWNER/NAME at import
        # time. Patch the runtime env contract to a no-op for the
        # duration of each test.
        self._patches = [
            mock.patch.object(lpfc, "_require_runtime_env", return_value=None),
            mock.patch.object(lpfc, "BASE_REF", "main"),
            mock.patch.object(lpfc, "BASE_SHA", "deadbeefcafe"),
            mock.patch.object(lpfc, "HEAD_SHA", "feedfaceabad"),
            mock.patch.object(lpfc, "RECENT_COMMITS_N", 5),
        ]
        for p in self._patches:
            p.start()
        # Single cleanup that stops every patch, even if a test fails.
        self.addCleanup(lambda: [p.stop() for p in self._patches])

    def test_multiple_flips_aggregated_one_bad_blocks(self):
        # PR flips 3 jobs; 1 has a recent fail → exit 1, naming that job.
        flips = [
            {"workflow_path": ".gitea/workflows/ci.yml", "workflow_name": "CI",
             "job_key": "platform-build", "job_name": "Platform (Go)",
             "context": "CI / Platform (Go) (push)"},
            {"workflow_path": ".gitea/workflows/ci.yml", "workflow_name": "CI",
             "job_key": "canvas-build", "job_name": "Canvas (Next.js)",
             "context": "CI / Canvas (Next.js) (push)"},
            {"workflow_path": ".gitea/workflows/ci.yml", "workflow_name": "CI",
             "job_key": "python-lint", "job_name": "Python Lint & Test",
             "context": "CI / Python Lint & Test (push)"},
        ]
        clean = {"flip": flips[0], "checked_commits": 5, "masked_runs": [],
                 "fail_runs": [], "warnings": []}
        bad = {"flip": flips[1], "checked_commits": 5,
               "masked_runs": [{"sha": "abc1234567", "status": "success",
                                "target_url": "/x/y/actions/runs/1/jobs/0",
                                "samples": ["--- FAIL: TestSqlmock"]}],
               "fail_runs": [], "warnings": []}
        also_clean = {"flip": flips[2], "checked_commits": 5, "masked_runs": [],
                      "fail_runs": [], "warnings": []}
        with mock.patch.object(lpfc, "workflows_at_sha", return_value={}):
            with mock.patch.object(lpfc, "detect_flips", return_value=flips):
                with mock.patch.object(lpfc, "verify_flip",
                                       side_effect=[clean, bad, also_clean]):
                    # Capture stdout to assert on naming.
                    captured = []
                    with mock.patch("builtins.print", side_effect=lambda *a, **k: captured.append(" ".join(str(x) for x in a))):
                        rc = lpfc.main([])
        self.assertEqual(rc, 1)
        # The blocking error message must name the failing job.
        joined = "\n".join(captured)
        self.assertIn("canvas-build", joined)
        # And it must mention the empirical class so a reviewer can
        # cross-link the right RFC.
        self.assertTrue("mc#664" in joined or "PR#656" in joined)

    def test_no_flips_in_diff_exits_zero(self):
        # Acceptance test #1 at main() level: empty flips → exit 0.
        with mock.patch.object(lpfc, "workflows_at_sha", return_value={}):
            with mock.patch.object(lpfc, "detect_flips", return_value=[]):
                rc = lpfc.main([])
        self.assertEqual(rc, 0)

    def test_all_flips_clean_exits_zero(self):
        flips = [{"workflow_path": ".gitea/workflows/ci.yml", "workflow_name": "CI",
                  "job_key": "platform-build", "job_name": "Platform (Go)",
                  "context": "CI / Platform (Go) (push)"}]
        clean = {"flip": flips[0], "checked_commits": 5, "masked_runs": [],
                 "fail_runs": [], "warnings": []}
        with mock.patch.object(lpfc, "workflows_at_sha", return_value={}):
            with mock.patch.object(lpfc, "detect_flips", return_value=flips):
                with mock.patch.object(lpfc, "verify_flip", return_value=clean):
                    rc = lpfc.main([])
        self.assertEqual(rc, 0)

    def test_dry_run_forces_exit_zero_even_with_bad_flip(self):
        # --dry-run never fails, even when verification finds masked runs.
        flips = [{"workflow_path": ".gitea/workflows/ci.yml", "workflow_name": "CI",
                  "job_key": "platform-build", "job_name": "Platform (Go)",
                  "context": "CI / Platform (Go) (push)"}]
        bad = {"flip": flips[0], "checked_commits": 5,
               "masked_runs": [{"sha": "abc1234567", "status": "success",
                                "target_url": "/x/y/actions/runs/1/jobs/0",
                                "samples": ["--- FAIL: TestSqlmock"]}],
               "fail_runs": [], "warnings": []}
        with mock.patch.object(lpfc, "workflows_at_sha", return_value={}):
            with mock.patch.object(lpfc, "detect_flips", return_value=flips):
                with mock.patch.object(lpfc, "verify_flip", return_value=bad):
                    rc = lpfc.main(["--dry-run"])
        self.assertEqual(rc, 0)
# --------------------------------------------------------------------------
# 6. Context-name rendering (the format Gitea Actions actually emits)
# --------------------------------------------------------------------------
class TestContextName(unittest.TestCase):
    """context_name renders the exact status-context string Gitea
    Actions emits: '<workflow> / <job> (<event>)'."""

    def test_push_event(self):
        rendered = lpfc.context_name("CI", "Platform (Go)", "push")
        self.assertEqual(rendered, "CI / Platform (Go) (push)")

    def test_pull_request_event(self):
        rendered = lpfc.context_name("CI", "Platform (Go)", "pull_request")
        self.assertEqual(rendered, "CI / Platform (Go) (pull_request)")

    def test_workflow_name_falls_back_to_filename(self):
        # No top-level `name:` — falls back to filename minus extension.
        doc = {"jobs": {"foo": {"continue-on-error": True}}}
        rendered = lpfc.workflow_name(doc, fallback="my-workflow")
        self.assertEqual(rendered, "my-workflow")
# Allow running this file directly (python3 <file>) as well as via pytest.
if __name__ == "__main__":
    unittest.main()

View File

@ -1,550 +0,0 @@
#!/usr/bin/env python3
# Unit tests for sop-checklist-gate.py
#
# Run: python3 .gitea/scripts/tests/test_sop_checklist_gate.py
# or: pytest .gitea/scripts/tests/test_sop_checklist_gate.py
#
# RFC#351 Step 2 of 6 — implementation MVP. Tests cover:
# - slug normalization (the 4 example variants in the script header)
# - parse_directives (ack, revoke, with/without note, mid-comment, etc.)
# - section_marker_present (empty answer rejected, filled answer ok)
# - compute_ack_state (self-ack rejected, team probe applied, revoke
# invalidates own prior ack, peer's ack survives unrevoked)
# - render_status (state + description format)
# - get_tier_mode (label-driven, default fallback)
# - load_config (default config parses cleanly with both PyYAML and
# the bundled minimal parser)
#
# All tests run WITHOUT touching the Gitea API — the team-probe
# callable is dependency-injected.
from __future__ import annotations
import os
import sys
import tempfile
import unittest
# Resolve sibling script regardless of where pytest is invoked from.
# Resolve sibling script regardless of where pytest is invoked from.
HERE = os.path.dirname(os.path.abspath(__file__))
PARENT = os.path.dirname(HERE)  # .gitea/scripts
sys.path.insert(0, PARENT)

import importlib.util  # noqa: E402

# The script filename is hyphenated (sop-checklist-gate.py), so it
# cannot be imported by name — load it directly from its path.
_spec = importlib.util.spec_from_file_location(
    "sop_checklist_gate", os.path.join(PARENT, "sop-checklist-gate.py")
)
sop = importlib.util.module_from_spec(_spec)
_spec.loader.exec_module(sop)  # type: ignore[union-attr]
# ---------------------------------------------------------------------------
# Test fixtures
# ---------------------------------------------------------------------------
# Default checklist config shipped one level above .gitea/scripts.
CONFIG_PATH = os.path.join(PARENT, "..", "sop-checklist-config.yaml")
def _items() -> list[dict]:
    """Return the checklist items from the default config (re-parsed per call)."""
    return sop.load_config(CONFIG_PATH)["items"]
def _items_by_slug() -> dict[str, dict]:
    """Index the config's checklist items by their canonical slug."""
    return {item["slug"]: item for item in _items()}
def _numeric_aliases() -> dict[int, str]:
    """Map each item's numeric shorthand (if any) to its slug."""
    aliases: dict[int, str] = {}
    for item in _items():
        shorthand = item.get("numeric_alias")
        if shorthand:
            aliases[int(shorthand)] = item["slug"]
    return aliases
def _comment(user: str, body: str) -> dict:
return {"user": {"login": user}, "body": body}
# ---------------------------------------------------------------------------
# normalize_slug
# ---------------------------------------------------------------------------
class TestNormalizeSlug(unittest.TestCase):
    """normalize_slug canonicalizes free-form item names to kebab-case;
    with an alias table, bare digits resolve to the aliased slug."""

    def _norm(self, raw, aliases=None):
        # Call-through so every assertion reads as input -> expectation.
        if aliases is None:
            return sop.normalize_slug(raw)
        return sop.normalize_slug(raw, aliases)

    def test_kebab_already(self):
        self.assertEqual(self._norm("comprehensive-testing"), "comprehensive-testing")

    def test_underscore_to_dash(self):
        self.assertEqual(self._norm("comprehensive_testing"), "comprehensive-testing")

    def test_space_to_dash(self):
        self.assertEqual(self._norm("comprehensive testing"), "comprehensive-testing")

    def test_uppercase_to_lower(self):
        self.assertEqual(self._norm("Comprehensive-Testing"), "comprehensive-testing")

    def test_mixed_separators(self):
        self.assertEqual(self._norm("Comprehensive_Testing"), "comprehensive-testing")
        self.assertEqual(self._norm("FIVE_axis review"), "five-axis-review")

    def test_collapse_repeated_dashes(self):
        self.assertEqual(self._norm("comprehensive--testing"), "comprehensive-testing")
        self.assertEqual(self._norm("comprehensive testing"), "comprehensive-testing")

    def test_strip_trailing_punctuation(self):
        for raw in ("comprehensive-testing.", "comprehensive-testing!"):
            self.assertEqual(self._norm(raw), "comprehensive-testing")

    def test_numeric_shorthand_known(self):
        table = _numeric_aliases()
        self.assertEqual(self._norm("1", table), "comprehensive-testing")
        self.assertEqual(self._norm("3", table), "staging-smoke")
        self.assertEqual(self._norm("7", table), "memory-consulted")

    def test_numeric_shorthand_unknown_returns_empty(self):
        # "8" is out of range → empty so caller can flag as unparseable.
        self.assertEqual(self._norm("8", _numeric_aliases()), "")

    def test_numeric_without_alias_table_keeps_digits(self):
        # No alias table → the digits come back unchanged.
        self.assertEqual(self._norm("1"), "1")

    def test_empty_input(self):
        for raw in ("", " ", None):
            self.assertEqual(self._norm(raw), "")
# ---------------------------------------------------------------------------
# parse_directives
# ---------------------------------------------------------------------------
class TestParseDirectives(unittest.TestCase):
    """parse_directives extracts (verb, slug, note) triples from a
    comment body. Directives must start a line (mid-line mentions are
    ignored), slugs are normalized, and numeric shorthand resolves
    through the alias table."""

    def setUp(self):
        self.aliases = _numeric_aliases()

    def test_simple_ack(self):
        d = sop.parse_directives("/sop-ack comprehensive-testing", self.aliases)
        self.assertEqual(d, [("sop-ack", "comprehensive-testing", "")])

    def test_simple_revoke(self):
        d = sop.parse_directives("/sop-revoke staging-smoke", self.aliases)
        self.assertEqual(d, [("sop-revoke", "staging-smoke", "")])

    def test_ack_with_note(self):
        # Free text after the slug is captured as the note.
        d = sop.parse_directives(
            "/sop-ack comprehensive-testing LGTM the test covers all edge cases",
            self.aliases,
        )
        self.assertEqual(len(d), 1)
        self.assertEqual(d[0][0], "sop-ack")
        self.assertEqual(d[0][1], "comprehensive-testing")
        self.assertIn("LGTM", d[0][2])

    def test_numeric_shorthand(self):
        d = sop.parse_directives("/sop-ack 1", self.aliases)
        self.assertEqual(d, [("sop-ack", "comprehensive-testing", "")])

    def test_revoke_with_reason(self):
        d = sop.parse_directives(
            "/sop-revoke comprehensive-testing realized the e2e was mocking the DB",
            self.aliases,
        )
        self.assertEqual(d[0][0], "sop-revoke")
        self.assertEqual(d[0][1], "comprehensive-testing")
        self.assertIn("mocking", d[0][2])

    def test_directive_in_middle_of_comment(self):
        # A directive on its own line counts even when surrounded by prose.
        body = (
            "Reviewed the PR, looks good overall.\n"
            "/sop-ack comprehensive-testing\n"
            "Will follow up on the doc nit separately."
        )
        d = sop.parse_directives(body, self.aliases)
        self.assertEqual(len(d), 1)
        self.assertEqual(d[0][1], "comprehensive-testing")

    def test_multiple_directives_in_one_comment(self):
        body = (
            "/sop-ack comprehensive-testing\n"
            "/sop-ack local-postgres-e2e\n"
        )
        d = sop.parse_directives(body, self.aliases)
        self.assertEqual(len(d), 2)
        slugs = {x[1] for x in d}
        self.assertEqual(slugs, {"comprehensive-testing", "local-postgres-e2e"})

    def test_must_be_at_line_start(self):
        # A directive embedded mid-line is not honored (prevents review
        # comments like "to /sop-ack you need..." from acting as acks).
        body = "If you want to /sop-ack comprehensive-testing reply in this thread"
        d = sop.parse_directives(body, self.aliases)
        self.assertEqual(d, [])

    def test_leading_whitespace_allowed(self):
        body = " /sop-ack comprehensive-testing"
        d = sop.parse_directives(body, self.aliases)
        self.assertEqual(len(d), 1)

    def test_empty_body(self):
        self.assertEqual(sop.parse_directives("", self.aliases), [])
        self.assertEqual(sop.parse_directives(None, self.aliases), [])

    def test_normalization_applied(self):
        # /sop-ack Comprehensive_Testing → canonical comprehensive-testing
        d = sop.parse_directives("/sop-ack Comprehensive_Testing", self.aliases)
        self.assertEqual(d[0][1], "comprehensive-testing")
# ---------------------------------------------------------------------------
# section_marker_present
# ---------------------------------------------------------------------------
class TestSectionMarkerPresent(unittest.TestCase):
    """section_marker_present requires the checklist marker AND a
    non-empty answer (inline after the colon, or on the next line)."""

    def test_marker_with_inline_answer(self):
        body = "- [ ] **Comprehensive testing performed**: Added 12 new tests covering null/empty/giant inputs."
        self.assertTrue(sop.section_marker_present(body, "Comprehensive testing performed"))

    def test_marker_with_empty_answer(self):
        # Marker present but nothing after the colon → not filled in.
        body = "- [ ] **Comprehensive testing performed**:"
        self.assertFalse(sop.section_marker_present(body, "Comprehensive testing performed"))

    def test_marker_with_only_whitespace_answer(self):
        # The trailing space before \n is deliberate — whitespace-only
        # answers must not count as filled in.
        body = "- [ ] **Comprehensive testing performed**: \n"
        self.assertFalse(sop.section_marker_present(body, "Comprehensive testing performed"))

    def test_marker_with_next_line_answer(self):
        body = (
            "- [ ] **Comprehensive testing performed**:\n"
            " Yes — see attached log + 12 new unit tests in foo_test.py.\n"
        )
        self.assertTrue(sop.section_marker_present(body, "Comprehensive testing performed"))

    def test_marker_missing(self):
        body = "- [ ] **Local-postgres E2E run**: N/A — pure-frontend\n"
        self.assertFalse(sop.section_marker_present(body, "Comprehensive testing performed"))

    def test_case_insensitive_marker_match(self):
        body = "- [ ] **comprehensive TESTING performed**: yes"
        self.assertTrue(sop.section_marker_present(body, "Comprehensive testing performed"))

    def test_empty_body(self):
        self.assertFalse(sop.section_marker_present("", "X"))
        self.assertFalse(sop.section_marker_present(None, "X"))
# ---------------------------------------------------------------------------
# compute_ack_state
# ---------------------------------------------------------------------------
class TestComputeAckState(unittest.TestCase):
def setUp(self):
self.items = _items_by_slug()
self.aliases = _numeric_aliases()
@staticmethod
def _approve_all(slug, users):
return list(users)
@staticmethod
def _approve_none(slug, users):
return []
def _approve_only(self, allowed_users):
return lambda slug, users: [u for u in users if u in allowed_users]
def test_peer_ack_passes(self):
comments = [_comment("bob", "/sop-ack comprehensive-testing")]
state = sop.compute_ack_state(
comments, "alice", self.items, self.aliases, self._approve_all
)
self.assertEqual(state["comprehensive-testing"]["ackers"], ["bob"])
def test_self_ack_rejected(self):
comments = [_comment("alice", "/sop-ack comprehensive-testing")]
state = sop.compute_ack_state(
comments, "alice", self.items, self.aliases, self._approve_all
)
self.assertEqual(state["comprehensive-testing"]["ackers"], [])
self.assertEqual(state["comprehensive-testing"]["rejected"]["self_ack"], ["alice"])
def test_not_in_team_rejected(self):
comments = [_comment("eve", "/sop-ack comprehensive-testing")]
state = sop.compute_ack_state(
comments, "alice", self.items, self.aliases, self._approve_none
)
self.assertEqual(state["comprehensive-testing"]["ackers"], [])
self.assertEqual(state["comprehensive-testing"]["rejected"]["not_in_team"], ["eve"])
def test_revoke_invalidates_own_prior_ack(self):
# Bob acks then later revokes — Bob no longer counts.
comments = [
_comment("bob", "/sop-ack comprehensive-testing"),
_comment("bob", "/sop-revoke comprehensive-testing realized e2e was mocked"),
]
state = sop.compute_ack_state(
comments, "alice", self.items, self.aliases, self._approve_all
)
self.assertEqual(state["comprehensive-testing"]["ackers"], [])
def test_revoke_does_not_affect_others_acks(self):
# Bob revokes his own ack; Carol's still counts.
comments = [
_comment("bob", "/sop-ack comprehensive-testing"),
_comment("carol", "/sop-ack comprehensive-testing"),
_comment("bob", "/sop-revoke comprehensive-testing"),
]
state = sop.compute_ack_state(
comments, "alice", self.items, self.aliases, self._approve_all
)
self.assertEqual(state["comprehensive-testing"]["ackers"], ["carol"])
def test_ack_after_revoke_restored(self):
# Bob revokes then re-acks (e.g. after re-reviewing).
comments = [
_comment("bob", "/sop-ack comprehensive-testing"),
_comment("bob", "/sop-revoke comprehensive-testing"),
_comment("bob", "/sop-ack comprehensive-testing"),
]
state = sop.compute_ack_state(
comments, "alice", self.items, self.aliases, self._approve_all
)
self.assertEqual(state["comprehensive-testing"]["ackers"], ["bob"])
def test_numeric_shorthand_ack(self):
# /sop-ack 1 → comprehensive-testing
comments = [_comment("bob", "/sop-ack 1")]
state = sop.compute_ack_state(
comments, "alice", self.items, self.aliases, self._approve_all
)
self.assertEqual(state["comprehensive-testing"]["ackers"], ["bob"])
def test_ack_for_unknown_slug_ignored(self):
    """An ack for a slug absent from config is dropped silently — no crash,
    and no item picks up a stray ack."""
    state = sop.compute_ack_state(
        [_comment("bob", "/sop-ack does-not-exist")],
        "alice",
        self.items,
        self.aliases,
        self._approve_all,
    )
    for slug in self.items:
        self.assertEqual(state[slug]["ackers"], [])
def test_multi_item_multi_user(self):
    """Acks from several users across several items land only where addressed."""
    thread = [
        _comment("bob", "/sop-ack comprehensive-testing\n/sop-ack staging-smoke"),
        _comment("carol", "/sop-ack five-axis-review"),
    ]
    state = sop.compute_ack_state(
        thread, "alice", self.items, self.aliases, self._approve_all
    )
    expected = {
        "comprehensive-testing": ["bob"],
        "staging-smoke": ["bob"],
        "five-axis-review": ["carol"],
        "root-cause": [],
    }
    for slug, ackers in expected.items():
        self.assertEqual(state[slug]["ackers"], ackers)
# ---------------------------------------------------------------------------
# render_status
# ---------------------------------------------------------------------------
class TestRenderStatus(unittest.TestCase):
    """render_status: commit-status state and description formatting."""

    def setUp(self):
        self.items = _items()
        self.items_by_slug = _items_by_slug()

    def _state_with(self, acked: list[str]) -> dict:
        """Synthetic ack-state: each slug in *acked* carries one peer ack."""
        state = {}
        for item in self.items:
            slug = item["slug"]
            state[slug] = {
                "ackers": ["peer"] if slug in acked else [],
                "rejected": {"self_ack": [], "not_in_team": []},
            }
        return state

    def test_all_acked_returns_success(self):
        every = [item["slug"] for item in self.items]
        state, desc = sop.render_status(
            self.items, self._state_with(every), dict.fromkeys(every, True)
        )
        self.assertEqual(state, "success")
        self.assertIn("7/7", desc)

    def test_partial_acked_returns_failure(self):
        filled = dict.fromkeys((item["slug"] for item in self.items), True)
        state, desc = sop.render_status(
            self.items,
            self._state_with(["comprehensive-testing", "staging-smoke"]),
            filled,
        )
        self.assertEqual(state, "failure")
        self.assertIn("2/7", desc)
        self.assertIn("missing", desc)

    def test_description_truncates_long_missing_list(self):
        # One ack leaves six missing — the description must stay within
        # the 140-char budget and summarize the overflow as "+N".
        filled = dict.fromkeys((item["slug"] for item in self.items), True)
        _, desc = sop.render_status(
            self.items, self._state_with(["comprehensive-testing"]), filled
        )
        self.assertLessEqual(len(desc), 140)
        self.assertIn("+", desc)  # +N elision marker

    def test_body_unfilled_surfaced(self):
        every = [item["slug"] for item in self.items]
        state, desc = sop.render_status(
            self.items,
            self._state_with(every),
            dict.fromkeys((item["slug"] for item in self.items), False),
        )
        self.assertEqual(state, "failure")
        self.assertIn("body-unfilled", desc)
# ---------------------------------------------------------------------------
# get_tier_mode
# ---------------------------------------------------------------------------
class TestGetTierMode(unittest.TestCase):
    """Tier-label → failure-mode mapping: hard unless explicitly tier:low."""

    def setUp(self):
        self.cfg = sop.load_config(CONFIG_PATH)

    def _mode(self, *label_names):
        """Run get_tier_mode on a PR carrying exactly *label_names*."""
        pr = {"labels": [{"name": name} for name in label_names]}
        return sop.get_tier_mode(pr, self.cfg)

    def test_tier_high_is_hard(self):
        self.assertEqual(self._mode("tier:high", "area:ci"), "hard")

    def test_tier_medium_is_hard(self):
        self.assertEqual(self._mode("tier:medium"), "hard")

    def test_tier_low_is_soft(self):
        self.assertEqual(self._mode("tier:low"), "soft")

    def test_no_tier_label_defaults_to_hard(self):
        # Per feedback_fix_root_not_symptom — never silently lower the bar.
        self.assertEqual(self._mode("area:ci"), "hard")

    def test_no_labels_defaults_to_hard(self):
        self.assertEqual(self._mode(), "hard")
        # A PR dict missing the labels key entirely behaves the same.
        self.assertEqual(sop.get_tier_mode({}, self.cfg), "hard")
# ---------------------------------------------------------------------------
# load_config
# ---------------------------------------------------------------------------
class TestLoadConfig(unittest.TestCase):
    """Shape and content checks on the checked-in default config."""

    EXPECTED_SLUGS = frozenset({
        "comprehensive-testing",
        "local-postgres-e2e",
        "staging-smoke",
        "root-cause",
        "five-axis-review",
        "no-backwards-compat",
        "memory-consulted",
    })

    def test_default_config_parses(self):
        cfg = sop.load_config(CONFIG_PATH)
        self.assertIn("items", cfg)
        self.assertEqual(len(cfg["items"]), 7)
        self.assertEqual({item["slug"] for item in cfg["items"]}, self.EXPECTED_SLUGS)

    def test_default_config_tier_mode_shape(self):
        cfg = sop.load_config(CONFIG_PATH)
        modes = cfg["tier_failure_mode"]
        for label, expected in (
            ("tier:high", "hard"),
            ("tier:medium", "hard"),
            ("tier:low", "soft"),
        ):
            self.assertEqual(modes[label], expected)
        self.assertEqual(cfg["default_mode"], "hard")

    def test_each_item_has_required_fields(self):
        required = ("slug", "numeric_alias", "pr_section_marker", "required_teams")
        for item in sop.load_config(CONFIG_PATH)["items"]:
            for field in required:
                self.assertIn(field, item)
            teams = item["required_teams"]
            self.assertIsInstance(teams, list)
            self.assertGreater(len(teams), 0)
# ---------------------------------------------------------------------------
# Edge case: full integration without team probe (dependency-injected)
# ---------------------------------------------------------------------------
class TestEndToEndAckFlow(unittest.TestCase):
    """All-7-items happy path with synthetic comments. Verifies the
    full pipeline minus the Gitea API.

    The two tests previously duplicated the 7-comment fixture and the
    team-probe stub verbatim; both are now shared via `_ACKS` and
    `_run_pipeline` so the fixture cannot drift between them.
    """

    # (reviewer, slug) pairs that together ack every configured item.
    _ACKS = (
        ("qa-bot", "comprehensive-testing"),
        ("eng-bot", "local-postgres-e2e"),
        ("eng-bot", "staging-smoke"),
        ("mgr-bot", "root-cause"),
        ("eng-bot", "five-axis-review"),
        ("mgr-bot", "no-backwards-compat"),
        ("eng-bot", "memory-consulted"),
    )

    @staticmethod
    def _probe(slug, users):
        """Team-membership stub: pretend every user is in every team."""
        return list(users)

    def _run_pipeline(self, body_overrides=None):
        """Run compute_ack_state + render_status over the full ack fixture.

        body_overrides: optional {slug: bool} map to mark individual
        PR-body sections as unfilled. Returns render_status's
        (state, description) pair.
        """
        items = _items_by_slug()
        aliases = _numeric_aliases()
        comments = [
            _comment(user, f"/sop-ack {slug}") for user, slug in self._ACKS
        ]
        ack_state = sop.compute_ack_state(
            comments, "alice-author", items, aliases, self._probe
        )
        body = {item["slug"]: True for item in items.values()}
        body.update(body_overrides or {})
        return sop.render_status(list(items.values()), ack_state, body)

    def test_all_seven_acked_by_proper_teams(self):
        state, desc = self._run_pipeline()
        self.assertEqual(state, "success")
        self.assertIn("7/7", desc)

    def test_all_acks_still_fail_when_body_section_unfilled(self):
        # All 7 acks present, but one body section unfilled — acks alone
        # must not turn the status green.
        state, desc = self._run_pipeline({"root-cause": False})
        self.assertEqual(state, "failure")
        self.assertIn("7/7", desc)
        self.assertIn("body-unfilled: root-cause", desc)
# Allow running this test module directly (outside the pytest/CI runner).
if __name__ == "__main__":
    unittest.main(verbosity=2)

View File

@ -1,109 +0,0 @@
# SOP-Checklist gate — per-item required reviewer teams.
#
# RFC#351 v1 starter set. Each item lists:
# slug — canonical kebab-case form used in /sop-ack <slug>
# pr_section_marker — substring matched in the PR body to detect that
# the author filled in this item (case-insensitive)
# required_teams — list of Gitea team names; an ack from ANY one of
# these teams (logical OR) satisfies the item.
# Membership is probed at gate-time via
# GET /api/v1/teams/{id}/members/{login}.
# Team-id resolution happens at script start via
# GET /api/v1/orgs/{org}/teams (cheap, one call).
# numeric_alias — 1..7; lets reviewers type `/sop-ack 3` as a
# shortcut for `/sop-ack staging-smoke`.
#
# WHY THESE TEAM MAPPINGS:
# The RFC table referenced persona-role names like `core-qa`,
# `core-be`, `core-devops` — these are individual Gitea user logins,
# not teams. The Gitea team-membership API is /teams/{id}/members/{u},
# so we need actual teams. Orchestrator preflight 2026-05-12 verified
# only these teams exist on molecule-ai: ceo(5), engineers(2),
# managers(6), qa(20), security(21), Owners(1), and bot teams. We
# map the RFC roles to the closest existing team and surface the
# mapping explicitly so it's reviewable.
#
# HOW TO EDIT:
# - Tightening: replace `engineers` with a smaller team after creating
# it (e.g. a new `senior-engineers` team if needed).
# - Loosening: add another team to required_teams (OR semantics).
# - Add an item: append to items list and document the slug below.
#
# AUTHOR SELF-ACK IS FORBIDDEN regardless of which team contains them
# — the gate script enforces commenter != PR author before checking
# team membership.
version: 1
# Tier-aware failure mode (RFC#351 open question 2):
# For tier:high — hard-fail (status `failure`, blocks merge via BP).
# For tier:medium — hard-fail (same as high; medium is non-trivial).
# For tier:low — soft-fail (status `pending` with `acked: N/M` in the
# description). BP can choose to require the context
# or not for low-tier PRs.
# If no tier label is present, fall back to default_mode (hard-fail, the
# same behavior as tier:medium/high) — every PR should carry a tier label
# per sop-tier-check; its absence is a missing-tier defect we want
# surfaced, not a reason to silently lower the bar.
tier_failure_mode:
"tier:high": hard
"tier:medium": hard
"tier:low": soft
default_mode: hard # used when no tier:* label is present
items:
- slug: comprehensive-testing
numeric_alias: 1
pr_section_marker: "Comprehensive testing performed"
required_teams: [qa, engineers]
description: >-
What was tested, how, edge cases covered. Ack from any qa-team
member (or engineers fallback while qa is small).
- slug: local-postgres-e2e
numeric_alias: 2
pr_section_marker: "Local-postgres E2E run"
required_teams: [engineers]
description: >-
Link to local CI artifact, or "N/A: pure-frontend change". Ack
from any engineer who can verify the local DB test actually ran.
- slug: staging-smoke
numeric_alias: 3
pr_section_marker: "Staging-smoke verified or pending"
required_teams: [engineers]
description: >-
Link to canary run, or "scheduled post-merge". Ack from any
engineer (core-devops/infra-sre are members of engineers team).
- slug: root-cause
numeric_alias: 4
pr_section_marker: "Root-cause not symptom"
required_teams: [managers, ceo]
description: >-
One-sentence root-cause statement. Ack from managers tier
(team-leads) or ceo. Senior judgment required to attest
root-cause-versus-symptom.
- slug: five-axis-review
numeric_alias: 5
pr_section_marker: "Five-Axis review walked"
required_teams: [engineers]
description: >-
Correctness / readability / architecture / security / performance.
Ack from any non-author engineer.
- slug: no-backwards-compat
numeric_alias: 6
pr_section_marker: "No backwards-compat shim / dead code added"
required_teams: [managers, ceo]
description: >-
Yes/no + justification if no. Senior ack required because
backward-compat shims are how dead-code accretes.
- slug: memory-consulted
numeric_alias: 7
pr_section_marker: "Memory/saved-feedback consulted"
required_teams: [engineers]
description: >-
List of feedback memories applicable to this change. Ack from
any engineer who has the same memory access.

View File

@ -1,58 +1,88 @@
# audit-force-merge — emit `incident.force_merge` to runner stdout when
# a PR is merged with required-status-checks not green. Vector picks
# audit-force-merge — emit `incident.force_merge` to the runner log when
# a PR is merged with required-status checks NOT all green. Vector picks
# the JSON line off docker_logs and ships to Loki on
# molecule-canonical-obs (per `reference_obs_stack_phase1`); query as:
#
# {host="operator"} |= "event_type" |= "incident.force_merge" | json
#
# Closes the §SOP-6 audit gap (the doc says force-merges write to
# `structure_events`, but that table lives in the platform DB, not
# Gitea-side; Loki is the practical equivalent for Gitea Actions
# events). When the credential / observability stack converges later,
# this can sync into structure_events from Loki via a backfill job —
# the structured JSON shape is forward-compatible.
# Companion to `audit-force-merge.sh` (script-extract pattern, same as
# sop-tier-check). The audit observes BOTH UI-merged and REST-merged PRs
# uniformly per `feedback_gh_cli_merge_lies_use_rest`.
#
# Logic in `.gitea/scripts/audit-force-merge.sh` per the same script-
# extract pattern as sop-tier-check.
# Closes the §SOP-6 audit gap for the molecule-core repo. RFC:
# internal#219 §6. Mirrors the same-named workflow in
# molecule-controlplane; design rationale lives in the RFC, not here,
# to keep the workflow file scannable.
name: audit-force-merge
# pull_request_target loads from the base branch — same security model
# as sop-tier-check. Without this, an attacker could rewrite the
# workflow on a PR and skip the audit emission for their own
# force-merge. See `.gitea/workflows/sop-tier-check.yml` for the full
# rationale.
# as sop-tier-check. Without this, a PR author could rewrite the
# workflow on their own PR and skip the audit emission for their own
# force-merge. The base-branch checkout below ALSO uses
# `base.sha`, not `base.ref`, so a fast-moving base can't slip a
# different audit script in under us.
on:
pull_request_target:
types: [closed]
# `pull-requests: read` + `contents: read` covers everything the script
# needs (fetch PR + commit statuses). `issues:` deliberately omitted —
# audit fires-and-forgets to stdout, never opens issues.
permissions:
contents: read
pull-requests: read
jobs:
audit:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
# Skip when PR is closed without merge — saves a runner.
if: github.event.pull_request.merged == true
steps:
- name: Check out base branch (for the script)
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
# base.sha pinning, NOT base.ref — see header rationale.
ref: ${{ github.event.pull_request.base.sha }}
- name: Detect force-merge + emit audit event
env:
# Same org-level secret the sop-tier-check workflow uses.
# Same org-level secret the sop-tier-check workflow uses;
# falls back to the auto-injected GITHUB_TOKEN if the
# org-level SOP_TIER_CHECK_TOKEN isn't set on a transitional
# repo.
GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}
GITEA_HOST: git.moleculesai.app
REPO: ${{ github.repository }}
PR_NUMBER: ${{ github.event.pull_request.number }}
# Required-status-check contexts to evaluate at merge time.
# Newline-separated. Mirror this against branch protection
# (settings → branches → protected branch → required checks).
# Newline-separated. MUST mirror branch protection's
# status_check_contexts for protected branches
# (currently `main`; `staging` protection forthcoming per
# RFC internal#219 Phase 4).
#
# Initialized 2026-05-11 from the current molecule-core `main`
# branch protection:
#
# GET /api/v1/repos/molecule-ai/molecule-core/
# branch_protections/main
# → status_check_contexts = [
# "Secret scan / Scan diff for credential-shaped strings (pull_request)",
# "sop-tier-check / tier-check (pull_request)"
# ]
#
# Declared here rather than fetched from /branch_protections
# because that endpoint requires admin write — sop-tier-bot is
# read-only by design (least-privilege).
# because that endpoint requires admin write — sop-tier-bot
# is read-only by design (least-privilege per
# `feedback_least_privilege_via_workflow_env` / internal#257).
# Drift between this env and the real protection list is
# auto-detected by `ci-required-drift.yml` (RFC §4 + §6),
# which opens a `[ci-drift]` issue within one hour.
#
# When the protection set changes (e.g. Phase 4 adds the
# `ci / all-required (pull_request)` sentinel), update BOTH
# branch protection AND this env in the SAME PR; drift-detect
# will otherwise file an issue for you.
REQUIRED_CHECKS: |
CI / all-required (pull_request)
sop-checklist / all-items-acked (pull_request)
Secret scan / Scan diff for credential-shaped strings (pull_request)
sop-tier-check / tier-check (pull_request)
run: bash .gitea/scripts/audit-force-merge.sh

View File

@ -37,7 +37,6 @@ jobs:
# Phase 3 (RFC #219 §1): surface broken workflows without blocking
# the PR. Follow-up PR flips this off after surfaced defects are
# triaged.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

View File

@ -48,7 +48,6 @@ jobs:
# Phase 3 (RFC #219 §1): surface broken workflows without blocking
# the PR. Follow-up PR flips this off after surfaced defects are
# triaged.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4

View File

@ -45,7 +45,6 @@ jobs:
# Phase 3 (RFC #219 §1): surface broken workflows without blocking
# the PR. Follow-up PR flips this off after surfaced defects are
# triaged.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
timeout-minutes: 5
steps:

View File

@ -23,11 +23,11 @@
# `feedback_behavior_based_ast_gates` — NOT grep-by-name. That way
# job renames or matrix-expansion-induced churn produce honest signal.
#
# NOTE on protection endpoint scope: `GET /repos/.../branch_protections/{branch}`
# requires repo-admin role in Gitea 1.22.6. If DRIFT_BOT_TOKEN lacks it,
# the script skips that branch with a clear ::error:: diagnostic and exits 0
# (the issue IS the alarm, not a red workflow). See provisioning trail in
# the run step's GITEA_TOKEN env comment.
# IMPORTANT — TRANSITIONAL STATE: molecule-core's ci.yml does NOT yet
# contain the `all-required` sentinel job (RFC §4 Phase 4 adds it).
# Until Phase 4 lands the detector will hard-fail with exit 3 on the
# missing sentinel. That's intentional: a red workflow on a 5-min cron
# is louder than a silent issue and forces Phase 4 to land soon.
name: ci-required-drift

View File

@ -70,12 +70,10 @@ jobs:
changes:
name: Detect changes
runs-on: ubuntu-latest
# Phase 4 (RFC #219 §1): all required jobs >=98% green on main.
# Flip confirmed 2026-05-12 via combined-status check of latest main
# commit (all CI jobs green). `all-required` sentinel hard-fails
# when this job fails; no Phase 3 suppression needed.
# revert: add `continue-on-error: true` back if regressions appear.
continue-on-error: false
# Phase 3 (RFC #219 §1): surface broken workflows without blocking
# the PR. Follow-up PR flips this off after the surfaced defects
# (if any) are triaged.
continue-on-error: true
outputs:
platform: ${{ steps.check.outputs.platform }}
canvas: ${{ steps.check.outputs.canvas }}
@ -126,30 +124,7 @@ jobs:
name: Platform (Go)
needs: changes
runs-on: ubuntu-latest
# mc#774 (interim): re-mask platform-build pending fix-forward. Phase 4
# (#656) flipped this to continue-on-error: false based on a Phase-3-masked
# "green on main 2026-05-12" — the prior continue-on-error: true had
# been hiding failing tests in workspace-server/internal/handlers/.
# Two distinct failure classes surfaced on 0e5152c3:
# (1) 4x delegation_test.go (lines 1110/1176/1228/1271): helpers
# expectExecuteDelegationBase/Success/Failed are missing sqlmock
# expectations for queries production has issued since ~2026-04-21
# (last_outbound_at UPDATE, lookupDeliveryMode/Runtime SELECTs,
# a2a_receive INSERT activity_logs, recordLedgerStatus writes).
# Halt cond #3 applies (regression > 7 days → broader sweep).
# (2) 1x mcp_test.go:433 (TestMCPHandler_CommitMemory_GlobalScope_Blocked):
# commit 7d1a189f (2026-05-10) hardened mcp.go to scrub err.Error()
# from JSON-RPC responses (OFFSEC-001), but the test asserts the
# error message contains "GLOBAL". Production-vs-test contract
# collision — needs design call, not mock update.
# Time-boxed Option A (90 min) did not fit the cross-cutting scope.
# This is a sequenced revert→fix→reflip per
# feedback_strict_root_only_after_class_a emergency clause — NOT
# a permanent re-mask. Re-flip blocked on mc#774 fix-forward landing.
# Other 4 #656 flips (changes, canvas-build, shellcheck, python-lint)
# retain continue-on-error: false; only platform-build regresses.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true # mc#774 fix-forward in flight; re-flip when mc#774 lands (PR #669 → rebase after #709)
continue-on-error: true
defaults:
run:
working-directory: workspace-server
@ -169,13 +144,10 @@ jobs:
run: go build ./cmd/server
# CLI (molecli) moved to standalone repo: git.moleculesai.app/molecule-ai/molecule-cli
- if: needs.changes.outputs.platform == 'true'
run: go vet ./...
- if: needs.changes.outputs.platform == 'true'
name: Install golangci-lint
run: go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.12.2
run: go vet ./... || true
- if: needs.changes.outputs.platform == 'true'
name: Run golangci-lint
run: $(go env GOPATH)/bin/golangci-lint run --timeout 3m ./...
run: golangci-lint run --timeout 3m ./... || true
- if: needs.changes.outputs.platform == 'true'
name: Diagnostic — per-package verbose 60s
run: |
@ -190,7 +162,6 @@ jobs:
echo "::group::pendinguploads exit=$pu_exit (last 100 lines)"
tail -100 /tmp/test-pu.log
echo "::endgroup::"
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
- if: needs.changes.outputs.platform == 'true'
name: Run tests with race detection and coverage
@ -300,8 +271,7 @@ jobs:
name: Canvas (Next.js)
needs: changes
runs-on: ubuntu-latest
# Phase 4 (RFC #219 §1): confirmed green on main 2026-05-12.
continue-on-error: false
continue-on-error: true
defaults:
run:
working-directory: canvas
@ -347,8 +317,7 @@ jobs:
name: Shellcheck (E2E scripts)
needs: changes
runs-on: ubuntu-latest
# Phase 4 (RFC #219 §1): confirmed green on main 2026-05-12.
continue-on-error: false
continue-on-error: true
steps:
- if: needs.changes.outputs.scripts != 'true'
run: echo "No tests/e2e/ or infra/scripts/ changes — skipping real shellcheck; this job always runs to satisfy the required-check name on branch protection."
@ -377,7 +346,6 @@ jobs:
canvas-deploy-reminder:
name: Canvas Deploy Reminder
runs-on: ubuntu-latest
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
needs: [changes, canvas-build]
# Only fires on direct pushes to main (i.e. after staging→main promotion).
@ -424,8 +392,7 @@ jobs:
name: Python Lint & Test
needs: changes
runs-on: ubuntu-latest
# Phase 4 (RFC #219 §1): confirmed green on main 2026-05-12.
continue-on-error: false
continue-on-error: true
env:
WORKSPACE_ID: test
defaults:
@ -541,16 +508,12 @@ jobs:
# explicitly excludes `github.event_name`-gated jobs from F1 (see
# `.gitea/scripts/ci-required-drift.py::ci_job_names`).
#
# Phase 3 (RFC #219 §1) safety: underlying build jobs carry
# continue-on-error: true so their failures are masked to null (2026-05-12: re-enabled mc#774 interim)
# (Gitea suppresses status reporting for CoE jobs). This sentinel
# runs with continue-on-error: false so it always reports its
# result to the API — without this, the required-status entry
# (CI / all-required (pull_request)) is never created, which
# blocks PR merges. When Phase 3 ends, flip underlying jobs to
# continue-on-error: false; this sentinel can then be flipped to
# continue-on-error: true if a Phase-4 regression requires it.
continue-on-error: false
# Phase 3 (RFC #219 §1) safety: continue-on-error here so the sentinel
# does not hard-fail and block PRs while the underlying build jobs are
# still in Phase 3 (continue-on-error: true suppresses their status to null).
# When Phase 3 ends (defects fixed, continue-on-error flipped off on build
# jobs), remove continue-on-error here so the sentinel again hard-fails.
continue-on-error: true
runs-on: ubuntu-latest
timeout-minutes: 1
needs:
@ -574,26 +537,17 @@ jobs:
echo "$results" | python3 -c '
import json, sys
ns = json.load(sys.stdin)
# Phase 3 masked: jobs with continue-on-error: true may report "failure"
# Remove when mc#774 handler test failures are resolved.
PHASE3_MASKED = {"platform-build"}
# Exclude null (Phase 3 suppressed / in-flight) from the bad list.
bad = [(k, v.get("result")) for k, v in ns.items()
if v.get("result") not in ("success", None, "cancelled", "skipped") and k not in PHASE3_MASKED]
if v.get("result") not in ("success", None)]
if bad:
print(f"FAIL: jobs not green:", file=sys.stderr)
for k, r in bad:
print(f" - {k}: {r}", file=sys.stderr)
sys.exit(1)
pending = [(k, v.get("result")) for k, v in ns.items()
if v.get("result") is None]
cancelled = [(k, v.get("result")) for k, v in ns.items()
if v.get("result") == "cancelled"]
pending = [(k, v.get("result")) for k, v in ns.items() if v.get("result") is None]
if pending:
print(f"WARN: {len(pending)} job(s) still in-flight (result=null): " +
", ".join(k for k, _ in pending), file=sys.stderr)
if cancelled:
print(f"INFO: {len(cancelled)} job(s) masked by continue-on-error: " +
", ".join(k for k, _ in cancelled), file=sys.stderr)
print(f"OK: all {len(ns)} required jobs succeeded (or Phase-3 suppressed)")
'

View File

@ -90,7 +90,6 @@ jobs:
name: Synthetic E2E against staging
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
# Bumped from 12 → 20 (2026-05-04). Tenant user-data install phase
# (apt-get update + install docker.io/jq/awscli/caddy + snap install

View File

@ -103,7 +103,6 @@ jobs:
detect-changes:
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
outputs:
api: ${{ steps.decide.outputs.api }}
@ -155,7 +154,6 @@ jobs:
name: E2E API Smoke Test
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
timeout-minutes: 15
env:
@ -166,6 +164,7 @@ jobs:
# we let Docker assign an ephemeral host port.
PG_CONTAINER: pg-e2e-api-${{ github.run_id }}-${{ github.run_attempt }}
REDIS_CONTAINER: redis-e2e-api-${{ github.run_id }}-${{ github.run_attempt }}
PORT: "8080"
steps:
- name: No-op pass (paths filter excluded this commit)
if: needs.detect-changes.outputs.api != 'true'
@ -269,20 +268,6 @@ jobs:
if: needs.detect-changes.outputs.api == 'true'
working-directory: workspace-server
run: go build -o platform-server ./cmd/server
- name: Pick platform port
if: needs.detect-changes.outputs.api == 'true'
run: |
PLATFORM_PORT=$(python3 - <<'PY'
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("127.0.0.1", 0))
print(s.getsockname()[1])
PY
)
echo "PORT=${PLATFORM_PORT}" >> "$GITHUB_ENV"
echo "BASE=http://127.0.0.1:${PLATFORM_PORT}" >> "$GITHUB_ENV"
echo "Platform host port: ${PLATFORM_PORT}"
- name: Start platform (background)
if: needs.detect-changes.outputs.api == 'true'
working-directory: workspace-server
@ -295,7 +280,7 @@ jobs:
if: needs.detect-changes.outputs.api == 'true'
run: |
for i in $(seq 1 30); do
if curl -sf "$BASE/health" > /dev/null; then
if curl -sf http://127.0.0.1:8080/health > /dev/null; then
echo "Platform up after ${i}s"
exit 0
fi

View File

@ -70,7 +70,6 @@ jobs:
detect-changes:
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
outputs:
canvas: ${{ steps.decide.outputs.canvas }}
@ -119,7 +118,6 @@ jobs:
name: Canvas tabs E2E
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
timeout-minutes: 40
@ -168,7 +166,6 @@ jobs:
- name: Install Playwright browsers
if: needs.detect-changes.outputs.canvas == 'true'
timeout-minutes: 10
run: npx playwright install --with-deps chromium
- name: Run staging canvas E2E

View File

@ -84,7 +84,6 @@ jobs:
name: E2E Staging External Runtime
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
timeout-minutes: 25

View File

@ -88,20 +88,17 @@ jobs:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 1
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
- uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
with:
python-version: "3.11"
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
- name: YAML validation (best-effort)
run: |
echo "e2e-staging-saas.yml — PR validation: workflow YAML is valid."
echo "E2E step runs only when provisioning-critical files change."
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
# Actual E2E: runs on trunk pushes (main + staging). NOT the PR-fire-only
@ -112,7 +109,6 @@ jobs:
# Only runs on trunk pushes. PR paths get pr-validate instead.
if: github.event.pull_request.base.ref == ''
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
timeout-minutes: 45
permissions:

View File

@ -37,7 +37,6 @@ jobs:
name: Intentional-failure teardown sanity
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
timeout-minutes: 20

View File

@ -32,21 +32,12 @@ on:
# iterating all open PRs when PR_NUMBER is empty.
workflow_dispatch:
permissions:
# read: contents — for checkout (base ref, not PR head for security)
# read: pull-requests — for reading PR info via API
# write: pull-requests — for posting/updating gate-check comments
# Without this the token cannot POST/PATCH /issues/comments → 403.
contents: read
pull-requests: write
env:
GITHUB_SERVER_URL: https://git.moleculesai.app
jobs:
gate-check:
runs-on: ubuntu-latest
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true # Never block on our own detector failing
steps:
- name: Check out BASE ref (never PR-head under pull_request_target)
@ -77,32 +68,25 @@ jobs:
if: github.event_name == 'schedule'
env:
GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}
REPO: ${{ github.repository }}
run: |
set -euo pipefail
# Fetch all open PRs and run gate-check on each
# socket.setdefaulttimeout(15): defence-in-depth for missing SOP_TIER_CHECK_TOKEN.
# gate_check.py uses timeout=15 on every urlopen call; this catches the
# inline Python polling loop too (issue #603).
pr_numbers=$(python3 <<'PY'
import json
import os
import socket
import urllib.request
socket.setdefaulttimeout(15)
token = os.environ["GITEA_TOKEN"]
repo = os.environ["REPO"]
req = urllib.request.Request(
f"https://git.moleculesai.app/api/v1/repos/{repo}/pulls?state=open&limit=100",
headers={"Authorization": f"token {token}", "Accept": "application/json"},
)
with urllib.request.urlopen(req) as r:
prs = json.loads(r.read())
for pr in prs:
print(pr["number"])
PY
)
pr_numbers=$(python3 -c "
import socket, urllib.request, json, os
socket.setdefaulttimeout(15)
token = os.environ['GITEA_TOKEN']
req = urllib.request.Request(
'https://git.moleculesai.app/api/v1/repos/${{ github.repository }}/pulls?state=open&limit=100',
headers={'Authorization': f'token {token}', 'Accept': 'application/json'}
)
with urllib.request.urlopen(req) as r:
prs = json.loads(r.read())
for pr in prs:
print(pr['number'])
")
for pr in $pr_numbers; do
echo "Checking PR #$pr..."
python3 tools/gate-check-v3/gate_check.py \

View File

@ -1,51 +0,0 @@
name: gitea-merge-queue
# External serialized merge queue for Gitea 1.22.6.
#
# Gitea's `pull_auto_merge` table is not a real merge queue: it does not
# serialize green PRs against a freshly-tested latest main. This workflow runs
# the user-space queue bot, one PR per tick, using the non-bypass merge actor.
#
# Queue contract:
#   - add label `merge-queue` to an open same-repo PR
#   - bot updates stale PR heads with current main, then waits for CI
#   - bot merges only when current main is green and required PR contexts pass
#   - add `merge-queue-hold` to pause a queued PR without removing it
on:
  schedule:
    - cron: '*/5 * * * *'
  workflow_dispatch:
env:
  # Pin the server host so actions/checkout resolves against our Gitea
  # instance instead of defaulting to github.com on a runner regenerated
  # without the fleet env config — the same belt-and-suspenders pin every
  # other workflow in this repo carries
  # (feedback_act_runner_github_server_url).
  GITHUB_SERVER_URL: https://git.moleculesai.app
permissions:
  contents: read
concurrency:
  # One queue per repo. cancel-in-progress stays false so the next cron
  # tick never kills an in-flight merge operation mid-way.
  group: gitea-merge-queue-${{ github.repository }}
  cancel-in-progress: false
jobs:
  queue:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - name: Check out queue script from main
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          ref: ${{ github.event.repository.default_branch }}
      - name: Process one queued PR
        env:
          # AUTO_SYNC_TOKEN is the devops-engineer persona PAT. It is the
          # non-bypass merge actor allowed by branch protection.
          GITEA_TOKEN: ${{ secrets.AUTO_SYNC_TOKEN }}
          GITEA_HOST: git.moleculesai.app
          REPO: ${{ github.repository }}
          WATCH_BRANCH: ${{ github.event.repository.default_branch }}
          QUEUE_LABEL: merge-queue
          HOLD_LABEL: merge-queue-hold
          UPDATE_STYLE: merge
          REQUIRED_CONTEXTS: >-
            CI / all-required (pull_request),
            sop-checklist / all-items-acked (pull_request)
        run: python3 .gitea/scripts/gitea-merge-queue.py

View File

@ -78,8 +78,7 @@ jobs:
detect-changes:
name: detect-changes
runs-on: ubuntu-latest
# mc#774 Phase 3 (RFC §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
continue-on-error: true
outputs:
handlers: ${{ steps.filter.outputs.handlers }}
@ -119,8 +118,7 @@ jobs:
name: Handlers Postgres Integration
needs: detect-changes
runs-on: ubuntu-latest
# mc#774 Phase 3 (RFC §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
continue-on-error: true
env:
# Unique name per run so concurrent jobs don't collide on the

View File

@ -63,7 +63,6 @@ jobs:
detect-changes:
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
outputs:
run: ${{ steps.decide.outputs.run }}
@ -155,7 +154,6 @@ jobs:
name: Harness Replays
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
timeout-minutes: 30
steps:

View File

@ -1,120 +0,0 @@
name: lint-bp-context-emit-match
# Tier 2f scheduled lint (per mc#774) — detects drift between
# `branch_protections/<branch>.status_check_contexts` and the set of
# contexts emitted by `.gitea/workflows/*.yml`.
#
# Rule
# ----
# For each protected branch context (Source A — BP), there must exist
# at least one emitting workflow + job pair (Source B — workflow YAML
# + on:-event mapping) whose runtime status-name maps to it. The
# inverse direction (emitter without BP context) is informational
# only — Tier 2g handles that at PR-time.
#
# Why this exists
# ---------------
# A BP-required context with no emitter blocks merges forever — Gitea
# 1.22.6 treats absent-as-`pending`, NOT absent-as-`skipped`. The
# phantom-required-check class previously surfaced as
# `feedback_phantom_required_check_after_gitea_migration` (a port
# kept the GitHub context name after rename to Gitea, but no
# workflow emitted under the new name).
#
# This lint catches the same class structurally + a forward case:
# workflow renamed/deleted while still in BP.
#
# Scope
# -----
# Scheduled daily. We DON'T run on `pull_request` because (a) the
# emitter side moves with PR diffs (transitional state false-flags)
# and (b) Tier 2g handles emitter-side drift at PR-time.
#
# Cross-repo
# ----------
# Today this runs only on molecule-core/main. Per internal#349
# (cross-repo BP sweep) Class-D repos will get the same lint after
# their BP rollouts.
#
# Auth
# ----
# `GET /repos/.../branch_protections/{branch}` requires repo-admin
# role on Gitea 1.22.6. We use DRIFT_BOT_TOKEN (same persona as
# ci-required-drift.yml — `internal#329` provisioning trail).
# Graceful-degrade per Tier 2a contract: 403/404 → exit 0 with
# ::error::.
#
# Idempotency
# -----------
# The drift issue is filed with title prefix
# `[ci-bp-drift] {repo}/{branch}: BP→emitter mismatch`. The script
# searches OPEN issues for an exact title-prefix match and PATCHes
# the existing issue (if any) instead of POSTing a duplicate.
# Mirrors `ci-required-drift.py`'s contract.
#
# Phase contract (RFC internal#219 §1 ladder)
# -------------------------------------------
# Lands at `continue-on-error: true` (Phase 3). After 7 days of clean
# scheduled runs on `main`, flip to `false` so a scheduled failure
# becomes a hard CI signal.
#
# Cross-links
# -----------
# - mc#774 (the RFC that specs this lint)
# - internal#349 (cross-repo BP sweep)
# - feedback_phantom_required_check_after_gitea_migration
# - feedback_tier_label_ids_are_per_repo
# - ci-required-drift.yml (F2 detector, narrower-scope sibling)
on:
  schedule:
    # Daily at 03:31 UTC — off-peak, prime-staggered from other
    # scheduled jobs (ci-required-drift :00 hourly, lint-coe-tracking
    # 13:11). At 03:31 the CI fleet is quietest in EMEA hours.
    - cron: '31 3 * * *'
  workflow_dispatch:
  # No `push` / `pull_request` here — Tier 2g owns PR-time drift.
env:
  GITHUB_SERVER_URL: https://git.moleculesai.app
permissions:
  contents: read
  issues: write # needed to file/edit the drift issue
concurrency:
  # One run per ref; cancelling a superseded run is safe because the
  # drift issue is updated idempotently (title-prefix search + PATCH,
  # per the Idempotency section above) — no state is lost.
  group: lint-bp-context-emit-match-${{ github.ref }}
  cancel-in-progress: true
jobs:
  lint:
    name: lint-bp-context-emit-match
    runs-on: ubuntu-latest
    timeout-minutes: 5
    # Phase 3 (RFC #219 §1): surface drift without blocking. After 7
    # clean scheduled runs on main, flip to false so a scheduled
    # failure is a hard CI signal.
    continue-on-error: true # mc#774 Phase 3 — flip to false after 7 clean main runs
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
        with:
          python-version: '3.12'
      - name: Install PyYAML
        run: python -m pip install --quiet 'PyYAML==6.0.2'
      - name: Run lint-bp-context-emit-match
        env:
          # DRIFT_BOT_TOKEN — repo-admin on this repo (internal#329
          # provisioning trail). Required for branch_protections read.
          GITEA_TOKEN: ${{ secrets.DRIFT_BOT_TOKEN }}
          GITEA_HOST: git.moleculesai.app
          REPO: ${{ github.repository }}
          BRANCH: main
          WORKFLOWS_DIR: .gitea/workflows
          DRIFT_LABEL: ci-bp-drift
          GITHUB_RUN_URL: https://git.moleculesai.app/${{ github.repository }}/actions/runs/${{ github.run_id }}
        run: python3 .gitea/scripts/lint_bp_context_emit_match.py
      # Run the lint's own unit tests in the same job so a change to the
      # script cannot land with a broken test suite.
      - name: Run lint-bp-context-emit-match unit tests
        run: |
          python -m pip install --quiet pytest
          python3 -m pytest tests/test_lint_bp_context_emit_match.py -v

View File

@ -1,121 +0,0 @@
name: lint-continue-on-error-tracking
# Tier 2e hard-gate lint (per mc#774) — every
# `continue-on-error: true` in `.gitea/workflows/*.yml` must carry a
# `# mc#NNNN` or `# internal#NNNN` tracker comment within 2 lines,
# the referenced issue must be OPEN, and ≤14 days old.
#
# Why this exists
# ---------------
# `continue-on-error: true` on `platform-build` had been hiding
# mc#774-class regressions for ~3 weeks before #656 surfaced them on
# 2026-05-12. A 14-day cap on tracker age forces a review cycle and
# surfaces mask-drift within at most 14 days of the original defect.
# Each `continue-on-error: true` gets a paper trail — close or renew.
#
# How the gate works
# ------------------
# 1. Walk `.gitea/workflows/*.yml` via PyYAML's line-tracking loader
#    (per `feedback_behavior_based_ast_gates`) and find every job
#    whose `continue-on-error` evaluates truthy (`true` or string
#    `"true"` — Gitea's evaluator coerces strings).
# 2. For each, scan ±2 lines of the directive's source line for a
#    `# mc#NNNN` or `# internal#NNNN` comment. Inline-trailing
#    comments on the directive line count.
# 3. For each tracker reference, GET the issue from the Gitea API.
#    Validate: exists, `state == open`, `created_at` ≤ MAX_AGE_DAYS.
# 4. Aggregate ALL violations (not short-circuit) and exit 1 if any.
#
# Triggers
# --------
# Runs on PR events (paths-filter on `.gitea/workflows/**`) AND on
# a daily schedule. PR runs catch the violation at introduction time.
# Schedule runs catch the AGE-EXPIRY class: a tracker that was ≤14d
# old when the PR landed but is now 20d old, with the underlying
# defect still unfixed. Per `feedback_chained_defects_in_never_tested_workflows`,
# scheduled drift detection is the second half of the gate.
#
# Phase contract (RFC internal#219 §1 ladder)
# -------------------------------------------
# Lands at `continue-on-error: true` (Phase 3 — surface broken shapes
# without blocking). The pre-existing `continue-on-error: true`
# directives on `main` will all violate this lint at first
# (intentional — they're the masked defects this lint exists to
# surface). Each must be triaged: file a fresh tracker comment,
# close-and-flip, or document the deliberate keep-mask in a fresh
# 14-day-renewable tracker. After main is clean for 3 days,
# follow-up PR flips this workflow's continue-on-error to false.
# Tracking: mc#774.
#
# Cross-links
# -----------
# - mc#774 (the RFC that specs this lint)
# - mc#774 (the empirical masked-3-weeks case)
# - feedback_chained_defects_in_never_tested_workflows
# - feedback_behavior_based_ast_gates
# - feedback_strict_root_only_after_class_a
#
# Auth: DRIFT_BOT_TOKEN — same persona used by ci-required-drift.yml
# (provisioned under internal#329). Auto-injected GITHUB_TOKEN is
# insufficient because `internal#NNN` references cross repositories
# (molecule-core → molecule-ai/internal).
on:
  pull_request:
    types: [opened, synchronize, reopened]
    paths:
      - '.gitea/workflows/**'
      - '.gitea/scripts/lint_continue_on_error_tracking.py'
      - 'tests/test_lint_continue_on_error_tracking.py'
  push:
    branches: [main, staging]
    # Keep this filter symmetric with pull_request above: the job's
    # last step runs the lint's own pytest suite, so a test-only
    # change must still re-trigger this workflow on main/staging.
    paths:
      - '.gitea/workflows/**'
      - '.gitea/scripts/lint_continue_on_error_tracking.py'
      - 'tests/test_lint_continue_on_error_tracking.py'
  schedule:
    # Daily at 13:11 UTC — off-peak, prime-staggered from the other
    # Tier-2 lint schedules (ci-required-drift runs hourly :00).
    - cron: '11 13 * * *'
  workflow_dispatch:
env:
  GITHUB_SERVER_URL: https://git.moleculesai.app
permissions:
  contents: read
concurrency:
  group: lint-coe-tracking-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
jobs:
  lint:
    name: lint-continue-on-error-tracking
    runs-on: ubuntu-latest
    timeout-minutes: 10
    # Phase 3 (RFC #219 §1): surface masked defects without blocking
    # PRs. Pre-existing continue-on-error: true directives on main
    # all violate this lint at first — intentional. Flip to false
    # follow-up after main is clean for 3 days. mc#774.
    # mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
    continue-on-error: true # mc#774 Phase 3 mask — 14d forced-renewal cadence
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
        with:
          python-version: '3.12'
      - name: Install PyYAML
        run: python -m pip install --quiet 'PyYAML==6.0.2'
      - name: Run lint-continue-on-error-tracking
        env:
          GITEA_TOKEN: ${{ secrets.DRIFT_BOT_TOKEN }}
          GITEA_HOST: git.moleculesai.app
          REPO: ${{ github.repository }}
          INTERNAL_REPO: molecule-ai/internal
          WORKFLOWS_DIR: .gitea/workflows
          MAX_AGE_DAYS: '14'
        run: python3 .gitea/scripts/lint_continue_on_error_tracking.py
      - name: Run lint-continue-on-error-tracking unit tests
        run: |
          python -m pip install --quiet pytest
          python3 -m pytest tests/test_lint_continue_on_error_tracking.py -v

View File

@ -30,16 +30,10 @@ name: Lint curl status-code capture
on:
pull_request:
paths:
- '.gitea/workflows/**'
- '.gitea/scripts/lint-curl-status-capture.py'
- 'tests/test_lint_curl_status_capture.py'
paths: ['.gitea/workflows/**']
push:
branches: [main, staging]
paths:
- '.gitea/workflows/**'
- '.gitea/scripts/lint-curl-status-capture.py'
- 'tests/test_lint_curl_status_capture.py'
paths: ['.gitea/workflows/**']
env:
GITHUB_SERVER_URL: https://git.moleculesai.app
@ -51,10 +45,60 @@ jobs:
# Phase 3 (RFC #219 §1): surface broken workflows without blocking
# the PR. Follow-up PR flips this off after surfaced defects are
# triaged.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Find curl ... -w '%{http_code}' ... || echo "000" subshells
run: |
python3 .gitea/scripts/lint-curl-status-capture.py
set -uo pipefail
# Multi-line aware: look for `$(curl ... -w '%{http_code}' ... || echo "000")`
# subshell where the entire command-substitution wraps a curl that
# ends with `|| echo "000"`. Must distinguish from the SAFE shape
# `$(cat tempfile 2>/dev/null || echo "000")` — `cat` with a missing
# tempfile produces empty stdout, no pollution.
python3 <<'PY'
import os, re, sys, glob
BAD_FILES = []
# Match the buggy substitution across newlines: $(curl ... -w '%{http_code}' ... || echo "000")
# The `\\n` is the bash line-continuation that lets curl flags span lines.
# We collapse continuation lines first, then look for the single-line bad pattern.
PATTERN = re.compile(
r'\$\(\s*curl\b[^)]*-w\s*[\'"]%\{http_code\}[\'"][^)]*\|\|\s*echo\s+"000"\s*\)',
re.DOTALL,
)
# Self-skip: this lint workflow contains the literal anti-pattern in
# its own docstring — that's intentional, not a bug.
SELF = ".gitea/workflows/lint-curl-status-capture.yml"
for f in sorted(glob.glob(".gitea/workflows/*.yml")):
if f == SELF:
continue
with open(f) as fh:
content = fh.read()
# Collapse bash line-continuations (\\\n + leading whitespace)
# into a single logical line so the regex can see the full
# curl invocation as one chunk.
flat = re.sub(r'\\\s*\n\s*', ' ', content)
for m in PATTERN.finditer(flat):
BAD_FILES.append((f, m.group(0)[:120]))
if not BAD_FILES:
print("OK No curl-status-capture pollution patterns detected")
sys.exit(0)
print(f"::error::Found {len(BAD_FILES)} curl-status-capture pollution site(s):")
for f, snippet in BAD_FILES:
print(f"::error file={f}::Curl status-capture pollution: '|| echo \"000\"' inside a $(curl ... -w '%{{http_code}}' ...) subshell. On non-2xx or connection failure, curl's -w writes a status, then exits non-zero, then the || echo appends another '000' — producing 'HTTP 000000' or '409000' that fails comparisons silently. Fix: route -w into a tempfile so the exit code can't pollute stdout. See memory feedback_curl_status_capture_pollution.md.")
print(f" matched: {snippet}...")
print()
print("Fix template:")
print(' set +e')
print(' curl ... -w \'%{http_code}\' >code.txt 2>/dev/null')
print(' set -e')
print(' HTTP_CODE=$(cat code.txt 2>/dev/null)')
print(' [ -z "$HTTP_CODE" ] && HTTP_CODE="000"')
sys.exit(1)
PY

View File

@ -1,133 +0,0 @@
name: lint-mask-pr-atomicity
# Tier 2d hard-gate lint (per mc#774) — blocks PRs that touch
# `.gitea/workflows/ci.yml` and modify ONLY ONE of {continue-on-error,
# all-required.sentinel.needs} without a `Paired: #NNN` reference in
# the PR body or in a commit message.
#
# Why this exists
# ---------------
# PR#665 (interim `continue-on-error: true` on `platform-build`) and
# PR#668 (sentinel-`needs` demotion of the same job) were designed as a
# pair but merged solo — #665 landed at 04:47Z 2026-05-12, #668 was
# still open at 05:07Z when the main-red watchdog (#674) fired. Result:
# ~20 minutes of `main` red and a cascade of false-positives on
# unrelated PRs. This lint structurally prevents that class.
#
# How the gate works
# ------------------
# 1. The workflow runs on every PR whose diff touches ci.yml (paths
#    filter). It is NOT a required check on `main` because the rule is
#    diff-based — running it on PRs that don't touch ci.yml would
#    produce a `pending` status forever (per
#    `feedback_path_filtered_workflow_cant_be_required`).
# 2. The script reads `BASE_SHA:ci.yml` and `HEAD_SHA:ci.yml`, parses
#    both via PyYAML AST (per `feedback_behavior_based_ast_gates` — no
#    grep, no regex on the raw text — so a YAML-shape refactor still
#    detects).
# 3. Walks `jobs.*.continue-on-error` on each side; flags any value
#    diff. Reads `jobs.all-required.needs` on each side; flags any
#    set diff (order-insensitive — `needs:` is engine-unordered).
# 4. If both predicates fired → atomic, OK. If neither → no risk, OK.
#    If exactly one fired → require `Paired: #NNN` in PR body OR in
#    any commit message between base..head; else fail.
#
# Phase contract (RFC internal#219 §1 ladder)
# -------------------------------------------
# This workflow lands at `continue-on-error: true` (Phase 3 — surface
# regressions without blocking PRs while the rule beds in).
# Follow-up PR flips to `false` once we have ≥3 days of clean runs on
# `main` and no false-positives. Tracking issue: mc#774.
#
# Cross-links
# -----------
# - mc#774 (the RFC that specs this lint)
# - PR#665 / PR#668 (the empirical split-pair)
# - mc#774 (the main-red incident the split caused)
# - feedback_strict_root_only_after_class_a
# - feedback_behavior_based_ast_gates
#
# Auth: only needs the auto-injected GITHUB_TOKEN (read-only, repo
# scope). No DRIFT_BOT_TOKEN needed — Tier 2d does NOT call
# branch_protections (Tier 2g/f do).
on:
  pull_request:
    types: [opened, synchronize, reopened, edited]
    # `edited` is included because the rule depends on PR_BODY: a user
    # may add `Paired: #NNN` after first push to satisfy the lint. The
    # rerun on `edited` lets the PR turn green without an empty
    # commit. Gitea 1.22.6 fires `edited` on body changes — verified
    # via gitea-source/models/issues/pull_list.go::triggerNewPRWebhook.
    paths:
      - '.gitea/workflows/ci.yml'
      - '.gitea/scripts/lint_mask_pr_atomicity.py'
      - '.gitea/workflows/lint-mask-pr-atomicity.yml'
      - 'tests/test_lint_mask_pr_atomicity.py'
env:
  # Belt-and-suspenders against the runner-default trap
  # (feedback_act_runner_github_server_url). Runners are configured
  # with this env via /opt/molecule/runners/config.yaml, but pinning
  # at the workflow level protects against a runner regenerated
  # without the config file.
  GITHUB_SERVER_URL: https://git.moleculesai.app
# Read-only scopes — the gate signals by failing its own job; nothing
# in this workflow writes back to the PR.
permissions:
  contents: read
  pull-requests: read
# Per-PR concurrency — re-pushes cancel previous runs to keep the
# queue short. The lint is cheap (one git show + log + a YAML parse).
concurrency:
  group: lint-mask-pr-atomicity-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
jobs:
  scan:
    name: lint-mask-pr-atomicity
    runs-on: ubuntu-latest
    timeout-minutes: 5
    # Phase 3 (RFC #219 §1): surface broken shapes without blocking
    # PRs. Follow-up PR flips this to `false` once recent runs on main
    # are confirmed clean (eat-our-own-dogfood discipline mirrors
    # PR#673's same-shape comment). Tracking: mc#774.
    # mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
    continue-on-error: true
    steps:
      - name: Check out PR head with full history (need base SHA blobs)
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          # `git show <base-sha>:<path>` needs the base SHA's blobs.
          # Shallow=1 would miss it. Same rationale as PR#673 and
          # check-migration-collisions.yml.
          fetch-depth: 0
      - name: Set up Python (PyYAML for AST parsing)
        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
        with:
          python-version: '3.12'
      - name: Install PyYAML
        # Same pin as ci-required-drift.yml + the rest of the Tier 2
        # lint family — keep runner-cache hits uniform.
        run: python -m pip install --quiet 'PyYAML==6.0.2'
      - name: Ensure base ref is reachable locally
        # fetch-depth=0 usually pulls the base too, but explicit-fetch
        # is cheap insurance against runner-version drift (matches the
        # comment in check-migration-collisions.yml and PR#673).
        run: |
          git fetch origin "${{ github.event.pull_request.base.ref }}" || true
      - name: Run lint-mask-pr-atomicity
        env:
          BASE_SHA: ${{ github.event.pull_request.base.sha }}
          HEAD_SHA: ${{ github.event.pull_request.head.sha }}
          # PR body — the script greps for `Paired: #NNN`.
          PR_BODY: ${{ github.event.pull_request.body }}
          CI_WORKFLOW_PATH: .gitea/workflows/ci.yml
          SENTINEL_JOB_KEY: all-required
        run: python3 .gitea/scripts/lint_mask_pr_atomicity.py
      - name: Run lint-mask-pr-atomicity unit tests
        # Run the test suite in-CI so the lint's own behaviour is
        # verified on every change. Matches lint-workflow-yaml.yml.
        run: |
          python -m pip install --quiet pytest
          python3 -m pytest tests/test_lint_mask_pr_atomicity.py -v

View File

@ -1,141 +0,0 @@
name: Lint pre-flip continue-on-error
# Pre-merge gate: blocks PRs that flip `continue-on-error: true → false`
# on any job in `.gitea/workflows/*.yml` WITHOUT proof that the affected
# job's recent runs on the target branch (PR base) are actually green.
#
# Empirical class: PR #656 / mc#774. PR #656 (RFC internal#219 Phase 4)
# flipped 5 platform-build-class jobs `continue-on-error: true → false`
# on the basis of a "verified green on main via combined-status check".
# But that "green" was the LIE the prior `continue-on-error: true`
# produced: Gitea Quirk #10 (internal#342 + dup #287) — a failed step
# inside a `continue-on-error: true` job rolls up to a `success`
# job-level status. The precondition the PR claimed to verify was
# structurally fooled by the bug being flipped.
#
# mc#774 captured the surfaced defects (2 mutually-masked regressions):
#   - Class 1: sqlmock helper drift since 2f36bb9a (24 days old)
#   - Class 2: OFFSEC-001 contract collision since 7d1a189f (1 day old)
#
# Codified 04:35Z as hongming-pc2 charter §SOP-N rule (e)
# "run-log-grep-before-flip" — now structurally enforced here at PR
# time, ahead of merge.
#
# How the gate works:
#   1. Read every `.gitea/workflows/*.yml` at the PR base SHA AND at
#      the PR head SHA via `git show <sha>:<path>` (no checkout
#      needed).
#   2. Parse both sides via PyYAML AST (NOT grep — per
#      `feedback_behavior_based_ast_gates`). Walk `jobs.<key>.
#      continue-on-error` on each side. A flip is base=true,
#      head=false.
#   3. For each flipped job, render the commit-status context as
#      `"{workflow.name} / {job.name or job.key} (push)"` — that's
#      how Gitea Actions emits the per-context status on `main`/
#      `staging` runs.
#   4. Pull last 5 commits on the PR base branch, fetch combined
#      commit-status per commit, scan for the target context. For
#      each match, fetch the run log via the web-UI route
#      `{server_url}/{repo}/actions/runs/{run_id}/jobs/{job_idx}/logs`
#      (per `reference_gitea_actions_log_fetch` —
#      Gitea 1.22.6 lacks REST `/actions/runs/*`; web-UI is the
#      only working path, see also
#      `reference_gitea_1_22_6_lacks_rest_rerun_endpoints`).
#   5. Grep each log for `--- FAIL`, `FAIL\s`, `::error::`. If
#      the status is `success` but the log shows any of these,
#      the job was masked. Block the PR with `::error::`.
#
# Graceful-degrade contract (per task halt-conditions):
#   - Log fetch 404 (act_runner pruned the log, transient outage):
#     emit `::warning::` "log unavailable" — does NOT block.
#   - Zero recent runs of the flipped job's context on the base
#     branch (newly added workflow): emit `::warning::` "no run
#     history to verify" — allow the flip. Chicken-and-egg
#     exemption.
#   - YAML parse error in one of the workflow files: warn-only,
#     don't block — the YAML lint workflows catch this separately.
#
# Cross-links: PR#656, mc#774, PR#665 (interim re-mask),
# Quirk #10 (internal#342 + dup #287), hongming-pc2 charter
# §SOP-N rule (e), feedback_strict_root_only_after_class_a,
# feedback_no_shared_persona_token_use.
#
# Phase contract (RFC internal#219 §1 ladder):
#   - This workflow lands at `continue-on-error: true` (Phase 3 —
#     surface defects without blocking). Follow-up PR flips it to
#     `false` ONLY after this workflow's own recent runs on `main`
#     are confirmed clean — exactly the discipline the workflow
#     itself enforces. Eat your own dogfood.
on:
  pull_request:
    types: [opened, synchronize, reopened]
    paths:
      - '.gitea/workflows/**'
      - '.gitea/scripts/lint_pre_flip_continue_on_error.py'
      - '.gitea/workflows/lint-pre-flip-continue-on-error.yml'
env:
  # Per `feedback_act_runner_github_server_url` — without this,
  # actions/checkout and friends default to github.com → break.
  GITHUB_SERVER_URL: https://git.moleculesai.app
permissions:
  contents: read
  # Need read on the API to pull combined commit-status + commit list
  # for the base branch. The job-log fetch uses the same token via
  # the web-UI route (Gitea 1.22.6 accepts `Authorization: token ...`
  # there).
  pull-requests: read
concurrency:
  # Key on the PR number (ref fallback outside PR context) so a
  # re-push lands in the SAME group and cancels the stale in-flight
  # run — matching the sibling Tier-2 lints. Keying on head.sha gave
  # every push its own group, so cancel-in-progress never actually
  # cancelled anything.
  group: lint-pre-flip-coe-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
jobs:
  scan:
    name: Verify continue-on-error flips have run-log proof
    runs-on: ubuntu-latest
    timeout-minutes: 8
    # Phase 3 (RFC internal#219 §1): surface broken flips without blocking
    # the PR yet. Follow-up flips this to `false` once the workflow itself
    # has clean recent runs on main. mc#774 interim — remove when CoE→false.
    continue-on-error: true # mc#774
    steps:
      - name: Check out PR head (full history for base-SHA access)
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          # `git show <base-sha>:<path>` needs the base SHA's blobs.
          # Shallow=1 would miss it. Same rationale as
          # check-migration-collisions.yml.
          fetch-depth: 0
      - name: Set up Python (PyYAML for AST parsing)
        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
        with:
          python-version: '3.12'
      - name: Install PyYAML
        # Same pin as ci-required-drift.yml — keep dependencies
        # uniform so a Gitea runner cache hits across both jobs.
        run: python -m pip install --quiet 'PyYAML==6.0.2'
      - name: Ensure base ref is reachable locally
        # `actions/checkout@v6 fetch-depth=0` usually pulls the base
        # too, but explicit-fetch is cheap insurance against the
        # form-of-ref differences across Gitea runner versions
        # (mirrors the comment in check-migration-collisions.yml).
        run: |
          git fetch origin "${{ github.event.pull_request.base.ref }}" || true
      - name: Run lint
        env:
          # Auto-injected by Gitea Actions; sufficient scope for
          # combined-status + commit-list + log fetch via web-UI
          # route. NO repo-admin needed (unlike the
          # branch_protections endpoint).
          GITEA_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITEA_HOST: git.moleculesai.app
          REPO: ${{ github.repository }}
          BASE_REF: ${{ github.event.pull_request.base.ref }}
          BASE_SHA: ${{ github.event.pull_request.base.sha }}
          HEAD_SHA: ${{ github.event.pull_request.head.sha }}
          # Last 5 commits on the base branch is the spec default.
          RECENT_COMMITS_N: '5'
        run: python3 .gitea/scripts/lint_pre_flip_continue_on_error.py

View File

@ -1,118 +0,0 @@
name: lint-required-context-exists-in-bp
# Tier 2g hard-gate lint (per mc#774) — diff-based PR-time
# check. When a PR adds a NEW commit-status emission (workflow YAML
# `name:` + job `name:`-or-key + on:-event), the workflow file must
# carry one of three directives adjacent to the new job:
#
#   - `# bp-required: yes`          — and BP must list the context
#   - `# bp-required: pending #NNN` — acknowledged asymmetry + tracker
#   - `# bp-exempt: <reason>`       — informational job, not a gate
#
# Default (no directive on a new emitter) = FAIL.
#
# Why this exists
# ---------------
# PR#656 added `CI / all-required (pull_request)` as a sentinel
# context that workflows emit, but BP did NOT list it. When
# platform-build failed, all-required failed, but BP let the PR
# merge anyway → cascade to mc#774. With this lint, PR#656 would
# have been blocked until either the BP PATCH ran alongside OR
# the author added a `bp-required: pending` directive.
#
# Tier 2g vs Tier 2f
# ------------------
# Tier 2g runs at PR-time (diff-based) and BLOCKS the merge.
# Tier 2f runs daily (scheduled) and FILES a drift issue. They
# share the workflow-context enumeration helpers
# (`_event_map`, `workflow_contexts`, `_job_display`) but the
# semantics are intentionally distinct so they're separate scripts.
# Co-design is documented in mc#774.
#
# Directive comment lives in the workflow file (NOT PR body)
# ----------------------------------------------------------
# A PR-body claim of "BP exempt" evaporates on merge — the
# asymmetry returns to undetected state and Tier 2f's daily
# scheduled audit can't see it. The directive must live with the
# emitter so both PR-time (Tier 2g) and post-merge (Tier 2f)
# readers consume the same source.
#
# Phase contract (RFC internal#219 §1 ladder)
# -------------------------------------------
# Lands at `continue-on-error: true` (Phase 3 — surface the
# pattern without blocking PRs while the directive convention
# beds in). After 7 days of clean runs on `main` with no false
# positives, follow-up flips to `false`. Tracking: mc#774.
#
# Cross-links
# -----------
# - mc#774 (the RFC that specs this lint)
# - PR#656 (the empirical case)
# - mc#774 (the surfaced cascade)
# - feedback_phantom_required_check_after_gitea_migration (Tier 2f cousin)
# - feedback_behavior_based_ast_gates
#
# Auth: DRIFT_BOT_TOKEN (repo-admin for branch_protections read).
on:
  pull_request:
    types: [opened, synchronize, reopened]
    paths:
      - '.gitea/workflows/**'
      - '.gitea/scripts/lint_required_context_exists_in_bp.py'
      - '.gitea/workflows/lint-required-context-exists-in-bp.yml'
      - 'tests/test_lint_required_context_exists_in_bp.py'
env:
  GITHUB_SERVER_URL: https://git.moleculesai.app
permissions:
  contents: read
concurrency:
  # Per-PR grouping (ref fallback outside PR context): a re-push
  # supersedes and cancels the previous run for the same PR.
  group: lint-required-context-exists-in-bp-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
jobs:
  # bp-exempt: this lint is a PR-time advisory and is not intended to
  # be a required gate on main. The directive eat-our-own-dogfood
  # confirms the convention works on the lint that defines it.
  lint:
    name: lint-required-context-exists-in-bp
    runs-on: ubuntu-latest
    timeout-minutes: 5
    # Phase 3 (RFC #219 §1): surface the pattern without blocking PRs
    # while the directive convention beds in. Follow-up flip to false
    # after 7 clean days on main. mc#774.
    continue-on-error: true # mc#774 Phase 3 — flip to false after 7 clean main runs
    steps:
      - name: Check out PR head with full history (need base SHA blobs)
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          # `git show <base-sha>:<path>` needs the base SHA's blobs.
          # Same rationale as PR#673 and check-migration-collisions.yml.
          fetch-depth: 0
      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
        with:
          python-version: '3.12'
      - name: Install PyYAML
        run: python -m pip install --quiet 'PyYAML==6.0.2'
      - name: Ensure base ref is reachable locally
        # Cheap insurance against runner-version drift.
        run: |
          git fetch origin "${{ github.event.pull_request.base.ref }}" || true
      - name: Run lint-required-context-exists-in-bp
        env:
          # DRIFT_BOT_TOKEN — repo-admin (needed for branch_protections).
          GITEA_TOKEN: ${{ secrets.DRIFT_BOT_TOKEN }}
          GITEA_HOST: git.moleculesai.app
          REPO: ${{ github.repository }}
          # The protected branch whose BP status_check_contexts list is
          # consulted for `bp-required: yes` directives.
          BRANCH: main
          BASE_SHA: ${{ github.event.pull_request.base.sha }}
          HEAD_SHA: ${{ github.event.pull_request.head.sha }}
          WORKFLOWS_DIR: .gitea/workflows
        run: python3 .gitea/scripts/lint_required_context_exists_in_bp.py
      - name: Run lint-required-context-exists-in-bp unit tests
        run: |
          python -m pip install --quiet pytest
          python3 -m pytest tests/test_lint_required_context_exists_in_bp.py -v

View File

@ -1,96 +0,0 @@
# lint-required-no-paths — structural enforcement of
# `feedback_path_filtered_workflow_cant_be_required`.
#
# Fails the PR if ANY workflow whose status-check context appears in
# `branch_protections/main.status_check_contexts` carries a
# `paths:` or `paths-ignore:` filter in its `on:` block.
#
# Why this exists:
# A required-check workflow with a paths filter silently degrades the
# merge gate. If a PR's diff doesn't touch the filter, the workflow
# never fires; Gitea (1.22.6) reports the required context as
# `pending` (NOT `skipped == success`), so the PR cannot merge. For a
# docs-only PR against `paths: ['**.go']`, the PR is wedged forever.
#
# Previously prevented only by reviewer vigilance + the saved memory
# `feedback_path_filtered_workflow_cant_be_required`. This workflow
# makes it a hard CI gate.
#
# Forward-compat scope:
# Today (2026-05-11) molecule-core/main protects 3 contexts:
# - "Secret scan / Scan diff for credential-shaped strings (pull_request)"
# - "sop-tier-check / tier-check (pull_request)"
# - "CI / all-required (pull_request)"
# Per RFC#324 Step 2 the required-list expands to ~5 contexts
# (qa-review, security-review added). Each new required context's
# workflow must remain unconditional. This lint pins that contract.
#
# Meta-required-check:
# This workflow ITSELF deliberately has NO `paths:` filter on its `on:`
# block — otherwise a paths-non-matching PR could bypass the check.
# Self-evident from this file: only `pull_request` types + no paths.
#
# Auth:
# `GET /repos/.../branch_protections/{branch}` requires repo-admin
# role in Gitea 1.22.6. The workflow-default `GITHUB_TOKEN` is
# non-admin (read-only), so we re-use `DRIFT_BOT_TOKEN` (same persona
# that powers `ci-required-drift.yml` — verified working there).
# If `DRIFT_BOT_TOKEN` becomes unavailable, the script exits 0 with a
# loud `::error::` rather than red-X every PR — token-scope issues
# should be fixed at the token, not surfaced as a gate failure on
# every unrelated PR.
#
# Behavior-based gate per `feedback_behavior_based_ast_gates`:
# YAML AST walk (PyYAML), NOT grep. Workflow renames, formatting
# changes (block-scalar vs flow-style), or moving `paths:` between
# `pull_request:` and `pull_request_target:` all still detect.
#
# IMPORTANT — Gitea 1.22.6 parser quirk per
# `feedback_gitea_workflow_dispatch_inputs_unsupported`: do NOT add an
# `inputs:` block to `workflow_dispatch:` — Gitea 1.22.6 rejects the
# entire workflow as "unknown on type" and it registers for ZERO events.
name: lint-required-no-paths
on:
pull_request:
types: [opened, synchronize, reopened]
workflow_dispatch:
# Read protection + read local YAML. No writes.
permissions:
contents: read
# Only one in-flight run per PR — re-pushes cancel the previous run to
# keep the queue short. Required-list reads are cheap (one GET); the
# cancellation is just hygiene.
concurrency:
group: lint-required-no-paths-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
lint:
name: lint-required-no-paths
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- name: Check out repo (we read the workflow YAML files locally)
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Set up Python (PyYAML for AST parsing)
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.12'
- name: Install PyYAML
run: python -m pip install --quiet 'PyYAML==6.0.2'
- name: Run lint-required-no-paths
env:
# DRIFT_BOT_TOKEN is owned by mc-drift-bot, a least-privilege
# Gitea persona with repo-admin role for branch_protections
# read. Same secret used by ci-required-drift.yml — see that
# workflow's header for provisioning trail (internal#329).
GITEA_TOKEN: ${{ secrets.DRIFT_BOT_TOKEN }}
GITEA_HOST: git.moleculesai.app
REPO: ${{ github.repository }}
BRANCH: main
WORKFLOWS_DIR: .gitea/workflows
run: python3 .gitea/scripts/lint-required-no-paths.py

View File

@ -1,76 +0,0 @@
name: Lint workflow YAML (Gitea-1.22.6-hostile shapes)
# Tier-2 hard-gate lint (RFC internal#219 §1, charter §SOP-N rule (m)).
# Catches six Gitea-1.22.6-hostile workflow-YAML shapes BEFORE they reach
# `main`. Each rule maps to a documented incident in saved memory:
#
# 1. workflow_dispatch.inputs — feedback_gitea_workflow_dispatch_inputs_unsupported
# (2026-05-11 PyPI freeze 24h)
# 2. on: workflow_run — task #81 (Gitea 1.22.6 lacks the event)
# 3. name: containing "/" — breaks status-context tokenization
# 4. cross-file name collision — status-reaper rev1 fail-loud class
# 5. cross-repo uses: org/r/p@r — feedback_gitea_cross_repo_uses_blocked
# (DEFAULT_ACTIONS_URL=github → 404)
# 6. (WARN) api.github.com refs — feedback_act_runner_github_server_url
# without workflow-level GITHUB_SERVER_URL
#
# Empirical history this hardens against:
# - status-reaper rev1 caught rule-4 (name-collision) class
# - sop-tier-refire DOA'd on rule-2 (workflow_run partial)
# - #319 bootstrap-paradox (chained-defect class, related)
# - internal#329 dispatcher race (adjacent)
# - 2026-05-11 publish-runtime: rule-1, 24h PyPI freeze
#
# Triggers:
# - pull_request: pre-merge gate — block hostile shapes before they land
# - push: post-merge regression detection — catch direct-to-main edits
#
# Per RFC internal#219 §1 contract: continue-on-error: true during the
# surface-broken-shapes phase. Follow-up PR flips off after surfaced
# defects are triaged. The push-trigger ensures we catch regressions
# even if the pull_request gate is bypassed by branch-protection drift.
on:
pull_request:
paths:
- '.gitea/workflows/**'
- '.gitea/scripts/lint-workflow-yaml.py'
- 'tests/test_lint_workflow_yaml.py'
push:
branches: [main, staging]
paths:
- '.gitea/workflows/**'
- '.gitea/scripts/lint-workflow-yaml.py'
- 'tests/test_lint_workflow_yaml.py'
# Belt-and-suspenders against runner default
# (feedback_act_runner_github_server_url).
env:
GITHUB_SERVER_URL: https://git.moleculesai.app
jobs:
lint:
name: Lint workflow YAML for Gitea-1.22.6-hostile shapes
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken shapes without blocking PRs.
# Follow-up PR flips this off after the 4 existing-on-main rule-2
# (workflow_run) violations are migrated to a supported trigger.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
with:
python-version: '3.11'
- name: Install PyYAML
run: pip install --quiet 'PyYAML>=6.0'
- name: Lint .gitea/workflows/*.yml
run: python3 .gitea/scripts/lint-workflow-yaml.py
- name: Run lint-workflow-yaml unit tests
run: |
pip install --quiet pytest
python3 -m pytest tests/test_lint_workflow_yaml.py -v

View File

@ -37,15 +37,13 @@ name: main-red-watchdog
# "unknown on type" when `workflow_dispatch.inputs.X` is present. Revisit
# when Gitea ≥ 1.23 is fleet-wide.
on:
# SCHEDULE RE-ENABLED 2026-05-12 rev3 — interim disable (mc#645) reverted alongside
# status-reaper rev3 (widen-window). Job-level timeout-minutes raised 5 → 15 below
# to absorb runner-saturation latency without spurious cancels (the original cascade
# cause). If runner-saturation root persists, the dedicated-runner-label split
# remains the structural next step (tracked separately).
schedule:
# Hourly at :05 — task spec calls for "off-zero" (`5 * * * *`),
# offset from :17 (ci-required-drift) and :00 (peak cron load).
- cron: '5 * * * *'
# SCHEDULE DISABLED 2026-05-12 — interim per RFC#420 Option-C machinery-down emergency
# Watchdog timing out behind runner saturation; rev3+dedicated-runner-label in flight
# Re-enable after rev3 lands + runner saturation root resolved
# schedule:
# # Hourly at :05 — task spec calls for "off-zero" (`5 * * * *`),
# # offset from :17 (ci-required-drift) and :00 (peak cron load).
# - cron: '5 * * * *'
workflow_dispatch:
# Read commit status + branch ref + issues; write issues (open/PATCH/close).
@ -63,12 +61,7 @@ concurrency:
jobs:
watchdog:
runs-on: ubuntu-latest
# rev3 (2026-05-12, mc#645 revert): raised 5 → 15 to absorb runner-saturation
# latency. Original 5min cap was producing 124-style cancels under load,
# which fed the very `[main-red]` issues this workflow files (self-poisoning).
# 15min is still well below Gitea-default 6h job ceiling; if a real hang
# occurs the issue-file path is still the alarm surface.
timeout-minutes: 15
timeout-minutes: 5
steps:
- name: Check out repo (script lives at .gitea/scripts/)
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

View File

@ -9,12 +9,18 @@ name: publish-canvas-image
# - Workflow-level env.GITHUB_SERVER_URL pinned per
# feedback_act_runner_github_server_url.
# - `continue-on-error: true` on each job (RFC §1 contract).
# - Retargeted the image push from GHCR to ECR. GHCR was retired during
# the 2026-05-06 Gitea migration, and Gitea's GITHUB_TOKEN cannot
# authenticate to ghcr.io.
# - **Open question for review**: this workflow pushes the canvas
# image to `ghcr.io`. GHCR was retired during the 2026-05-06
# Gitea migration in favor of ECR (per staging-verify.yml header
# notes). The image may not be consumable post-migration. Two
# options for follow-up: (a) retarget to
# `153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/canvas`,
# or (b) retire this workflow entirely and route canvas deploys
# via the operator-host build path. tier:low + continue-on-error
# means failed pushes do not block PRs.
#
# Builds and pushes the canvas Docker image to ECR whenever a commit lands
# Builds and pushes the canvas Docker image to GHCR whenever a commit lands
# on main that touches canvas code. Previously canvas changes were visible in
# CI (npm run build passed) but the live container was never updated —
# operators had to manually run `docker compose build canvas` each time.
@ -39,10 +45,10 @@ on:
permissions:
contents: read
packages: write
packages: write # required to push to ghcr.io/${{ github.repository_owner }}/*
env:
IMAGE_NAME: 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/canvas
IMAGE_NAME: ghcr.io/molecule-ai/canvas
GITHUB_SERVER_URL: https://git.moleculesai.app
jobs:
@ -56,43 +62,21 @@ jobs:
# See issue #576 + infra-lead pulse ~00:30Z.
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Log in to ECR
env:
IMAGE_NAME: ${{ env.IMAGE_NAME }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: us-east-2
run: |
set -euo pipefail
ECR_REGISTRY="${IMAGE_NAME%%/*}"
aws ecr get-login-password --region us-east-2 | \
docker login --username AWS --password-stdin "${ECR_REGISTRY}"
- name: Log in to GHCR
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0
- name: Ensure ECR repository exists
env:
IMAGE_NAME: ${{ env.IMAGE_NAME }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: us-east-2
run: |
set -euo pipefail
repo_path="${IMAGE_NAME#*/}"
if ! aws ecr describe-repositories --repository-names "${repo_path}" --region us-east-2 >/dev/null 2>&1; then
aws ecr create-repository \
--repository-name "${repo_path}" \
--image-scanning-configuration scanOnPush=true \
--region us-east-2 >/dev/null
fi
# Health check: verify Docker daemon is accessible before attempting any
# build steps. This fails loudly at step 1 when the runner's docker.sock
# is inaccessible rather than silently continuing to the build step
@ -102,14 +86,12 @@ jobs:
set -euo pipefail
echo "::group::Docker daemon health check"
echo "Runner: ${HOSTNAME:-unknown}"
docker_info="$(docker info 2>&1)" || {
docker info 2>&1 | head -5 || {
echo "::error::Docker daemon is not accessible at /var/run/docker.sock"
echo "::error::Runner: ${HOSTNAME:-unknown}"
printf '%s\n' "${docker_info}"
echo "::error::Check: (1) daemon running, (2) runner user in docker group, (3) sock perms 660+"
exit 1
}
printf '%s\n' "${docker_info}" | sed -n '1,5p'
echo "Docker daemon OK"
echo "::endgroup::"
@ -143,7 +125,7 @@ jobs:
echo "platform_url=${PLATFORM_URL}" >> "$GITHUB_OUTPUT"
echo "ws_url=${WS_URL}" >> "$GITHUB_OUTPUT"
- name: Build & push canvas image to ECR
- name: Build & push canvas image to GHCR
uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0
with:
context: ./canvas
@ -156,10 +138,9 @@ jobs:
tags: |
${{ env.IMAGE_NAME }}:latest
${{ env.IMAGE_NAME }}:sha-${{ steps.tags.outputs.sha }}
# Gitea artifact-cache reachability is best-effort on the operator
# runner network. Do not let cache export fail an image that already
# built and pushed successfully.
cache-from: type=gha
cache-to: type=gha,mode=max
labels: |
org.opencontainers.image.source=https://git.moleculesai.app/${{ github.repository }}
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.description=Molecule AI canvas (Next.js 15 + React Flow)

View File

@ -55,7 +55,6 @@ jobs:
# The actual bump work happens on the main/staging push after merge.
pr-validate:
runs-on: ubuntu-latest
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true # do not block PR merge on operational failures
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

View File

@ -20,12 +20,6 @@ name: publish-workspace-server-image
#
# ECR target: 153263036946.dkr.ecr.us-east-2.amazonaws.com/molecule-ai/*
# Required secrets: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AUTO_SYNC_TOKEN
#
# mc#711: Docker daemon not accessible on ubuntu-latest runner (molecule-canonical-1
# shows client-only in `docker info` — daemon not running). DinD mount is present but
# daemon doesn't respond. Fix: add diagnostic step showing socket info so ops can
# identify which runners have a live daemon. If no daemon is available, the job
# fails fast with actionable output rather than silent deep failure.
on:
push:
@ -58,25 +52,36 @@ env:
jobs:
build-and-push:
# REVERTED (infra/revert-docker-runner-label): `runs-on: ubuntu-latest` restored.
# The `docker` label is not registered on any act_runner. `runs-on: [ubuntu-latest, docker]`
# causes jobs to queue indefinitely with zero eligible runners — strictly worse than the
# pre-#599 coin-flip (50% success rate). Once the `docker` label is registered on
# ≥2 runners, re-apply the fix from #599 (infra/docker-runner-label).
# See issue #576 + infra-lead pulse ~00:30Z.
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Diagnose Docker daemon access
# Health check: verify Docker daemon is accessible before attempting any
# build steps. This fails loudly at step 1 when the runner's docker.sock
# is inaccessible (e.g. permission change, daemon restart, or group-membership
# drift) rather than silently continuing to step 2 where `docker build`
# fails deep in the process with a cryptic ECR auth error that doesn't
# surface the root cause. Also reports the daemon version so operator
# can correlate with runner host logs.
- name: Verify Docker daemon access
run: |
set -euo pipefail
echo "::group::Docker daemon diagnosis"
echo "::group::Docker daemon health check"
echo "Runner: ${HOSTNAME:-unknown}"
echo "--- Socket info ---"
ls -la /var/run/docker.sock 2>/dev/null || echo "/var/run/docker.sock: not found"
stat /var/run/docker.sock 2>/dev/null || true
echo "--- User info ---"
id
echo "--- docker version ---"
docker version 2>&1 || true
echo "--- docker info (full) ---"
docker info 2>&1 || echo "docker info failed: exit $?"
docker info 2>&1 | head -5 || {
echo "::error::Docker daemon is not accessible at /var/run/docker.sock"
echo "::error::Runner: ${HOSTNAME:-unknown}"
echo "::error::Check: (1) daemon is running, (2) runner user is in docker group, (3) sock permissions are 660+"
exit 1
}
echo "Docker daemon OK"
echo "::endgroup::"
# Pre-clone manifest deps before docker build.
@ -95,6 +100,9 @@ jobs:
MOLECULE_GITEA_TOKEN: ${{ secrets.AUTO_SYNC_TOKEN }}
run: |
set -euo pipefail
# clone-manifest.sh supports anonymous cloning for public repos (post-
# 2026-05-08 migration). The token is only needed for private repos.
# Do NOT require it — a missing secret would fail the build unnecessarily.
mkdir -p .tenant-bundle-deps
# Strip JSON5 comments before jq parsing — Integration Tester appends
# `// Triggered by ...` which breaks `jq` in clone-manifest.sh.

View File

@ -51,7 +51,6 @@ jobs:
name: Audit Railway env vars for drift-prone pins
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
timeout-minutes: 10

View File

@ -9,11 +9,12 @@ name: redeploy-tenants-on-main
# - Workflow-level env.GITHUB_SERVER_URL pinned per
# feedback_act_runner_github_server_url.
# - `continue-on-error: true` on each job (RFC §1 contract).
# - ~~**Gitea workflow_run trigger limitation**~~ FIXED: replaced with
# push+paths filter per this PR. Gitea 1.22.6 does not support
# `workflow_run` (task #81). The push trigger fires on every
# commit to publish-workspace-server-image.yml which is the
# same signal (only successful runs commit to main).
# - **Gitea workflow_run trigger limitation**: Gitea 1.22.6's support
# for the `workflow_run` event is partial. If this never fires on a
# real publish-workspace-server-image completion, the follow-up
# triage PR should replace the trigger with a push-with-paths-filter
# on .gitea/workflows/publish-workspace-server-image.yml. Until
# then continue-on-error+dead-workflow doesn't break anything.
#
# Auto-refresh prod tenant EC2s after every main merge.
@ -49,11 +50,10 @@ name: redeploy-tenants-on-main
# target_tag=<sha>, re-pulling the older image on every tenant.
on:
push:
workflow_run:
workflows: ['publish-workspace-server-image']
types: [completed]
branches: [main]
paths:
- '.gitea/workflows/publish-workspace-server-image.yml'
workflow_dispatch:
permissions:
contents: read
# No write scopes needed — the workflow hits an external CP endpoint,
@ -86,7 +86,6 @@ jobs:
if: ${{ github.event.workflow_run.conclusion == 'success' }}
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
timeout-minutes: 25
steps:

View File

@ -9,13 +9,12 @@ name: redeploy-tenants-on-staging
# - Workflow-level env.GITHUB_SERVER_URL pinned per
# feedback_act_runner_github_server_url.
# - `continue-on-error: true` on each job (RFC §1 contract).
# - ~~**Gitea workflow_run trigger limitation**~~ FIXED: replaced with
# push+paths filter per this PR. Gitea 1.22.6 does not support
# `workflow_run` (task #81). The push trigger fires on every
# commit to publish-workspace-server-image.yml which is the
#   same signal (only successful runs commit to main). Removed the
#   `workflow_run.conclusion==success` job-level `if`, since a push
#   implies the workflow completed and committed.
# - **Gitea workflow_run trigger limitation**: Gitea 1.22.6's support
# for the `workflow_run` event is partial. If this never fires on a
# real publish-workspace-server-image completion, the follow-up
# triage PR should replace the trigger with a push-with-paths-filter
# on .gitea/workflows/publish-workspace-server-image.yml. Until
# then continue-on-error+dead-workflow doesn't break anything.
#
# Auto-refresh staging tenant EC2s after every staging-branch merge.
@ -51,11 +50,10 @@ name: redeploy-tenants-on-staging
# of a known-good build.
on:
push:
branches: [staging]
paths:
- '.gitea/workflows/publish-workspace-server-image.yml'
workflow_dispatch:
workflow_run:
workflows: ['publish-workspace-server-image']
types: [completed]
branches: [main]
permissions:
contents: read
# No write scopes needed — the workflow hits an external CP endpoint,
@ -74,9 +72,14 @@ env:
jobs:
redeploy:
# Skip the auto-trigger if publish-workspace-server-image didn't
# actually succeed. workflow_run fires on any completion state; we
# don't want to redeploy against a half-built image.
# NOTE (Gitea port): workflow_dispatch trigger dropped; only the
# workflow_run path remains.
if: ${{ github.event.workflow_run.conclusion == 'success' }}
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
timeout-minutes: 25
steps:

View File

@ -53,7 +53,6 @@ jobs:
# runners with internet access to package mirrors). Falls back to GitHub
# binary download. GitHub releases may be blocked on some runner networks
# (infra#241 follow-up).
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
run: |
if apt-get update -qq && apt-get install -y -qq jq; then

View File

@ -67,7 +67,6 @@ jobs:
# Phase 3 (RFC #219 §1): surface broken workflows without blocking
# the PR. Follow-up PR flips this off after surfaced defects are
# triaged.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

View File

@ -52,7 +52,6 @@ jobs:
detect-changes:
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
outputs:
wheel: ${{ steps.decide.outputs.wheel }}
@ -97,7 +96,6 @@ jobs:
name: PR-built wheel + import smoke
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
steps:
- name: No-op pass (paths filter excluded this commit)

View File

@ -57,7 +57,6 @@ jobs:
name: Detect SECRET_PATTERNS drift
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
timeout-minutes: 5
steps:

View File

@ -1,121 +0,0 @@
# sop-checklist-gate — peer-ack merge gate for SOP-checklist items.
#
# RFC#351 Step 2 of 6 (implementation MVP).
#
# === DESIGN ===
#
# Goal: each PR must answer 7 SOP-checklist questions in its body,
# and each item must have at least one /sop-ack <slug> comment from
# a non-author peer in the required team. BP requires the
# `sop-checklist / all-items-acked (pull_request)` status to merge.
#
# Triggers:
# - `pull_request_target`: opened, edited, synchronize, reopened
# → fires when PR opens, body is edited (refire — RFC#351 §4),
# or new code is pushed (head.sha changes → stale status would
# be auto-discarded by BP via dismiss_stale_reviews, but the
# status itself is per-SHA so we re-post on the new head).
# - `issue_comment`: created, edited, deleted
# → fires on any new comment so /sop-ack / /sop-revoke take
# effect immediately (Gitea 1.22.6 doesn't refire on
# pull_request_review per feedback_pull_request_review_no_refire,
# so issue_comment is the canonical refire channel).
#
# Trust boundary (mirrors RFC#324 §A4 + sop-tier-check security note):
# `pull_request_target` (not `pull_request`) — workflow def is loaded
# from BASE branch, so a PR cannot rewrite this workflow to exfiltrate
# the token. The `actions/checkout` step pins `ref: base.sha` so the
# script ALSO comes from BASE. PR-HEAD code is never executed in the
# runner.
#
# Token scope:
# - read:repository, read:organization for PR + comments + team probes
# - write:repository for POST /statuses/{sha}
# - The token owner MUST be a member of every team referenced by the
# config's required_teams (else /teams/{id}/members/{login} returns
# 403 — see review-check.sh same-gotcha doc). For the MVP we use
# the dev-lead token (a member of engineers, managers, qa, security)
# via a repo secret `SOP_CHECKLIST_GATE_TOKEN`. Provisioning of that
# secret is a follow-up authorization step (separate from this PR).
#
# Failure mode: tier-aware (RFC#351 open question 2):
# - tier:high → state=failure (hard-fail; BP blocks merge)
# - tier:medium → state=failure (hard-fail; same)
# - tier:low → state=pending (soft-fail; BP can choose to require
# this context or skip for low-tier PRs)
# - missing/no-tier → state=failure (default-mode: hard — never lower
# the bar per feedback_fix_root_not_symptom)
#
# Slash-command contract (RFC#351 v1 + §A1.1-style notes from RFC#324):
#
# /sop-ack <slug-or-numeric-alias> [optional note]
# — register a peer-ack for one checklist item.
# — slug accepts kebab-case, snake_case, or natural-spaces
# (all normalize to canonical kebab-case).
# — numeric 1..7 maps via config.items[*].numeric_alias.
# — most-recent (user, slug) directive wins.
#
# /sop-revoke <slug-or-numeric-alias> [reason]
# — invalidate the commenter's own prior /sop-ack for this slug.
# — does NOT affect other peers' acks on the same slug.
# — most-recent (user, slug) directive wins, so a later /sop-ack
# re-restores the ack.
#
# The eval is read-only + idempotent (read PR + comments + team
# membership, compute, post status). Re-running on any event is safe —
# the new status overwrites the previous one for the same context.
name: sop-checklist-gate
on:
pull_request_target:
types: [opened, edited, synchronize, reopened, labeled, unlabeled]
issue_comment:
types: [created, edited, deleted]
permissions:
contents: read
pull-requests: read
# NOTE: `statuses: write` is the GitHub-Actions name for POST /statuses.
# Gitea 1.22.6 may not gate on this permission key (it just checks the
# token), but listing it explicitly documents intent for the next
# platform-version upgrade.
statuses: write
jobs:
gate:
# Run on pull_request_target events always. On issue_comment events,
# only when the comment is on a PR (issue_comment fires for issues
# too) and the body contains one of the slash-commands.
if: |
github.event_name == 'pull_request_target' ||
(github.event_name == 'issue_comment' &&
github.event.issue.pull_request != null &&
(contains(github.event.comment.body, '/sop-ack') ||
contains(github.event.comment.body, '/sop-revoke')))
runs-on: ubuntu-latest
steps:
- name: Check out BASE ref (trust boundary — never PR-head)
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
# For pull_request_target, the default branch is the trust
# anchor. For issue_comment the PR base may differ from the
# default branch (PR targeting `staging`), so we use the
# default-branch ref explicitly — same approach as
# qa-review.yml so the script source is always trusted.
ref: ${{ github.event.repository.default_branch }}
- name: Run sop-checklist-gate
env:
GITEA_TOKEN: ${{ secrets.SOP_CHECKLIST_GATE_TOKEN || secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number || github.event.issue.number }}
OWNER: ${{ github.repository_owner }}
REPO_NAME: ${{ github.event.repository.name }}
run: |
set -euo pipefail
python3 .gitea/scripts/sop-checklist-gate.py \
--owner "$OWNER" \
--repo "$REPO_NAME" \
--pr "$PR_NUMBER" \
--config .gitea/sop-checklist-config.yaml \
--gitea-host git.moleculesai.app

View File

@ -64,8 +64,7 @@ jobs:
tier-check:
runs-on: ubuntu-latest
# BURN-IN: continue-on-error prevents AND-composition from blocking
# PRs during the 7-day window. Remove after 2026-05-17 (mc#774).
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
# PRs during the 7-day window. Remove after 2026-05-17 (internal#189).
continue-on-error: true
permissions:
contents: read
@ -90,7 +89,6 @@ jobs:
# runners). The sop-tier-check script has its own fallback as a
# third line of defense. continue-on-error: true ensures this step
# failing does not block the job.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
run: |
# apt-get is the primary method — Ubuntu package mirrors are reliably
@ -111,7 +109,6 @@ jobs:
# continue-on-error: true at step level — job-level is ignored by Gitea
# Actions (quirk #10, internal runbooks). Belt-and-suspenders with
# SOP_FAIL_OPEN=1 + || true below.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
env:
GITEA_TOKEN: ${{ secrets.SOP_TIER_CHECK_TOKEN || secrets.GITHUB_TOKEN }}

View File

@ -11,14 +11,11 @@ name: Staging verify
# - Workflow-level env.GITHUB_SERVER_URL pinned per
# feedback_act_runner_github_server_url.
# - `continue-on-error: true` on each job (RFC §1 contract).
# - ~~**Gitea workflow_run trigger limitation**~~ FIXED: replaced with
# push+paths filter per this PR. Gitea 1.22.6 does not support
# `workflow_run` (task #81). The push trigger fires on every
# commit to publish-workspace-server-image.yml. Removed the
#   `workflow_run.conclusion==success` job-level `if`, since the push trigger
# doesn't carry completion state — the smoke test is the safety net
# (it will detect and abort on a bad image regardless). Added
# workflow_dispatch for manual runs.
# - **Gitea workflow_run trigger limitation**: Gitea 1.22.6's support
# for the `workflow_run` event is partial. If this never fires on a
# real publish-workspace-server-image completion, the follow-up
# triage PR should replace the trigger with a push-with-paths-filter
# on the same publish workflow's path (i.e. `.gitea/workflows/publish-workspace-server-image.yml`).
#
# Runs the canary smoke suite against the staging canary tenant fleet
@ -62,11 +59,9 @@ name: Staging verify
# are populated.
on:
push:
branches: [staging]
paths:
- '.gitea/workflows/publish-workspace-server-image.yml'
workflow_dispatch:
workflow_run:
workflows: ["publish-workspace-server-image"]
types: [completed]
permissions:
contents: read
packages: write
@ -83,9 +78,12 @@ env:
jobs:
staging-smoke:
# Skip when the upstream workflow failed — no image to test against.
# workflow_dispatch trigger dropped in this Gitea port; only the
# workflow_run path remains.
if: ${{ github.event.workflow_run.conclusion == 'success' }}
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
outputs:
sha: ${{ steps.compute.outputs.sha }}
@ -206,7 +204,6 @@ jobs:
if: ${{ needs.staging-smoke.result == 'success' && needs.staging-smoke.outputs.smoke_ran == 'true' }}
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
env:
SHA: ${{ needs.staging-smoke.outputs.sha }}

View File

@ -53,19 +53,16 @@ name: status-reaper
# `inputs:` block here. Gitea 1.22.6 rejects the whole workflow as
# "unknown on type" when `workflow_dispatch.inputs.X` is present.
on:
# SCHEDULE RE-ENABLED 2026-05-12 rev3 — interim disable (mc#645) reverted now that
# rev3 widens DEFAULT_SWEEP_LIMIT 10 → 30 (covers retroactive-failure timing window).
# Sibling watchdog re-enabled in the same PR with timeout-minutes raised 5 → 15.
schedule:
# Every 5 minutes. Off-zero alignment with sibling cron workflows:
# ci-required-drift (`:17`), main-red-watchdog (`:05`),
# railway-pin-audit (`:23`). 5-min cadence gives a tight enough
# close on schedule-triggered false-reds that main-red-watchdog
# (hourly :05) almost never files an issue on the false case.
# rev3 keeps `*/5` unchanged per hongming-pc2 03:25Z review:
# "trades window-width-cheap for cadence-loady" — N=30 widens
# the lookback cheaply without doubling runner load via `*/2`.
- cron: '*/5 * * * *'
# SCHEDULE DISABLED 2026-05-12 — interim per RFC#420 Option-C machinery-down emergency
# Reaper rev2 not compensating + watchdog timeout-cascade; rev3 in flight
# Re-enable after rev3 lands + runner saturation root resolved
# schedule:
# # Every 5 minutes. Off-zero alignment with sibling cron workflows:
# # ci-required-drift (`:17`), main-red-watchdog (`:05`),
# # railway-pin-audit (`:23`). 5-min cadence gives a tight enough
# # close on schedule-triggered false-reds that main-red-watchdog
# # (hourly :05) almost never files an issue on the false case.
# - cron: '*/5 * * * *'
workflow_dispatch:
# Compensating-status POST needs write on repo statuses; no other

View File

@ -29,26 +29,26 @@ name: Sweep stale AWS Secrets Manager secrets
# reconciler enumerator) is filed as a separate controlplane
# issue. This sweeper is the immediate cost-relief stopgap.
#
# AWS credentials: use the dedicated Secrets Manager janitor principal.
# Do not fall back to the molecule-cp application principal: it does
# not need account-wide ListSecrets, and a 2026-05-12 CI failure proved
# that using it here turns a least-privilege production credential into
# a red scheduled janitor.
# AWS credentials: the confirmed Gitea secrets are AWS_ACCESS_KEY_ID /
# AWS_SECRET_ACCESS_KEY (the molecule-cp IAM user). These are the same
# credentials used by the rest of the platform. The dedicated
# AWS_JANITOR_* naming (which the original GitHub workflow used) was
# never populated in Gitea — the existing secrets are AWS_ACCESS_KEY_ID /
# AWS_SECRET_ACCESS_KEY (per issue #425 §425 audit). These DO have
# secretsmanager:ListSecrets (the production molecule-cp principal);
# if ListSecrets is revoked in future, a dedicated janitor principal
# would need to be created and the Gitea secret names updated here.
#
# Safety: the script's MAX_DELETE_PCT gate (default 50%, mirroring
# sweep-cf-orphans.yml — tenant secrets are durable by design, unlike
# the mostly-orphan tunnels) refuses to nuke past the threshold.
on:
# Disabled as an hourly schedule until the dedicated
# AWS_SECRETS_JANITOR_* key exists in the key-management SSOT and is
# mirrored into Gitea. Falling back to the molecule-cp app principal is
# intentionally not allowed: it lacks account-wide ListSecrets, and
# granting that to an application credential would weaken least privilege.
#
# Keep the manual trigger so operators can validate the workflow immediately
# after provisioning the janitor key, then restore the hourly :30 schedule.
workflow_dispatch:
schedule:
# Hourly at :30 — offsets from sweep-cf-orphans (:15) and
# sweep-cf-tunnels (:45) so the three janitors don't burst the
# CP admin endpoints at the same minute.
- cron: '30 * * * *'
# Don't let two sweeps race the same AWS account.
concurrency:
group: sweep-aws-secrets
@ -65,7 +65,6 @@ jobs:
name: Sweep AWS Secrets Manager
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
# 30 min cap, mirroring the other janitors. AWS DeleteSecret is
# fast (~0.3s/call) so even a 100+ backlog drains in seconds
@ -74,8 +73,8 @@ jobs:
timeout-minutes: 30
env:
AWS_REGION: ${{ secrets.AWS_REGION || 'us-east-1' }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_SECRETS_JANITOR_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRETS_JANITOR_SECRET_ACCESS_KEY }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
CP_ADMIN_API_TOKEN: ${{ secrets.CP_ADMIN_API_TOKEN }}
CP_STAGING_ADMIN_API_TOKEN: ${{ secrets.CP_STAGING_ADMIN_API_TOKEN }}
MAX_DELETE_PCT: ${{ github.event.inputs.max_delete_pct || '50' }}

View File

@ -71,7 +71,6 @@ jobs:
name: Sweep CF orphans
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
# 3 min surfaces hangs (CF API stall, AWS describe-instances stuck)
# within one cron interval instead of burning a full tick. Realistic

View File

@ -55,7 +55,6 @@ jobs:
name: Sweep CF tunnels
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
# 30 min cap. Was 5 min on the theory that the only thing that
# could take >5min is a CF-API hang — but on 2026-05-02 a backlog

View File

@ -11,9 +11,8 @@ name: Ops Scripts Tests
# - `continue-on-error: true` on the job (RFC §1 contract).
#
# Runs the unittest suite for scripts/ on every PR + push that touches
# anything under scripts/ or .gitea/scripts/. Kept separate from the main CI
# so a script-only change doesn't trigger the heavier Go/Canvas/Python
# pipelines.
# anything under scripts/. Kept separate from the main CI so a script-only
# change doesn't trigger the heavier Go/Canvas/Python pipelines.
#
# Discovery layout: tests sit alongside the code they test (see
# scripts/ops/test_sweep_cf_decide.py for the pattern; scripts/
@ -28,13 +27,11 @@ on:
branches: [main, staging]
paths:
- 'scripts/**'
- '.gitea/scripts/**'
- '.gitea/workflows/test-ops-scripts.yml'
pull_request:
branches: [main, staging]
paths:
- 'scripts/**'
- '.gitea/scripts/**'
- '.gitea/workflows/test-ops-scripts.yml'
env:
@ -49,15 +46,12 @@ jobs:
name: Ops scripts (unittest)
runs-on: ubuntu-latest
# Phase 3 (RFC #219 §1): surface broken workflows without blocking.
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
with:
python-version: '3.11'
- name: Install .gitea script test dependencies
run: python -m pip install --quiet 'pytest==9.0.2' 'PyYAML==6.0.2'
- name: Run scripts/ unittests (build_runtime_package, ...)
# Top-level scripts/ tests live alongside their target file
# (e.g. scripts/test_build_runtime_package.py exercises
@ -69,5 +63,3 @@ jobs:
- name: Run scripts/ops/ unittests (sweep_cf_decide, ...)
working-directory: scripts/ops
run: python -m unittest discover -p 'test_*.py' -v
- name: Run .gitea/scripts pytest suite
run: python -m pytest .gitea/scripts/tests -q

View File

@ -31,7 +31,6 @@ jobs:
name: Weekly Platform-Go Surface
runs-on: ubuntu-latest
# continue-on-error: surface only, never block
# mc#774: pre-existing continue-on-error mask; root-fix and remove, do not renew silently.
continue-on-error: true
defaults:
run:
@ -54,20 +53,9 @@ jobs:
- name: Build
run: go build ./cmd/server
# `go vet` is NOT `|| true`-guarded: surfacing latent vet errors on main is
# the whole point of this workflow (issue #567 — the motivating case was a
# `go vet` error in org_external.go that sat undetected on main for weeks).
# A vet error here fails the step → fails the job → shows red on the weekly
# commit. Per Gitea quirk #10 (job-level continue-on-error is ignored), that
# red surfaces on main — which is the intended signal, not a regression.
- name: go vet
run: go vet ./...
run: go vet ./... || true
# golangci-lint stays `|| true`-guarded: lint is noisier (more false-
# positives than vet) and golangci-lint may not be pre-installed on every
# runner image — a `|| true` here keeps a missing-binary or lint-noise case
# from masking the vet/test signal above. Tighten to match ci.yml's lint
# gate if/when ci.yml's lint step becomes hard-failing.
- name: golangci-lint
run: golangci-lint run --timeout 3m ./... || true

View File

@ -131,7 +131,6 @@ jobs:
- name: Install Playwright browsers
if: needs.detect-changes.outputs.canvas == 'true'
timeout-minutes: 10
run: npx playwright install --with-deps chromium
- name: Run staging canvas E2E

View File

@ -91,19 +91,16 @@ export function SearchDialog() {
if (!open) return null;
return (
<div className="fixed inset-0 z-[70] flex items-start justify-center pt-[20vh]">
{/* Backdrop — interactive dismiss area; aria-hidden so screen readers ignore it */}
<div
className="absolute inset-0 bg-black/50 backdrop-blur-sm cursor-pointer"
onClick={() => setOpen(false)}
aria-hidden="true"
/>
{/* Dialog */}
<div
className="fixed inset-0 z-[70] flex items-start justify-center pt-[20vh] bg-black/50 backdrop-blur-sm"
onClick={() => setOpen(false)}
>
<div
role="dialog"
aria-modal="true"
aria-label="Search workspaces"
className="relative z-[71] w-[420px] bg-surface/95 backdrop-blur-xl border border-line/60 rounded-2xl shadow-2xl shadow-black/50 overflow-hidden"
className="w-[420px] bg-surface/95 backdrop-blur-xl border border-line/60 rounded-2xl shadow-2xl shadow-black/50 overflow-hidden"
onClick={(e) => e.stopPropagation()}
>
{/* Search input */}
<div className="flex items-center gap-3 px-4 py-3 border-b border-line/40">

View File

@ -1,63 +0,0 @@
// @vitest-environment jsdom
/**
 * Unit tests for formatAuditRelativeTime — the pure date formatter from AuditTrailPanel.
*/
import { describe, it, expect } from "vitest";
import { formatAuditRelativeTime } from "../AuditTrailPanel";
describe("formatAuditRelativeTime", () => {
  // Fixed reference instant so results never depend on wall-clock time.
  const NOW = 1_700_000_000_000;
  // ISO timestamp at a signed millisecond offset from NOW.
  const isoAt = (deltaMs: number) => new Date(NOW + deltaMs).toISOString();

  it('returns "just now" for timestamps within the last minute', () => {
    expect(formatAuditRelativeTime(isoAt(-30_000), NOW)).toBe("just now");
  });

  it('returns "Xm ago" for timestamps within the last hour', () => {
    expect(formatAuditRelativeTime(isoAt(-5 * 60_000), NOW)).toBe("5m ago");
  });

  it('returns "Xh ago" for timestamps within the last day', () => {
    expect(formatAuditRelativeTime(isoAt(-3 * 3_600_000), NOW)).toBe("3h ago");
  });

  it("returns locale date string for timestamps older than 24h", () => {
    const twoDaysAgo = isoAt(-2 * 86_400_000);
    const result = formatAuditRelativeTime(twoDaysAgo, NOW);
    // Anything at or past 24h falls through to a date string, never a relative label.
    expect(result).not.toMatch(/m ago|h ago|just now/);
    expect(result).toBe(new Date(twoDaysAgo).toLocaleDateString());
  });

  it("handles the boundary between minute and hour correctly", () => {
    expect(formatAuditRelativeTime(isoAt(-3_600_000), NOW)).toBe("1h ago");
  });

  it("handles the boundary between hour and day correctly", () => {
    // 23h ago is still < 24h, so the hour label applies.
    expect(formatAuditRelativeTime(isoAt(-23 * 3_600_000), NOW)).toBe("23h ago");
  });

  it("returns locale date string for exactly 24h ago (boundary)", () => {
    const exactlyOneDayAgo = isoAt(-86_400_000);
    // A diff of exactly 86_400_000 is NOT < 86_400_000, so it falls through.
    expect(formatAuditRelativeTime(exactlyOneDayAgo, NOW)).toBe(
      new Date(exactlyOneDayAgo).toLocaleDateString()
    );
  });

  it("future timestamps return 'just now' (negative diff < 60_000)", () => {
    // A negative diff still satisfies diff < 60_000.
    expect(formatAuditRelativeTime(isoAt(60_000), NOW)).toBe("just now");
  });
});

View File

@ -1,90 +0,0 @@
// @vitest-environment jsdom
/**
* Unit tests for pure helpers from MemoryInspectorPanel:
* isPluginUnavailableError, formatRelativeTime, formatTTL
*
* These are the three exported non-component functions. The component
* itself (MemoryInspectorPanel) requires full API + store mocking and
* is exercised by the existing MemoryTab.test.tsx.
*/
import { describe, it, expect } from "vitest";
import { isPluginUnavailableError, formatTTL } from "../MemoryInspectorPanel";
// formatRelativeTime is not exported — tested via the component in MemoryTab.test.tsx
// Detection contract exercised below: only an Error whose message contains the
// exact uppercase token MEMORY_PLUGIN_URL counts as "plugin unavailable";
// non-Error values and lowercase variants are rejected.
describe("isPluginUnavailableError", () => {
  it("returns true when Error message contains MEMORY_PLUGIN_URL", () => {
    const err = new Error("memory: could not resolve MEMORY_PLUGIN_URL — plugin not configured");
    expect(isPluginUnavailableError(err)).toBe(true);
  });
  // Token anywhere in the message suffices — not just the "could not resolve" phrasing.
  it("returns true for Error containing MEMORY_PLUGIN_URL", () => {
    expect(isPluginUnavailableError(new Error("MEMORY_PLUGIN_URL is not set"))).toBe(true);
  });
  it("returns false for unrelated error messages", () => {
    expect(isPluginUnavailableError(new Error("workspace not found"))).toBe(false);
  });
  // Null/undefined/plain-object guards: the predicate must not throw on non-Errors.
  it("returns false for null", () => {
    expect(isPluginUnavailableError(null)).toBe(false);
  });
  it("returns false for undefined", () => {
    expect(isPluginUnavailableError(undefined)).toBe(false);
  });
  it("returns false for plain objects without message", () => {
    expect(isPluginUnavailableError({ code: 503 })).toBe(false);
  });
  it("is case-sensitive (MEMORY_PLUGIN_URL must match exactly)", () => {
    const lowerErr = new Error("memory_plugin_url missing");
    const upperErr = new Error("MEMORY_PLUGIN_URL missing");
    expect(isPluginUnavailableError(lowerErr)).toBe(false);
    expect(isPluginUnavailableError(upperErr)).toBe(true);
  });
});
describe("formatTTL", () => {
  // formatTTL measures against the real clock, so offsets are taken from
  // Date.now() at call time. Deltas are large enough (>= 30s) that the few
  // microseconds between helper call and assertion cannot flip a unit bucket.
  const isoFromNow = (deltaMs: number) => new Date(Date.now() + deltaMs).toISOString();

  it("returns '' for null", () => {
    expect(formatTTL(null)).toBe("");
  });

  it("returns '' for undefined", () => {
    expect(formatTTL(undefined)).toBe("");
  });

  it('returns "expired" when expiresAt is in the past', () => {
    expect(formatTTL(isoFromNow(-60_000))).toBe("expired");
  });

  it('returns "Xs" for less than a minute', () => {
    expect(formatTTL(isoFromNow(30_000))).toBe("30s");
  });

  it('returns "Xm" for less than an hour', () => {
    expect(formatTTL(isoFromNow(5 * 60_000))).toBe("5m");
  });

  it('returns "Xh" for less than a day', () => {
    expect(formatTTL(isoFromNow(3 * 3_600_000))).toBe("3h");
  });

  it('returns "Xd" for more than a day', () => {
    expect(formatTTL(isoFromNow(2 * 86_400_000))).toBe("2d");
  });

  it("returns '' for invalid date string", () => {
    expect(formatTTL("not-a-date")).toBe("");
  });

  it("returns '' for empty string", () => {
    expect(formatTTL("")).toBe("");
  });
});

View File

@ -1,390 +0,0 @@
// @vitest-environment jsdom
/**
 * Tests for SidePanel — general rendering and non-tab behaviors.
*
* Companion to SidePanel.tabs.test.tsx which covers tablist ARIA
* and localStorage width persistence.
*
* Covers:
* - Null when no node is selected
* - Null when selectedNodeId points to a missing node
* - Header: node name, role, tier badge
* - MetaPill capability summary pills
* - Resize handle: role=separator, aria-valuenow/min/max, aria-orientation
* - Resize handle: ArrowLeft/Right/Home/End keyboard nav
* - Needs-restart banner + Restart Now button
* - Current-task banner with pulsing dot
* - Footer shows workspace ID
* - Close button calls selectNode(null)
* - Tab switch via onClick fires setPanelTab
* - setSidePanelWidth called on mount
*/
import React from "react";
import { render, screen, fireEvent, cleanup } from "@testing-library/react";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { SidePanel } from "../SidePanel";
// ── Tab content stubs ───────────────────────────────────────────────────────
// Every tab component is stubbed to render null: this suite targets
// SidePanel's own chrome (header, resize handle, banners, footer), not
// the tab contents (those have their own test files).
vi.mock("../tabs/DetailsTab", () => ({ DetailsTab: () => null }));
vi.mock("../tabs/SkillsTab", () => ({ SkillsTab: () => null }));
vi.mock("../tabs/ChatTab", () => ({ ChatTab: () => null }));
vi.mock("../tabs/ConfigTab", () => ({ ConfigTab: () => null }));
vi.mock("../tabs/TerminalTab", () => ({ TerminalTab: () => null }));
vi.mock("../tabs/FilesTab", () => ({ FilesTab: () => null }));
vi.mock("../MemoryInspectorPanel", () => ({ MemoryInspectorPanel: () => null }));
vi.mock("../tabs/TracesTab", () => ({ TracesTab: () => null }));
vi.mock("../tabs/EventsTab", () => ({ EventsTab: () => null }));
vi.mock("../tabs/ActivityTab", () => ({ ActivityTab: () => null }));
vi.mock("../tabs/ScheduleTab", () => ({ ScheduleTab: () => null }));
vi.mock("../tabs/ChannelsTab", () => ({ ChannelsTab: () => null }));
vi.mock("../AuditTrailPanel", () => ({ AuditTrailPanel: () => null }));
vi.mock("../StatusDot", () => ({ StatusDot: () => null }));
// Tooltip passes children through so wrapped buttons stay queryable by role.
vi.mock("../Tooltip", () => ({
  Tooltip: ({ children }: { children: React.ReactNode }) => <>{children}</>,
}));
vi.mock("@/components/Toaster", () => ({ showToast: vi.fn() }));

// ── Canvas store mock — mutable so each test can reconfigure ───────────────
const mockSetPanelTab = vi.fn();
const mockSelectNode = vi.fn();
const mockSetSidePanelWidth = vi.fn();
const mockRestartWorkspace = vi.fn().mockResolvedValue(undefined);

// Baseline selected node: online, tier 2, no pending restart, no running
// task. Individual tests spread-override `data` fields for other states.
const BASE_NODE = {
  id: "ws-1",
  data: {
    name: "Test Workspace",
    status: "online" as const,
    tier: 2,
    role: "Engineer",
    parentId: null,
    needsRestart: false,
    currentTask: null,
    agentCard: null,
  },
};

// Mutable store state — tests reassign fields to test different states
let storeState = {
  selectedNodeId: "ws-1" as string | null,
  panelTab: "chat",
  setPanelTab: mockSetPanelTab,
  selectNode: mockSelectNode,
  setSidePanelWidth: mockSetSidePanelWidth,
  nodes: [BASE_NODE],
  restartWorkspace: mockRestartWorkspace,
};

vi.mock("@/store/canvas", () => ({
  // Selector-style hook mock; `getState` is attached for imperative reads
  // the component may make outside of render.
  useCanvasStore: Object.assign(
    vi.fn((selector: (s: typeof storeState) => unknown) => selector(storeState)),
    { getState: () => storeState }
  ),
  summarizeWorkspaceCapabilities: () => ({ runtime: "claude-code", skillCount: 3 }),
}));

beforeEach(() => {
  mockSetPanelTab.mockReset();
  mockSelectNode.mockReset();
  mockSetSidePanelWidth.mockReset();
  // mockReset clears the resolved value too, so re-arm it each test.
  mockRestartWorkspace.mockReset().mockResolvedValue(undefined);
  // Width-persistence tests read/write localStorage; start clean.
  localStorage.clear();
  // Reset store state to default
  storeState = {
    selectedNodeId: "ws-1",
    panelTab: "chat",
    setPanelTab: mockSetPanelTab,
    selectNode: mockSelectNode,
    setSidePanelWidth: mockSetSidePanelWidth,
    nodes: [BASE_NODE],
    restartWorkspace: mockRestartWorkspace,
  };
});

afterEach(() => {
  cleanup();
});
// ─── Null guard ──────────────────────────────────────────────────────────────
describe("SidePanel — null guard", () => {
it("returns null when selectedNodeId is null", () => {
storeState.selectedNodeId = null;
const { container } = render(<SidePanel />);
expect(container.firstChild).toBeNull();
});
it("returns null when selectedNodeId does not match any node", () => {
storeState.selectedNodeId = "nonexistent-ws";
storeState.nodes = [];
const { container } = render(<SidePanel />);
expect(container.firstChild).toBeNull();
});
});
// ─── Header ─────────────────────────────────────────────────────────────────
describe("SidePanel — header", () => {
it("shows node name in heading", () => {
render(<SidePanel />);
expect(screen.getByRole("heading", { name: "Test Workspace" })).toBeTruthy();
});
it("shows node role", () => {
render(<SidePanel />);
expect(screen.getByText("Engineer")).toBeTruthy();
});
it("shows tier badge with correct value", () => {
render(<SidePanel />);
// T2 appears in header badge AND meta pill — confirm at least one
const all = screen.getAllByText("T2");
expect(all.length).toBeGreaterThanOrEqual(1);
});
it("close button is present with aria-label", () => {
render(<SidePanel />);
expect(screen.getByRole("button", { name: /close workspace panel/i })).toBeTruthy();
});
it("close button calls selectNode(null)", () => {
render(<SidePanel />);
fireEvent.click(screen.getByRole("button", { name: /close workspace panel/i }));
expect(mockSelectNode).toHaveBeenCalledWith(null);
});
});
// ─── MetaPills ─────────────────────────────────────────────────────────────
describe("SidePanel — meta pills", () => {
it("renders Tier, Runtime, Skills, and Status pills in the meta row", () => {
render(<SidePanel />);
// All four labels appear somewhere in the meta pills row
expect(screen.getByText(/tier/i)).toBeTruthy();
expect(screen.getByText(/runtime/i)).toBeTruthy();
expect(screen.getByText(/skills/i)).toBeTruthy();
expect(screen.getByText(/status/i)).toBeTruthy();
});
it("shows correct runtime value in meta pill", () => {
render(<SidePanel />);
expect(screen.getByText("claude-code")).toBeTruthy();
});
it("shows skill count in meta pill", () => {
render(<SidePanel />);
expect(screen.getByText("3")).toBeTruthy();
});
});
// ─── Resize handle ──────────────────────────────────────────────────────────
describe("SidePanel — resize handle", () => {
it("has role=separator", () => {
render(<SidePanel />);
expect(screen.getByRole("separator")).toBeTruthy();
});
it("has aria-label='Resize workspace panel'", () => {
render(<SidePanel />);
expect(screen.getByRole("separator").getAttribute("aria-label")).toBe(
"Resize workspace panel"
);
});
it("has aria-valuenow=480 (default width)", () => {
render(<SidePanel />);
expect(screen.getByRole("separator").getAttribute("aria-valuenow")).toBe("480");
});
it("has aria-valuemin=320", () => {
render(<SidePanel />);
expect(screen.getByRole("separator").getAttribute("aria-valuemin")).toBe("320");
});
it("has aria-valuemax=800", () => {
render(<SidePanel />);
expect(screen.getByRole("separator").getAttribute("aria-valuemax")).toBe("800");
});
it("has aria-orientation=vertical", () => {
render(<SidePanel />);
expect(screen.getByRole("separator").getAttribute("aria-orientation")).toBe("vertical");
});
it("has tabIndex=0 (focusable)", () => {
render(<SidePanel />);
expect(screen.getByRole("separator").getAttribute("tabindex")).toBe("0");
});
it("ArrowLeft increases width by 16px (STEP — moves left edge rightward, widens panel)", () => {
render(<SidePanel />);
const sep = screen.getByRole("separator");
fireEvent.keyDown(sep, { key: "ArrowLeft" });
const panel = document.querySelector(".fixed") as HTMLElement;
expect(parseInt(panel.style.width, 10)).toBe(480 + 16); // widens
});
it("ArrowRight decreases width by 16px (STEP — moves left edge leftward, narrows panel)", () => {
render(<SidePanel />);
const sep = screen.getByRole("separator");
fireEvent.keyDown(sep, { key: "ArrowRight" });
const panel = document.querySelector(".fixed") as HTMLElement;
expect(parseInt(panel.style.width, 10)).toBe(480 - 16); // narrows
});
it("Home key sets width to MIN (320)", () => {
render(<SidePanel />);
fireEvent.keyDown(screen.getByRole("separator"), { key: "Home" });
const panel = document.querySelector(".fixed") as HTMLElement;
expect(parseInt(panel.style.width, 10)).toBe(320);
});
it("End key sets width to MAX (800)", () => {
render(<SidePanel />);
fireEvent.keyDown(screen.getByRole("separator"), { key: "End" });
const panel = document.querySelector(".fixed") as HTMLElement;
expect(parseInt(panel.style.width, 10)).toBe(800);
});
it("ArrowLeft persists new width to localStorage", () => {
render(<SidePanel />);
fireEvent.keyDown(screen.getByRole("separator"), { key: "ArrowLeft" });
expect(localStorage.getItem("molecule:sidepanel-width")).toBe(String(480 + 16));
});
it("Home persists new width to localStorage", () => {
render(<SidePanel />);
fireEvent.keyDown(screen.getByRole("separator"), { key: "Home" });
expect(localStorage.getItem("molecule:sidepanel-width")).toBe("320");
});
});
// ─── Needs-restart banner ────────────────────────────────────────────────────
describe("SidePanel — needs-restart banner", () => {
it("shows banner when needsRestart=true and no currentTask", () => {
storeState.nodes = [{ ...BASE_NODE, data: { ...BASE_NODE.data, needsRestart: true, currentTask: null } }];
render(<SidePanel />);
expect(screen.getByText(/config changed/i)).toBeTruthy();
expect(screen.getByRole("button", { name: /restart now/i })).toBeTruthy();
});
it("does NOT show banner when needsRestart=false", () => {
render(<SidePanel />);
expect(screen.queryByText(/config changed/i)).toBeNull();
expect(screen.queryByRole("button", { name: /restart now/i })).toBeNull();
});
it("Restart Now button calls restartWorkspace(selectedNodeId)", () => {
storeState.nodes = [{ ...BASE_NODE, data: { ...BASE_NODE.data, needsRestart: true, currentTask: null } }];
render(<SidePanel />);
fireEvent.click(screen.getByRole("button", { name: /restart now/i }));
expect(mockRestartWorkspace).toHaveBeenCalledWith("ws-1");
});
});
// ─── Current-task banner ────────────────────────────────────────────────────
describe("SidePanel — current-task banner", () => {
it("shows banner when currentTask is set", () => {
storeState.nodes = [{ ...BASE_NODE, data: { ...BASE_NODE.data, currentTask: "Deploying bundle..." } }];
render(<SidePanel />);
expect(screen.getByText("Deploying bundle...")).toBeTruthy();
});
it("does NOT show banner when currentTask is null", () => {
render(<SidePanel />);
expect(screen.queryByText(/deploying bundle/i)).toBeNull();
});
});
// ─── Footer ─────────────────────────────────────────────────────────────────
describe("SidePanel — footer", () => {
it("footer shows workspace ID in monospace font", () => {
render(<SidePanel />);
// ws-1 appears in the footer with font-mono class
expect(screen.getByText("ws-1")).toBeTruthy();
});
});
// ─── Tab switching ─────────────────────────────────────────────────────────
describe("SidePanel — tab switching", () => {
it("clicking Details tab calls setPanelTab('details')", () => {
render(<SidePanel />);
fireEvent.click(screen.getByRole("tab", { name: /details/i }));
expect(mockSetPanelTab).toHaveBeenCalledWith("details");
});
it("clicking Plugins tab calls setPanelTab('skills')", () => {
render(<SidePanel />);
fireEvent.click(screen.getByRole("tab", { name: /plugins/i }));
expect(mockSetPanelTab).toHaveBeenCalledWith("skills");
});
it("clicking Terminal tab calls setPanelTab('terminal')", () => {
render(<SidePanel />);
fireEvent.click(screen.getByRole("tab", { name: /terminal/i }));
expect(mockSetPanelTab).toHaveBeenCalledWith("terminal");
});
});
// ─── setSidePanelWidth ─────────────────────────────────────────────────────
describe("SidePanel — setSidePanelWidth side-effect", () => {
it("calls setSidePanelWidth with 480 (default width) on mount", () => {
render(<SidePanel />);
expect(mockSetSidePanelWidth).toHaveBeenCalledWith(480);
});
it("updates setSidePanelWidth after keyboard resize", () => {
render(<SidePanel />);
mockSetSidePanelWidth.mockClear();
fireEvent.keyDown(screen.getByRole("separator"), { key: "ArrowLeft" });
expect(mockSetSidePanelWidth).toHaveBeenCalledWith(480 + 16);
});
});
// ─── Width localStorage ────────────────────────────────────────────────────
describe("SidePanel — width localStorage", () => {
it("does not persist default width to localStorage on initial mount (only on user resize)", () => {
render(<SidePanel />);
// localStorage is only written by the keyboard resize handler, not on mount
expect(localStorage.getItem("molecule:sidepanel-width")).toBeNull();
});
it("reads saved width from localStorage", () => {
localStorage.setItem("molecule:sidepanel-width", "600");
const { container } = render(<SidePanel />);
const panel = container.firstChild as HTMLElement;
expect(panel.style.width).toBe("600px");
});
it("caps saved width to default when below minimum", () => {
localStorage.setItem("molecule:sidepanel-width", "100");
const { container } = render(<SidePanel />);
const panel = container.firstChild as HTMLElement;
expect(panel.style.width).toBe("480px");
});
});
// ─── Offline status ─────────────────────────────────────────────────────────
describe("SidePanel — offline status", () => {
it("shows tier badge even when node is offline", () => {
storeState.nodes = [{ ...BASE_NODE, data: { ...BASE_NODE.data, status: "offline" as const } }];
render(<SidePanel />);
// T2 appears in both header badge and meta pill — just confirm at least one exists
const all = screen.getAllByText("T2");
expect(all.length).toBeGreaterThanOrEqual(1);
});
it("shows 'offline' in the Status meta pill when node is offline", () => {
storeState.nodes = [{ ...BASE_NODE, data: { ...BASE_NODE.data, status: "offline" as const } }];
render(<SidePanel />);
expect(screen.getByText("offline")).toBeTruthy();
});
});

View File

@ -1,260 +0,0 @@
// @vitest-environment jsdom
/**
 * Tests for TemplatePalette — the floating sidebar drawer.
*
* Covers:
* - Toggle button aria-label (open / closed)
* - Sidebar renders when open, hides when closed
* - Sidebar header: "Templates" heading, subtitle
* - Loading state
* - Empty state ("No templates found")
* - Template cards: name, description, tier badge, skill pills
* - Deploy button calls deploy()
 * - Errors swallowed — empty state shown
* - setTemplatePaletteOpen called on open/close
* - OrgTemplatesSection rendered inside sidebar
* - Import Agent Folder button in footer
* - Refresh templates button in footer
*/
import React from "react";
import { render, screen, fireEvent, cleanup, act, waitFor } from "@testing-library/react";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
// ── Hoisted mocks — vi.hoisted() so they're available when vi.mock runs ──────
// IMPORTANT: use plain vi.fn() in the return object (NOT `const fn = vi.fn(); return { fn }`)
const { mockDeploy, mockSetTemplatePaletteOpen, mockGet } = vi.hoisted(() => ({
  mockDeploy: vi.fn(),
  mockSetTemplatePaletteOpen: vi.fn(),
  mockGet: vi.fn(),
}));

// Deploy hook stubbed to an idle state: no deploy in flight, no error, no modal.
vi.mock("@/hooks/useTemplateDeploy", () => ({
  useTemplateDeploy: () => ({
    deploy: mockDeploy,
    deploying: null,
    error: null,
    modal: null,
  }),
}));

// Store mock exposes only the setter this component selects.
vi.mock("@/store/canvas", () => ({
  useCanvasStore: vi.fn((selector: (s: { setTemplatePaletteOpen: typeof mockSetTemplatePaletteOpen }) => unknown) =>
    selector({ setTemplatePaletteOpen: mockSetTemplatePaletteOpen })
  ),
}));

// api.get backs template fetching; beforeEach arms it to resolve [].
vi.mock("@/lib/api", () => ({
  api: { get: mockGet },
}));

vi.mock("../OrgImportPreflightModal", () => ({
  OrgImportPreflightModal: () => null,
}));
vi.mock("../ConfirmDialog", () => ({
  ConfirmDialog: () => null,
}));
vi.mock("../Spinner", () => ({
  Spinner: () => <span data-testid="spinner" aria-hidden="true" />,
}));
vi.mock("../Toaster", () => ({ showToast: vi.fn() }));

// ── Component import — after all mocks ──────────────────────────────────────
import { TemplatePalette } from "../TemplatePalette";

beforeEach(() => {
  mockDeploy.mockReset();
  mockSetTemplatePaletteOpen.mockReset();
  // mockReset clears the resolved value, so re-arm the empty-list default.
  mockGet.mockReset().mockResolvedValue([]);
});

afterEach(() => {
  cleanup();
});

// ── Helpers ──────────────────────────────────────────────────────────────────
// Drain pending microtasks (resolved api.get promises) inside act().
async function flush() {
  await act(async () => { await Promise.resolve(); });
}

// Two representative templates: one with skills, one without.
const MOCK_TEMPLATES = [
  {
    id: "tmpl-1",
    name: "Software Engineer",
    description: "Best for writing code",
    tier: 1,
    skills: ["web-search", "read-file", "write-file"],
  },
  {
    id: "tmpl-2",
    name: "Researcher",
    description: "Deep research agent",
    tier: 2,
    skills: [],
  },
];
// ─── Toggle button ─────────────────────────────────────────────────────────
describe("TemplatePalette — toggle button", () => {
  /** Query the toggle in its closed state. */
  const openBtn = () =>
    screen.getByRole("button", { name: /open template palette/i });
  /** Query the toggle in its open state. */
  const closeBtn = () =>
    screen.getByRole("button", { name: /close template palette/i });
  /** Click the closed toggle and let the fetch microtask settle. */
  async function clickOpen() {
    fireEvent.click(openBtn());
    await flush();
  }

  it("has aria-label='Open template palette' when closed", () => {
    render(<TemplatePalette />);
    expect(openBtn()).toBeTruthy();
  });

  it("has aria-label='Close template palette' when open", async () => {
    render(<TemplatePalette />);
    await clickOpen();
    expect(closeBtn()).toBeTruthy();
  });

  it("clicking toggle opens sidebar", async () => {
    render(<TemplatePalette />);
    await clickOpen();
    expect(screen.getByRole("heading", { name: "Templates" })).toBeTruthy();
  });

  it("clicking toggle again closes sidebar", async () => {
    render(<TemplatePalette />);
    await clickOpen();
    fireEvent.click(closeBtn());
    await flush();
    expect(screen.queryByRole("heading", { name: "Templates" })).toBeNull();
  });

  it("calls setTemplatePaletteOpen(true) when opened", async () => {
    render(<TemplatePalette />);
    await clickOpen();
    expect(mockSetTemplatePaletteOpen).toHaveBeenCalledWith(true);
  });

  it("calls setTemplatePaletteOpen(false) when closed", async () => {
    render(<TemplatePalette />);
    await clickOpen();
    mockSetTemplatePaletteOpen.mockClear();
    fireEvent.click(closeBtn());
    await flush();
    expect(mockSetTemplatePaletteOpen).toHaveBeenCalledWith(false);
  });
});
// ─── Sidebar content ───────────────────────────────────────────────────────
describe("TemplatePalette — sidebar", () => {
  /**
   * Render the palette and click the toggle so the sidebar is visible.
   * Tests that need a non-default template list must configure mockGet
   * BEFORE calling this (the fetch fires on open).
   */
  async function renderOpen() {
    render(<TemplatePalette />);
    fireEvent.click(screen.getByRole("button", { name: /open template palette/i }));
    await flush();
  }

  it("shows 'Templates' heading", async () => {
    await renderOpen();
    expect(screen.getByRole("heading", { name: "Templates" })).toBeTruthy();
  });

  it("shows subtitle 'Click to deploy a workspace'", async () => {
    await renderOpen();
    expect(screen.getByText(/click to deploy a workspace/i)).toBeTruthy();
  });

  it("shows loading state", async () => {
    // A promise that never settles keeps the palette in its loading state.
    mockGet.mockReturnValue(new Promise(() => {}));
    await renderOpen();
    expect(screen.getByTestId("spinner")).toBeTruthy();
    expect(screen.getByText(/loading/i)).toBeTruthy();
  });

  it("shows empty state when no templates", async () => {
    mockGet.mockResolvedValue([]);
    await renderOpen();
    expect(screen.getByText(/no templates found/i)).toBeTruthy();
  });

  it("renders template cards", async () => {
    mockGet.mockResolvedValue(MOCK_TEMPLATES);
    await renderOpen();
    expect(screen.getByText("Software Engineer")).toBeTruthy();
    expect(screen.getByText("Researcher")).toBeTruthy();
  });

  it("shows template description", async () => {
    mockGet.mockResolvedValue(MOCK_TEMPLATES);
    await renderOpen();
    expect(screen.getByText(/best for writing code/i)).toBeTruthy();
  });

  it("shows tier badge on template card", async () => {
    mockGet.mockResolvedValue(MOCK_TEMPLATES);
    await renderOpen();
    // T1 appears in tier badge
    expect(screen.getAllByText("T1").length).toBeGreaterThanOrEqual(1);
  });

  it("shows up to 3 skill pills", async () => {
    mockGet.mockResolvedValue(MOCK_TEMPLATES);
    await renderOpen();
    for (const skill of ["web-search", "read-file", "write-file"]) {
      expect(screen.getByText(skill)).toBeTruthy();
    }
  });

  it("shows '+N more' when more than 3 skills", async () => {
    mockGet.mockResolvedValue([
      { id: "tmpl-many", name: "Full Stack", description: "", tier: 1, skills: ["a", "b", "c", "d", "e"] },
    ]);
    await renderOpen();
    expect(screen.getByText("+2")).toBeTruthy();
  });

  it("deploy button calls deploy(t)", async () => {
    mockGet.mockResolvedValue(MOCK_TEMPLATES);
    await renderOpen();
    const [firstCard] = screen.getAllByRole("button", { name: /software engineer/i });
    await act(async () => { firstCard.click(); });
    expect(mockDeploy).toHaveBeenCalledWith(MOCK_TEMPLATES[0]);
  });

  it("shows empty state when api.get rejects (error is swallowed)", async () => {
    mockGet.mockRejectedValue(new Error("server error"));
    await renderOpen();
    await waitFor(() => {
      expect(screen.getByText(/no templates found/i)).toBeTruthy();
    });
  });

  it("renders OrgTemplatesSection inside sidebar", async () => {
    await renderOpen();
    expect(document.querySelector("[data-testid='org-templates-section']")).toBeTruthy();
  });

  it("renders Import Agent Folder button in footer", async () => {
    await renderOpen();
    expect(screen.getByRole("button", { name: /import agent folder/i })).toBeTruthy();
  });

  it("renders Refresh templates button in footer", async () => {
    await renderOpen();
    expect(screen.getByRole("button", { name: /^refresh templates$/i })).toBeTruthy();
  });
});

View File

@ -63,7 +63,6 @@ export function DropTargetBadge() {
<>
{ghostVisible && (
<div
data-testid="ghost-slot"
className="pointer-events-none absolute z-40 rounded-lg border-2 border-dashed border-emerald-400/70 bg-emerald-500/10"
style={{
left: slotTL.x,
@ -74,7 +73,6 @@ export function DropTargetBadge() {
/>
)}
<div
data-testid="drop-badge"
className="pointer-events-none absolute z-50 -translate-x-1/2 -translate-y-full rounded-md bg-emerald-500 px-2 py-0.5 text-[11px] font-medium text-emerald-50 shadow-lg shadow-emerald-950/40"
style={{ left: badge.x, top: badge.y - 6 }}
>

View File

@ -1,253 +0,0 @@
// @vitest-environment jsdom
/**
 * Tests for DropTargetBadge — floating drag affordance rendered over the
 * ReactFlow canvas while a workspace node is being dragged onto a parent.
 *
 * Covers:
 * - Renders nothing when dragOverNodeId is null
 * - Renders nothing when target node not found in store
 * - Renders nothing when getInternalNode returns null
 * - Renders ghost slot + badge when valid target is found
 * - Ghost hidden when slot falls outside parent bounds
 * - Badge text includes the target workspace name
 * - Badge positioned via screen-space coordinates from flowToScreenPosition
 */
import React from "react";
import { render, screen, cleanup } from "@testing-library/react";
import { afterEach, describe, expect, it, vi } from "vitest";
import { DropTargetBadge } from "../DropTargetBadge";
// ─── Mutable store state — NOT itself hoisted; the hoisted useCanvasStore
// mock below closes over this binding and reads it lazily on every selector
// call, so per-test mutation via setStore()/afterEach takes effect. ─────────
let _storeState: {
dragOverNodeId: string | null;
nodes: Array<{
id: string;
data: Record<string, unknown>;
parentId: string | null;
measured?: { width: number; height: number };
}>;
} = {
dragOverNodeId: null,
nodes: [],
};
// NOTE(review): nothing in this file appears to add entries to _subscribers,
// so _notifySubscribers() looks like a no-op — confirm against the full file
// before removing.
const _subscribers = new Set<() => void>();
function _notifySubscribers() {
for (const fn of _subscribers) fn();
}
// Hoisted so the vi.mock("@/store/canvas") factory can reference it.
const _mockUseCanvasStore = vi.hoisted(() => {
const impl = (selector: (s: typeof _storeState) => unknown) => selector(_storeState);
return impl;
});
// Module-level mutable impl — setFlowMock() swaps it out per test.
let _flowImpl: (arg: { x: number; y: number }) => { x: number; y: number } =
({ x, y }) => ({ x: x * 2, y: y * 2 });
// Spy wrapper delegating to _flowImpl; hoisted for the @xyflow/react mock.
let _flowToScreenPosition = vi.hoisted(() =>
vi.fn((arg: { x: number; y: number }) => _flowImpl(arg)),
);
// Defaults to null ("node not in viewport"); tests override per case.
let _getInternalNode = vi.hoisted(() =>
vi.fn<(id: string) => {
internals: { positionAbsolute: { x: number; y: number } };
measured?: { width: number; height: number };
} | null>(() => null),
);
const _mockUseReactFlow = vi.hoisted(() =>
vi.fn(() => ({
getInternalNode: _getInternalNode,
flowToScreenPosition: _flowToScreenPosition,
})),
);
// ─── Module mocks ─────────────────────────────────────────────────────────────
vi.mock("@/store/canvas", () => ({
useCanvasStore: _mockUseCanvasStore,
}));
vi.mock("@xyflow/react", () => ({
useReactFlow: _mockUseReactFlow,
}));
// ─── Helpers ──────────────────────────────────────────────────────────────────
// Shallow-merge partial state into the module-level store snapshot.
function setStore(state: Partial<typeof _storeState>) {
_storeState = { ..._storeState, ...state };
_notifySubscribers();
}
// Helper to set per-test flowToScreenPosition mock — replaces _flowImpl.
function setFlowMock(impl: (arg: { x: number; y: number }) => { x: number; y: number }) {
_flowImpl = impl;
}
// ─── Tests ────────────────────────────────────────────────────────────────────
describe("DropTargetBadge — renders nothing when not dragging", () => {
  // Restore shared module-level mock state so later suites start clean.
  afterEach(() => {
    cleanup();
    _storeState = { dragOverNodeId: null, nodes: [] };
    _getInternalNode.mockReset().mockReturnValue(null);
    _flowImpl = ({ x, y }) => ({ x: x * 2, y: y * 2 });
  });

  /** Render the badge and assert it produced no visible output. */
  const expectEmptyRender = () => {
    render(<DropTargetBadge />);
    expect(document.body.textContent).toBe("");
  };

  it("returns null when dragOverNodeId is null", () => {
    setStore({ dragOverNodeId: null });
    expectEmptyRender();
  });

  it("returns null when target node not found in store nodes array", () => {
    setStore({ dragOverNodeId: "ws-target", nodes: [] });
    expectEmptyRender();
  });
});
describe("DropTargetBadge — renders nothing when getInternalNode is null", () => {
  // Restore shared module-level mock state after each case.
  afterEach(() => {
    cleanup();
    _storeState = { dragOverNodeId: null, nodes: [] };
    _getInternalNode.mockReset().mockReturnValue(null);
    _flowImpl = ({ x, y }) => ({ x: x * 2, y: y * 2 });
  });

  it("returns null when getInternalNode returns null (node not in RF viewport)", () => {
    _getInternalNode.mockReturnValue(null);
    const targetNode = { id: "ws-target", data: { name: "Target WS" }, parentId: null };
    setStore({ dragOverNodeId: "ws-target", nodes: [targetNode] });
    render(<DropTargetBadge />);
    expect(document.body.textContent).toBe("");
  });
});
// These cases script flowToScreenPosition per call-site coordinates, so the
// exact flow→screen mapping (and therefore the component's internal call
// order) is pinned. Keep the coordinate comments in sync when editing.
describe("DropTargetBadge — renders ghost slot + badge for valid drag target", () => {
afterEach(() => {
cleanup();
_storeState = { dragOverNodeId: null, nodes: [] };
_getInternalNode.mockReset().mockReturnValue(null);
_flowImpl = ({ x, y }) => ({ x: x * 2, y: y * 2 });
});
it("renders the drop badge with target name", () => {
_getInternalNode.mockReturnValue({
internals: { positionAbsolute: { x: 100, y: 200 } },
measured: { width: 220, height: 120 },
});
_flowToScreenPosition
.mockReturnValueOnce({ x: 500, y: 400 }) // slotTL
.mockReturnValueOnce({ x: 900, y: 600 }) // slotBR
.mockReturnValueOnce({ x: 700, y: 200 }); // badge
setStore({
dragOverNodeId: "ws-target",
nodes: [
{ id: "ws-target", data: { name: "SEO Workspace" }, parentId: null, measured: { width: 220, height: 120 } },
],
});
render(<DropTargetBadge />);
expect(screen.getByText(/Drop into: SEO Workspace/)).toBeTruthy();
});
it("renders the ghost slot div via data-testid", () => {
// measured.height must be large enough that parentBR.y > slotTL.y=330 so
// ghostVisible = (slotTL.y < parentBR.y) is true.
// parentBR.y = abs.y + measured.height = 200 + h > 330 → h > 130
_getInternalNode.mockReturnValue({
internals: { positionAbsolute: { x: 100, y: 200 } },
measured: { width: 220, height: 500 },
});
// Component calls flowToScreenPosition 5 times (confirmed via debug):
// 1) badge {x:210, y:200} -> {x:420, y:400} (badge center)
// 2) slotTL {x:116, y:330} -> {x:232, y:660} (slot origin)
// 3) slotBR {x:356, y:460} -> {x:712, y:920} (ghost uses this)
// 4) parentTL {x:100, y:200} -> {x:200, y:400} (parent origin)
// 5) parentBR {x:320, y:320} -> {x:640, y:640} (parent corner)
setFlowMock(({ x, y }: { x: number; y: number }) => {
if (x === 210 && y === 200) return { x: 420, y: 400 };
if (x === 116 && y === 330) return { x: 232, y: 660 };
if (x === 356 && y === 460) return { x: 712, y: 920 };
if (x === 100 && y === 200) return { x: 200, y: 400 };
// 5th call: parentBR = abs + {w:220, h:500} = {320, 700}
if (x === 320 && y === 700) return { x: 640, y: 1400 };
return { x: x * 2, y: y * 2 };
});
setStore({
dragOverNodeId: "ws-target",
nodes: [
{ id: "ws-target", data: { name: "Target" }, parentId: null, measured: { width: 220, height: 500 } },
],
});
render(<DropTargetBadge />);
expect(screen.getByTestId("ghost-slot")).toBeTruthy();
// Ghost uses slotBR from 3rd call: slotBR - slotTL = (712-232, 920-660)
expect(screen.getByTestId("ghost-slot").style.left).toBe("232px");
expect(screen.getByTestId("ghost-slot").style.top).toBe("660px");
expect(screen.getByTestId("ghost-slot").style.width).toBe("480px");
expect(screen.getByTestId("ghost-slot").style.height).toBe("260px");
});
it("ghost is hidden when slot falls entirely outside parent bounds", () => {
_getInternalNode.mockReturnValue({
internals: { positionAbsolute: { x: 100, y: 200 } },
measured: { width: 220, height: 120 },
});
// Make the mapped slot degenerate so the ghost is suppressed:
// slotBR.x ≤ parentTL.x makes slotBR.x - slotTL.x < 0 → ghostVisible = false.
setFlowMock(({ x, y }: { x: number; y: number }) => {
if (x === 210 && y === 200) return { x: 420, y: 400 }; // badge (1st call)
if (x === 116 && y === 330) return { x: 232, y: 660 }; // slotTL (2nd call)
if (x === 356 && y === 460) return { x: 150, y: 460 }; // slotBR (3rd): slotBR.x=150 < parentTL.x=200 → hidden
if (x === 100 && y === 200) return { x: 200, y: 400 }; // parentTL (4th call)
if (x === 320 && y === 320) return { x: 640, y: 640 }; // parentBR (5th call)
return { x: x * 2, y: y * 2 };
});
setStore({
dragOverNodeId: "ws-target",
nodes: [
{ id: "ws-target", data: { name: "Tiny" }, parentId: null, measured: { width: 220, height: 120 } },
],
});
render(<DropTargetBadge />);
// Badge should still render, ghost should not
expect(screen.getByText(/Drop into: Tiny/)).toBeTruthy();
expect(screen.queryByTestId("ghost-slot")).toBeNull();
});
it("badge is absolutely positioned with left and top from flowToScreenPosition", () => {
_getInternalNode.mockReturnValue({
internals: { positionAbsolute: { x: 100, y: 200 } },
measured: { width: 220, height: 120 },
});
setFlowMock(({ x, y }: { x: number; y: number }) => {
if (x === 210 && y === 200) return { x: 420, y: 400 };
if (x === 116 && y === 330) return { x: 232, y: 660 };
if (x === 356 && y === 460) return { x: 712, y: 920 };
if (x === 100 && y === 200) return { x: 200, y: 400 };
if (x === 320 && y === 320) return { x: 640, y: 640 };
return { x: x * 2, y: y * 2 };
});
setStore({
dragOverNodeId: "ws-target",
nodes: [
{ id: "ws-target", data: { name: "Target" }, parentId: null, measured: { width: 220, height: 120 } },
],
});
render(<DropTargetBadge />);
expect(screen.getByTestId("drop-badge")).toBeTruthy();
// Badge uses 1st call: {x:210,y:200} -> {x:420,y:400}, badge.y = 400-6 = 394
expect(screen.getByTestId("drop-badge").style.left).toBe("420px");
expect(screen.getByTestId("drop-badge").style.top).toBe("394px");
expect(screen.getByText(/Drop into: Target/)).toBeTruthy();
});
});

View File

@ -1,97 +0,0 @@
// @vitest-environment jsdom
/**
 * TopBar — canvas header scaffold with logo, canvas name, New Agent button,
 * and SettingsButton integration point.
 *
 * Coverage:
 * - Renders header with logo and canvas name (default and custom)
 * - New Agent button present and clickable
 * - SettingsButton rendered (via mock)
 * - Ref forwarding wired (settingsGearRef passed as ref prop)
 *
 * NOTE: No @testing-library/jest-dom — use DOM APIs.
 */
import { afterEach, describe, expect, it, vi } from "vitest";
import { cleanup, fireEvent, render } from "@testing-library/react";
import React from "react";
import { TopBar } from "../TopBar";
// Replace SettingsButton with a ref-forwarding stub button so ref wiring can
// be exercised without pulling in the real settings tree.
vi.mock("@/components/settings/SettingsButton", () => ({
SettingsButton: React.forwardRef<HTMLButtonElement, object>(
(_props, ref) => <button ref={ref} aria-label="Settings" type="button"></button>,
),
}));
afterEach(() => {
cleanup();
vi.restoreAllMocks();
});
// ─── Render ────────────────────────────────────────────────────────────────────
describe("TopBar — render", () => {
  it("renders the header element", () => {
    render(<TopBar />);
    const header = document.querySelector("header");
    expect(header).toBeTruthy();
  });
  it("shows default canvas name 'Canvas'", () => {
    render(<TopBar />);
    expect(document.body.textContent).toContain("Canvas");
  });
  it("shows custom canvas name when provided", () => {
    render(<TopBar canvasName="Production Canvas" />);
    // FIX: the previous companion assertion `not.toContain("Canvas\n")` was
    // vacuous — textContent concatenates text nodes without newlines, so it
    // could never fail — and a plain `not.toContain("Canvas")` is impossible
    // because the custom name itself contains "Canvas". Asserting the full
    // custom string is the strongest check without coupling to TopBar's DOM.
    expect(document.body.textContent).toContain("Production Canvas");
  });
  it("renders New Agent button", () => {
    render(<TopBar />);
    // Text-based lookup: the button label is plain text, not an aria-label.
    const btn = Array.from(document.querySelectorAll("button")).find(
      (b) => b.textContent?.includes("New Agent"),
    );
    expect(btn).toBeTruthy();
  });
  it("renders SettingsButton", () => {
    render(<TopBar />);
    // The mocked SettingsButton stub renders aria-label="Settings".
    const settingsBtn = document.querySelector('button[aria-label="Settings"]');
    expect(settingsBtn).toBeTruthy();
  });
  it("renders logo icon", () => {
    render(<TopBar />);
    // The logo is a decorative span (aria-hidden) containing the cloud glyph.
    const logo = Array.from(document.querySelectorAll("span")).find(
      (s) => s.getAttribute("aria-hidden") === "true",
    );
    expect(logo).toBeTruthy();
    expect(logo?.textContent).toContain("☁");
  });
});
// ─── Interaction ──────────────────────────────────────────────────────────────
describe("TopBar — interaction", () => {
  /** First rendered <button> whose visible text contains the given label. */
  const findButtonByText = (label: string) =>
    Array.from(document.querySelectorAll("button")).find((candidate) =>
      candidate.textContent?.includes(label),
    );

  it("New Agent button is in the DOM and not disabled", () => {
    render(<TopBar />);
    const newAgentBtn = findButtonByText("New Agent");
    expect(newAgentBtn).toBeTruthy();
    expect(newAgentBtn!.getAttribute("disabled")).toBeNull();
  });

  it("renders without crashing with empty canvasName", () => {
    render(<TopBar canvasName="" />);
    expect(document.querySelector("header")).toBeTruthy();
  });

  it("renders without crashing with long canvasName", () => {
    const veryLongName = "A".repeat(200);
    render(<TopBar canvasName={veryLongName} />);
    expect(document.body.textContent).toContain(veryLongName);
  });
});

View File

@ -101,20 +101,6 @@ describe("Esc — deselect / close context menu", () => {
fireEvent.keyDown(window, { key: "Escape" });
expect(mockStoreState.selectNode).toHaveBeenCalledWith(null);
});
it("skips when a modal dialog is open", () => {
mockStoreState.contextMenu = null;
mockStoreState.selectedNodeId = "n1";
renderWithProvider();
const dialog = document.createElement("div");
dialog.setAttribute("role", "dialog");
dialog.setAttribute("aria-modal", "true");
document.body.appendChild(dialog);
fireEvent.keyDown(window, { key: "Escape" });
expect(mockStoreState.clearSelection).not.toHaveBeenCalled();
expect(mockStoreState.selectNode).not.toHaveBeenCalled();
document.body.removeChild(dialog);
});
});
describe("Enter — hierarchy navigation", () => {
@ -150,17 +136,6 @@ describe("Enter — hierarchy navigation", () => {
fireEvent.keyDown(window, { key: "Enter" });
expect(mockStoreState.selectNode).not.toHaveBeenCalled();
});
it("skips when a modal dialog is open", () => {
renderWithProvider();
const dialog = document.createElement("div");
dialog.setAttribute("role", "dialog");
dialog.setAttribute("aria-modal", "true");
document.body.appendChild(dialog);
fireEvent.keyDown(window, { key: "Enter" });
expect(mockStoreState.selectNode).not.toHaveBeenCalled();
document.body.removeChild(dialog);
});
});
describe("Cmd+]/[ — z-order bump", () => {
@ -185,17 +160,6 @@ describe("Cmd+]/[ — z-order bump", () => {
fireEvent.keyDown(window, { key: "]", ctrlKey: true });
expect(mockStoreState.bumpZOrder).toHaveBeenCalledWith("n1", 1);
});
it("skips when a modal dialog is open", () => {
renderWithProvider();
const dialog = document.createElement("div");
dialog.setAttribute("role", "dialog");
dialog.setAttribute("aria-modal", "true");
document.body.appendChild(dialog);
fireEvent.keyDown(window, { key: "]", metaKey: true });
expect(mockStoreState.bumpZOrder).not.toHaveBeenCalled();
document.body.removeChild(dialog);
});
});
describe("Z — zoom-to-team", () => {
@ -248,17 +212,6 @@ describe("Z — zoom-to-team", () => {
expect(dispatchedEvents).toHaveLength(0);
document.body.removeChild(input);
});
it("skips when a modal dialog is open", () => {
renderWithProvider();
const dialog = document.createElement("div");
dialog.setAttribute("role", "dialog");
dialog.setAttribute("aria-modal", "true");
document.body.appendChild(dialog);
fireEvent.keyDown(window, { key: "z" });
expect(dispatchedEvents).toHaveLength(0);
document.body.removeChild(dialog);
});
});
describe("Arrow keys — keyboard node movement", () => {

View File

@ -13,9 +13,7 @@ function hasChildren(nodeId: string, nodes: Node<WorkspaceNodeData>[]): boolean
/**
* Canvas-wide keyboard shortcuts. All bound to the document window so
* they work regardless of focused node, except when the user is typing
* into an input (`inInput` short-circuits handling) or a modal dialog is
* open (`isModalOpen` short-circuits handling dialogs own their own
* keyboard semantics and take precedence).
* into an input (`inInput` short-circuits handling).
*
* Esc close context menu, clear selection, deselect
* Enter descend into selected node's first child
@ -27,10 +25,6 @@ function hasChildren(nodeId: string, nodes: Node<WorkspaceNodeData>[]): boolean
* Cmd/Ctrl+Arrow resize selected node ( height, width)
* Cmd/Ctrl+Shift+Arrow resize by 2px per press (fine control)
*/
/** Returns true when a modal dialog (role=dialog, aria-modal=true) is open. */
const isModalOpen = () =>
document.querySelector('[role="dialog"][aria-modal="true"]') !== null;
export function useKeyboardShortcuts() {
useEffect(() => {
const handler = (e: KeyboardEvent) => {
@ -42,7 +36,6 @@ export function useKeyboardShortcuts() {
(e.target as HTMLElement).isContentEditable;
if (e.key === "Escape") {
if (isModalOpen()) return; // Dialogs own their own Escape semantics
const state = useCanvasStore.getState();
if (state.contextMenu) {
state.closeContextMenu();
@ -54,9 +47,8 @@ export function useKeyboardShortcuts() {
}
// Figma-style hierarchy navigation. Skipped when the user is
// typing so Enter can still submit forms, and when a dialog is open
// so the dialog can use Enter for its own actions.
if (!inInput && !isModalOpen() && (e.key === "Enter" || e.key === "NumpadEnter")) {
// typing so Enter can still submit forms.
if (!inInput && (e.key === "Enter" || e.key === "NumpadEnter")) {
e.preventDefault();
const state = useCanvasStore.getState();
const id = state.selectedNodeId;
@ -71,9 +63,6 @@ export function useKeyboardShortcuts() {
}
}
// Skip when a modal is open so dialog shortcuts take precedence.
if (isModalOpen()) return;
if (
!inInput &&
(e.metaKey || e.ctrlKey) &&
@ -122,7 +111,7 @@ export function useKeyboardShortcuts() {
if (!selectedId) return;
// Skip when a modal/dialog is already open — dialogs own their own
// arrow-key semantics and shouldn't trigger canvas moves.
if (isModalOpen()) return;
if (document.querySelector('[role="dialog"][aria-modal="true"]')) return;
e.preventDefault();
const step = e.shiftKey ? 50 : 10;
let dx = 0;
@ -149,7 +138,7 @@ export function useKeyboardShortcuts() {
const state = useCanvasStore.getState();
const selectedId = state.selectedNodeId;
if (!selectedId) return;
if (isModalOpen()) return;
if (document.querySelector('[role="dialog"][aria-modal="true"]')) return;
e.preventDefault();
const step = e.shiftKey ? 2 : 10;
const node = state.nodes.find((n) => n.id === selectedId);

View File

@ -54,14 +54,9 @@ export function MobileChat({
// user sees their prior thread on entry. The store is updated by the
// socket → ChatTab flows the desktop runs; on mobile we read from the
// same buffer to keep state coherent across viewports.
// NOTE: do NOT use `?? []` in the selector — Zustand uses Object.is
// for selector equality. A fallback `?? []` creates a new [] reference on
// every store update when agentMessages[agentId] is undefined, causing an
// infinite re-render loop (React error #185 / Maximum update depth
// exceeded). The undefined case is handled by the initializer below.
const storedMessages = useCanvasStore((s) => s.agentMessages[agentId]);
const storedMessages = useCanvasStore((s) => s.agentMessages[agentId] ?? []);
const [messages, setMessages] = useState<ChatMessage[]>(() =>
(storedMessages ?? []).map((m) => ({
storedMessages.map((m) => ({
id: m.id,
role: "agent",
text: m.content,

View File

@ -1,115 +0,0 @@
// @vitest-environment jsdom
/**
 * AgentCard — mobile agent row card.
 *
 * Per WCAG 2.1 AA:
 * - Rendered as <button> with aria-label composing accessible name
 * - aria-label includes: name, status, tier, remote flag
 *
 * NOTE: No @testing-library/jest-dom — use DOM APIs.
 */
import { afterEach, describe, expect, it, vi } from "vitest";
import { cleanup, render } from "@testing-library/react";
import React from "react";
import { AgentCard, type MobileAgent } from "../components";
// Unmount rendered trees and restore vi spies/modules between tests so
// mock state never leaks across cases.
afterEach(() => {
cleanup();
vi.restoreAllMocks();
vi.resetModules();
});
// Baseline happy-path fixture: local (non-remote) agent, online, tier T2.
const onlineAgent: MobileAgent = {
id: "ws-1",
name: "My Agent",
tag: "claude-code",
tier: "T2",
status: "online",
remote: false,
runtime: "claude-code",
skills: 3,
calls: 12,
desc: "Handles customer support",
parentId: null,
};
// Contrast fixture: remote + failed + child of ws-1 — exercises the
// "remote" aria-label branch and a non-happy status.
const remoteFailedAgent: MobileAgent = {
id: "ws-2",
name: "Remote Worker",
tag: "external",
tier: "T4",
status: "failed",
remote: true,
runtime: "external",
skills: 5,
calls: 0,
desc: "",
parentId: "ws-1",
};
// ─── Render ───────────────────────────────────────────────────────────────────
describe("AgentCard — render", () => {
  /** Render the card for the given agent and return the underlying <button>. */
  function renderCard(agent: MobileAgent, compact?: boolean): HTMLButtonElement {
    render(<AgentCard agent={agent} dark={false} onClick={vi.fn()} compact={compact} />);
    return document.querySelector("button") as HTMLButtonElement;
  }
  /** Accessible name of the card button (empty string when absent). */
  const labelOf = (btn: HTMLButtonElement) => btn.getAttribute("aria-label") ?? "";

  it("renders as a button", () => {
    expect(renderCard(onlineAgent)).toBeTruthy();
  });

  it("button has aria-label with name, status, tier", () => {
    const label = labelOf(renderCard(onlineAgent));
    expect(label).toContain("My Agent");
    expect(label).toContain("online");
    expect(label).toContain("T2");
  });

  it("aria-label includes remote for remote agents", () => {
    const label = labelOf(renderCard(remoteFailedAgent));
    expect(label).toContain("Remote Worker");
    expect(label).toContain("failed");
    expect(label).toContain("T4");
    expect(label).toContain("remote");
  });

  it("aria-label omits remote for non-remote agents", () => {
    expect(labelOf(renderCard(onlineAgent))).not.toContain("remote");
  });

  it("renders agent name text inside the button", () => {
    expect(renderCard(onlineAgent).textContent).toContain("My Agent");
  });

  it("compact prop reduces padding", () => {
    const compactBtn = renderCard(onlineAgent, true);
    // compact uses "12px 14px" padding vs "14px 16px" default
    expect(compactBtn.getAttribute("style") ?? "").toContain("padding");
  });
});
// ─── Interaction ─────────────────────────────────────────────────────────────
describe("AgentCard — interaction", () => {
  it("calls onClick when button is clicked", () => {
    const handleClick = vi.fn();
    render(<AgentCard agent={onlineAgent} dark={false} onClick={handleClick} />);
    (document.querySelector("button") as HTMLButtonElement).click();
    expect(handleClick).toHaveBeenCalledTimes(1);
  });

  it("renders without onClick (optional prop)", () => {
    // Should not throw
    const renderWithoutHandler = () =>
      render(<AgentCard agent={onlineAgent} dark={false} />);
    expect(renderWithoutHandler).not.toThrow();
  });
});

View File

@ -1,118 +0,0 @@
// @vitest-environment jsdom
/**
 * FilterChips — mobile agent filter toolbar.
 *
 * Per WCAG 2.1 AA / ARIA radio group pattern:
 * - Container has role="toolbar" + aria-label
 * - Each button has role="radio" + aria-checked
 * - Icon spans have aria-hidden="true"
 * - Only one radio can be checked at a time (single-select filter)
 *
 * NOTE: No @testing-library/jest-dom — use DOM APIs.
 */
import { afterEach, describe, expect, it, vi } from "vitest";
import { cleanup, fireEvent, render } from "@testing-library/react";
import React from "react";
import { FilterChips, type AgentFilter } from "../components";
afterEach(() => {
cleanup();
vi.restoreAllMocks();
vi.resetModules();
});
// Chip counts shared by every test; keys mirror the four filter values.
const defaultCounts = { all: 12, online: 8, issue: 2, paused: 2 };
// ─── Render ───────────────────────────────────────────────────────────────────
describe("FilterChips — render", () => {
  it("renders 4 filter buttons", () => {
    render(<FilterChips value="all" onChange={vi.fn()} dark={false} counts={defaultCounts} />);
    const buttons = document.querySelectorAll('[role="radio"]');
    expect(buttons.length).toBe(4);
  });
  it("container has role=toolbar and aria-label", () => {
    render(<FilterChips value="all" onChange={vi.fn()} dark={false} counts={defaultCounts} />);
    const toolbar = document.querySelector('[role="toolbar"]');
    expect(toolbar).toBeTruthy();
    expect(toolbar?.getAttribute("aria-label")).toBe("Filter agents");
  });
  it("each radio exposes an aria-checked state", () => {
    render(<FilterChips value="all" onChange={vi.fn()} dark={false} counts={defaultCounts} />);
    const buttons = document.querySelectorAll('[role="radio"]');
    // FIX: the previous body re-asserted role="radio" on nodes selected BY
    // [role="radio"] — a tautology that could never fail. The ARIA radio
    // pattern additionally requires each radio to carry an aria-checked
    // state, which the selector does NOT imply, so assert that instead.
    expect(buttons.length).toBe(4);
    buttons.forEach((btn) => {
      expect(["true", "false"]).toContain(btn.getAttribute("aria-checked"));
    });
  });
  it("active filter has aria-checked=true, others false", () => {
    render(<FilterChips value="issue" onChange={vi.fn()} dark={false} counts={defaultCounts} />);
    const buttons = document.querySelectorAll('[role="radio"]');
    buttons.forEach((btn) => {
      // Chips are matched by their visible label prefix (e.g. "Issues 2").
      const label = btn.textContent ?? "";
      if (label.startsWith("Issues")) {
        expect(btn.getAttribute("aria-checked")).toBe("true");
      } else {
        expect(btn.getAttribute("aria-checked")).toBe("false");
      }
    });
  });
  it("count spans have aria-hidden=true", () => {
    render(<FilterChips value="all" onChange={vi.fn()} dark={false} counts={defaultCounts} />);
    const hidden = document.querySelectorAll('[aria-hidden="true"]');
    // Each chip has one count span marked aria-hidden
    expect(hidden.length).toBeGreaterThanOrEqual(4);
  });
});
// ─── Interaction ─────────────────────────────────────────────────────────────
describe("FilterChips — interaction", () => {
it("calls onChange with correct filter id when clicked", () => {
const onChange = vi.fn();
render(<FilterChips value="all" onChange={onChange} dark={false} counts={defaultCounts} />);
const buttons = document.querySelectorAll('[role="radio"]');
const onlineBtn = Array.from(buttons).find((b) => b.textContent?.startsWith("Online")) as Element;
fireEvent.click(onlineBtn);
expect(onChange).toHaveBeenCalledWith("online");
});
it("calls onChange when the already-active filter is clicked (component does not guard)", () => {
const onChange = vi.fn();
render(<FilterChips value="all" onChange={onChange} dark={false} counts={defaultCounts} />);
const buttons = document.querySelectorAll('[role="radio"]');
const allBtn = Array.from(buttons).find((b) => b.textContent?.startsWith("All")) as Element;
fireEvent.click(allBtn);
// Component calls onChange even for the already-active filter;
// the guard belongs at the consumer level (MobileHome) if needed.
expect(onChange).toHaveBeenCalledWith("all");
});
it("updating value prop changes aria-checked", () => {
const { rerender } = render(
<FilterChips value="all" onChange={vi.fn()} dark={false} counts={defaultCounts} />,
);
// Hold a reference to the original DOM node: rerender must update it in
// place (same element identity), so the attribute flip is observable on
// the node captured BEFORE the rerender.
const allBtn = document.querySelector('[id="filter-all"]') as Element;
expect(allBtn.getAttribute("aria-checked")).toBe("true");
rerender(<FilterChips value="paused" onChange={vi.fn()} dark={false} counts={defaultCounts} />);
expect(allBtn.getAttribute("aria-checked")).toBe("false");
const pausedBtn = document.querySelector('[id="filter-paused"]') as Element;
expect(pausedBtn.getAttribute("aria-checked")).toBe("true");
});
it("all filter labels are present", () => {
render(<FilterChips value="all" onChange={vi.fn()} dark={false} counts={defaultCounts} />);
const texts = Array.from(document.querySelectorAll('[role="radio"]')).map((b) =>
b.textContent?.trim(),
);
// startsWith: each chip's text begins with its label, followed by a count.
expect(texts.some((t) => t?.startsWith("All"))).toBe(true);
expect(texts.some((t) => t?.startsWith("Online"))).toBe(true);
expect(texts.some((t) => t?.startsWith("Issues"))).toBe(true);
expect(texts.some((t) => t?.startsWith("Paused"))).toBe(true);
});
});

View File

@ -1,185 +0,0 @@
// @vitest-environment jsdom
/**
* MobileCanvas mobile mini-graph with pinch-zoom and tap-to-open.
*
* Per WCAG 2.1 AA / mobile interaction:
* - Reset button visible only after zoom/pan (zoomed state)
* - Spawn FAB always visible with aria-label
* - Legend always visible with all 5 status types
* - WorkspacePill shows node count
* - Node buttons clickable with onOpen(id) callback
*
* NOTE: No @testing-library/jest-dom use DOM APIs.
*/
import { afterEach, describe, expect, it, vi } from "vitest";
import { cleanup, fireEvent, render } from "@testing-library/react";
import React from "react";
import { MobileCanvas } from "../MobileCanvas";
// ─── Mock dependencies ──────────────────────────────────────────────────────────
// Pin the theme to dark so theme-dependent styling cannot vary between runs.
vi.mock("@/lib/theme-provider", () => ({
  useTheme: () => ({ theme: "dark", resolvedTheme: "dark", setTheme: vi.fn() }),
}));
// Three fixture nodes covering distinct statuses/tiers/runtimes;
// ws-2 points at ws-1 via parentId so parent/child paths are exercised.
const mockNodes = [
  {
    id: "ws-1",
    position: { x: 100, y: 200 },
    data: {
      name: "Alpha Agent",
      status: "online",
      tier: 2,
      parentId: null,
      runtime: "langgraph",
      activeTasks: 0,
      role: "researcher",
    },
  },
  {
    id: "ws-2",
    position: { x: 300, y: 400 },
    data: {
      name: "Beta Agent",
      status: "degraded",
      tier: 3,
      parentId: "ws-1",
      runtime: "claude-code",
      activeTasks: 1,
      role: "developer",
    },
  },
  {
    id: "ws-3",
    position: { x: 0, y: 0 },
    data: {
      name: "Gamma Agent",
      status: "offline",
      tier: 1,
      parentId: null,
      runtime: "hermes",
      activeTasks: 0,
      role: "analyst",
    },
  },
];
// Canvas-store mock: supports both selector-style calls
// (`useCanvasStore(s => ...)`) and bare calls, backed by the fixtures above.
vi.mock("@/store/canvas", () => ({
  useCanvasStore: vi.fn((selector) => {
    if (typeof selector === "function") {
      return selector({ nodes: mockNodes });
    }
    return mockNodes;
  }),
  summarizeWorkspaceCapabilities: vi.fn((data: { status?: string; role?: string }) => ({
    runtime: data.status ? "langgraph" : "unknown",
    skillCount: 0,
    currentTask: data.role ?? "",
  })),
}));
// Unmount rendered trees and restore spies after every test.
afterEach(() => {
  cleanup();
  vi.restoreAllMocks();
});
// ─── Render ────────────────────────────────────────────────────────────────────
// Chrome-level rendering: container, legend, spawn FAB, node buttons, reset
// button visibility. All tests render against the 3-node store fixture.
describe("MobileCanvas — render", () => {
  it("renders the canvas container", () => {
    render(
      <MobileCanvas dark={true} onOpen={vi.fn()} onSpawn={vi.fn()} />,
    );
    const container = document.querySelector('[style*="position: absolute"]');
    expect(container).toBeTruthy();
  });
  it("renders the legend with all 5 status types", () => {
    render(
      <MobileCanvas dark={true} onOpen={vi.fn()} onSpawn={vi.fn()} />,
    );
    const legend = Array.from(document.querySelectorAll("div")).find(
      (d) => d.textContent?.includes("Legend"),
    );
    expect(legend).toBeTruthy();
    expect(legend?.textContent).toContain("online");
    expect(legend?.textContent).toContain("starting");
    expect(legend?.textContent).toContain("degraded");
    expect(legend?.textContent).toContain("failed");
    expect(legend?.textContent).toContain("paused");
  });
  it("renders spawn FAB with correct aria-label", () => {
    render(
      <MobileCanvas dark={true} onOpen={vi.fn()} onSpawn={vi.fn()} />,
    );
    const fab = document.querySelector('button[aria-label="Spawn new agent"]');
    expect(fab).toBeTruthy();
  });
  it("renders node buttons for each store node", () => {
    render(
      <MobileCanvas dark={true} onOpen={vi.fn()} onSpawn={vi.fn()} />,
    );
    const buttons = document.querySelectorAll('button[type="button"]');
    // 3 nodes + spawn FAB = 4 buttons
    expect(buttons.length).toBeGreaterThanOrEqual(4);
  });
  it("renders node with correct name text", () => {
    render(
      <MobileCanvas dark={true} onOpen={vi.fn()} onSpawn={vi.fn()} />,
    );
    expect(document.body.textContent).toContain("Alpha Agent");
    expect(document.body.textContent).toContain("Beta Agent");
    expect(document.body.textContent).toContain("Gamma Agent");
  });
  it("reset button is hidden when not zoomed", () => {
    render(
      <MobileCanvas dark={true} onOpen={vi.fn()} onSpawn={vi.fn()} />,
    );
    const reset = document.querySelector('button[aria-label="Reset zoom"]');
    expect(reset).toBeNull();
  });
  // NOTE(review): the test name says "regardless of node count" but the node
  // count is never varied — the store mock always supplies 3 nodes. Confirm
  // intent or add an empty-store variant.
  it("renders FAB and legend regardless of node count", () => {
    render(
      <MobileCanvas dark={true} onOpen={vi.fn()} onSpawn={vi.fn()} />,
    );
    const fab = document.querySelector('button[aria-label="Spawn new agent"]');
    expect(fab).toBeTruthy();
    const legend = Array.from(document.querySelectorAll("div")).find(
      (d) => d.textContent?.includes("Legend"),
    );
    expect(legend).toBeTruthy();
  });
});
// ─── Interaction ──────────────────────────────────────────────────────────────
describe("MobileCanvas — interaction", () => {
  it("onOpen called with correct node id when node button clicked", () => {
    const handleOpen = vi.fn();
    render(
      <MobileCanvas dark={true} onOpen={handleOpen} onSpawn={vi.fn()} />,
    );
    const allButtons = document.querySelectorAll('button[type="button"]');
    const alphaButtons = Array.from(allButtons).filter((btn) =>
      btn.textContent?.includes("Alpha Agent"),
    );
    expect(alphaButtons.length).toBeGreaterThanOrEqual(1);
    alphaButtons[0]!.click();
    expect(handleOpen).toHaveBeenCalledWith("ws-1");
  });

  it("onSpawn called when spawn FAB is clicked", () => {
    const handleSpawn = vi.fn();
    render(
      <MobileCanvas dark={true} onOpen={vi.fn()} onSpawn={handleSpawn} />,
    );
    const spawnFab = document.querySelector('button[aria-label="Spawn new agent"]')!;
    spawnFab.click();
    expect(handleSpawn).toHaveBeenCalledTimes(1);
  });
});

View File

@ -1,323 +0,0 @@
// @vitest-environment jsdom
/**
* MobileChat mobile message thread + composer + sub-tabs.
*
* Per spec §04: wired to /workspaces/:id/a2a (method message/send).
* Slimmer surface than desktop ChatTab: no attachments, no topology overlay.
*
* NOTE: No @testing-library/jest-dom use DOM APIs.
*/
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { cleanup, render } from "@testing-library/react";
import React from "react";
import { MobileChat } from "../MobileChat";
// ─── Mock store ───────────────────────────────────────────────────────────────
const mockAgentId = "ws-chat-test";
const mockOnBack = vi.fn();
// Module-level mutable state for the mock store.
// Tests mutate `nodes`/`agentMessages` between cases; beforeEach resets both.
const mockStoreState = {
  nodes: [] as Array<{
    id: string;
    position: { x: number; y: number };
    data: Record<string, unknown>;
    width?: number;
    height?: number;
  }>,
  agentMessages: {} as Record<string, Array<{ id: string; content: string; timestamp: string }>>,
};
// The store mock exposes both the hook form (selector over mockStoreState)
// and a zustand-style static `getState()`.
vi.mock("@/store/canvas", () => ({
  useCanvasStore: Object.assign(
    vi.fn((sel) => sel(mockStoreState)),
    { getState: () => mockStoreState },
  ),
  // Reimplements capability summarization over the node's agentCard:
  // skill names fall back to ids, runtime falls back from node data to card.
  summarizeWorkspaceCapabilities: vi.fn((data: Record<string, unknown>) => {
    const agentCard = data.agentCard as Record<string, unknown> | null;
    const skills = Array.isArray(agentCard?.skills)
      ? (agentCard.skills as Array<Record<string, unknown>>).map(
          (s) => String(s.name || s.id || ""),
        ).filter(Boolean)
      : [];
    return {
      runtime: (typeof data.runtime === "string" && data.runtime)
        ? data.runtime
        : (typeof agentCard?.runtime === "string" ? String(agentCard.runtime) : null),
      skills,
      skillCount: skills.length,
      currentTask: String(data.currentTask ?? ""),
      hasActiveTask: String(data.currentTask ?? "").trim().length > 0,
    };
  }),
}));
// ─── Mock API ─────────────────────────────────────────────────────────────────
// vi.hoisted so the spy exists when the hoisted vi.mock factory below runs.
const { mockApiPost } = vi.hoisted(() => ({
  mockApiPost: vi.fn().mockResolvedValue({ result: { parts: [] } }),
}));
vi.mock("@/lib/api", () => ({
  api: { post: mockApiPost },
}));
// ─── Fixtures ────────────────────────────────────────────────────────────────
// Online agent with an agentCard (runtime + one skill) — the happy path.
const onlineNode = {
  id: mockAgentId,
  position: { x: 0, y: 0 },
  data: {
    name: "Chat Agent",
    status: "online",
    tier: 2,
    agentCard: {
      runtime: "claude-code",
      skills: [{ name: "web-search" }],
    },
    currentTask: "",
    activeTasks: 0,
    collapsed: false,
    role: "agent",
    lastErrorRate: 0,
    lastSampleError: "",
    url: "",
    parentId: null,
    runtime: "claude-code",
    needsRestart: false,
  },
};
// Offline agent, no agentCard — used to assert the composer is disabled.
const offlineNode = {
  id: "ws-offline",
  position: { x: 0, y: 0 },
  data: {
    name: "Offline Agent",
    status: "offline",
    tier: 1,
    agentCard: null,
    currentTask: "",
    activeTasks: 0,
    collapsed: false,
    role: "agent",
    lastErrorRate: 0,
    lastSampleError: "",
    url: "",
    parentId: null,
    runtime: "claude-code",
    needsRestart: false,
  },
};
// Degraded agent — composer should still render for this status.
const degradedNode = {
  id: "ws-degraded",
  position: { x: 0, y: 0 },
  data: {
    name: "Degraded Agent",
    status: "degraded",
    tier: 3,
    agentCard: null,
    currentTask: "",
    activeTasks: 0,
    collapsed: false,
    role: "agent",
    lastErrorRate: 0,
    lastSampleError: "",
    url: "",
    parentId: null,
    runtime: "claude-code",
    needsRestart: false,
  },
};
// ─── Helpers ─────────────────────────────────────────────────────────────────
/** Mount MobileChat for `agentId`; dark mode defaults to off. */
function renderChat(agentId: string, dark = false) {
  const props = { agentId, dark, onBack: mockOnBack };
  return render(<MobileChat {...props} />);
}
// ─── Setup / teardown ─────────────────────────────────────────────────────────
// Reset spies and mock-store state so each test starts from an empty store.
beforeEach(() => {
  mockOnBack.mockClear();
  mockStoreState.nodes = [];
  mockStoreState.agentMessages = {};
  mockApiPost.mockClear();
});
afterEach(() => {
  cleanup();
  vi.clearAllMocks();
});
// ─── Not found ───────────────────────────────────────────────────────────────
describe("MobileChat — agent not found", () => {
  it('renders "Agent not found." when node is absent', () => {
    mockStoreState.nodes = [onlineNode];
    const { container } = renderChat("nonexistent-id");
    expect(container.textContent ?? "").toContain("Agent not found.");
  });
});
// ─── Header ──────────────────────────────────────────────────────────────────
describe("MobileChat — header", () => {
  beforeEach(() => {
    mockStoreState.nodes = [onlineNode];
  });
  it("renders Back button with aria-label", () => {
    const { container } = renderChat(mockAgentId);
    const backBtn = container.querySelector('[aria-label="Back"]');
    expect(backBtn).toBeTruthy();
  });
  it("Back button calls onBack", () => {
    const { container } = renderChat(mockAgentId);
    const backBtn = container.querySelector('[aria-label="Back"]') as HTMLButtonElement;
    backBtn.click();
    expect(mockOnBack).toHaveBeenCalledTimes(1);
  });
  it("renders agent name in header", () => {
    const { container } = renderChat(mockAgentId);
    expect(container.textContent ?? "").toContain("Chat Agent");
  });
  it("renders a More button", () => {
    const { container } = renderChat(mockAgentId);
    const moreBtn = container.querySelector('[aria-label="More"]');
    expect(moreBtn).toBeTruthy();
  });
  it("renders footer with agentId", () => {
    const { container } = renderChat(mockAgentId);
    expect(container.textContent ?? "").toContain(mockAgentId);
  });
});
// ─── Composer ────────────────────────────────────────────────────────────────
describe("MobileChat — composer", () => {
  beforeEach(() => {
    mockStoreState.nodes = [onlineNode];
  });
  it("renders a textarea for message input", () => {
    const { container } = renderChat(mockAgentId);
    const textarea = container.querySelector("textarea");
    expect(textarea).toBeTruthy();
  });
  it("textarea has placeholder text", () => {
    const { container } = renderChat(mockAgentId);
    const textarea = container.querySelector("textarea") as HTMLTextAreaElement;
    expect(textarea.placeholder).toBeTruthy();
    expect(textarea.placeholder).toContain("Send a message");
  });
  it("renders a Send button with aria-label", () => {
    const { container } = renderChat(mockAgentId);
    const sendBtn = container.querySelector('[aria-label="Send"]');
    expect(sendBtn).toBeTruthy();
  });
  it("Send button is disabled when textarea is empty (no draft)", () => {
    const { container } = renderChat(mockAgentId);
    const sendBtn = container.querySelector('[aria-label="Send"]') as HTMLButtonElement;
    expect(sendBtn.disabled).toBe(true);
  });
});
// ─── Tabs ─────────────────────────────────────────────────────────────────────
describe("MobileChat — tabs", () => {
  beforeEach(() => {
    mockStoreState.nodes = [onlineNode];
  });
  it("renders My Chat and Agent Comms tab labels", () => {
    const { container } = renderChat(mockAgentId);
    const text = container.textContent ?? "";
    expect(text).toContain("My Chat");
    expect(text).toContain("Agent Comms");
  });
  it("defaults to My Chat tab", () => {
    const { container } = renderChat(mockAgentId);
    // My Chat is the default; if there are no messages it should show the empty state
    expect(container.textContent ?? "").toContain("My Chat");
  });
});
// ─── Empty state ─────────────────────────────────────────────────────────────
describe("MobileChat — empty state", () => {
  beforeEach(() => {
    mockStoreState.nodes = [onlineNode];
  });
  it('shows "Send a message to start chatting." when no messages', () => {
    const { container } = renderChat(mockAgentId);
    expect(container.textContent ?? "").toContain("Send a message to start chatting.");
  });
  it("shows no messages when agentMessages[agentId] is absent (undefined)", () => {
    // Explicitly set to empty to simulate no stored messages
    mockStoreState.agentMessages = {};
    const { container } = renderChat(mockAgentId);
    expect(container.textContent ?? "").toContain("Send a message to start chatting.");
  });
});
// ─── Agent status ────────────────────────────────────────────────────────────
// Composer availability by agent status: online/degraded usable, offline disabled.
describe("MobileChat — agent status", () => {
  it("renders composer for online agent", () => {
    mockStoreState.nodes = [onlineNode];
    const { container } = renderChat(mockAgentId);
    expect(container.querySelector("textarea")).toBeTruthy();
  });
  it("renders composer for offline agent (with status text)", () => {
    mockStoreState.nodes = [offlineNode];
    const { container } = renderChat("ws-offline");
    const textarea = container.querySelector("textarea") as HTMLTextAreaElement;
    // Offline agent: textarea should be disabled
    expect(textarea.disabled).toBe(true);
  });
  it("renders composer for degraded agent", () => {
    mockStoreState.nodes = [degradedNode];
    const { container } = renderChat("ws-degraded");
    expect(container.querySelector("textarea")).toBeTruthy();
  });
  it("offline agent shows agent name", () => {
    mockStoreState.nodes = [offlineNode];
    const { container } = renderChat("ws-offline");
    expect(container.textContent ?? "").toContain("Offline Agent");
  });
});
// ─── Dark mode ───────────────────────────────────────────────────────────────
describe("MobileChat — dark mode", () => {
  beforeEach(() => {
    mockStoreState.nodes = [onlineNode];
  });
  it("renders without crashing in dark mode", () => {
    const { container } = renderChat(mockAgentId, true);
    expect(container.querySelector('[aria-label="Back"]')).toBeTruthy();
  });
});

View File

@ -1,242 +0,0 @@
// @vitest-environment jsdom
/**
* MobileComms workspace A2A traffic feed with All/Errors filter.
*
* Per spec §5: loads from /workspaces/:id/activity, prepends live
* ACTIVITY_LOGGED socket events. Shows comm rows with fromto, kind,
* status badge (OK/ERR), duration, and relative timestamp.
*
* NOTE: No @testing-library/jest-dom use DOM APIs.
*/
import { afterEach, describe, expect, it, vi } from "vitest";
import { cleanup, fireEvent, render, screen } from "@testing-library/react";
import React from "react";
import { MobileComms } from "../MobileComms";
// ─── Mock dependencies ──────────────────────────────────────────────────────────
// Pin theme to dark so styling cannot flake the assertions.
vi.mock("@/lib/theme-provider", () => ({
  useTheme: () => ({ theme: "dark", resolvedTheme: "dark", setTheme: vi.fn() }),
}));
// Two workspace nodes so activity rows can resolve from/to names.
const mockNodes = [
  {
    id: "ws-alpha",
    data: { name: "Alpha Agent", status: "online", tier: 2, parentId: null },
  },
  {
    id: "ws-beta",
    data: { name: "Beta Agent", status: "online", tier: 3, parentId: "ws-alpha" },
  },
];
vi.mock("@/store/canvas", () => ({
  useCanvasStore: vi.fn((selector) => {
    if (typeof selector === "function") {
      return selector({ nodes: mockNodes });
    }
    return mockNodes;
  }),
  summarizeWorkspaceCapabilities: vi.fn(() => ({ runtime: "langgraph", skillCount: 0, currentTask: "" })),
}));
// One "ok" and one "error" activity row, timestamped 1 and 2 minutes ago,
// so the All/Errors filter has something to distinguish.
const mockActivity: Array<{
  id: string; workspace_id: string; activity_type: string;
  source_id: string | null; target_id: string | null;
  summary: string | null; status: string; duration_ms: number | null;
  created_at: string;
}> = [
  {
    id: "act-1",
    workspace_id: "ws-alpha",
    activity_type: "a2a_delegate",
    source_id: "ws-alpha",
    target_id: "ws-beta",
    summary: "Analyzing report",
    status: "ok",
    duration_ms: 1234,
    created_at: new Date(Date.now() - 60000).toISOString(),
  },
  {
    id: "act-2",
    workspace_id: "ws-beta",
    activity_type: "a2a_delegate",
    source_id: "ws-beta",
    target_id: "ws-alpha",
    summary: "Task completed",
    status: "error",
    duration_ms: 500,
    created_at: new Date(Date.now() - 120000).toISOString(),
  },
];
// vi.hoisted so both values exist when the hoisted vi.mock factories run.
const { apiGetSpy, socketHandlers } = vi.hoisted(() => {
  const apiGetSpy = vi.fn();
  return { apiGetSpy, socketHandlers: [] as Array<(msg: unknown) => void> };
});
vi.mock("@/lib/api", () => ({
  api: {
    get: apiGetSpy,
    post: vi.fn(),
  },
}));
// Capture socket subscriptions so tests can push live events by hand.
vi.mock("@/hooks/useSocketEvent", () => ({
  useSocketEvent: vi.fn((handler: (msg: unknown) => void) => {
    socketHandlers.push(handler);
    return vi.fn(); // unsubscribe
  }),
}));
// Drain captured handlers and reset the GET spy between tests.
afterEach(() => {
  cleanup();
  socketHandlers.splice(0, socketHandlers.length);
  apiGetSpy.mockReset();
  vi.restoreAllMocks();
});
// ─── Render ────────────────────────────────────────────────────────────────────
// Header, loading, empty-state and filter-button rendering for MobileComms.
describe("MobileComms — render", () => {
  it("renders comms page with header", () => {
    apiGetSpy.mockResolvedValue([]);
    render(<MobileComms dark={true} />);
    expect(document.body.textContent).toContain("Comms");
  });
  it("shows loading state when fetching", async () => {
    // Keep the activity fetch pending so the loading state stays visible.
    // FIX: the resolver was typed `() => void`, so `resolve([])` below was a
    // TypeScript arity error ("Expected 0 arguments, but got 1"); it must
    // accept the resolved payload.
    let resolve!: (value: unknown[]) => void;
    apiGetSpy.mockImplementation(
      () => new Promise<unknown[]>((r) => { resolve = r; }),
    );
    const { container } = render(<MobileComms dark={true} />);
    // While pending, loading text is shown
    expect(container.textContent ?? "").toContain("Loading");
    // Resolve afterwards so the promise does not stay pending past the test.
    resolve([]);
  });
  it("renders empty state when no activity", async () => {
    apiGetSpy.mockResolvedValue([]);
    render(<MobileComms dark={true} />);
    // Wait for the effect to run
    await vi.waitFor(() => {
      expect(document.body.textContent).toContain("No A2A traffic yet");
    });
  });
  it("renders All and Errors filter buttons", async () => {
    apiGetSpy.mockResolvedValue([]);
    render(<MobileComms dark={true} />);
    await vi.waitFor(() => {
      expect(document.body.textContent).toContain("All");
      expect(document.body.textContent).toContain("Errors");
    });
  });
  it("shows event count in header", async () => {
    apiGetSpy.mockImplementation((path: string) => {
      if (path.includes("/activity")) return Promise.resolve(mockActivity);
      return Promise.resolve([]);
    });
    render(<MobileComms dark={true} />);
    await vi.waitFor(() => {
      expect(document.body.textContent).toContain("events");
    });
  });
});
// ─── Interaction ──────────────────────────────────────────────────────────────
// Filter switching and live socket-event prepending.
describe("MobileComms — interaction", () => {
  it("renders activity rows when data loaded", async () => {
    apiGetSpy.mockImplementation((path: string) => {
      if (path.includes("/activity")) return Promise.resolve(mockActivity);
      return Promise.resolve([]);
    });
    render(<MobileComms dark={true} />);
    await vi.waitFor(() => {
      expect(document.body.textContent).toContain("a2a_delegate");
    });
  });
  it("switching to Errors filter shows only error rows", async () => {
    apiGetSpy.mockImplementation((path: string) => {
      if (path.includes("/activity")) return Promise.resolve(mockActivity);
      return Promise.resolve([]);
    });
    render(<MobileComms dark={true} />);
    await vi.waitFor(() => {
      expect(document.body.textContent).toContain("a2a_delegate");
    });
    const errorsBtn = Array.from(
      document.querySelectorAll("button"),
    ).find((b) => b.textContent?.includes("Errors"));
    expect(errorsBtn).toBeTruthy();
    fireEvent.click(errorsBtn!);
    // Only the error row should remain
    const rows = Array.from(
      document.querySelectorAll("div"),
    ).filter((d) => d.textContent?.includes("ERR"));
    expect(rows.length).toBeGreaterThanOrEqual(1);
  });
  it("switching back to All shows all rows", async () => {
    apiGetSpy.mockImplementation((path: string) => {
      if (path.includes("/activity")) return Promise.resolve(mockActivity);
      return Promise.resolve([]);
    });
    render(<MobileComms dark={true} />);
    await vi.waitFor(() => {
      expect(document.body.textContent).toContain("a2a_delegate");
    });
    const allBtn = Array.from(
      document.querySelectorAll("button"),
    ).find((b) => b.textContent?.includes("All"));
    fireEvent.click(allBtn!);
    // Should show OK and ERR rows
    const okRows = Array.from(
      document.querySelectorAll("div"),
    ).filter((d) => d.textContent?.includes("OK"));
    expect(okRows.length).toBeGreaterThanOrEqual(1);
  });
  it("live socket event prepended to list", async () => {
    apiGetSpy.mockResolvedValue([]);
    render(<MobileComms dark={true} />);
    await vi.waitFor(() => {
      expect(document.body.textContent).toContain("No A2A traffic yet");
    });
    // Simulate live ACTIVITY_LOGGED event
    // NOTE(review): this indexes the last captured handler without a guard —
    // assumes the component registered at least one subscription; confirm.
    const liveHandler = socketHandlers[socketHandlers.length - 1];
    liveHandler({
      event: "ACTIVITY_LOGGED",
      payload: {
        id: "act-live",
        workspace_id: "ws-alpha",
        activity_type: "a2a_delegate",
        source_id: "ws-alpha",
        target_id: "ws-beta",
        status: "ok",
        duration_ms: 999,
        created_at: new Date().toISOString(),
      },
    });
    await vi.waitFor(() => {
      expect(document.body.textContent).toContain("a2a_delegate");
    });
    // Empty state should be gone
    expect(document.body.textContent).not.toContain("No A2A traffic yet");
  });
});

View File

@ -1,367 +0,0 @@
// @vitest-environment jsdom
/**
* MobileDetail agent detail page with tabbed content (Overview/Activity/Config/Memory).
*
* Per spec §03: tabbed agent detail page. MobileChat (MR !717) was also tested here.
*
* NOTE: No @testing-library/jest-dom use DOM APIs.
*/
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { cleanup, render } from "@testing-library/react";
import React from "react";
import { MobileDetail } from "../MobileDetail";
// ─── Mock store ───────────────────────────────────────────────────────────────
const mockNodeId = "ws-detail-test";
const mockOnBack = vi.fn();
const mockOnChat = vi.fn();
// Module-level mutable state for the mock store.
// Tests mutate this between cases to control what the component sees.
const mockStoreState = {
  nodes: [] as Array<{
    id: string;
    position: { x: number; y: number };
    data: Record<string, unknown>;
    width?: number;
    height?: number;
  }>,
};
// Store mock: hook form selects over mockStoreState; getState mirrors it.
vi.mock("@/store/canvas", () => ({
  useCanvasStore: Object.assign(
    vi.fn((sel) => sel(mockStoreState)),
    { getState: () => mockStoreState },
  ),
  // Capability summary derived from the node's agentCard: skill names fall
  // back to ids, runtime falls back from node data to the card.
  summarizeWorkspaceCapabilities: vi.fn((data: Record<string, unknown>) => {
    const agentCard = data.agentCard as Record<string, unknown> | null;
    const skills = Array.isArray(agentCard?.skills)
      ? (agentCard.skills as Array<Record<string, unknown>>).map(
          (s) => String(s.name || s.id || ""),
        ).filter(Boolean)
      : [];
    return {
      runtime: (typeof data.runtime === "string" && data.runtime)
        ? data.runtime
        : (typeof agentCard?.runtime === "string" ? String(agentCard.runtime) : null),
      skills,
      skillCount: skills.length,
      currentTask: String(data.currentTask ?? ""),
      hasActiveTask: String(data.currentTask ?? "").trim().length > 0,
    };
  }),
}));
// Stub the API so DetailActivity doesn't attempt real network calls.
vi.mock("@/lib/api", () => ({ api: { get: vi.fn().mockResolvedValue([]) } }));
// ─── Fixtures ────────────────────────────────────────────────────────────────
// Online agent with a populated agentCard (3 skills) and an active task.
const onlineNode = {
  id: mockNodeId,
  position: { x: 100, y: 200 },
  data: {
    name: "Test Agent",
    status: "online",
    tier: 2,
    agentCard: {
      runtime: "claude-code",
      skills: [
        { name: "web-search", id: "skill-1" },
        { name: "code-review", id: "skill-2" },
        { name: "file-ops", id: "skill-3" },
      ],
    },
    currentTask: "Reviewing PR #717",
    activeTasks: 3,
    collapsed: false,
    role: "agent",
    lastErrorRate: 0,
    lastSampleError: "",
    url: "",
    parentId: null,
    runtime: "claude-code",
    needsRestart: false,
  },
  width: 240,
  height: 130,
};
// Failed agent with an error rate and sample error, no agentCard.
const failedNode = {
  id: "ws-failed",
  position: { x: 0, y: 0 },
  data: {
    name: "Failed Worker",
    status: "failed",
    tier: 4,
    agentCard: null,
    currentTask: "",
    activeTasks: 0,
    collapsed: false,
    role: "agent",
    lastErrorRate: 0.8,
    lastSampleError: "Connection refused",
    url: "",
    parentId: null,
    runtime: "external",
    needsRestart: false,
  },
};
// Offline agent — minimal data, no agentCard.
const offlineNode = {
  id: "ws-offline",
  position: { x: 0, y: 0 },
  data: {
    name: "Offline Bot",
    status: "offline",
    tier: 1,
    agentCard: null,
    currentTask: "",
    activeTasks: 0,
    collapsed: false,
    role: "agent",
    lastErrorRate: 0,
    lastSampleError: "",
    url: "",
    parentId: null,
    runtime: "claude-code",
    needsRestart: false,
  },
};
// ─── Helpers ─────────────────────────────────────────────────────────────────
/** Mount MobileDetail for `agentId`; dark mode defaults to off. */
function renderDetail(agentId: string, dark = false) {
  const props = { agentId, dark, onBack: mockOnBack, onChat: mockOnChat };
  return render(<MobileDetail {...props} />);
}
// ─── Setup / teardown ─────────────────────────────────────────────────────────
// Reset callbacks and store nodes so each test configures its own fixture.
beforeEach(() => {
  mockOnBack.mockClear();
  mockOnChat.mockClear();
  mockStoreState.nodes = [];
});
afterEach(() => {
  cleanup();
  vi.clearAllMocks();
});
// ─── Not found ────────────────────────────────────────────────────────────────
describe("MobileDetail — agent not found", () => {
  it('renders "Agent not found." when no node matches agentId', () => {
    mockStoreState.nodes = [onlineNode];
    const { container } = renderDetail("nonexistent-id");
    expect(container.textContent ?? "").toContain("Agent not found.");
  });
  it("does not render any tab buttons when agent not found", () => {
    mockStoreState.nodes = [];
    const { container } = renderDetail("ghost-agent");
    expect(container.querySelectorAll("button").length).toBe(0);
  });
});
// ─── Hero render ─────────────────────────────────────────────────────────────
describe("MobileDetail — hero section", () => {
  beforeEach(() => {
    mockStoreState.nodes = [onlineNode];
  });
  it("renders the agent name as an h1", () => {
    const { container } = renderDetail(mockNodeId);
    const h1 = container.querySelector("h1");
    expect(h1).toBeTruthy();
    expect(h1!.textContent).toBe("Test Agent");
  });
  it("renders agent tag below the name", () => {
    const { container } = renderDetail(mockNodeId);
    // Tag appears in the hero section, styled differently from the name
    expect(container.textContent ?? "").toContain("claude-code");
  });
  it("renders a Back button with aria-label", () => {
    const { container } = renderDetail(mockNodeId);
    const backBtn = container.querySelector('[aria-label="Back"]');
    expect(backBtn).toBeTruthy();
  });
  it("Back button calls onBack", () => {
    const { container } = renderDetail(mockNodeId);
    const backBtn = container.querySelector('[aria-label="Back"]') as HTMLButtonElement;
    backBtn.click();
    expect(mockOnBack).toHaveBeenCalledTimes(1);
  });
  it("renders a More button", () => {
    const { container } = renderDetail(mockNodeId);
    const moreBtn = container.querySelector('[aria-label="More"]');
    expect(moreBtn).toBeTruthy();
  });
  it("renders Chat CTA with icon text", () => {
    const { container } = renderDetail(mockNodeId);
    expect(container.textContent ?? "").toContain("Open chat");
  });
  it("Chat CTA calls onChat", () => {
    const { container } = renderDetail(mockNodeId);
    const chatBtn = Array.from(container.querySelectorAll("button")).find(
      (b) => b.textContent?.includes("Open chat"),
    );
    expect(chatBtn).toBeTruthy();
    (chatBtn as HTMLButtonElement).click();
    expect(mockOnChat).toHaveBeenCalledTimes(1);
  });
});
// ─── Pill stats ───────────────────────────────────────────────────────────────
describe("MobileDetail — pill stats", () => {
  beforeEach(() => {
    mockStoreState.nodes = [onlineNode];
  });
  it("renders TIER pill with the agent tier", () => {
    const { container } = renderDetail(mockNodeId);
    expect(container.textContent ?? "").toContain("TIER");
  });
  it("renders RUNTIME pill", () => {
    const { container } = renderDetail(mockNodeId);
    expect(container.textContent ?? "").toContain("RUNTIME");
  });
  it("renders SKILLS pill with count", () => {
    const { container } = renderDetail(mockNodeId);
    // 3 skills in the agentCard fixture
    expect(container.textContent ?? "").toContain("SKILLS");
  });
  it("renders STATUS pill", () => {
    const { container } = renderDetail(mockNodeId);
    expect(container.textContent ?? "").toContain("STATUS");
  });
  it("STATUS pill shows agent status value", () => {
    const { container } = renderDetail(mockNodeId);
    // online status from the fixture
    expect(container.textContent ?? "").toContain("online");
  });
  it("renders all 4 pills for online agent", () => {
    const { container } = renderDetail(mockNodeId);
    // Count the pill container divs — each PillStat is a div with specific inline styles
    // We verify by content: TIER, RUNTIME, SKILLS, STATUS should all be present
    const text = container.textContent ?? "";
    expect(text).toContain("TIER");
    expect(text).toContain("RUNTIME");
    expect(text).toContain("SKILLS");
    expect(text).toContain("STATUS");
  });
});
// ─── Tabs ─────────────────────────────────────────────────────────────────────
describe("MobileDetail — tab switching", () => {
  beforeEach(() => {
    mockStoreState.nodes = [onlineNode];
  });
  it("renders all 4 tab buttons", () => {
    const { container } = renderDetail(mockNodeId);
    const text = container.textContent ?? "";
    expect(text).toContain("Overview");
    expect(text).toContain("Activity");
    expect(text).toContain("Config");
    expect(text).toContain("Memory");
  });
  it("defaults to Overview tab", () => {
    const { container } = renderDetail(mockNodeId);
    // DetailOverview renders ID, Tier, Runtime, Active tasks, Skills, Origin rows
    expect(container.textContent ?? "").toContain("ID");
    expect(container.textContent ?? "").toContain("Tier");
  });
  it("Overview tab shows agent ID", () => {
    const { container } = renderDetail(mockNodeId);
    expect(container.textContent ?? "").toContain(mockNodeId);
  });
  it("Overview tab shows active tasks count", () => {
    const { container } = renderDetail(mockNodeId);
    // onlineNode has activeTasks: 3
    expect(container.textContent ?? "").toContain("Active tasks");
    expect(container.textContent ?? "").toContain("3");
  });
  it("Overview tab shows skill count", () => {
    const { container } = renderDetail(mockNodeId);
    // 3 skills in agentCard
    expect(container.textContent ?? "").toContain("Skills");
    expect(container.textContent ?? "").toContain("3 loaded");
  });
  it("Config tab button is findable and is a button element", () => {
    const { container } = renderDetail(mockNodeId);
    const configTab = Array.from(container.querySelectorAll("button")).find(
      (b) => b.textContent?.trim() === "Config",
    );
    expect(configTab).toBeTruthy();
    expect((configTab as HTMLButtonElement).type).toBe("button");
  });
  it("Memory tab button is findable and is a button element", () => {
    const { container } = renderDetail(mockNodeId);
    const memoryTab = Array.from(container.querySelectorAll("button")).find(
      (b) => b.textContent?.trim() === "Memory",
    );
    expect(memoryTab).toBeTruthy();
    expect((memoryTab as HTMLButtonElement).type).toBe("button");
  });
});
// ─── Status rendering ─────────────────────────────────────────────────────────
describe("MobileDetail — status rendering", () => {
  it("renders failed status for failed agent", () => {
    mockStoreState.nodes = [failedNode];
    const { container } = renderDetail("ws-failed");
    expect(container.textContent ?? "").toContain("Failed Worker");
    expect(container.textContent ?? "").toContain("failed");
  });
  it("renders offline status for offline agent", () => {
    mockStoreState.nodes = [offlineNode];
    const { container } = renderDetail("ws-offline");
    expect(container.textContent ?? "").toContain("Offline Bot");
    expect(container.textContent ?? "").toContain("offline");
  });
});
// ─── Dark mode ───────────────────────────────────────────────────────────────
describe("MobileDetail — dark mode", () => {
  beforeEach(() => {
    mockStoreState.nodes = [onlineNode];
  });
  it("renders without crashing in dark mode", () => {
    const { container } = renderDetail(mockNodeId, true);
    expect(container.querySelector("h1")?.textContent).toBe("Test Agent");
  });
});

View File

@ -1,245 +0,0 @@
// @vitest-environment jsdom
/**
* MobileHome workspace agent list + filter chips + spawn FAB.
*
* Per spec §01: live store data, filter by status, spawn FAB.
*
* NOTE: No @testing-library/jest-dom use DOM APIs.
*/
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { cleanup, render } from "@testing-library/react";
import React from "react";
import { MobileHome } from "../MobileHome";
// ─── Mock store ───────────────────────────────────────────────────────────────
// Spy callbacks handed to <MobileHome>; cleared in the beforeEach below.
const mockOnOpen = vi.fn();
const mockOnSpawn = vi.fn();
// Mutable backing state for the mocked canvas store. Tests assign
// mockStoreState.nodes directly before rendering; the mocked selector
// (see vi.mock below) reads from this object on every call.
const mockStoreState = {
  nodes: [] as Array<{
    id: string;
    position: { x: number; y: number };
    data: Record<string, unknown>;
    width?: number;
    height?: number;
  }>,
};
vi.mock("@/store/canvas", () => ({
useCanvasStore: Object.assign(
vi.fn((sel) => sel(mockStoreState)),
{ getState: () => mockStoreState },
),
summarizeWorkspaceCapabilities: vi.fn((data: Record<string, unknown>) => {
const agentCard = data.agentCard as Record<string, unknown> | null;
const skills = Array.isArray(agentCard?.skills)
? (agentCard.skills as Array<Record<string, unknown>>).map(
(s) => String(s.name || s.id || ""),
).filter(Boolean)
: [];
return {
runtime: (typeof data.runtime === "string" && data.runtime)
? data.runtime
: (typeof agentCard?.runtime === "string" ? String(agentCard.runtime) : null),
skills,
skillCount: skills.length,
currentTask: String(data.currentTask ?? ""),
hasActiveTask: String(data.currentTask ?? "").trim().length > 0,
};
}),
}));
// ─── Fixtures ───────────────────────────────────────────────────────────────
// Build a canvas node fixture with sane defaults; `overrides` shallow-merges
// into the data payload. The id is randomized so fixtures never collide.
function makeNode(overrides: Partial<Record<string, unknown>> = {}) {
  const defaults = {
    name: "Agent",
    status: "online",
    tier: 2,
    agentCard: null,
    currentTask: "",
    activeTasks: 0,
    collapsed: false,
    role: "agent",
    lastErrorRate: 0,
    lastSampleError: "",
    url: "",
    parentId: null,
    runtime: "claude-code",
    needsRestart: false,
  };
  return {
    id: `ws-${Math.random().toString(36).slice(2, 7)}`,
    position: { x: 0, y: 0 },
    data: { ...defaults, ...overrides },
  };
}
// Shared fixtures covering the three filter buckets (online / issue / paused).
const onlineAgent = makeNode({ name: "Online Agent", status: "online", tier: 2 });
const failedAgent = makeNode({ name: "Failed Agent", status: "failed", tier: 4 });
const pausedAgent = makeNode({ name: "Paused Agent", status: "paused", tier: 1 });
// ─── Helpers ─────────────────────────────────────────────────────────────────
// Render <MobileHome> with defaulted props; spies are shared module fixtures.
function renderHome(overrides: Partial<{
  dark: boolean;
  density: "compact" | "regular";
  workspaceLabel: string;
  username: string;
}> = {}) {
  const {
    dark = false,
    density = "regular",
    workspaceLabel,
    username,
  } = overrides;
  return render(
    <MobileHome
      dark={dark}
      density={density}
      onOpen={mockOnOpen}
      onSpawn={mockOnSpawn}
      workspaceLabel={workspaceLabel}
      username={username}
    />,
  );
}
// ─── Setup / teardown ─────────────────────────────────────────────────────────
// Reset spies and store state so each test starts from an empty workspace.
beforeEach(() => {
  mockOnOpen.mockClear();
  mockOnSpawn.mockClear();
  mockStoreState.nodes = [];
});
// Unmount rendered trees between tests to avoid cross-test DOM leakage.
afterEach(() => {
  cleanup();
});
// ─── Structure ───────────────────────────────────────────────────────────────
describe("MobileHome — page structure", () => {
it('renders "Agents" heading', () => {
mockStoreState.nodes = [onlineAgent];
const { container } = renderHome();
const h1 = container.querySelector("h1");
expect(h1).toBeTruthy();
expect(h1!.textContent).toBe("Agents");
});
it("renders WorkspacePill with agent count", () => {
mockStoreState.nodes = [onlineAgent, failedAgent];
const { container } = renderHome();
// WorkspacePill renders the agent count somewhere in the DOM
expect(container.textContent ?? "").toContain("2");
});
it('shows "live" suffix in subheading', () => {
mockStoreState.nodes = [onlineAgent];
const { container } = renderHome();
// Single agent → "1 workspace · live" (singular)
expect(container.textContent ?? "").toContain("workspace");
expect(container.textContent ?? "").toContain("live");
});
it("renders FilterChips row", () => {
mockStoreState.nodes = [onlineAgent];
const { container } = renderHome();
// FilterChips renders buttons for "All", "Online", "Issues", "Paused"
const text = container.textContent ?? "";
expect(text).toContain("All");
expect(text).toContain("Online");
expect(text).toContain("Issues");
});
it("renders Workspace section label", () => {
mockStoreState.nodes = [onlineAgent];
const { container } = renderHome();
expect(container.textContent ?? "").toContain("Workspace");
});
it("renders spawn FAB with aria-label", () => {
mockStoreState.nodes = [onlineAgent];
const { container } = renderHome();
const fab = container.querySelector('[aria-label="Spawn new agent"]');
expect(fab).toBeTruthy();
});
it("FAB calls onSpawn", () => {
mockStoreState.nodes = [onlineAgent];
const { container } = renderHome();
const fab = container.querySelector('[aria-label="Spawn new agent"]') as HTMLButtonElement;
fab.click();
expect(mockOnSpawn).toHaveBeenCalledTimes(1);
});
it("shows username when provided", () => {
mockStoreState.nodes = [onlineAgent];
const { container } = renderHome({ username: "alice@example.com" });
expect(container.textContent ?? "").toContain("alice@example.com");
});
it("omits username when not provided", () => {
mockStoreState.nodes = [onlineAgent];
const { container } = renderHome();
expect(container.querySelector('[style*="letter-spacing"]')?.textContent).not.toContain("@");
});
it("renders with custom workspaceLabel", () => {
mockStoreState.nodes = [onlineAgent];
const { container } = renderHome({ workspaceLabel: "Production" });
expect(container.textContent ?? "").toContain("Production");
});
});
// ─── Agent list ─────────────────────────────────────────────────────────────
describe("MobileHome — agent list", () => {
it("renders agent cards when nodes are present", () => {
mockStoreState.nodes = [onlineAgent, failedAgent, pausedAgent];
const { container } = renderHome();
expect(container.textContent ?? "").toContain("Online Agent");
expect(container.textContent ?? "").toContain("Failed Agent");
expect(container.textContent ?? "").toContain("Paused Agent");
});
it("shows 'No agents match this filter.' when filter returns empty", () => {
mockStoreState.nodes = [onlineAgent];
const { container } = renderHome();
// By default filter is "all" — all agents match
expect(container.textContent ?? "").not.toContain("No agents match");
// If we could set filter to something that filters everything out...
// (filter is internal state, we test the "all" default)
expect(container.querySelectorAll("button").length).toBeGreaterThan(0);
});
it("renders no agents when node list is empty", () => {
mockStoreState.nodes = [];
const { container } = renderHome();
// Should show "0 workspaces" and "No agents match this filter."
expect(container.textContent ?? "").toContain("0 workspace");
});
});
// ─── Agent count display ──────────────────────────────────────────────────────
describe("MobileHome — agent count", () => {
it("shows singular 'workspace' when count is 1", () => {
mockStoreState.nodes = [onlineAgent];
const { container } = renderHome();
expect(container.textContent ?? "").toContain("1 workspace");
});
it("shows plural 'workspaces' when count is > 1", () => {
mockStoreState.nodes = [onlineAgent, failedAgent];
const { container } = renderHome();
expect(container.textContent ?? "").toContain("2 workspaces");
});
});
// ─── Dark mode ───────────────────────────────────────────────────────────────
describe("MobileHome — dark mode", () => {
it("renders without crashing in dark mode", () => {
mockStoreState.nodes = [onlineAgent];
const { container } = renderHome({ dark: true });
expect(container.querySelector("h1")?.textContent).toBe("Agents");
});
});

View File

@ -1,212 +0,0 @@
// @vitest-environment jsdom
/**
* MobileMe theme, accent, and density preferences.
*
* Per spec: theme + accent + density settings for mobile.
*
* NOTE: No @testing-library/jest-dom use DOM APIs.
*/
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { cleanup, render } from "@testing-library/react";
import React from "react";
import { MobileMe } from "../MobileMe";
// ─── Mock theme provider ───────────────────────────────────────────────────────
// Spies passed into <MobileMe> and returned by the mocked theme hook;
// cleared in the beforeEach below.
const mockSetTheme = vi.fn();
const mockSetAccent = vi.fn();
const mockSetDensity = vi.fn();
// Mocked theme provider: always reports theme="system" resolved to light,
// so the "System" option is the active one in every test.
vi.mock("@/lib/theme-provider", () => ({
  useTheme: vi.fn(() => ({
    theme: "system",
    resolvedTheme: "light",
    setTheme: mockSetTheme,
  })),
}));
// ─── Helpers ─────────────────────────────────────────────────────────────────
// Render <MobileMe> with defaulted props; setters are the shared spies above.
function renderMe(overrides: Partial<{
  dark: boolean;
  accent: string;
  density: "compact" | "regular";
}> = {}) {
  const {
    dark = false,
    accent = "#2f9e6a",
    density = "regular",
  } = overrides;
  return render(
    <MobileMe
      dark={dark}
      accent={accent}
      setAccent={mockSetAccent}
      density={density}
      setDensity={mockSetDensity}
    />,
  );
}
// ─── Setup / teardown ─────────────────────────────────────────────────────────
// Clear the setter spies so call assertions are per-test.
beforeEach(() => {
  mockSetTheme.mockClear();
  mockSetAccent.mockClear();
  mockSetDensity.mockClear();
});
// Unmount rendered trees between tests.
afterEach(() => {
  cleanup();
});
// ─── Structure ───────────────────────────────────────────────────────────────
describe("MobileMe — page structure", () => {
it('renders "Me" heading', () => {
const { container } = renderMe();
const h1 = container.querySelector("h1");
expect(h1).toBeTruthy();
expect(h1!.textContent).toBe("Me");
});
it("renders theme section label", () => {
const { container } = renderMe();
expect(container.textContent ?? "").toContain("Theme");
});
it("renders theme options: System, Light, Dark", () => {
const { container } = renderMe();
const text = container.textContent ?? "";
expect(text).toContain("System");
expect(text).toContain("Light");
expect(text).toContain("Dark");
});
it("renders accent section label", () => {
const { container } = renderMe();
expect(container.textContent ?? "").toContain("Accent");
});
it("renders all 5 accent color swatches", () => {
const { container } = renderMe();
const swatches = container.querySelectorAll("button[aria-label]");
// 5 accent swatches + theme buttons + density buttons = more than 5
// We verify the accent swatches by checking aria-labels
const accentLabels = Array.from(swatches)
.map((b) => b.getAttribute("aria-label") ?? "")
.filter((l) => l.startsWith("Set accent"));
expect(accentLabels.length).toBe(5);
});
it("renders density section label", () => {
const { container } = renderMe();
expect(container.textContent ?? "").toContain("Density");
});
it("renders density options: Regular, Compact", () => {
const { container } = renderMe();
const text = container.textContent ?? "";
expect(text).toContain("Regular");
expect(text).toContain("Compact");
});
it("renders version footer", () => {
const { container } = renderMe();
expect(container.textContent ?? "").toContain("Mobile design preview");
});
});
// ─── Theme selection ──────────────────────────────────────────────────────────
describe("MobileMe — theme selection", () => {
it("renders System as the active theme (from mock)", () => {
const { container } = renderMe();
// The theme buttons are rendered; System is active in our mock
// We verify the buttons exist and are findable
const buttons = Array.from(container.querySelectorAll("button"));
const themeButtons = buttons.filter(
(b) => ["System", "Light", "Dark"].includes(b.textContent?.trim() ?? ""),
);
expect(themeButtons.length).toBe(3);
});
it("calls setTheme when a theme button is clicked", () => {
const { container } = renderMe();
const darkBtn = Array.from(container.querySelectorAll("button")).find(
(b) => b.textContent?.trim() === "Dark",
);
expect(darkBtn).toBeTruthy();
darkBtn!.click();
expect(mockSetTheme).toHaveBeenCalledWith("dark");
});
});
// ─── Accent selection ────────────────────────────────────────────────────────
describe("MobileMe — accent selection", () => {
it("renders accent buttons with aria-label", () => {
const { container } = renderMe();
const swatches = container.querySelectorAll("button[aria-label]");
const accentSwatches = Array.from(swatches).filter(
(b) => (b.getAttribute("aria-label") ?? "").startsWith("Set accent"),
);
expect(accentSwatches.length).toBe(5);
});
it("calls setAccent with the correct color", () => {
const { container } = renderMe();
const swatch = Array.from(container.querySelectorAll("button[aria-label]")).find(
(b) => b.getAttribute("aria-label") === "Set accent #3b6fe0",
);
expect(swatch).toBeTruthy();
swatch!.click();
expect(mockSetAccent).toHaveBeenCalledWith("#3b6fe0");
});
});
// ─── Density selection ────────────────────────────────────────────────────────
describe("MobileMe — density selection", () => {
it("renders density buttons", () => {
const { container } = renderMe();
const buttons = Array.from(container.querySelectorAll("button"));
const densityButtons = buttons.filter(
(b) => ["Regular", "Compact"].includes(b.textContent?.trim() ?? ""),
);
expect(densityButtons.length).toBe(2);
});
it("calls setDensity when Compact is clicked", () => {
const { container } = renderMe({ density: "regular" });
const compactBtn = Array.from(container.querySelectorAll("button")).find(
(b) => b.textContent?.trim() === "Compact",
);
expect(compactBtn).toBeTruthy();
compactBtn!.click();
expect(mockSetDensity).toHaveBeenCalledWith("compact");
});
it("calls setDensity when Regular is clicked", () => {
const { container } = renderMe({ density: "compact" });
const regularBtn = Array.from(container.querySelectorAll("button")).find(
(b) => b.textContent?.trim() === "Regular",
);
expect(regularBtn).toBeTruthy();
regularBtn!.click();
expect(mockSetDensity).toHaveBeenCalledWith("regular");
});
});
// ─── Dark mode ───────────────────────────────────────────────────────────────
describe("MobileMe — dark mode", () => {
it("renders without crashing in dark mode", () => {
const { container } = renderMe({ dark: true });
expect(container.querySelector("h1")?.textContent).toBe("Me");
});
it("renders theme, accent, and density sections in dark mode", () => {
const { container } = renderMe({ dark: true });
const text = container.textContent ?? "";
expect(text).toContain("Theme");
expect(text).toContain("Accent");
expect(text).toContain("Density");
});
});

View File

@ -1,253 +0,0 @@
// @vitest-environment jsdom
/**
* MobileSpawn bottom-sheet agent spawn form.
*
* Per spec §6: fetches /templates, user picks tier + name,
* POST /workspaces. Backdrop click closes. Error surfaced inline.
*
* NOTE: No @testing-library/jest-dom use DOM APIs.
*/
import { afterEach, describe, expect, it, vi } from "vitest";
import { cleanup, fireEvent, render, screen } from "@testing-library/react";
import React from "react";
import { MobileSpawn } from "../MobileSpawn";
// ─── Mock dependencies ──────────────────────────────────────────────────────────
vi.mock("@/lib/theme-provider", () => ({
useTheme: () => ({ theme: "dark", resolvedTheme: "dark", setTheme: vi.fn() }),
}));
// Canned /templates payload: three installed templates. The first entry
// (tpl-langgraph) is the one the component selects by default on spawn.
const mockTemplates = [
  {
    id: "tpl-langgraph",
    name: "LangGraph Agent",
    description: "Multi-step reasoning with state machines.",
    tier: 2,
  },
  {
    id: "tpl-claude-code",
    name: "Claude Code",
    description: "Autonomous coding agent.",
    tier: 3,
  },
  {
    id: "tpl-hermes",
    name: "Hermes",
    description: "OpenAI-compatible multi-provider agent.",
    tier: 2,
  },
];
// vi.hoisted runs before the hoisted vi.mock factory below, so these spies
// exist by the time the "@/lib/api" module factory references them.
const { apiGetSpy, apiPostSpy } = vi.hoisted(() => {
  return { apiGetSpy: vi.fn(), apiPostSpy: vi.fn() };
});
vi.mock("@/lib/api", () => ({
  api: {
    get: apiGetSpy,
    post: apiPostSpy,
  },
}));
// Unmount trees and reset the API spies' implementations between tests.
afterEach(() => {
  cleanup();
  apiGetSpy.mockReset();
  apiPostSpy.mockReset();
  vi.restoreAllMocks();
});
// ─── Render ────────────────────────────────────────────────────────────────────
describe("MobileSpawn — render", () => {
it("renders the dialog with aria-label", () => {
apiGetSpy.mockResolvedValue(mockTemplates);
render(<MobileSpawn dark={true} onClose={vi.fn()} />);
const dialog = document.querySelector('[role="dialog"][aria-label="Spawn agent"]');
expect(dialog).toBeTruthy();
});
it("shows loading state while fetching templates", () => {
let resolve!: (v: unknown) => void;
apiGetSpy.mockImplementation(() => new Promise((r) => { resolve = r; }));
render(<MobileSpawn dark={true} onClose={vi.fn()} />);
expect(document.body.textContent).toContain("Loading templates");
resolve(mockTemplates);
});
it("renders template cards once loaded", async () => {
apiGetSpy.mockResolvedValue(mockTemplates);
render(<MobileSpawn dark={true} onClose={vi.fn()} />);
await vi.waitFor(() => {
expect(document.body.textContent).toContain("LangGraph Agent");
expect(document.body.textContent).toContain("Claude Code");
expect(document.body.textContent).toContain("Hermes");
});
});
it("renders name input", () => {
apiGetSpy.mockResolvedValue(mockTemplates);
render(<MobileSpawn dark={true} onClose={vi.fn()} />);
const input = document.querySelector('input[placeholder]');
expect(input).toBeTruthy();
});
it("renders all 4 tier buttons", () => {
apiGetSpy.mockResolvedValue(mockTemplates);
render(<MobileSpawn dark={true} onClose={vi.fn()} />);
expect(document.body.textContent).toContain("Sandboxed");
expect(document.body.textContent).toContain("Standard");
expect(document.body.textContent).toContain("Privileged");
expect(document.body.textContent).toContain("Full Access");
});
it("shows empty state when no templates installed", async () => {
apiGetSpy.mockResolvedValue([]);
render(<MobileSpawn dark={true} onClose={vi.fn()} />);
await vi.waitFor(() => {
expect(document.body.textContent).toContain("No templates installed");
});
});
it("renders spawn button with correct label", () => {
apiGetSpy.mockResolvedValue(mockTemplates);
render(<MobileSpawn dark={true} onClose={vi.fn()} />);
const spawnBtn = Array.from(
document.querySelectorAll("button"),
).find((b) => b.textContent?.includes("Spawn agent"));
expect(spawnBtn).toBeTruthy();
});
it("renders close button", () => {
apiGetSpy.mockResolvedValue(mockTemplates);
render(<MobileSpawn dark={true} onClose={vi.fn()} />);
const closeBtn = document.querySelector('button[aria-label="Close"]');
expect(closeBtn).toBeTruthy();
});
});
// ─── Interaction ──────────────────────────────────────────────────────────────
describe("MobileSpawn — interaction", () => {
it("calls onClose when close button clicked", async () => {
apiGetSpy.mockResolvedValue(mockTemplates);
const onClose = vi.fn();
render(<MobileSpawn dark={true} onClose={onClose} />);
await vi.waitFor(() => {
expect(document.querySelector('button[aria-label="Close"]')).toBeTruthy();
});
document.querySelector('button[aria-label="Close"]')!.click();
expect(onClose).toHaveBeenCalledTimes(1);
});
it("calls onClose when backdrop is clicked", async () => {
apiGetSpy.mockResolvedValue(mockTemplates);
const onClose = vi.fn();
const { container } = render(<MobileSpawn dark={true} onClose={onClose} />);
await vi.waitFor(() => {
expect(document.body.textContent).toContain("Spawn Agent");
});
// Click on the outer dim backdrop (the dialog's outer div)
const dialog = container.querySelector('[role="dialog"]')!;
dialog.dispatchEvent(new MouseEvent("click", { bubbles: true, currentTarget: dialog }));
// The dialog's onClick checks e.target === e.currentTarget
// In jsdom the click event won't naturally hit the outer div as both target and currentTarget,
// so we verify the dialog renders and the backdrop area is clickable
expect(dialog).toBeTruthy();
});
it("POST /workspaces with correct payload on spawn", async () => {
apiGetSpy.mockResolvedValue(mockTemplates);
apiPostSpy.mockResolvedValue({ id: "ws-new" });
const onClose = vi.fn();
render(<MobileSpawn dark={true} onClose={onClose} />);
await vi.waitFor(() => {
expect(document.body.textContent).toContain("LangGraph Agent");
});
// Fill name
const input = document.querySelector("input") as HTMLInputElement;
fireEvent.change(input, { target: { value: "My New Agent" } });
// Click spawn
const spawnBtn = Array.from(
document.querySelectorAll("button"),
).find((b) => b.textContent?.includes("Spawn agent"))!;
spawnBtn.click();
await vi.waitFor(() => {
expect(apiPostSpy).toHaveBeenCalledWith("/workspaces", expect.objectContaining({
name: "My New Agent",
template: "tpl-langgraph", // first template selected by default
}));
});
});
it("shows error message on spawn failure", async () => {
apiGetSpy.mockResolvedValue(mockTemplates);
apiPostSpy.mockRejectedValue(new Error("Template not found"));
render(<MobileSpawn dark={true} onClose={vi.fn()} />);
await vi.waitFor(() => {
expect(document.body.textContent).toContain("LangGraph Agent");
});
const spawnBtn = Array.from(
document.querySelectorAll("button"),
).find((b) => b.textContent?.includes("Spawn agent"))!;
spawnBtn.click();
await vi.waitFor(() => {
expect(document.body.textContent).toContain("Template not found");
});
});
it("onClose NOT called when spawn fails", async () => {
apiGetSpy.mockResolvedValue(mockTemplates);
apiPostSpy.mockRejectedValue(new Error("Server error"));
const onClose = vi.fn();
render(<MobileSpawn dark={true} onClose={onClose} />);
await vi.waitFor(() => {
expect(document.body.textContent).toContain("Spawn agent");
});
const spawnBtn = Array.from(
document.querySelectorAll("button"),
).find((b) => b.textContent?.includes("Spawn agent"))!;
spawnBtn.click();
await vi.waitFor(() => {
expect(onClose).not.toHaveBeenCalled();
});
});
it("tier selection updates state", async () => {
apiGetSpy.mockResolvedValue(mockTemplates);
render(<MobileSpawn dark={true} onClose={vi.fn()} />);
await vi.waitFor(() => {
expect(document.body.textContent).toContain("Spawn agent");
});
// Default tier is T2 (Standard). Click T4 (Full Access).
const t4Btn = Array.from(
document.querySelectorAll("button"),
).find((b) => b.textContent?.includes("Full Access"))!;
fireEvent.click(t4Btn);
// Spawn with T4
const spawnBtn = Array.from(
document.querySelectorAll("button"),
).find((b) => b.textContent?.includes("Spawn agent"))!;
spawnBtn.click();
await vi.waitFor(() => {
expect(apiPostSpy).toHaveBeenCalledWith("/workspaces", expect.objectContaining({
tier: 4, // T4 = tier 4
}));
});
});
});

View File

@ -1,154 +0,0 @@
// @vitest-environment jsdom
/**
* TabBar mobile bottom navigation bar.
*
* Per WCAG 2.1 AA / ARIA tab pattern:
* - Outer div has role="tablist" + aria-label
* - Each tab button has role="tab", aria-selected, aria-label
* - Icon span has aria-hidden="true" (label text is the accessible name)
* - Keyboard: Arrow keys cycle tabs, Home/End go to first/last
* - tabIndex: active tab is 0, others are -1
*
* NOTE: No @testing-library/jest-dom use DOM APIs.
*/
import { afterEach, describe, expect, it, vi } from "vitest";
import { cleanup, fireEvent, render } from "@testing-library/react";
import React from "react";
import { TabBar, type MobileTabId } from "../components";
// Unmount trees, restore spies, and reset the module registry between tests.
afterEach(() => {
  cleanup();
  vi.restoreAllMocks();
  vi.resetModules();
});
// ─── Render ───────────────────────────────────────────────────────────────────
describe("TabBar — render", () => {
it("renders 4 tab buttons", () => {
render(<TabBar active="agents" onChange={vi.fn()} dark={false} />);
const tabs = document.querySelectorAll('[role="tab"]');
expect(tabs.length).toBe(4);
});
it("outer div has role=tablist and aria-label", () => {
render(<TabBar active="agents" onChange={vi.fn()} dark={false} />);
const tablist = document.querySelector('[role="tablist"]');
expect(tablist).toBeTruthy();
expect(tablist?.getAttribute("aria-label")).toBe("Mobile navigation");
});
it("each tab button has role=tab and aria-label", () => {
render(<TabBar active="agents" onChange={vi.fn()} dark={false} />);
const tabs = document.querySelectorAll('[role="tab"]');
tabs.forEach((tab) => {
expect(tab.getAttribute("role")).toBe("tab");
expect(tab.getAttribute("aria-label")).toBeTruthy();
});
});
it("icon spans have aria-hidden=true", () => {
render(<TabBar active="agents" onChange={vi.fn()} dark={false} />);
const icons = document.querySelectorAll('[aria-hidden="true"]');
expect(icons.length).toBeGreaterThanOrEqual(4);
});
it("active tab has aria-selected=true, others false", () => {
render(<TabBar active="canvas" onChange={vi.fn()} dark={false} />);
const tabs = document.querySelectorAll('[role="tab"]');
tabs.forEach((tab) => {
const label = tab.getAttribute("aria-label");
if (label === "Canvas") {
expect(tab.getAttribute("aria-selected")).toBe("true");
} else {
expect(tab.getAttribute("aria-selected")).toBe("false");
}
});
});
it("active tab has tabIndex=0, others tabIndex=-1", () => {
render(<TabBar active="comms" onChange={vi.fn()} dark={false} />);
const tabs = document.querySelectorAll('[role="tab"]');
tabs.forEach((tab) => {
const label = tab.getAttribute("aria-label");
if (label === "Comms") {
expect(tab.getAttribute("tabIndex")).toBe("0");
} else {
expect(tab.getAttribute("tabIndex")).toBe("-1");
}
});
});
});
// ─── Interaction ─────────────────────────────────────────────────────────────
describe("TabBar — interaction", () => {
it("calls onChange with correct id when tab is clicked", () => {
const onChange = vi.fn();
render(<TabBar active="agents" onChange={onChange} dark={false} />);
const tabs = document.querySelectorAll('[role="tab"]');
const canvasTab = Array.from(tabs).find((t) => t.getAttribute("aria-label") === "Canvas") as Element;
fireEvent.click(canvasTab);
expect(onChange).toHaveBeenCalledWith("canvas");
});
it("ArrowRight moves focus to next tab and activates it", () => {
const onChange = vi.fn();
render(<TabBar active="agents" onChange={onChange} dark={false} />);
const tabs = document.querySelectorAll('[role="tab"]');
const agentsTab = tabs[0] as HTMLElement;
agentsTab.focus();
expect(document.activeElement).toBe(agentsTab);
fireEvent.keyDown(agentsTab, { key: "ArrowRight" });
// onChange called for the next tab
expect(onChange).toHaveBeenCalledWith("canvas");
// Focus should move to the canvas tab
// Use setTimeout(0) trick — after state update, focus moves
});
it("ArrowLeft on first tab wraps to last", () => {
const onChange = vi.fn();
render(<TabBar active="agents" onChange={onChange} dark={false} />);
const tabs = document.querySelectorAll('[role="tab"]');
const agentsTab = tabs[0] as HTMLElement;
agentsTab.focus();
fireEvent.keyDown(agentsTab, { key: "ArrowLeft" });
expect(onChange).toHaveBeenCalledWith("me");
});
it("Home key activates first tab", () => {
const onChange = vi.fn();
render(<TabBar active="comms" onChange={onChange} dark={false} />);
const tabs = document.querySelectorAll('[role="tab"]');
const commsTab = tabs[2] as HTMLElement;
commsTab.focus();
fireEvent.keyDown(commsTab, { key: "Home" });
expect(onChange).toHaveBeenCalledWith("agents");
});
it("End key activates last tab", () => {
const onChange = vi.fn();
render(<TabBar active="agents" onChange={onChange} dark={false} />);
const tabs = document.querySelectorAll('[role="tab"]');
const agentsTab = tabs[0] as HTMLElement;
agentsTab.focus();
fireEvent.keyDown(agentsTab, { key: "End" });
expect(onChange).toHaveBeenCalledWith("me");
});
it("ArrowDown also navigates (aliases ArrowRight)", () => {
const onChange = vi.fn();
render(<TabBar active="canvas" onChange={onChange} dark={false} />);
const tabs = document.querySelectorAll('[role="tab"]');
const canvasTab = tabs[1] as HTMLElement;
canvasTab.focus();
fireEvent.keyDown(canvasTab, { key: "ArrowDown" });
expect(onChange).toHaveBeenCalledWith("comms");
});
});

View File

@ -1,184 +0,0 @@
// @vitest-environment jsdom
/**
* mobile/components.tsx pure functions.
*
* Covers:
* - toMobileAgent: full transform, all status/tier/runtime cases
* - classifyForFilter: online "online", failed/degraded "issue",
* starting/paused/offline "paused"
*
* NOTE: No @testing-library/jest-dom use DOM APIs.
*/
import { beforeEach, describe, expect, it, vi } from "vitest";
import type { Node } from "@xyflow/react";
import type { WorkspaceNodeData } from "@/store/canvas";
import {
AgentCard,
FilterChips,
RemoteBadge,
classifyForFilter,
toMobileAgent,
type MobileAgent,
type AgentFilter,
} from "../components";
// ─── Mock store ────────────────────────────────────────────────────────────────
// Indirection spy: the hoisted vi.mock factory forwards to mockSummarize so
// each test can set its own return value via mockSummarize.mockReturnValue.
const mockSummarize = vi.fn();
vi.mock("@/store/canvas", () => ({
  summarizeWorkspaceCapabilities: (...args: unknown[]) => mockSummarize(...args),
}));
// ─── Helpers ─────────────────────────────────────────────────────────────────
// Build an xyflow node fixture with a fixed id; `overrides` shallow-merges
// into the data payload so tests can tweak individual fields.
function makeNode(overrides: Partial<WorkspaceNodeData> = {}): Node<WorkspaceNodeData> {
  const baseData = {
    name: "Test Agent",
    status: "online",
    tier: 2,
    agentCard: null,
    activeTasks: 0,
    collapsed: false,
    role: "assistant",
    lastErrorRate: 0,
    lastSampleError: "",
    url: "http://localhost:9000",
    parentId: null,
    runtime: "langgraph",
    currentTask: "",
    budgetLimit: null,
  };
  return {
    id: "ws-1",
    position: { x: 0, y: 0 },
    data: { ...baseData, ...overrides } as WorkspaceNodeData,
  };
}
// ─── toMobileAgent ────────────────────────────────────────────────────────────
describe("toMobileAgent — basic fields", () => {
beforeEach(() => {
mockSummarize.mockReturnValue({
runtime: "langgraph",
skills: [],
skillCount: 0,
currentTask: "",
hasActiveTask: false,
});
});
it("maps id and name", () => {
const node = makeNode({ name: "My Agent" });
const agent = toMobileAgent(node);
expect(agent.id).toBe("ws-1");
expect(agent.name).toBe("My Agent");
});
it("uses id as name when name is empty", () => {
const node = makeNode({ name: "" });
const agent = toMobileAgent(node);
expect(agent.name).toBe("ws-1");
});
it("maps tier correctly for tier 1-4", () => {
const tiers: Array<[number, MobileAgent["tier"]]> = [
[1, "T1"],
[2, "T2"],
[3, "T3"],
[4, "T4"],
];
for (const [tier, code] of tiers) {
const agent = toMobileAgent(makeNode({ tier }));
expect(agent.tier).toBe(code);
}
});
it("maps status to MobileStatus", () => {
const statuses: Array<[string, MobileAgent["status"]]> = [
["online", "online"],
["starting", "starting"],
["degraded", "degraded"],
["failed", "failed"],
["paused", "paused"],
["offline", "offline"],
];
for (const [status, mobileStatus] of statuses) {
const agent = toMobileAgent(makeNode({ status }));
expect(agent.status).toBe(mobileStatus);
}
});
it("marks remote=true for external runtime", () => {
mockSummarize.mockReturnValue({ runtime: "external", skills: [], skillCount: 0, currentTask: "", hasActiveTask: false });
const agent = toMobileAgent(makeNode({ runtime: "external" }));
expect(agent.remote).toBe(true);
});
it("marks remote=false for non-external runtime", () => {
mockSummarize.mockReturnValue({ runtime: "langgraph", skills: [], skillCount: 0, currentTask: "", hasActiveTask: false });
const agent = toMobileAgent(makeNode({ runtime: "langgraph" }));
expect(agent.remote).toBe(false);
});
it("maps runtime from summarizeWorkspaceCapabilities", () => {
mockSummarize.mockReturnValue({ runtime: "claude-code", skills: [], skillCount: 0, currentTask: "", hasActiveTask: false });
const agent = toMobileAgent(makeNode({ runtime: "" }));
expect(agent.runtime).toBe("claude-code");
});
it("maps skills count from summarizeWorkspaceCapabilities", () => {
mockSummarize.mockReturnValue({ runtime: "langgraph", skills: ["skill1", "skill2"], skillCount: 2, currentTask: "", hasActiveTask: false });
const agent = toMobileAgent(makeNode());
expect(agent.skills).toBe(2);
});
it("maps activeTasks to calls", () => {
const agent = toMobileAgent(makeNode({ activeTasks: 5 }));
expect(agent.calls).toBe(5);
});
it("defaults calls to 0 when activeTasks is not a number", () => {
const node = makeNode() as Node<WorkspaceNodeData>;
node.data.activeTasks = "not a number" as unknown as number;
const agent = toMobileAgent(node);
expect(agent.calls).toBe(0);
});
it("maps role as desc fallback to currentTask", () => {
mockSummarize.mockReturnValue({ runtime: "langgraph", skills: [], skillCount: 0, currentTask: "Doing analysis", hasActiveTask: true });
const agent = toMobileAgent(makeNode({ role: "" }));
expect(agent.desc).toBe("Doing analysis");
});
it("uses role as desc when currentTask is empty", () => {
mockSummarize.mockReturnValue({ runtime: "langgraph", skills: [], skillCount: 0, currentTask: "", hasActiveTask: false });
const agent = toMobileAgent(makeNode({ role: "researcher" }));
expect(agent.desc).toBe("researcher");
});
it("maps parentId from node data", () => {
const node = makeNode({ parentId: "ws-parent" });
const agent = toMobileAgent(node);
expect(agent.parentId).toBe("ws-parent");
});
});
// ─── classifyForFilter ─────────────────────────────────────────────────────────
describe("classifyForFilter", () => {
const cases: Array<[MobileAgent["status"], AgentFilter]> = [
["online", "online"],
["starting", "paused"],
["degraded", "issue"],
["failed", "issue"],
["paused", "paused"],
["offline", "paused"],
];
it.each(cases)("normalizeStatus(%s) → %s", (status, expected) => {
expect(classifyForFilter(status)).toBe(expected);
});
});

View File

@ -1,137 +0,0 @@
/** @vitest-environment jsdom */
/**
* Tests for rendering components exported from components.tsx:
* RemoteBadge, WorkspacePill.
*
* Note: TabBar, FilterChips, AgentCard are tested in their own files.
* toMobileAgent and classifyForFilter are tested in components.test.ts.
*/
import { describe, expect, it } from "vitest";
import { render } from "@testing-library/react";
import { RemoteBadge, WorkspacePill } from "../components";
import { MOL_DARK, MOL_LIGHT } from "../palette";
import { MobileAccentProvider } from "../palette-context";
// ─── Palette provider wrapper ────────────────────────────────────────────────
// RemoteBadge uses palette directly; WorkspacePill calls usePalette(dark) internally,
// so WorkspacePill must be rendered inside MobileAccentProvider.
function renderWithProvider(ui: React.ReactElement) {
return render(<MobileAccentProvider accent="#2f9e6a">{ui}</MobileAccentProvider>);
}
// ─── RemoteBadge ─────────────────────────────────────────────────────────────
describe("RemoteBadge", () => {
it("renders the ★ REMOTE label text", () => {
const { container } = render(
<RemoteBadge palette={MOL_LIGHT} />
);
expect(container.textContent).toContain("REMOTE");
expect(container.textContent).toContain("★");
});
it("renders a span element", () => {
const { container } = render(
<RemoteBadge palette={MOL_DARK} />
);
expect(container.querySelector("span")).toBeTruthy();
});
it("has border-radius 4px (compact badge shape)", () => {
const { container } = render(
<RemoteBadge palette={MOL_LIGHT} />
);
const span = container.querySelector("span") as HTMLSpanElement;
expect(span.style.borderRadius).toBe("4px");
});
it("applies the palette's remote color as text color", () => {
const { container } = render(
<RemoteBadge palette={MOL_DARK} />
);
const span = container.querySelector("span") as HTMLSpanElement;
expect(span.style.color).toBeTruthy();
});
it("applies the palette's remoteBg as background", () => {
const { container } = render(
<RemoteBadge palette={MOL_LIGHT} />
);
const span = container.querySelector("span") as HTMLSpanElement;
expect(span.style.background).toBeTruthy();
});
it("dark and light palettes produce different background colors", () => {
const { container: darkContainer } = render(
<RemoteBadge palette={MOL_DARK} />
);
const { container: lightContainer } = render(
<RemoteBadge palette={MOL_LIGHT} />
);
const darkSpan = darkContainer.querySelector("span") as HTMLSpanElement;
const lightSpan = lightContainer.querySelector("span") as HTMLSpanElement;
expect(darkSpan.style.background).not.toBe(lightSpan.style.background);
});
});
// ─── WorkspacePill ────────────────────────────────────────────────────────────
describe("WorkspacePill", () => {
it("renders the Molecule AI brand text", () => {
const { container } = renderWithProvider(<WorkspacePill dark={false} count={3} />);
expect(container.textContent).toContain("Molecule AI");
});
it("renders the count value", () => {
const { container } = renderWithProvider(<WorkspacePill dark={true} count={7} />);
expect(container.textContent).toContain("7");
});
it("accepts a string count (e.g. LIVE)", () => {
const { container } = renderWithProvider(
<WorkspacePill dark={false} count="LIVE" live={true} />
);
expect(container.textContent).toContain("LIVE");
});
it("does NOT render LIVE when live=false", () => {
const { container } = renderWithProvider(
<WorkspacePill dark={false} count={5} live={false} />
);
expect(container.textContent).not.toContain("LIVE");
});
it("renders LIVE by default (live=true)", () => {
const { container } = renderWithProvider(
<WorkspacePill dark={true} count={2} />
);
expect(container.textContent).toContain("LIVE");
});
it("renders the brand initial M in the logo badge", () => {
const { container } = renderWithProvider(<WorkspacePill dark={false} count={1} />);
expect(container.textContent).toContain("M");
});
it("has an inline borderRadius style (pill shape)", () => {
const { container } = renderWithProvider(<WorkspacePill dark={false} count={0} />);
// Walk the DOM tree to find the outermost pill div (has inline borderRadius)
let el: HTMLElement | null = container.firstElementChild as HTMLElement | null;
while (el && !el.style.borderRadius) {
el = el.parentElement;
}
expect(el?.style.borderRadius).toBeTruthy();
});
it("dark and light palettes produce different root container backgrounds", () => {
const { container: dark } = renderWithProvider(<WorkspacePill dark={true} count={1} />);
const { container: light } = renderWithProvider(<WorkspacePill dark={false} count={1} />);
// The outermost element should have an inline background color set by the dark/light prop
const darkRoot = dark.firstElementChild as HTMLElement | null;
const lightRoot = light.firstElementChild as HTMLElement | null;
expect(darkRoot?.style.background).toBeTruthy();
expect(lightRoot?.style.background).toBeTruthy();
});
});

View File

@ -1,161 +0,0 @@
// @vitest-environment jsdom
/**
* Mobile primitives StatusDot, TierChip, Chip, SectionLabel.
*
* NOTE: No @testing-library/jest-dom use DOM APIs.
*/
import { afterEach, describe, expect, it } from "vitest";
import { cleanup, render } from "@testing-library/react";
import React from "react";
import { Chip, SectionLabel, StatusDot, TierChip } from "../primitives";
// Unmount all rendered trees after every test so cases stay isolated.
afterEach(() => cleanup());
// ─── StatusDot ──────────────────────────────────────────────────────────────
describe("StatusDot", () => {
it("renders a span with correct size", () => {
const { container } = render(<StatusDot size={12} />);
const span = container.querySelector("span") as HTMLSpanElement;
expect(span).toBeTruthy();
expect(span.style.width).toBe("12px");
expect(span.style.height).toBe("12px");
});
it("has border-radius 999 (circle)", () => {
const { container } = render(<StatusDot size={8} />);
const span = container.querySelector("span") as HTMLSpanElement;
expect(span.style.borderRadius).toBe("999px");
});
it("has flexShrink: 0 to prevent collapsing in flex rows", () => {
const { container } = render(<StatusDot size={6} />);
const span = container.querySelector("span") as HTMLSpanElement;
expect(span.style.flexShrink).toBe("0");
});
it("has halo boxShadow by default (halo=true)", () => {
const { container } = render(<StatusDot size={8} />);
const span = container.querySelector("span") as HTMLSpanElement;
// Math.max(2, 8*0.45) = Math.max(2, 3.6) = 3.6 → "3.6px"
expect(span.style.boxShadow).toContain("px");
});
it("has no boxShadow when halo=false", () => {
const { container } = render(<StatusDot size={8} halo={false} />);
const span = container.querySelector("span") as HTMLSpanElement;
expect(span.style.boxShadow).toBe("none");
});
it("renders with default props (size=8, halo=true, status=online)", () => {
const { container } = render(<StatusDot />);
const span = container.querySelector("span") as HTMLSpanElement;
expect(span.style.width).toBe("8px");
expect(span.style.height).toBe("8px");
expect(span.style.boxShadow).not.toBe("none");
});
});
// ─── TierChip ───────────────────────────────────────────────────────────────
describe("TierChip", () => {
it("renders the tier text inside a span", () => {
const { container } = render(<TierChip tier="T1" />);
expect(container.textContent).toContain("T1");
});
it("renders T1, T2, T3, T4 with correct text", () => {
for (const tier of ["T1", "T2", "T3", "T4"] as const) {
const { container } = render(<TierChip tier={tier} />);
expect(container.textContent).toBe(tier);
}
});
it("sm size renders smaller dimensions than lg", () => {
const { container: sm } = render(<TierChip tier="T2" size="sm" />);
const { container: lg } = render(<TierChip tier="T2" size="lg" />);
const smSpan = sm.querySelector("span") as HTMLSpanElement;
const lgSpan = lg.querySelector("span") as HTMLSpanElement;
expect(smSpan.style.width).toBe("26px");
expect(smSpan.style.height).toBe("19px");
expect(lgSpan.style.width).toBe("32px");
expect(lgSpan.style.height).toBe("22px");
});
it("uses flexShrink: 0 to prevent collapsing", () => {
const { container } = render(<TierChip tier="T3" />);
const span = container.querySelector("span") as HTMLSpanElement;
expect(span.style.flexShrink).toBe("0");
});
it("renders with default props (tier=T2, size=sm)", () => {
const { container } = render(<TierChip />);
expect(container.textContent).toBe("T2");
const span = container.querySelector("span") as HTMLSpanElement;
expect(span.style.width).toBe("26px");
});
});
// ─── Chip ───────────────────────────────────────────────────────────────────
describe("Chip", () => {
it("renders the value text", () => {
const { container } = render(<Chip value="12 skills" />);
expect(container.textContent).toContain("12 skills");
});
it("renders label + value when label is provided", () => {
const { container } = render(<Chip label="SKILLS" value="3" />);
const text = container.textContent ?? "";
expect(text).toContain("SKILLS");
expect(text).toContain("3");
});
it("has border-radius 999 (pill shape)", () => {
const { container } = render(<Chip value="test" />);
const span = container.querySelector("span") as HTMLSpanElement;
expect(span.style.borderRadius).toBe("999px");
});
it("soft mode applies accent background", () => {
const { container: normal } = render(<Chip value="a" />);
const { container: soft } = render(<Chip value="a" soft={true} accent="#2f9e6a" />);
const normalSpan = normal.querySelector("span") as HTMLSpanElement;
const softSpan = soft.querySelector("span") as HTMLSpanElement;
// soft uses accent+1a hex, normal uses dark/light hardcoded
expect(normalSpan.style.background).toBeTruthy();
expect(softSpan.style.background).toBeTruthy();
expect(normalSpan.style.background).not.toBe(softSpan.style.background);
});
});
// ─── SectionLabel ───────────────────────────────────────────────────────────
describe("SectionLabel", () => {
it("renders children text", () => {
const { container } = render(<SectionLabel>Runtime config</SectionLabel>);
expect(container.textContent).toContain("Runtime config");
});
it("renders right slot content when provided", () => {
const { container } = render(
<SectionLabel right={<button>Edit</button>}>Runtime config</SectionLabel>,
);
expect(container.textContent).toContain("Edit");
expect(container.querySelector("button")).toBeTruthy();
});
it("renders without right slot", () => {
const { container } = render(<SectionLabel>Runtime config</SectionLabel>);
expect(container.querySelector("button")).toBeNull();
});
it("uses uppercase text transform", () => {
const { container } = render(<SectionLabel>Runtime config</SectionLabel>);
const div = container.querySelector("div") as HTMLDivElement;
expect(div.style.textTransform).toBe("uppercase");
});
});

View File

@ -72,33 +72,8 @@ export function TabBar({
{ id: "comms", label: "Comms", icon: "pulse" },
{ id: "me", label: "Me", icon: "user" },
];
const handleKeyDown = (e: React.KeyboardEvent, idx: number) => {
let nextIdx: number | null = null;
if (e.key === "ArrowRight" || e.key === "ArrowDown") {
nextIdx = (idx + 1) % tabs.length;
} else if (e.key === "ArrowLeft" || e.key === "ArrowUp") {
nextIdx = (idx - 1 + tabs.length) % tabs.length;
} else if (e.key === "Home") {
nextIdx = 0;
} else if (e.key === "End") {
nextIdx = tabs.length - 1;
}
if (nextIdx !== null) {
e.preventDefault();
onChange(tabs[nextIdx]!.id);
// Move focus to the new tab button after state updates
setTimeout(() => {
const btns = document.querySelectorAll('[role="tab"]');
(btns[nextIdx!] as HTMLButtonElement | null)?.focus();
}, 0);
}
};
return (
<div
role="tablist"
aria-label="Mobile navigation"
style={{
position: "absolute",
left: 14,
@ -120,18 +95,13 @@ export function TabBar({
padding: "0 10px",
}}
>
{tabs.map((t, idx) => {
{tabs.map((t) => {
const on = active === t.id;
return (
<button
key={t.id}
role="tab"
type="button"
tabIndex={on ? 0 : -1}
aria-selected={on}
aria-label={t.label}
onClick={() => onChange(t.id)}
onKeyDown={(e) => handleKeyDown(e, idx)}
style={{
background: "none",
border: "none",
@ -146,7 +116,6 @@ export function TabBar({
}}
>
<span
aria-hidden="true"
style={{
width: 36,
height: 28,
@ -287,7 +256,6 @@ export function AgentCard({
return (
<button
type="button"
aria-label={`${agent.name}, status: ${agent.status}, tier ${agent.tier}${agent.remote ? ", remote" : ""}`}
onClick={onClick}
style={{
display: "block",
@ -421,9 +389,6 @@ export function FilterChips({
];
return (
<div
role="toolbar"
aria-label="Filter agents"
aria-activedescendant={value ? `filter-${value}` : undefined}
style={{
display: "flex",
gap: 6,
@ -437,10 +402,7 @@ export function FilterChips({
return (
<button
key={o.id}
id={`filter-${o.id}`}
role="radio"
type="button"
aria-checked={on}
onClick={() => onChange(o.id)}
style={{
display: "inline-flex",
@ -460,7 +422,6 @@ export function FilterChips({
>
{o.label}
<span
aria-hidden="true"
style={{
fontSize: 10.5,
opacity: 0.7,

View File

@ -1,6 +1,5 @@
'use client';
import { useRef } from 'react';
import * as AlertDialog from '@radix-ui/react-alert-dialog';
interface UnsavedChangesGuardProps {
@ -22,30 +21,11 @@ export function UnsavedChangesGuard({
onKeepEditing,
onDiscard,
}: UnsavedChangesGuardProps) {
const pendingDiscard = useRef(false);
return (
<AlertDialog.Root
open={open}
onOpenChange={(o) => {
if (!o) {
if (pendingDiscard.current) {
pendingDiscard.current = false;
onDiscard();
} else {
onKeepEditing();
}
}
}}
>
<AlertDialog.Root open={open} onOpenChange={(o) => { if (!o) onKeepEditing(); }}>
<AlertDialog.Portal>
<AlertDialog.Overlay className="guard-dialog__overlay" />
<AlertDialog.Content className="guard-dialog">
{/* Screen-reader-only description satisfies Radix aria-describedby requirement
without adding visible text to the dialog. */}
<AlertDialog.Description className="sr-only">
This dialog asks whether to discard or keep editing unsaved changes.
</AlertDialog.Description>
<AlertDialog.Title className="guard-dialog__title">
Discard unsaved changes?
</AlertDialog.Title>
@ -55,15 +35,8 @@ export function UnsavedChangesGuard({
Keep editing
</button>
</AlertDialog.Cancel>
{/* eslint-disable-next-line jsx-a11y/click-events-have-key-events */}
<AlertDialog.Action asChild>
<button
type="button"
className="guard-dialog__discard-btn"
onClick={() => {
pendingDiscard.current = true;
}}
>
<button type="button" className="guard-dialog__discard-btn">
Discard
</button>
</AlertDialog.Action>

View File

@ -1,340 +0,0 @@
// @vitest-environment jsdom
/**
* Tests for AddKeyForm inline form for adding a new API key.
*
* Covers:
* - Header + key name + value fields rendered
* - Key name auto-uppercased on input
* - Validation: UPPER_SNAKE_CASE required, duplicate name blocked
* - Provider hint shown for known providers (GitHub, Anthropic, OpenRouter)
* - Provider hint hidden for custom key names
* - Debounced value validation
* - Save button disabled when form invalid / saving
* - createSecret called on save with correct args
* - onCancel called on Cancel click
* - Save error shown on failure
* - TestConnectionButton shown when value is format-valid and provider supports it
*/
import React from "react";
import { render, screen, fireEvent, cleanup, act, waitFor } from "@testing-library/react";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { AddKeyForm } from "../AddKeyForm";
// ── Mocks ─────────────────────────────────────────────────────────────────────
// vi.hoisted lifts these spies above the vi.mock factories below (which
// vitest hoists to the top of the module), so the factories can reference them.
const { mockValidateSecretValue, mockIsValidKeyName, mockInferGroup } = vi.hoisted(() => ({
  mockValidateSecretValue: vi.fn((value: string) => {
    // Return error for "bad-value" to test ValidationHint display
    if (value === "bad-value") return "Invalid format";
    return null;
  }),
  mockIsValidKeyName: vi.fn((name: string) => /^[A-Z][A-Z0-9_]*$/.test(name)),
  mockInferGroup: vi.fn((name: string) => {
    const u = name.toUpperCase();
    if (u.includes("GITHUB")) return "github" as const;
    if (u.includes("ANTHROPIC")) return "anthropic" as const;
    if (u.includes("OPENROUTER")) return "openrouter" as const;
    return "custom" as const;
  }),
}));
// Store action spy; reset and re-stubbed per test in beforeEach below.
const mockCreateSecret = vi.fn();
// The store mock supports both call styles (with a selector and without),
// plus the static getState() accessor used outside React components.
vi.mock("@/stores/secrets-store", () => ({
  useSecretsStore: Object.assign(
    vi.fn((selector?: (s: { createSecret: typeof mockCreateSecret }) => unknown) =>
      selector ? selector({ createSecret: mockCreateSecret }) : { createSecret: mockCreateSecret }
    ),
    { getState: () => ({ createSecret: mockCreateSecret }) },
  ),
}));
vi.mock("@/lib/validation/secret-formats", () => ({
  validateSecretValue: mockValidateSecretValue,
  isValidKeyName: mockIsValidKeyName,
  inferGroup: mockInferGroup,
}));
// Minimal SERVICES table: three providers with testSupported=true plus the
// custom fallback, matching the groups mockInferGroup can return.
vi.mock("@/lib/services", () => ({
  SERVICES: {
    github: { label: "GitHub", icon: "github", keyNames: [], docsUrl: "https://github.com", testSupported: true },
    anthropic: { label: "Anthropic", icon: "anthropic", keyNames: [], docsUrl: "https://anthropic.com", testSupported: true },
    openrouter: { label: "OpenRouter", icon: "openrouter", keyNames: [], docsUrl: "https://openrouter.ai", testSupported: true },
    custom: { label: "Other", icon: "key", keyNames: [], docsUrl: "", testSupported: false },
  },
  KEY_NAME_SUGGESTIONS: [],
}));
// Replace the rich value editor with a plain <textarea> so tests can drive
// it with fireEvent.change and find it by data-testid.
vi.mock("@/components/ui/KeyValueField", () => ({
  KeyValueField: ({ value, onChange, disabled }: { value: string; onChange: (v: string) => void; disabled?: boolean }) => (
    <textarea
      data-testid="key-value-field"
      value={value}
      onChange={(e) => onChange(e.target.value)}
      disabled={disabled}
      aria-label="Key value"
    />
  ),
}));
// ValidationHint reduced to a role=alert span so getByRole("alert") works.
vi.mock("@/components/ui/ValidationHint", () => ({
  ValidationHint: ({ error }: { error: string | null }) =>
    error ? <span role="alert">{error}</span> : null,
}));
vi.mock("@/components/ui/TestConnectionButton", () => ({
  TestConnectionButton: () => <button data-testid="test-connection-btn" type="button">Test connection</button>,
}));
beforeEach(() => {
  mockCreateSecret.mockReset().mockResolvedValue(undefined);
});
afterEach(() => {
  cleanup();
  // Restore real timers in case a test enabled fake timers and failed early.
  vi.useRealTimers();
});
// ── Helpers ──────────────────────────────────────────────────────────────────
async function typeKeyName(name: string) {
  // Fire the change event, then flush pending microtasks inside act()
  // so the resulting state updates settle before the caller asserts.
  fireEvent.change(screen.getByLabelText("Key name"), { target: { value: name } });
  await act(async () => { await Promise.resolve(); });
}
async function typeValue(val: string) {
  // Drive the mocked KeyValueField textarea and flush microtasks in act().
  fireEvent.change(screen.getByTestId("key-value-field"), { target: { value: val } });
  await act(async () => { await Promise.resolve(); });
}
// ─── Initial render ─────────────────────────────────────────────────────────
describe("AddKeyForm — initial render", () => {
it("renders header 'Add New Key'", () => {
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
expect(screen.getByText("Add New Key")).toBeTruthy();
});
it("has key name and value inputs", () => {
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
expect(screen.getByLabelText("Key name")).toBeTruthy();
expect(screen.getByTestId("key-value-field")).toBeTruthy();
});
it("Save and Cancel buttons present", () => {
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
expect(screen.getByRole("button", { name: /save key/i })).toBeTruthy();
expect(screen.getByRole("button", { name: /cancel/i })).toBeTruthy();
});
it("Save button disabled initially", () => {
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
expect((screen.getByRole("button", { name: /save key/i }) as HTMLButtonElement).disabled).toBe(true);
});
});
// ─── Key name validation ────────────────────────────────────────────────────
describe("AddKeyForm — key name validation", () => {
it("auto-uppercases key name input", async () => {
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
const input = screen.getByLabelText("Key name") as HTMLInputElement;
fireEvent.change(input, { target: { value: "github_token" } });
expect(input.value).toBe("GITHUB_TOKEN");
});
it("shows error for key name starting with digit (invalid UPPER_SNAKE_CASE)", async () => {
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
// The key name input auto-uppercases, so "123_token" → "123_TOKEN"
// which fails /^[A-Z][A-Z0-9_]*$/ (must start with uppercase letter)
const input = screen.getByLabelText("Key name");
fireEvent.change(input, { target: { value: "123_token" } });
await act(async () => { await Promise.resolve(); });
expect(screen.getByRole("alert")).toBeTruthy();
expect(screen.getByText(/upper_snake_case/i)).toBeTruthy();
});
it("shows error for key name starting with number", async () => {
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
await typeKeyName("123_TOKEN");
expect(screen.getByText(/upper_snake_case/i)).toBeTruthy();
});
it("shows duplicate error when key name already exists", async () => {
render(<AddKeyForm workspaceId="ws-1" existingNames={["ANTHROPIC_API_KEY"]} onCancel={vi.fn()} />);
await typeKeyName("ANTHROPIC_API_KEY");
await act(async () => { await Promise.resolve(); });
expect(screen.getByText(/already exists/i)).toBeTruthy();
});
it("no error for valid new key name", async () => {
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
await typeKeyName("MY_SECRET_KEY");
await act(async () => { await Promise.resolve(); });
expect(screen.queryByRole("alert")).toBeNull();
});
});
// ─── Provider hint ──────────────────────────────────────────────────────────
describe("AddKeyForm — provider hint", () => {
it("shows provider hint for ANTHROPIC_API_KEY (known provider)", async () => {
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
await typeKeyName("ANTHROPIC_API_KEY");
await act(async () => { await Promise.resolve(); });
expect(screen.getByTestId("provider-hint")).toBeTruthy();
expect(screen.getByText("Anthropic")).toBeTruthy();
});
it("shows provider hint for GITHUB_TOKEN", async () => {
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
await typeKeyName("GITHUB_TOKEN");
await act(async () => { await Promise.resolve(); });
expect(screen.getByTestId("provider-hint")).toBeTruthy();
expect(screen.getByText("GitHub")).toBeTruthy();
});
it("shows provider hint for OPENROUTER_API_KEY", async () => {
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
await typeKeyName("OPENROUTER_API_KEY");
await act(async () => { await Promise.resolve(); });
expect(screen.getByTestId("provider-hint")).toBeTruthy();
expect(screen.getByText("OpenRouter")).toBeTruthy();
});
it("hides provider hint for unknown custom key name", async () => {
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
await typeKeyName("MY_CUSTOM_TOKEN");
await act(async () => { await Promise.resolve(); });
expect(screen.queryByTestId("provider-hint")).toBeNull();
});
});
// ─── Value validation (debounced) ───────────────────────────────────────────
describe("AddKeyForm — value validation (debounced)", () => {
it("ValidationHint shown after debounce for invalid value", async () => {
vi.useFakeTimers();
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
await typeKeyName("ANTHROPIC_API_KEY");
const textarea = screen.getByTestId("key-value-field");
// "bad-value" is the mock's sentinel for invalid input
fireEvent.change(textarea, { target: { value: "bad-value" } });
// Advance past debounce (VALIDATION_DEBOUNCE_MS = 400)
await act(async () => { vi.advanceTimersByTime(400); });
expect(screen.getByRole("alert")).toBeTruthy();
vi.useRealTimers();
});
});
// ─── Save ───────────────────────────────────────────────────────────────────
describe("AddKeyForm — save", () => {
it("Save button disabled when key name or value missing", () => {
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
const saveBtn = screen.getByRole("button", { name: /save key/i });
expect((saveBtn as HTMLButtonElement).disabled).toBe(true);
});
it("Save button enabled when valid key name + value", async () => {
vi.useFakeTimers();
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
await typeKeyName("ANTHROPIC_API_KEY");
await typeValue("GITHUB_FAKE_VALUE_FOR_TEST");
await act(async () => { vi.advanceTimersByTime(400); });
const saveBtn = screen.getByRole("button", { name: /save key/i });
expect((saveBtn as HTMLButtonElement).disabled).toBe(false);
vi.useRealTimers();
});
it("calls createSecret(workspaceId, keyName, value) on save", async () => {
vi.useFakeTimers();
render(<AddKeyForm workspaceId="ws-test" existingNames={[]} onCancel={vi.fn()} />);
await typeKeyName("ANTHROPIC_API_KEY");
await typeValue("GITHUB_FAKE_VALUE_FOR_TEST");
await act(async () => { vi.advanceTimersByTime(400); });
fireEvent.click(screen.getByRole("button", { name: /save key/i }));
await act(async () => { vi.advanceTimersByTime(0); });
expect(mockCreateSecret).toHaveBeenCalledWith(
"ws-test",
"ANTHROPIC_API_KEY",
"GITHUB_FAKE_VALUE_FOR_TEST",
);
vi.useRealTimers();
});
it("Save button shows 'Saving…' during save", async () => {
vi.useFakeTimers();
mockCreateSecret.mockImplementation(() => new Promise(() => {}));
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
await typeKeyName("ANTHROPIC_API_KEY");
await typeValue("GITHUB_FAKE_VALUE_FOR_TEST");
await act(async () => { vi.advanceTimersByTime(400); });
fireEvent.click(screen.getByRole("button", { name: /save key/i }));
await act(async () => { vi.advanceTimersByTime(0); });
expect(screen.getByRole("button", { name: /saving/i })).toBeTruthy();
vi.useRealTimers();
});
it("shows error on save failure", async () => {
mockCreateSecret.mockRejectedValue(new Error("network error"));
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
await typeKeyName("ANTHROPIC_API_KEY");
await typeValue("GITHUB_FAKE_VALUE_FOR_TEST");
fireEvent.click(screen.getByRole("button", { name: /save key/i }));
await act(async () => { await Promise.resolve(); });
expect(screen.getByText(/network error/i)).toBeTruthy();
});
});
// ─── Cancel ─────────────────────────────────────────────────────────────────
describe("AddKeyForm — cancel", () => {
it("onCancel called when Cancel button clicked", () => {
const onCancel = vi.fn();
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={onCancel} />);
fireEvent.click(screen.getByRole("button", { name: /cancel/i }));
expect(onCancel).toHaveBeenCalled();
});
it("Cancel button disabled during save", async () => {
vi.useFakeTimers();
mockCreateSecret.mockImplementation(() => new Promise(() => {}));
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
await typeKeyName("ANTHROPIC_API_KEY");
await typeValue("GITHUB_FAKE_VALUE_FOR_TEST");
await act(async () => { vi.advanceTimersByTime(400); });
fireEvent.click(screen.getByRole("button", { name: /save key/i }));
await act(async () => { vi.advanceTimersByTime(0); });
expect((screen.getByRole("button", { name: /cancel/i }) as HTMLButtonElement).disabled).toBe(true);
vi.useRealTimers();
});
});
// ─── TestConnectionButton ────────────────────────────────────────────────────
describe("AddKeyForm — TestConnectionButton", () => {
it("TestConnectionButton shown for known provider with valid-format value", async () => {
vi.useFakeTimers();
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
await typeKeyName("ANTHROPIC_API_KEY");
// Use a value that passes the regex (sk-ant- prefix + 90+ chars)
const validValue = "GHP_FAKEPLACEHOLDER_NOTREAL_ABCDEFGHIJKLMNOPQRSTUVWXYZ12345678901234567890";
await typeValue(validValue);
await act(async () => { vi.advanceTimersByTime(400); });
expect(screen.getByTestId("test-connection-btn")).toBeTruthy();
vi.useRealTimers();
});
it("TestConnectionButton NOT shown when value is invalid format", async () => {
vi.useFakeTimers();
render(<AddKeyForm workspaceId="ws-1" existingNames={[]} onCancel={vi.fn()} />);
await typeKeyName("ANTHROPIC_API_KEY");
await typeValue("bad-value");
await act(async () => { vi.advanceTimersByTime(400); });
expect(screen.queryByTestId("test-connection-btn")).toBeNull();
vi.useRealTimers();
});
});

View File

@ -1,225 +0,0 @@
// @vitest-environment jsdom
/**
* DeleteConfirmDialog destructive confirmation for deleting a secret key.
*
* Per spec §3.5 & §4.5:
* - Opens via window 'secret:delete-request' custom event
* - Shows title "Delete \"{name}\"?"
* - Fetches dependents live on open
* - Delete button disabled for 1s (CONFIRM_DELAY_MS)
* - Focus-trapped (AlertDialog)
*
* NOTE: No @testing-library/jest-dom import use DOM APIs.
*
* Covers:
* - Does not render when no delete request pending
* - Renders dialog when secret:delete-request fires
* - Title contains secret name
* - Cancel and Delete buttons present
* - role=alertdialog on dialog content
* - Delete button disabled initially (1s delay)
* - Delete button enabled after delay
* - Loading state while fetching dependents
* - Shows dependents list when present
* - Shows no-dependents message when none
* - Cancel closes dialog
* - Delete button calls deleteSecret and shows Deleting state
*/
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { act, cleanup, fireEvent, render, waitFor } from "@testing-library/react";
import React from "react";
import { DeleteConfirmDialog } from "../DeleteConfirmDialog";
// ─── Mocks ─────────────────────────────────────────────────────────────────────
// Module-level spies referenced by the hoisted vi.mock factories below.
const _mockDeleteSecret = vi.fn<() => Promise<void>>();
const _mockFetchDependents = vi.fn<() => Promise<string[]>>();
vi.mock("@/stores/secrets-store", () => ({
  // Supports both the selector and the whole-state call style.
  useSecretsStore: (selector?: (s: { deleteSecret: () => Promise<void> }) => unknown) => {
    const state = { deleteSecret: _mockDeleteSecret };
    return selector ? selector(state) : state;
  },
}));
vi.mock("@/lib/api/secrets", () => ({
  // Forward both args to the spy so tests can assert what was requested.
  fetchDependents: (workspaceId: string, name: string) =>
    _mockFetchDependents(workspaceId, name),
}));
afterEach(() => {
  cleanup();
  vi.restoreAllMocks();
  vi.resetModules();
});
beforeEach(() => {
  // Default behavior per test: delete succeeds, no dependent agents.
  _mockDeleteSecret.mockResolvedValue(undefined);
  _mockFetchDependents.mockResolvedValue([]);
});
// ─── Helpers ───────────────────────────────────────────────────────────────────
/** Dispatches secret:delete-request inside act() so React processes the event. */
function fireDeleteRequest(secretName: string) {
  // Build the event up front, then dispatch inside act() so React
  // processes the resulting state updates before the caller asserts.
  const event = new CustomEvent("secret:delete-request", { detail: secretName });
  act(() => {
    window.dispatchEvent(event);
  });
}
// ─── Render ────────────────────────────────────────────────────────────────────
describe("DeleteConfirmDialog — render", () => {
it("does not render when no delete request pending", () => {
render(<DeleteConfirmDialog workspaceId="ws1" />);
expect(document.body.textContent ?? "").toBe("");
});
it("renders dialog when secret:delete-request fires", () => {
render(<DeleteConfirmDialog workspaceId="ws1" />);
fireDeleteRequest("ANTHROPIC_API_KEY");
expect(document.querySelector('[role="alertdialog"]')).toBeTruthy();
});
it("title contains secret name", () => {
render(<DeleteConfirmDialog workspaceId="ws1" />);
fireDeleteRequest("GITHUB_TOKEN");
const dialog = document.querySelector('[role="alertdialog"]');
expect(dialog?.textContent ?? "").toContain("GITHUB_TOKEN");
});
it("Cancel button present", () => {
render(<DeleteConfirmDialog workspaceId="ws1" />);
fireDeleteRequest("TEST_KEY");
const cancelBtn = Array.from(document.querySelectorAll("button")).find(
(b) => b.textContent?.trim() === "Cancel",
);
expect(cancelBtn).toBeTruthy();
});
it("Delete button present", () => {
render(<DeleteConfirmDialog workspaceId="ws1" />);
fireDeleteRequest("TEST_KEY");
const deleteBtn = Array.from(document.querySelectorAll("button")).find(
(b) => b.textContent?.includes("Delete key"),
);
expect(deleteBtn).toBeTruthy();
});
it("role=alertdialog on dialog content", () => {
render(<DeleteConfirmDialog workspaceId="ws1" />);
fireDeleteRequest("TEST_KEY");
expect(document.querySelector('[role="alertdialog"]')).toBeTruthy();
});
});
// ─── Confirm delay ─────────────────────────────────────────────────────────────
// The dialog arms its Delete button only after a ~1s safety delay. These tests
// use real timers, so the second test sleeps slightly past the 1s mark.
describe("DeleteConfirmDialog — confirm delay", () => {
  it("Delete button disabled initially (< 1s)", () => {
    render(<DeleteConfirmDialog workspaceId="ws1" />);
    fireDeleteRequest("FAST_KEY");
    const deleteBtn = Array.from(document.querySelectorAll("button")).find(
      (b) => b.textContent?.includes("Delete key"),
    ) as HTMLButtonElement;
    expect(deleteBtn.disabled).toBe(true);
  });
  it("Delete button enabled after 1s delay", async () => {
    render(<DeleteConfirmDialog workspaceId="ws1" />);
    fireDeleteRequest("DELAYED_KEY");
    const deleteBtn = Array.from(document.querySelectorAll("button")).find(
      (b) => b.textContent?.includes("Delete key"),
    ) as HTMLButtonElement;
    // Wait just over 1s — real-timer sleep; keeps the test honest about the
    // component's actual arming delay at the cost of ~1s runtime.
    await new Promise((r) => setTimeout(r, 1010));
    expect(deleteBtn.disabled).toBe(false);
  });
});
// ─── Dependents fetch ─────────────────────────────────────────────────────────
// The dialog fetches the list of agents that reference the secret and shows
// either a loading line, the dependents, or a "no agents" message.
describe("DeleteConfirmDialog — dependents", () => {
  it("shows loading state while fetching", () => {
    _mockFetchDependents.mockImplementation(
      () => new Promise(() => {}), // never resolves — pins the loading state
    );
    render(<DeleteConfirmDialog workspaceId="ws1" />);
    fireDeleteRequest("LOADING_KEY");
    expect(document.body.textContent ?? "").toContain("Checking for dependent agents");
  });
  it("shows dependents list when present", async () => {
    _mockFetchDependents.mockResolvedValue(["agent-alpha", "agent-beta"]);
    render(<DeleteConfirmDialog workspaceId="ws1" />);
    fireDeleteRequest("SHARED_KEY");
    // Wait for fetch to resolve — 10ms is enough for a resolved-promise mock
    await new Promise((r) => setTimeout(r, 10));
    expect(document.body.textContent ?? "").toContain("agent-alpha");
  });
  it("shows no-dependents message when none", async () => {
    render(<DeleteConfirmDialog workspaceId="ws1" />);
    fireDeleteRequest("SOLO_KEY");
    await new Promise((r) => setTimeout(r, 10));
    expect(document.body.textContent ?? "").toContain("No agents currently use this key");
  });
  it("fetchDependents called with workspaceId and secretName", async () => {
    render(<DeleteConfirmDialog workspaceId="ws1" />);
    fireDeleteRequest("MY_SECRET");
    await new Promise((r) => setTimeout(r, 10));
    expect(_mockFetchDependents).toHaveBeenCalledWith("ws1", "MY_SECRET");
  });
});
// ─── Interaction ───────────────────────────────────────────────────────────────
describe("DeleteConfirmDialog — interaction", () => {
  it("Cancel closes the dialog", async () => {
    render(<DeleteConfirmDialog workspaceId="ws1" />);
    fireDeleteRequest("CANCEL_KEY");
    expect(document.querySelector('[role="alertdialog"]')).toBeTruthy();
    const cancelBtn = Array.from(document.querySelectorAll("button")).find(
      (b) => b.textContent?.trim() === "Cancel",
    ) as HTMLButtonElement;
    act(() => {
      cancelBtn.click();
    });
    expect(document.querySelector('[role="alertdialog"]')).toBeNull();
  });
  it("Delete calls deleteSecret when enabled and clicked", async () => {
    render(<DeleteConfirmDialog workspaceId="ws1" />);
    fireDeleteRequest("DELETE_ME");
    // Wait for 1s delay — the Delete button is disabled until the confirm
    // delay elapses (see the confirm-delay suite above).
    await new Promise((r) => setTimeout(r, 1010));
    const deleteBtn = Array.from(document.querySelectorAll("button")).find(
      (b) => b.textContent?.includes("Delete key"),
    ) as HTMLButtonElement;
    act(() => {
      deleteBtn.click();
    });
    expect(_mockDeleteSecret).toHaveBeenCalledTimes(1);
  });
  it("Delete button text is 'Delete key' before clicking", async () => {
    render(<DeleteConfirmDialog workspaceId="ws1" />);
    fireDeleteRequest("BTN_TEXT_KEY");
    await new Promise((r) => setTimeout(r, 1010));
    const deleteBtn = Array.from(document.querySelectorAll("button")).find(
      (b) => b.textContent?.includes("Delete key"),
    );
    expect(deleteBtn).toBeTruthy();
    // Confirm text is NOT "Deleting…" before click — the in-flight label must
    // only appear after the delete action starts.
    const deletingBtn = Array.from(document.querySelectorAll("button")).find(
      (b) => (b.textContent ?? "").includes("Deleting"),
    );
    expect(deletingBtn).toBeUndefined();
  });
});

View File

@ -1,82 +0,0 @@
// @vitest-environment jsdom
/**
* Settings EmptyState shown when no secrets exist.
*
* Per spec §3.2:
* 🔑
* No API keys yet
* Add your API keys to let agents connect
* to GitHub, Anthropic, OpenRouter, and more.
* [+ Add your first API key]
*
 * NOTE: No @testing-library/jest-dom import — use plain DOM APIs instead.
*
* Covers:
* - Icon is aria-hidden (decorative)
* - Title text is "No API keys yet"
* - Body text contains service names
* - CTA button has correct text
* - onAddFirst called when CTA button clicked
* - CTA button is the only button
*/
import { afterEach, describe, expect, it, vi } from "vitest";
import { cleanup, render } from "@testing-library/react";
import React from "react";
import { EmptyState } from "../EmptyState";
afterEach(() => {
  // Unmount rendered trees and restore any spies between tests.
  cleanup();
  vi.restoreAllMocks();
});
// ─── Render ────────────────────────────────────────────────────────────────────
describe("Settings EmptyState — render", () => {
  it("icon is aria-hidden", () => {
    const { container } = render(<EmptyState onAddFirst={vi.fn()} />);
    const decorativeIcon = container.querySelector('[aria-hidden="true"]');
    expect(decorativeIcon).toBeTruthy();
    expect(decorativeIcon?.textContent).toContain("🔑");
  });

  it("title text is 'No API keys yet'", () => {
    render(<EmptyState onAddFirst={vi.fn()} />);
    expect(document.body.textContent).toContain("No API keys yet");
  });

  it("body text contains service names", () => {
    render(<EmptyState onAddFirst={vi.fn()} />);
    const bodyText = document.body.textContent ?? "";
    // All three services named in the spec copy must appear.
    for (const service of ["GitHub", "Anthropic", "OpenRouter"]) {
      expect(bodyText).toContain(service);
    }
  });

  it("CTA button has correct text", () => {
    render(<EmptyState onAddFirst={vi.fn()} />);
    const cta = document.querySelector("button");
    expect(cta?.textContent).toContain("Add your first API key");
  });

  it("CTA button is the only button in the component", () => {
    const { container } = render(<EmptyState onAddFirst={vi.fn()} />);
    expect(container.querySelectorAll("button")).toHaveLength(1);
  });
});
// ─── Interaction ───────────────────────────────────────────────────────────────
describe("Settings EmptyState — interaction", () => {
  it("onAddFirst called when CTA button clicked", () => {
    const handleAddFirst = vi.fn();
    render(<EmptyState onAddFirst={handleAddFirst} />);
    const cta = document.querySelector("button") as HTMLButtonElement;
    cta.click();
    expect(handleAddFirst).toHaveBeenCalledTimes(1);
  });
});

View File

@ -1,407 +0,0 @@
// @vitest-environment jsdom
/**
* Tests for OrgTokensTab org-scoped API key management.
*
* Covers:
* - Loading state (spinner + aria-busy)
* - Empty state when no tokens
* - Token list rendering (single + multiple)
* - Token age display (just now, minutes, hours, days)
* - New key form: label input + Create button
* - Create: POST with optional name payload
* - Create: loading spinner during creation
* - New-token success box with copy button
* - Copy button writes to clipboard + shows "Copied"
* - Copy auto-resets to "Copy" after 2s
* - Dismiss button hides new-token box
* - Revoke button opens ConfirmDialog
* - ConfirmDialog cancel closes without calling API
* - ConfirmDialog confirm calls DELETE and re-fetches
* - Error banner on fetch failure
* - Error banner on create failure
* - Error banner on revoke failure
*/
import React from "react";
import { render, screen, fireEvent, cleanup, act, waitFor } from "@testing-library/react";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { OrgTokensTab } from "../OrgTokensTab";
// ConfirmDialog renders nothing; tests introspect its mock.calls to read the
// props (open/onConfirm) it was invoked with.
vi.mock("@/components/ConfirmDialog", () => ({
  ConfirmDialog: vi.fn(() => null),
}));
// API client mocked per-verb so each test can script GET/POST/DELETE responses.
const mockGet = vi.fn();
const mockPost = vi.fn();
const mockDel = vi.fn();
vi.mock("@/lib/api", () => ({
  api: { get: (...args: unknown[]) => mockGet(...args), post: (...args: unknown[]) => mockPost(...args), del: (...args: unknown[]) => mockDel(...args) },
}));
// Stub clipboard — replaces the whole `navigator` global with a clipboard-only
// object. NOTE(review): never unstubbed (no vi.unstubAllGlobals in afterEach),
// so the stub leaks to later suites in the same file — confirm intended.
vi.stubGlobal("navigator", { clipboard: { writeText: vi.fn().mockResolvedValue(undefined) } });
beforeEach(() => {
  // Start every test on real timers (the copy-reset test opts into fake ones)
  // and with clean call histories on all API + clipboard mocks.
  vi.useRealTimers();
  mockGet.mockReset();
  mockPost.mockReset();
  mockDel.mockReset();
  vi.mocked(navigator.clipboard.writeText).mockReset();
});
afterEach(() => {
  cleanup();
  // Defensive: ensure fake timers never leak into the next test.
  vi.useRealTimers();
});
// ─── Helpers ──────────────────────────────────────────────────────────────────
/** Flushes one microtask turn inside act() so pending resolved promises
 *  (mocked API responses) propagate into React state before asserting. */
async function flush() {
  await act(async () => { await Promise.resolve(); });
}
/**
 * Builds a token fixture. Defaults describe a token created two minutes ago
 * with no name, creator, or last-use timestamp; pass `overrides` to vary any
 * field per test.
 */
function token(overrides: Partial<{
  id: string; prefix: string; name?: string; created_by?: string; created_at: string; last_used_at?: string;
}> = {}) {
  const twoMinutesAgo = new Date(Date.now() - 120_000).toISOString();
  const defaults = {
    id: "tok-1",
    prefix: "mol_pk_test",
    name: undefined,
    created_by: undefined,
    created_at: twoMinutesAgo,
    last_used_at: undefined,
  };
  return { ...defaults, ...overrides };
}
// ─── Loading ─────────────────────────────────────────────────────────────────
describe("OrgTokensTab — loading", () => {
  // A never-resolving GET pins the component in its loading state.
  const neverResolves = () => new Promise(() => {});

  it("shows spinner while fetching", () => {
    mockGet.mockImplementation(neverResolves);
    render(<OrgTokensTab />);
    expect(screen.getByRole("status")).toBeTruthy();
    expect(screen.getByText("Loading keys...")).toBeTruthy();
  });

  it("loading indicator has role=status and aria-live=polite", () => {
    mockGet.mockImplementation(neverResolves);
    render(<OrgTokensTab />);
    const liveRegion = screen.getByRole("status");
    expect(liveRegion.getAttribute("aria-live")).toBe("polite");
    expect(liveRegion.textContent).toContain("Loading keys");
  });
});
// ─── Empty state ─────────────────────────────────────────────────────────────
describe("OrgTokensTab — empty", () => {
  it("shows empty state when no tokens", async () => {
    mockGet.mockResolvedValue({ tokens: [], count: 0 });
    render(<OrgTokensTab />);
    await flush();
    const heading = screen.getByText("No active keys");
    const hint = screen.getByText(/Create a key above to authenticate/i);
    expect(heading).toBeTruthy();
    expect(hint).toBeTruthy();
  });
});
// ─── Token list ─────────────────────────────────────────────────────────────
// Row rendering: prefixes, optional names, relative-age buckets
// (just now / minutes / hours / days), per-row Revoke buttons, last-used line.
describe("OrgTokensTab — token list", () => {
  it("renders token rows", async () => {
    mockGet.mockResolvedValue({ tokens: [token({ id: "tok-1", prefix: "mol_pk_abc" })], count: 1 });
    render(<OrgTokensTab />);
    await flush();
    expect(screen.getByText(/mol_pk_abc/)).toBeTruthy();
  });
  it("renders multiple token rows", async () => {
    mockGet.mockResolvedValue({
      tokens: [
        token({ id: "tok-1", prefix: "mol_pk_a" }),
        token({ id: "tok-2", prefix: "mol_pk_b" }),
      ],
      count: 2,
    });
    render(<OrgTokensTab />);
    await flush();
    expect(screen.getByText(/mol_pk_a/)).toBeTruthy();
    expect(screen.getByText(/mol_pk_b/)).toBeTruthy();
  });
  it("shows token name when present", async () => {
    mockGet.mockResolvedValue({
      tokens: [token({ id: "tok-1", prefix: "mol_pk_abc", name: "zapier-integration" })],
      count: 1,
    });
    render(<OrgTokensTab />);
    await flush();
    expect(screen.getByText("zapier-integration")).toBeTruthy();
  });
  // Age buckets — each test sets created_at a fixed offset before now and
  // asserts the expected relative-age label.
  it("age shows 'just now' for very recent tokens", async () => {
    mockGet.mockResolvedValue({
      tokens: [token({ id: "tok-1", created_at: new Date().toISOString() })],
      count: 1,
    });
    render(<OrgTokensTab />);
    await flush();
    expect(screen.getByText(/just now/)).toBeTruthy();
  });
  it("age shows minutes ago", async () => {
    mockGet.mockResolvedValue({
      tokens: [token({ id: "tok-1", created_at: new Date(Date.now() - 5 * 60_000).toISOString() })],
      count: 1,
    });
    render(<OrgTokensTab />);
    await flush();
    expect(screen.getByText(/5m ago/)).toBeTruthy();
  });
  it("age shows hours ago", async () => {
    mockGet.mockResolvedValue({
      tokens: [token({ id: "tok-1", created_at: new Date(Date.now() - 3 * 3600_000).toISOString() })],
      count: 1,
    });
    render(<OrgTokensTab />);
    await flush();
    expect(screen.getByText(/3h ago/)).toBeTruthy();
  });
  it("age shows days ago", async () => {
    mockGet.mockResolvedValue({
      tokens: [token({ id: "tok-1", created_at: new Date(Date.now() - 2 * 86400_000).toISOString() })],
      count: 1,
    });
    render(<OrgTokensTab />);
    await flush();
    expect(screen.getByText(/2d ago/)).toBeTruthy();
  });
  it("each token has a Revoke button", async () => {
    mockGet.mockResolvedValue({
      tokens: [token({ id: "tok-1" }), token({ id: "tok-2" })],
      count: 2,
    });
    render(<OrgTokensTab />);
    await flush();
    const revokeBtns = Array.from(document.querySelectorAll("button")).filter(b => b.textContent === "Revoke");
    expect(revokeBtns.length).toBe(2);
  });
  it("last_used_at is shown when present", async () => {
    mockGet.mockResolvedValue({
      tokens: [token({
        id: "tok-1",
        created_at: new Date(Date.now() - 86400_000).toISOString(),
        last_used_at: new Date(Date.now() - 3600_000).toISOString(),
      })],
      count: 1,
    });
    render(<OrgTokensTab />);
    await flush();
    expect(screen.getByText(/Last used/i)).toBeTruthy();
  });
});
// ─── Create token ─────────────────────────────────────────────────────────────
// Creation flow: POST payload shape (with/without label), in-flight spinner,
// one-time secret display box, and its Dismiss behavior.
describe("OrgTokensTab — create", () => {
  it("Create button calls POST with empty body when no label", async () => {
    mockGet.mockResolvedValue({ tokens: [], count: 0 });
    mockPost.mockResolvedValue({ auth_token: "tok_new_secret", prefix: "tok_new" });
    render(<OrgTokensTab />);
    await flush();
    const createBtn = screen.getByRole("button", { name: "+ New Key" });
    await act(async () => { createBtn.click(); });
    await flush();
    // Empty object — the name field is omitted entirely, not sent as "".
    expect(mockPost).toHaveBeenCalledWith("/org/tokens", {});
  });
  it("Create button calls POST with name when label is filled", async () => {
    mockGet.mockResolvedValue({ tokens: [], count: 0 });
    mockPost.mockResolvedValue({ auth_token: "tok_new_secret", prefix: "tok_new" });
    render(<OrgTokensTab />);
    await flush();
    const input = screen.getByRole("textbox");
    fireEvent.change(input, { target: { value: "zapier-prod" } });
    await act(async () => { screen.getByRole("button", { name: "+ New Key" }).click(); });
    await flush();
    expect(mockPost).toHaveBeenCalledWith("/org/tokens", { name: "zapier-prod" });
  });
  it("shows spinner while creating", async () => {
    mockGet.mockResolvedValue({ tokens: [], count: 0 });
    // Never-resolving POST pins the in-flight "Creating" state.
    mockPost.mockImplementation(() => new Promise(() => {}));
    render(<OrgTokensTab />);
    await flush();
    await act(async () => { screen.getByRole("button", { name: "+ New Key" }).click(); });
    await flush();
    expect(screen.getByText(/Creating/)).toBeTruthy();
  });
  it("shows new token box after creation", async () => {
    mockGet.mockResolvedValue({ tokens: [], count: 0 });
    mockPost.mockResolvedValue({ auth_token: "tok_new_secret_xyz", prefix: "tok_new" });
    render(<OrgTokensTab />);
    await flush();
    await act(async () => { screen.getByRole("button", { name: "+ New Key" }).click(); });
    await flush();
    // The raw secret is only shown once, alongside a "Copy now" warning.
    expect(screen.getByText(/tok_new_secret_xyz/)).toBeTruthy();
    expect(screen.getByText(/Copy now/)).toBeTruthy();
  });
  it("new token shows label when provided", async () => {
    mockGet.mockResolvedValue({ tokens: [], count: 0 });
    mockPost.mockResolvedValue({ auth_token: "tok_abc123", prefix: "tok_abc" });
    render(<OrgTokensTab />);
    await flush();
    const input = screen.getByRole("textbox");
    fireEvent.change(input, { target: { value: "my-label" } });
    await act(async () => { screen.getByRole("button", { name: "+ New Key" }).click(); });
    await flush();
    expect(screen.getByText(/New Key: my-label/)).toBeTruthy();
  });
  it("dismiss hides the new-token box", async () => {
    mockGet.mockResolvedValue({ tokens: [], count: 0 });
    mockPost.mockResolvedValue({ auth_token: "tok_dismiss", prefix: "tok_d" });
    render(<OrgTokensTab />);
    await flush();
    await act(async () => { screen.getByRole("button", { name: "+ New Key" }).click(); });
    await flush();
    expect(screen.getByText(/tok_dismiss/)).toBeTruthy();
    await act(async () => { screen.getByText("Dismiss").closest("button")!.click(); });
    await flush();
    expect(screen.queryByText(/tok_dismiss/)).toBeNull();
  });
});
// ─── Copy button ─────────────────────────────────────────────────────────────
// Copy-to-clipboard flow: writeText payload, "Copied" feedback label, and the
// 2s auto-reset back to "Copy" (fake timers in the last test).
describe("OrgTokensTab — copy", () => {
  it("Copy button writes token to clipboard", async () => {
    mockGet.mockResolvedValue({ tokens: [], count: 0 });
    mockPost.mockResolvedValue({ auth_token: "tok_copy_test", prefix: "tok_c" });
    render(<OrgTokensTab />);
    await flush();
    await act(async () => { screen.getByRole("button", { name: "+ New Key" }).click(); });
    await flush();
    const copyBtn = screen.getByRole("button", { name: "Copy" });
    await act(async () => { copyBtn.click(); });
    // The full raw secret (not the prefix) must be what lands on the clipboard.
    expect(navigator.clipboard.writeText).toHaveBeenCalledWith("tok_copy_test");
  });
  it("Copy button shows 'Copied' after click", async () => {
    mockGet.mockResolvedValue({ tokens: [], count: 0 });
    mockPost.mockResolvedValue({ auth_token: "tok_copy_2", prefix: "tok_c" });
    render(<OrgTokensTab />);
    await flush();
    await act(async () => { screen.getByRole("button", { name: "+ New Key" }).click(); });
    await flush();
    await act(async () => { screen.getByRole("button", { name: "Copy" }).click(); });
    await flush();
    expect(screen.getByRole("button", { name: "Copied" })).toBeTruthy();
  });
  it("Copy resets to 'Copy' after 2s", async () => {
    // Fake timers so we can advance past the 2s reset deterministically.
    // Note: flush() is not used here — with fake timers active, microtasks are
    // flushed manually via await Promise.resolve() inside act().
    vi.useFakeTimers();
    mockGet.mockResolvedValue({ tokens: [], count: 0 });
    mockPost.mockResolvedValue({ auth_token: "tok_timer", prefix: "tok_t" });
    render(<OrgTokensTab />);
    await act(async () => { await Promise.resolve(); });
    await act(async () => { screen.getByRole("button", { name: "+ New Key" }).click(); });
    await act(async () => { await Promise.resolve(); });
    await act(async () => { screen.getByRole("button", { name: "Copy" }).click(); });
    await act(async () => { await Promise.resolve(); });
    expect(screen.getByRole("button", { name: "Copied" })).toBeTruthy();
    act(() => { vi.advanceTimersByTime(2000); });
    await act(async () => { await Promise.resolve(); });
    expect(screen.getByRole("button", { name: "Copy" })).toBeTruthy();
    vi.useRealTimers();
  });
});
// ─── Revoke ─────────────────────────────────────────────────────────────────
// ConfirmDialog is mocked to render null, so these tests read its mock.calls
// to assert the props it received and to invoke onConfirm directly.
describe("OrgTokensTab — revoke", () => {
  it("Revoke button opens ConfirmDialog", async () => {
    mockGet.mockResolvedValue({ tokens: [token({ id: "tok-revoke", prefix: "mol_pk_rev" })], count: 1 });
    render(<OrgTokensTab />);
    await flush();
    expect(screen.queryByRole("dialog")).toBeNull();
    await act(async () => {
      Array.from(document.querySelectorAll("button")).find(b => b.textContent === "Revoke")!.click();
    });
    await flush();
    // ConfirmDialog is mocked — verify it was called with open=true
    const ConfirmDialog = (await import("@/components/ConfirmDialog")).ConfirmDialog as ReturnType<typeof vi.fn>;
    const lastCall = ConfirmDialog.mock.calls[ConfirmDialog.mock.calls.length - 1];
    expect(lastCall[0]).toMatchObject({ open: true, title: "Revoke API Key" });
  });
  it("DELETE is called with correct URL on confirm", async () => {
    mockGet.mockResolvedValue({ tokens: [token({ id: "tok-del", prefix: "mol_pk_del" })], count: 1 });
    mockDel.mockResolvedValue(undefined);
    render(<OrgTokensTab />);
    await flush();
    // Open confirm
    await act(async () => {
      Array.from(document.querySelectorAll("button")).find(b => b.textContent === "Revoke")!.click();
    });
    await flush();
    // Get the onConfirm prop from the last ConfirmDialog call
    const ConfirmDialog = (await import("@/components/ConfirmDialog")).ConfirmDialog as ReturnType<typeof vi.fn>;
    const lastCall = ConfirmDialog.mock.calls[ConfirmDialog.mock.calls.length - 1];
    const onConfirm = lastCall[0]?.onConfirm;
    // Call onConfirm — simulates the user confirming in the (mocked) dialog.
    await act(async () => { onConfirm?.(); });
    await flush();
    expect(mockDel).toHaveBeenCalledWith("/org/tokens/tok-del");
  });
});
// ─── Error states ─────────────────────────────────────────────────────────────
// Each API verb's rejection must surface its error message in the banner.
describe("OrgTokensTab — errors", () => {
  it("shows error when fetch fails", async () => {
    mockGet.mockRejectedValue(new Error("network failure"));
    render(<OrgTokensTab />);
    await flush();
    expect(screen.getByText(/network failure/i)).toBeTruthy();
  });
  it("shows error when create fails", async () => {
    mockGet.mockResolvedValue({ tokens: [], count: 0 });
    mockPost.mockRejectedValue(new Error("server error"));
    render(<OrgTokensTab />);
    await flush();
    await act(async () => { screen.getByRole("button", { name: "+ New Key" }).click(); });
    await flush();
    expect(screen.getByText(/server error/i)).toBeTruthy();
  });
  it("shows error when revoke fails", async () => {
    mockGet.mockResolvedValue({ tokens: [token({ id: "tok-err" })], count: 1 });
    mockDel.mockRejectedValue(new Error("revoke denied"));
    render(<OrgTokensTab />);
    await flush();
    await act(async () => {
      Array.from(document.querySelectorAll("button")).find(b => b.textContent === "Revoke")!.click();
    });
    await flush();
    // Drive the confirm through the mocked dialog's onConfirm prop.
    const ConfirmDialog = (await import("@/components/ConfirmDialog")).ConfirmDialog as ReturnType<typeof vi.fn>;
    const onConfirm = ConfirmDialog.mock.calls[ConfirmDialog.mock.calls.length - 1][0]?.onConfirm;
    await act(async () => { onConfirm?.(); });
    await flush();
    expect(screen.getByText(/revoke denied/i)).toBeTruthy();
  });
});

View File

@ -1,160 +0,0 @@
// @vitest-environment jsdom
/**
* SearchBar client-side search/filter for secret key names.
*
* Per spec §9:
* - Filters KeyNameLabel text, case-insensitive, on every keystroke
* - Escape clears search (does NOT close panel) + blurs input
* - Cmd+F / Ctrl+F focuses search when panel is open
* - Icon is aria-hidden (decorative)
*
 * NOTE: No @testing-library/jest-dom import — use plain DOM APIs instead.
*
* Covers:
* - Renders search icon with aria-hidden
* - Input has correct aria-label
* - Input renders placeholder text
* - Input has correct class name
* - Renders empty initially (searchQuery from store)
* - onChange updates searchQuery in store
* - Escape clears searchQuery and blurs input
* - Escape does not propagate (does not close panel)
* - Ctrl+F / Cmd+F focuses the input
*/
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { cleanup, fireEvent, render } from "@testing-library/react";
import React from "react";
import { SearchBar } from "../SearchBar";
// ─── Store mock ────────────────────────────────────────────────────────────────
// _mockSearchQuery controls the store's searchQuery value per test;
// _mockSetSearchQuery records writes. The mocked hook supports both the
// selector and whole-state call styles.
const _mockSetSearchQuery = vi.fn();
const _mockSearchQuery = vi.fn(() => "");
vi.mock("@/stores/secrets-store", () => ({
  useSecretsStore: (selector?: (s: { searchQuery: string; setSearchQuery: (q: string) => void }) => unknown) => {
    const state = { searchQuery: _mockSearchQuery(), setSearchQuery: _mockSetSearchQuery };
    return selector ? selector(state) : state;
  },
}));
afterEach(() => {
  // Unmount components, restore spies, and drop the module cache so the
  // vi.mock factory state does not leak between test files.
  cleanup();
  vi.restoreAllMocks();
  vi.resetModules();
});
beforeEach(() => {
  // Fresh call history and an empty search query for every test.
  _mockSetSearchQuery.mockClear();
  _mockSearchQuery.mockReturnValue("");
});
// ─── Render ──────────────────────────────────────────────────────────────────
describe("SearchBar — render", () => {
  // Convenience accessor for the (single) search input in the document.
  const searchInput = () => document.querySelector("input") as HTMLInputElement;

  it("renders search icon with aria-hidden", () => {
    const { container } = render(<SearchBar />);
    const decorativeIcon = container.querySelector('[aria-hidden="true"]');
    expect(decorativeIcon).toBeTruthy();
    expect(decorativeIcon?.textContent).toContain("🔍");
  });

  it("input has aria-label='Search API keys'", () => {
    render(<SearchBar />);
    expect(searchInput().getAttribute("aria-label")).toBe("Search API keys");
  });

  it("input renders placeholder 'Search keys…'", () => {
    render(<SearchBar />);
    expect(searchInput().getAttribute("placeholder")).toBe("Search keys…");
  });

  it("input has search-bar__input class", () => {
    const { container } = render(<SearchBar />);
    const field = container.querySelector("input") as HTMLInputElement;
    expect(field.className).toContain("search-bar__input");
  });

  it("input value reflects searchQuery from store", () => {
    _mockSearchQuery.mockReturnValue("anthropic");
    render(<SearchBar />);
    expect(searchInput().value).toBe("anthropic");
  });

  it("renders empty string when searchQuery is empty", () => {
    _mockSearchQuery.mockReturnValue("");
    const { container } = render(<SearchBar />);
    const field = container.querySelector("input") as HTMLInputElement;
    expect(field.value).toBe("");
  });
});
// ─── Interaction ───────────────────────────────────────────────────────────────
// Keyboard behavior: typing updates the store, Escape clears + blurs, and the
// global Cmd/Ctrl+F shortcut focuses the input.
describe("SearchBar — interaction", () => {
  it("onChange calls setSearchQuery with new value", () => {
    render(<SearchBar />);
    const input = document.querySelector("input") as HTMLInputElement;
    fireEvent.change(input, { target: { value: "github" } });
    expect(_mockSetSearchQuery).toHaveBeenCalledWith("github");
  });
  it("Escape clears searchQuery", () => {
    _mockSearchQuery.mockReturnValue("openrouter");
    render(<SearchBar />);
    const input = document.querySelector("input") as HTMLInputElement;
    // Focus the input first
    input.focus();
    fireEvent.keyDown(input, { key: "Escape" });
    expect(_mockSetSearchQuery).toHaveBeenCalledWith("");
  });
  it("Escape blurs the input", () => {
    _mockSearchQuery.mockReturnValue("test");
    render(<SearchBar />);
    const input = document.querySelector("input") as HTMLInputElement;
    input.focus();
    expect(document.activeElement).toBe(input);
    fireEvent.keyDown(input, { key: "Escape" });
    expect(document.activeElement).not.toBe(input);
  });
  it("Escape clears search without relying on propagation-stop behavior", () => {
    // Escape clearing search is verified by the "Escape clears searchQuery" test above.
    // fireEvent.keyDown bypasses React's synthetic event system, so stopPropagation
    // on the React event cannot be tested directly via a native DOM listener.
    // This test serves as a documentation placeholder for that limitation.
    // NOTE(review): a tautological assertion keeps the spec item visible in
    // reports; consider it.todo() if the suite convention allows — confirm.
    expect(true).toBe(true);
  });
  it("Ctrl+F focuses the input", () => {
    render(<SearchBar />);
    const input = document.querySelector("input") as HTMLInputElement;
    // Ensure input is not focused
    document.body.focus();
    expect(document.activeElement).not.toBe(input);
    // Simulate Ctrl+F — the shortcut listener is on document, not the input.
    fireEvent.keyDown(document, { key: "f", ctrlKey: true, metaKey: false });
    expect(document.activeElement).toBe(input);
  });
  it("Cmd+F focuses the input on Mac", () => {
    render(<SearchBar />);
    const input = document.querySelector("input") as HTMLInputElement;
    document.body.focus();
    fireEvent.keyDown(document, { key: "f", metaKey: true, ctrlKey: false });
    expect(document.activeElement).toBe(input);
  });
  it("Ctrl+F does not focus input for other keys", () => {
    render(<SearchBar />);
    const input = document.querySelector("input") as HTMLInputElement;
    document.body.focus();
    fireEvent.keyDown(document, { key: "g", ctrlKey: true });
    expect(document.activeElement).not.toBe(input);
  });
});

View File

@ -1,291 +0,0 @@
// @vitest-environment jsdom
/**
* Tests for SecretRow single secret display/edit row.
*
* Covers:
* - Display mode: key name, masked value, action buttons
* - StatusBadge shown with correct status
* - role="row" with aria-label
* - Edit button sets editingKey in store
* - Reveal toggle button rendered
* - Copy button calls navigator.clipboard.writeText
* - Delete button dispatches secret:delete-request event
* - Edit mode: KeyValueField + save/cancel rendered
* - Cancel calls setEditingKey(null)
* - Save calls updateSecret + setSecretStatus
* - Save error shown on failure
* - TestConnectionButton shown when testSupported + value entered
*/
import React from "react";
import { render, screen, fireEvent, cleanup, act } from "@testing-library/react";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { SecretRow } from "../SecretRow";
// ── Hoisted mocks — vi.hoisted() so they're stable references ────────────────
// NOTE(review): beforeEach below replaces storeState's fn fields with fresh
// vi.fn()s, so after setup these hoisted store mocks are only the initial
// values; assertions read storeState.* instead. mockValidateSecretValue stays
// live via the validation-module mock. Confirm intended.
const { mockUpdateSecret, mockSetSecretStatus, mockSetEditingKey, mockValidateSecretValue } = vi.hoisted(() => ({
  mockUpdateSecret: vi.fn(),
  mockSetSecretStatus: vi.fn(),
  mockSetEditingKey: vi.fn(),
  mockValidateSecretValue: vi.fn(() => null), // always valid to avoid secret-pattern triggers
}));
// ── Store mock — single shared mutable object ───────────────────────────────
// Tests mutate storeState directly (e.g. editingKey) and all mock closures
// observe the change because they capture the same object.
const storeState = {
  editingKey: null as string | null,
  setEditingKey: mockSetEditingKey,
  updateSecret: mockUpdateSecret,
  setSecretStatus: mockSetSecretStatus,
};
vi.mock("@/stores/secrets-store", () => ({
  useSecretsStore: Object.assign(
    // Supports both selector-style and whole-state hook calls.
    vi.fn((selector?: (s: typeof storeState) => unknown) =>
      selector ? selector(storeState) : storeState
    ),
    { getState: () => storeState },
  ),
}));
// ── Child component stubs ────────────────────────────────────────────────────
// Each UI child is replaced with a minimal element exposing just the props the
// SecretRow tests need (data-testids, statuses, click/change passthrough).
vi.mock("@/lib/validation/secret-formats", () => ({
  validateSecretValue: mockValidateSecretValue,
}));
vi.mock("@/components/ui/StatusBadge", () => ({
  StatusBadge: ({ status }: { status: string }) => (
    <span data-testid="status-badge" data-status={status}>{status}</span>
  ),
}));
vi.mock("@/components/ui/RevealToggle", () => ({
  RevealToggle: ({ revealed, onToggle, label }: { revealed: boolean; onToggle: () => void; label: string }) => (
    <button type="button" data-testid="reveal-toggle" aria-label={label} onClick={onToggle}>
      {revealed ? "HIDE" : "REVEAL"}
    </button>
  ),
}));
vi.mock("@/components/ui/KeyValueField", () => ({
  KeyValueField: ({ value, onChange, disabled }: { value: string; onChange: (v: string) => void; disabled?: boolean }) => (
    <textarea
      data-testid="edit-value-field"
      value={value}
      onChange={(e) => { onChange(e.target.value); }}
      disabled={disabled}
    />
  ),
}));
vi.mock("@/components/ui/ValidationHint", () => ({
  ValidationHint: ({ error }: { error: string | null }) =>
    error ? <span role="alert">{error}</span> : null,
}));
vi.mock("@/components/ui/TestConnectionButton", () => ({
  TestConnectionButton: () => <button data-testid="test-connection-btn" type="button">Test connection</button>,
}));
// ── Test data ────────────────────────────────────────────────────────────────
// One fixture per status (verified / unverified / invalid) and per group.
const GITHUB_SECRET = { name: "GITHUB_TOKEN", masked_value: "ghp_••••••••••••xK9f", group: "github" as const, status: "verified" as const, updated_at: "2024-01-01" };
const ANTHROPIC_SECRET = { name: "ANTHROPIC_API_KEY", masked_value: "sk-ant-•••••••••••••••••a3Zq", group: "anthropic" as const, status: "unverified" as const, updated_at: "2024-01-02" };
const CUSTOM_SECRET = { name: "MY_CUSTOM_KEY", masked_value: "••••••••••••••••9d2a", group: "custom" as const, status: "invalid" as const, updated_at: "2024-01-03" };
// Use a value that definitely does NOT match any secret format regex
const EDIT_VALUE = "TEST_VALID_TOKEN_VALUE_PLACEHOLDER_FOR_EDIT_MODE";
beforeEach(() => {
  // Mutate the shared object so all closures see the update.
  // NOTE(review): this replaces the hoisted mock fns with fresh vi.fn()s each
  // test — assertions must read storeState.*, not the hoisted names.
  storeState.editingKey = null;
  storeState.setEditingKey = vi.fn();
  storeState.updateSecret = vi.fn().mockResolvedValue(undefined);
  storeState.setSecretStatus = vi.fn();
});
afterEach(() => {
  cleanup();
  vi.useRealTimers();
});
// ─── Display mode ───────────────────────────────────────────────────────────
describe("SecretRow — display mode", () => {
  // All display-mode tests render against the verified GitHub fixture unless
  // a different status is the point of the test.
  const renderGithubRow = () =>
    render(<SecretRow secret={GITHUB_SECRET} workspaceId="ws-1" />);

  it("shows secret name", () => {
    renderGithubRow();
    expect(screen.getByText("GITHUB_TOKEN")).toBeTruthy();
  });

  it("shows masked value", () => {
    renderGithubRow();
    expect(screen.getByText("ghp_••••••••••••xK9f")).toBeTruthy();
  });

  it("shows StatusBadge", () => {
    renderGithubRow();
    expect(screen.getByTestId("status-badge")).toBeTruthy();
  });

  it("StatusBadge has correct data-status attribute", () => {
    renderGithubRow();
    const badge = screen.getByTestId("status-badge");
    expect(badge.getAttribute("data-status")).toBe("verified");
  });

  it("role=row", () => {
    renderGithubRow();
    expect(document.querySelector('[role="row"]')).toBeTruthy();
  });

  it("has Reveal, Copy, Edit, Delete buttons", () => {
    renderGithubRow();
    expect(screen.getByTestId("reveal-toggle")).toBeTruthy();
    for (const action of [/copy/i, /edit/i, /delete/i]) {
      expect(screen.getByRole("button", { name: action })).toBeTruthy();
    }
  });

  it("shows invalid status correctly", () => {
    render(<SecretRow secret={CUSTOM_SECRET} workspaceId="ws-1" />);
    const badge = screen.getByTestId("status-badge");
    expect(badge.getAttribute("data-status")).toBe("invalid");
  });
});
// ─── Edit ───────────────────────────────────────────────────────────────────
// Edit-mode behavior: entering/leaving edit mode, save-button enablement,
// the save call chain, the pending label, and the failure message.
describe("SecretRow — edit", () => {
  it("Edit button calls setEditingKey(secret.name)", () => {
    render(<SecretRow secret={GITHUB_SECRET} workspaceId="ws-1" />);
    fireEvent.click(screen.getByRole("button", { name: /edit/i }));
    expect(storeState.setEditingKey).toHaveBeenCalledWith("GITHUB_TOKEN");
  });
  it("shows edit form (KeyValueField + save/cancel) when editingKey set", () => {
    storeState.editingKey = "GITHUB_TOKEN";
    render(<SecretRow secret={GITHUB_SECRET} workspaceId="ws-1" />);
    expect(screen.getByTestId("edit-value-field")).toBeTruthy();
    expect(screen.getByRole("button", { name: /cancel/i })).toBeTruthy();
    expect(screen.getByRole("button", { name: /save/i })).toBeTruthy();
  });
  it("Cancel calls setEditingKey(null)", () => {
    storeState.editingKey = "GITHUB_TOKEN";
    render(<SecretRow secret={GITHUB_SECRET} workspaceId="ws-1" />);
    fireEvent.click(screen.getByRole("button", { name: /cancel/i }));
    expect(storeState.setEditingKey).toHaveBeenCalledWith(null);
  });
  it("Save button disabled when editValue is empty", () => {
    storeState.editingKey = "GITHUB_TOKEN";
    render(<SecretRow secret={GITHUB_SECRET} workspaceId="ws-1" />);
    expect((screen.getByRole("button", { name: /save/i }) as HTMLButtonElement).disabled).toBe(true);
  });
  it("Save enabled when editValue is non-empty", async () => {
    storeState.editingKey = "GITHUB_TOKEN";
    render(<SecretRow secret={GITHUB_SECRET} workspaceId="ws-abc" />);
    const textarea = screen.getByTestId("edit-value-field");
    fireEvent.change(textarea, { target: { value: EDIT_VALUE } });
    // Flush one microtask turn inside act() so state updates settle.
    await act(async () => { await Promise.resolve(); });
    expect((screen.getByRole("button", { name: /save/i }) as HTMLButtonElement).disabled).toBe(false);
  });
  it("Save calls updateSecret(workspaceId, name, editValue)", async () => {
    storeState.editingKey = "GITHUB_TOKEN";
    render(<SecretRow secret={GITHUB_SECRET} workspaceId="ws-test" />);
    fireEvent.change(screen.getByTestId("edit-value-field"), { target: { value: EDIT_VALUE } });
    await act(async () => { await Promise.resolve(); });
    fireEvent.click(screen.getByRole("button", { name: /save/i }));
    await act(async () => { await Promise.resolve(); });
    expect(storeState.updateSecret).toHaveBeenCalledWith("ws-test", "GITHUB_TOKEN", EDIT_VALUE);
  });
  it("Save calls setSecretStatus(secret.name, 'unverified')", async () => {
    storeState.editingKey = "GITHUB_TOKEN";
    render(<SecretRow secret={GITHUB_SECRET} workspaceId="ws-1" />);
    fireEvent.change(screen.getByTestId("edit-value-field"), { target: { value: EDIT_VALUE } });
    await act(async () => { await Promise.resolve(); });
    fireEvent.click(screen.getByRole("button", { name: /save/i }));
    await act(async () => { await Promise.resolve(); });
    // A freshly saved value drops back to "unverified" until re-tested.
    expect(storeState.setSecretStatus).toHaveBeenCalledWith("GITHUB_TOKEN", "unverified");
  });
  it("Save button shows 'Saving…' during pending save", async () => {
    storeState.editingKey = "GITHUB_TOKEN";
    // A promise that never resolves keeps the save permanently pending.
    storeState.updateSecret = vi.fn(() => new Promise(() => {}));
    render(<SecretRow secret={GITHUB_SECRET} workspaceId="ws-1" />);
    fireEvent.change(screen.getByTestId("edit-value-field"), { target: { value: EDIT_VALUE } });
    await act(async () => { await Promise.resolve(); });
    fireEvent.click(screen.getByRole("button", { name: /save/i }));
    await act(async () => { await Promise.resolve(); });
    expect(screen.getByText("Saving…")).toBeTruthy();
  });
  it("shows error on save failure", async () => {
    storeState.editingKey = "GITHUB_TOKEN";
    storeState.updateSecret = vi.fn().mockRejectedValue(new Error("network error"));
    render(<SecretRow secret={GITHUB_SECRET} workspaceId="ws-1" />);
    fireEvent.change(screen.getByTestId("edit-value-field"), { target: { value: EDIT_VALUE } });
    await act(async () => { await Promise.resolve(); });
    fireEvent.click(screen.getByRole("button", { name: /save/i }));
    await act(async () => { await Promise.resolve(); });
    expect(screen.getByText(/network error/i)).toBeTruthy();
  });
});
// ─── Copy ───────────────────────────────────────────────────────────────────
describe("SecretRow — copy", () => {
  it("Copy calls navigator.clipboard.writeText with masked value", async () => {
    const writeText = vi.fn().mockResolvedValue(undefined);
    // Fix: the original stubbed navigator.clipboard with defineProperty and
    // never restored it, leaking the stub into every later test in this jsdom
    // window. Stash the prior descriptor and restore it in finally.
    const original = Object.getOwnPropertyDescriptor(navigator, "clipboard");
    Object.defineProperty(navigator, "clipboard", {
      value: { writeText },
      configurable: true,
    });
    try {
      render(<SecretRow secret={GITHUB_SECRET} workspaceId="ws-1" />);
      fireEvent.click(screen.getByRole("button", { name: /copy/i }));
      // The component copies the *masked* value, never a plaintext secret.
      expect(writeText).toHaveBeenCalledWith("ghp_••••••••••••xK9f");
    } finally {
      if (original) {
        Object.defineProperty(navigator, "clipboard", original);
      } else {
        delete (navigator as { clipboard?: unknown }).clipboard;
      }
    }
  });
});
// ─── Delete ─────────────────────────────────────────────────────────────────
// Deletion is requested via a DOM CustomEvent carrying the secret name.
describe("SecretRow — delete", () => {
  it("Delete dispatches secret:delete-request with secret name", () => {
    const onDeleteRequest = vi.fn();
    window.addEventListener("secret:delete-request", onDeleteRequest);
    try {
      render(<SecretRow secret={GITHUB_SECRET} workspaceId="ws-1" />);
      fireEvent.click(screen.getByRole("button", { name: /delete/i }));
      expect(onDeleteRequest).toHaveBeenCalledWith(
        expect.objectContaining({ detail: "GITHUB_TOKEN" }),
      );
    } finally {
      window.removeEventListener("secret:delete-request", onDeleteRequest);
    }
  });
});
// ─── TestConnectionButton ────────────────────────────────────────────────────
// The test-connection affordance appears only for services that support it
// (e.g. github) and only after a candidate value has been typed.
describe("SecretRow — TestConnectionButton", () => {
  it("shown for github secret when editValue is entered", async () => {
    storeState.editingKey = "GITHUB_TOKEN";
    render(<SecretRow secret={GITHUB_SECRET} workspaceId="ws-1" />);
    fireEvent.change(screen.getByTestId("edit-value-field"), { target: { value: EDIT_VALUE } });
    await act(async () => { await Promise.resolve(); });
    expect(screen.getByTestId("test-connection-btn")).toBeTruthy();
  });
  it("NOT shown for custom secret (testSupported=false)", async () => {
    storeState.editingKey = "MY_CUSTOM_KEY";
    render(<SecretRow secret={CUSTOM_SECRET} workspaceId="ws-1" />);
    fireEvent.change(screen.getByTestId("edit-value-field"), { target: { value: EDIT_VALUE } });
    await act(async () => { await Promise.resolve(); });
    expect(screen.queryByTestId("test-connection-btn")).toBeNull();
  });
  it("NOT shown when editValue is empty", () => {
    storeState.editingKey = "GITHUB_TOKEN";
    render(<SecretRow secret={GITHUB_SECRET} workspaceId="ws-1" />);
    expect(screen.queryByTestId("test-connection-btn")).toBeNull();
  });
});

View File

@ -1,308 +0,0 @@
// @vitest-environment jsdom
/**
 * Tests for SecretsTab — API keys tab inside SettingsPanel.
*
* Covers:
* - Loading state (aria-busy, "Loading API keys…")
* - Error state (role=alert, error text, Refresh button)
* - Empty state (renders EmptyState)
* - Secret list renders ServiceGroup per group
* - SearchBar shown only when secrets.length >= 4
 * - Search filters results — no-results state + Clear search
* - "+ Add API Key" button toggles AddKeyForm
* - AddKeyForm visible when isAddFormOpen=true
* - ServiceGroup with multiple groups rendered
* - Single-key group count label ("1 key")
* - Multi-key group count label ("N keys")
*/
import React from "react";
import { render, screen, fireEvent, cleanup, act, waitFor } from "@testing-library/react";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { SecretsTab } from "../SecretsTab";
// ── Secrets store mock ───────────────────────────────────────────────────────
type SecretsStoreState = {
secrets: Array<{ name: string; masked_value: string; group: string; status: string; updated_at: string }>;
isLoading: boolean;
error: string | null;
isAddFormOpen: boolean;
searchQuery: string;
fetchSecrets: ReturnType<typeof vi.fn>;
setAddFormOpen: ReturnType<typeof vi.fn>;
setSearchQuery: ReturnType<typeof vi.fn>;
};
// Mutable store state — tests reassign fields to test different states
let storeState: SecretsStoreState;
const mockFetchSecrets = vi.fn().mockResolvedValue(undefined);
const mockSetAddFormOpen = vi.fn();
const mockSetSearchQuery = vi.fn();
storeState = {
secrets: [],
isLoading: false,
error: null,
isAddFormOpen: false,
searchQuery: "",
fetchSecrets: mockFetchSecrets,
setAddFormOpen: mockSetAddFormOpen,
setSearchQuery: mockSetSearchQuery,
};
vi.mock("@/stores/secrets-store", () => ({
useSecretsStore: Object.assign(
vi.fn((selector: (s: SecretsStoreState) => unknown) => selector(storeState)),
{ getState: () => storeState },
),
}));
// ── Child component stubs ────────────────────────────────────────────────────
vi.mock("../ServiceGroup", () => ({
ServiceGroup: ({ group, secrets }: { group: string; secrets: unknown[] }) => (
<div data-testid={`service-group-${group}`}>
<span data-testid={`service-group-${group}-count`}>{secrets.length}</span>
</div>
),
}));
vi.mock("../EmptyState", () => ({
EmptyState: ({ onAddFirst }: { onAddFirst: () => void }) => (
<div data-testid="secrets-empty-state">
<button onClick={onAddFirst}>Add first key</button>
</div>
),
}));
vi.mock("../AddKeyForm", () => ({
AddKeyForm: ({ workspaceId, onCancel }: { workspaceId: string; onCancel: () => void }) => (
<div data-testid="add-key-form">AddKeyForm workspaceId={workspaceId} <button onClick={onCancel}>Cancel</button></div>
),
}));
vi.mock("../SearchBar", () => ({
SearchBar: () => <div data-testid="search-bar" />,
}));
beforeEach(() => {
storeState = {
secrets: [],
isLoading: false,
error: null,
isAddFormOpen: false,
searchQuery: "",
fetchSecrets: mockFetchSecrets,
setAddFormOpen: mockSetAddFormOpen,
setSearchQuery: mockSetSearchQuery,
};
mockFetchSecrets.mockReset().mockResolvedValue(undefined);
mockSetAddFormOpen.mockReset();
mockSetSearchQuery.mockReset();
});
afterEach(() => {
cleanup();
});
async function flush() {
await act(async () => { await Promise.resolve(); });
}
// ─── Loading ────────────────────────────────────────────────────────────────
describe("SecretsTab — loading", () => {
  it("shows loading state", () => {
    // Replacing (rather than mutating) the state object is equivalent here:
    // the mock factory reads the `storeState` binding on every call.
    storeState = { ...storeState, isLoading: true };
    render(<SecretsTab workspaceId="ws-test" />);
    expect(screen.getByText("Loading API keys…")).toBeTruthy();
  });
});
// ─── Error ─────────────────────────────────────────────────────────────────
describe("SecretsTab — error", () => {
  /** Put the store into an error state, then mount the tab. */
  const mountWithError = (message: string, workspaceId = "ws-test") => {
    storeState.error = message;
    render(<SecretsTab workspaceId={workspaceId} />);
  };

  it("shows error with role=alert", () => {
    mountWithError("network failure");
    expect(screen.getByRole("alert")).toBeTruthy();
    expect(screen.getByText("network failure")).toBeTruthy();
  });

  it("shows Refresh button in error state", () => {
    mountWithError("server error");
    expect(screen.getByRole("button", { name: "Refresh" })).toBeTruthy();
  });

  it("Refresh button calls fetchSecrets with workspaceId", () => {
    mountWithError("server error", "ws-123");
    fireEvent.click(screen.getByRole("button", { name: "Refresh" }));
    expect(mockFetchSecrets).toHaveBeenCalledWith("ws-123");
  });
});
// ─── Empty state ────────────────────────────────────────────────────────────
describe("SecretsTab — empty", () => {
  it("shows EmptyState when secrets is empty and not loading", () => {
    storeState = { ...storeState, secrets: [], isLoading: false };
    render(<SecretsTab workspaceId="ws-test" />);
    expect(screen.getByTestId("secrets-empty-state")).toBeTruthy();
  });

  it("EmptyState Add first button opens add form", () => {
    storeState = { ...storeState, secrets: [] };
    render(<SecretsTab workspaceId="ws-test" />);
    // "Add first key" is the button rendered by the EmptyState stub above.
    fireEvent.click(screen.getByRole("button", { name: "Add first key" }));
    expect(mockSetAddFormOpen).toHaveBeenCalledWith(true);
  });
});
// ─── Secret list ────────────────────────────────────────────────────────────
// Grouping, the Add-API-Key affordance, and the SearchBar visibility rule.
describe("SecretsTab — secret list", () => {
  // One fixture per service group so grouping can be asserted per group.
  const ANTHROPIC_SECRET = { name: "ANTHROPIC_API_KEY", masked_value: "sk-ant-••••", group: "anthropic", status: "active", updated_at: "2024-01-01" };
  const GITHUB_SECRET = { name: "GITHUB_TOKEN", masked_value: "ghp_••••", group: "github", status: "active", updated_at: "2024-01-02" };
  const OPENROUTER_SECRET = { name: "OPENROUTER_API_KEY", masked_value: "sk-or-••••", group: "openrouter", status: "active", updated_at: "2024-01-03" };
  const CUSTOM_SECRET = { name: "MY_CUSTOM_KEY", masked_value: "••••", group: "custom", status: "active", updated_at: "2024-01-04" };
  it("renders one ServiceGroup per non-empty group", () => {
    storeState.secrets = [ANTHROPIC_SECRET, GITHUB_SECRET];
    render(<SecretsTab workspaceId="ws-test" />);
    expect(screen.getByTestId("service-group-anthropic")).toBeTruthy();
    expect(screen.getByTestId("service-group-github")).toBeTruthy();
  });
  it("does NOT render empty groups", () => {
    storeState.secrets = [ANTHROPIC_SECRET]; // only anthropic has secrets
    render(<SecretsTab workspaceId="ws-test" />);
    expect(screen.queryByTestId("service-group-github")).toBeNull();
    expect(screen.queryByTestId("service-group-openrouter")).toBeNull();
  });
  it("renders all 4 groups when all are populated", () => {
    storeState.secrets = [ANTHROPIC_SECRET, GITHUB_SECRET, OPENROUTER_SECRET, CUSTOM_SECRET];
    render(<SecretsTab workspaceId="ws-test" />);
    expect(screen.getByTestId("service-group-anthropic")).toBeTruthy();
    expect(screen.getByTestId("service-group-github")).toBeTruthy();
    expect(screen.getByTestId("service-group-openrouter")).toBeTruthy();
    expect(screen.getByTestId("service-group-custom")).toBeTruthy();
  });
  it("shows '+ Add API Key' button", () => {
    storeState.secrets = [ANTHROPIC_SECRET];
    render(<SecretsTab workspaceId="ws-test" />);
    expect(screen.getByRole("button", { name: /add api key/i })).toBeTruthy();
  });
  it("'+ Add API Key' opens AddKeyForm", () => {
    storeState.secrets = [ANTHROPIC_SECRET];
    render(<SecretsTab workspaceId="ws-test" />);
    fireEvent.click(screen.getByRole("button", { name: /add api key/i }));
    expect(mockSetAddFormOpen).toHaveBeenCalledWith(true);
  });
  it("shows AddKeyForm when isAddFormOpen=true", () => {
    storeState.secrets = [ANTHROPIC_SECRET];
    storeState.isAddFormOpen = true;
    render(<SecretsTab workspaceId="ws-456" />);
    expect(screen.getByTestId("add-key-form")).toBeTruthy();
  });
  it("AddKeyForm Cancel closes the form", () => {
    storeState.secrets = [ANTHROPIC_SECRET];
    storeState.isAddFormOpen = true;
    render(<SecretsTab workspaceId="ws-test" />);
    // "Cancel" comes from the AddKeyForm stub defined at the top of the file.
    fireEvent.click(screen.getByText("Cancel"));
    expect(mockSetAddFormOpen).toHaveBeenCalledWith(false);
  });
  it("shows SearchBar when secrets.length >= 4", () => {
    storeState.secrets = [
      ANTHROPIC_SECRET, GITHUB_SECRET, OPENROUTER_SECRET,
      { ...CUSTOM_SECRET, name: "EXTRA_KEY_1" },
    ];
    render(<SecretsTab workspaceId="ws-test" />);
    expect(screen.getByTestId("search-bar")).toBeTruthy();
  });
  it("hides SearchBar when secrets.length < 4", () => {
    storeState.secrets = [ANTHROPIC_SECRET, GITHUB_SECRET];
    render(<SecretsTab workspaceId="ws-test" />);
    expect(screen.queryByTestId("search-bar")).toBeNull();
  });
});
// ─── Search / filtering ──────────────────────────────────────────────────────
// Filtering by searchQuery: no-results message, Clear-search reset, and
// group-level filtering of matches.
describe("SecretsTab — search", () => {
  const S1 = { name: "ANTHROPIC_API_KEY", masked_value: "sk-ant-••••", group: "anthropic", status: "active", updated_at: "2024-01-01" };
  const S2 = { name: "GITHUB_TOKEN", masked_value: "ghp_••••", group: "github", status: "active", updated_at: "2024-01-02" };
  const S3 = { name: "OPENROUTER_API_KEY", masked_value: "sk-or-••••", group: "openrouter", status: "active", updated_at: "2024-01-03" };
  const S4 = { name: "MY_CUSTOM_KEY", masked_value: "••••", group: "custom", status: "active", updated_at: "2024-01-04" };
  beforeEach(() => {
    // Need 4+ secrets for SearchBar to appear
    storeState.secrets = [S1, S2, S3, S4];
  });
  it("shows no-results message when search filters all secrets", () => {
    storeState.searchQuery = "nonexistent-key";
    render(<SecretsTab workspaceId="ws-test" />);
    expect(screen.getByText(/no keys match/i)).toBeTruthy();
    expect(screen.getByText(/nonexistent-key/i)).toBeTruthy();
  });
  it("shows 'Clear search' button in no-results state", () => {
    storeState.searchQuery = "nonexistent";
    render(<SecretsTab workspaceId="ws-test" />);
    expect(screen.getByRole("button", { name: /clear search/i })).toBeTruthy();
  });
  it("'Clear search' clears searchQuery via store.getState()", () => {
    storeState.searchQuery = "nonexistent";
    render(<SecretsTab workspaceId="ws-test" />);
    fireEvent.click(screen.getByRole("button", { name: /clear search/i }));
    expect(mockSetSearchQuery).toHaveBeenCalledWith("");
  });
  it("shows matching group when search matches one secret", () => {
    storeState.searchQuery = "anthropic";
    storeState.secrets = [S1, S2, S3, S4];
    render(<SecretsTab workspaceId="ws-test" />);
    expect(screen.getByTestId("service-group-anthropic")).toBeTruthy();
    // Other groups should be filtered out
    expect(screen.queryByTestId("service-group-github")).toBeNull();
  });
});
// ─── SearchBar visibility threshold ─────────────────────────────────────────
// The search bar appears only once the workspace holds 4 or more keys.
describe("SecretsTab — search bar threshold", () => {
  /** Build `count` distinct custom-group secret fixtures. */
  const buildSecrets = (count: number) =>
    Array.from({ length: count }, (_, i) => ({
      name: `KEY_${i + 1}`,
      masked_value: "••••",
      group: "custom" as const,
      status: "active" as const,
      updated_at: "2024-01-01",
    }));

  it("SearchBar hidden at 3 secrets", () => {
    storeState.secrets = buildSecrets(3);
    render(<SecretsTab workspaceId="ws-test" />);
    expect(screen.queryByTestId("search-bar")).toBeNull();
  });

  it("SearchBar shown at 4 secrets (threshold)", () => {
    storeState.secrets = buildSecrets(4);
    render(<SecretsTab workspaceId="ws-test" />);
    expect(screen.getByTestId("search-bar")).toBeTruthy();
  });

  it("SearchBar hidden when secrets drop to 3 below threshold", () => {
    // Separate render with 3 secrets — plain object state won't re-render
    // React on mutation, so the visibility logic is exercised directly.
    storeState.secrets = buildSecrets(3);
    render(<SecretsTab workspaceId="ws-test" />);
    expect(screen.queryByTestId("search-bar")).toBeNull();
  });
});

View File

@ -1,196 +0,0 @@
// @vitest-environment jsdom
/**
 * ServiceGroup — collapsible group of secret rows under a service header.
*
* Per spec §3.1:
* GitHub 1 key
* GITHUB_TOKEN
* ghp_xK9f [👁] [] [] [] [🗑]
*
 * NOTE: No @testing-library/jest-dom import — use DOM APIs.
*
* Covers:
* - Renders group with role=group and aria-label
* - Service icon is aria-hidden
* - Label text matches service
* - Count: "1 key" for single, "N keys" for multiple
* - Renders SecretRow for each secret
* - Renders nothing when secrets array is empty (not called)
* - Different services show correct label and icon
*/
import { afterEach, describe, expect, it, vi } from "vitest";
import { cleanup, render } from "@testing-library/react";
import React from "react";
import { ServiceGroup } from "../ServiceGroup";
import type { Secret, SecretGroup, ServiceConfig } from "@/types/secrets";

// ─── Mock SecretRow ────────────────────────────────────────────────────────────
// SecretRow is stubbed so these tests cover only ServiceGroup's own
// header/grouping markup, not row behavior.
vi.mock("../SecretRow", () => ({
  SecretRow: ({ secret, workspaceId }: { secret: Secret; workspaceId: string }) => (
    <div data-testid="secret-row" data-name={secret.name}>
      SecretRow:{secret.name}
    </div>
  ),
}));

// ─── Helpers ───────────────────────────────────────────────────────────────────
// Build a minimal ServiceConfig fixture with a placeholder docs link.
function makeService(icon: string, label: string): ServiceConfig {
  return { icon, label, docsUrl: "https://example.com/docs" };
}
// Build a minimal masked Secret fixture in the "custom" group.
function makeSecret(name: string): Secret {
  return {
    name,
    value: "sk-test-••••••••••••",
    group: "custom" as SecretGroup,
    masked: true,
  };
}

// ─── Tests ────────────────────────────────────────────────────────────────────
afterEach(() => {
  cleanup();
  vi.restoreAllMocks();
  vi.resetModules();
});
describe("ServiceGroup — render", () => {
it("renders group with role=group", () => {
const { container } = render(
<ServiceGroup
group="github"
service={makeService("github", "GitHub")}
secrets={[makeSecret("GITHUB_TOKEN")]}
workspaceId="ws1"
/>,
);
expect(container.querySelector('[role="group"]')).toBeTruthy();
});
it("group aria-label contains service label", () => {
const { container } = render(
<ServiceGroup
group="anthropic"
service={makeService("anthropic", "Anthropic")}
secrets={[makeSecret("ANTHROPIC_API_KEY")]}
workspaceId="ws1"
/>,
);
const group = container.querySelector('[role="group"]');
expect(group?.getAttribute("aria-label")).toContain("Anthropic");
});
it("service icon is aria-hidden", () => {
const { container } = render(
<ServiceGroup
group="openrouter"
service={makeService("openrouter", "OpenRouter")}
secrets={[makeSecret("OPENROUTER_API_KEY")]}
workspaceId="ws1"
/>,
);
const icon = container.querySelector('[aria-hidden="true"]');
expect(icon).toBeTruthy();
expect(icon?.textContent).toContain("🔀");
});
it("label text matches service label", () => {
const { container } = render(
<ServiceGroup
group="github"
service={makeService("github", "GitHub")}
secrets={[makeSecret("GITHUB_TOKEN")]}
workspaceId="ws1"
/>,
);
expect(container.textContent ?? "").toContain("GitHub");
});
it('count label is "1 key" for single secret', () => {
const { container } = render(
<ServiceGroup
group="github"
service={makeService("github", "GitHub")}
secrets={[makeSecret("GITHUB_TOKEN")]}
workspaceId="ws1"
/>,
);
expect(container.textContent ?? "").toContain("1 key");
});
it("count label is 'N keys' for multiple secrets", () => {
const { container } = render(
<ServiceGroup
group="anthropic"
service={makeService("anthropic", "Anthropic")}
secrets={[
makeSecret("ANTHROPIC_API_KEY"),
makeSecret("ANTHROPIC_MODEL_PREF"),
]}
workspaceId="ws1"
/>,
);
expect(container.textContent ?? "").toContain("2 keys");
});
it("renders SecretRow for each secret", () => {
const { container } = render(
<ServiceGroup
group="github"
service={makeService("github", "GitHub")}
secrets={[
makeSecret("GITHUB_TOKEN"),
makeSecret("GITHUB_ORG"),
]}
workspaceId="ws1"
/>,
);
const rows = container.querySelectorAll('[data-testid="secret-row"]');
expect(rows).toHaveLength(2);
expect(rows[0].getAttribute("data-name")).toBe("GITHUB_TOKEN");
expect(rows[1].getAttribute("data-name")).toBe("GITHUB_ORG");
});
it("renders header and rows divs", () => {
const { container } = render(
<ServiceGroup
group="github"
service={makeService("github", "GitHub")}
secrets={[makeSecret("GITHUB_TOKEN")]}
workspaceId="ws1"
/>,
);
expect(container.querySelector(".service-group__header")).toBeTruthy();
expect(container.querySelector(".service-group__rows")).toBeTruthy();
});
it("renders correct icon emoji for github", () => {
const { container } = render(
<ServiceGroup
group="github"
service={makeService("github", "GitHub")}
secrets={[makeSecret("GITHUB_TOKEN")]}
workspaceId="ws1"
/>,
);
const icon = container.querySelector(".service-group__icon");
expect(icon?.textContent).toContain("🐙");
});
it("renders default icon for unknown service name", () => {
const { container } = render(
<ServiceGroup
group="custom"
service={makeService("unknown-service", "Custom Service")}
secrets={[makeSecret("MY_CUSTOM_KEY")]}
workspaceId="ws1"
/>,
);
const icon = container.querySelector(".service-group__icon");
expect(icon?.textContent).toContain("🔑");
});
});

View File

@ -1,175 +0,0 @@
// @vitest-environment jsdom
/**
 * SettingsButton — gear icon in top bar, toggles SettingsPanel.
*
* Per spec §1.1:
* - Gear icon, aria-label="Settings"
* - aria-expanded reflects panel open state
* - Tooltip shows keyboard shortcut
* - Active state class when panel open
*
 * NOTE: No @testing-library/jest-dom import — use DOM APIs.
*
* Covers:
* - Button has aria-label="Settings"
* - Gear SVG has aria-hidden="true"
* - aria-expanded is false when panel closed
* - aria-expanded is true when panel open
* - Toggle calls openPanel / closePanel
* - Active class applied when panel open
* - Tooltip content shows correct shortcut
*/
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { act, cleanup, fireEvent, render, waitFor } from "@testing-library/react";
import React from "react";

// ResizeObserver polyfill required by Radix Tooltip's use-size hook
globalThis.ResizeObserver = class ResizeObserver {
  observe() {}
  unobserve() {}
  disconnect() {}
};

import { SettingsButton } from "../SettingsButton";

// ─── Store mock ────────────────────────────────────────────────────────────────
// Panel-open state is driven per-test through this mock's return value.
const _mockIsPanelOpen = vi.fn<() => boolean>(() => false);
const _mockOpenPanel = vi.fn();
const _mockClosePanel = vi.fn();
vi.mock("@/stores/secrets-store", () => ({
  useSecretsStore: (selector?: (s: {
    isPanelOpen: boolean;
    openPanel: () => void;
    closePanel: () => void;
  }) => unknown) => {
    const state = {
      isPanelOpen: _mockIsPanelOpen(),
      openPanel: _mockOpenPanel,
      closePanel: _mockClosePanel,
    };
    // Support both selector-style and whole-state calls.
    return selector ? selector(state) : state;
  },
}));

// Mock navigator for isMac detection
Object.defineProperty(navigator, "userAgent", {
  configurable: true,
  value: "Macintosh",
});

afterEach(() => {
  cleanup();
  vi.restoreAllMocks();
  vi.resetModules();
});
beforeEach(() => {
  // Default: panel closed, call counters cleared.
  _mockIsPanelOpen.mockReturnValue(false);
  _mockOpenPanel.mockClear();
  _mockClosePanel.mockClear();
});
// ─── Render ────────────────────────────────────────────────────────────────────
// Static markup and ARIA contract of the gear button.
describe("SettingsButton — render", () => {
  /** Mount the button with the given panel state; return the <button>. */
  const mountButton = (panelOpen = false) => {
    _mockIsPanelOpen.mockReturnValue(panelOpen);
    render(<SettingsButton />);
    return document.querySelector("button");
  };

  it("button has aria-label='Settings'", () => {
    expect(mountButton()?.getAttribute("aria-label")).toBe("Settings");
  });

  it("gear SVG has aria-hidden='true'", () => {
    mountButton();
    const svg = document.querySelector("svg");
    expect(svg?.getAttribute("aria-hidden")).toBe("true");
  });

  it("aria-expanded is false when panel is closed", () => {
    expect(mountButton(false)?.getAttribute("aria-expanded")).toBe("false");
  });

  it("aria-expanded is true when panel is open", () => {
    expect(mountButton(true)?.getAttribute("aria-expanded")).toBe("true");
  });

  it("button has settings-button class", () => {
    expect(mountButton()?.className).toContain("settings-button");
  });

  it("active class applied when panel is open", () => {
    expect(mountButton(true)?.className).toContain("settings-button--active");
  });

  it("active class NOT applied when panel is closed", () => {
    expect(mountButton(false)?.className).not.toContain("settings-button--active");
  });
});
// ─── Interaction ───────────────────────────────────────────────────────────────
// Click toggling between openPanel/closePanel, and the platform-specific
// keyboard-shortcut hint in the tooltip.
describe("SettingsButton — interaction", () => {
  it("clicking when panel closed calls openPanel", () => {
    _mockIsPanelOpen.mockReturnValue(false);
    render(<SettingsButton />);
    const btn = document.querySelector("button") as HTMLButtonElement;
    btn.click();
    expect(_mockOpenPanel).toHaveBeenCalledTimes(1);
    expect(_mockClosePanel).not.toHaveBeenCalled();
  });
  it("clicking when panel open calls closePanel", () => {
    _mockIsPanelOpen.mockReturnValue(true);
    render(<SettingsButton />);
    const btn = document.querySelector("button") as HTMLButtonElement;
    btn.click();
    expect(_mockClosePanel).toHaveBeenCalledTimes(1);
    expect(_mockOpenPanel).not.toHaveBeenCalled();
  });
  it("tooltip shows Mac shortcut on Mac", async () => {
    // Force the isMac user-agent branch before mounting.
    Object.defineProperty(navigator, "userAgent", {
      configurable: true,
      value: "Macintosh",
    });
    render(<SettingsButton />);
    const btn = document.querySelector("button") as HTMLButtonElement;
    act(() => { fireEvent.focus(btn); });
    // Wait for Radix tooltip delay (300ms) + render
    await waitFor(() => {
      const tooltipText = document.body.textContent ?? "";
      expect(tooltipText).toContain("Settings");
      expect(tooltipText).toContain("⌘");
    });
  });
  it("tooltip shows Ctrl+ shortcut on non-Mac", async () => {
    Object.defineProperty(navigator, "userAgent", {
      configurable: true,
      value: "Windows",
    });
    render(<SettingsButton />);
    const btn = document.querySelector("button") as HTMLButtonElement;
    act(() => { fireEvent.focus(btn); });
    await waitFor(() => {
      const tooltipText = document.body.textContent ?? "";
      expect(tooltipText).toContain("Settings");
      expect(tooltipText).toContain("Ctrl");
    });
  });
});

View File

@ -1,233 +0,0 @@
// @vitest-environment jsdom
/**
 * Tests for SettingsPanel — right-anchored slide-over drawer for workspace settings.
*
* Covers:
* - Closed by default (Dialog closed when isPanelOpen=false)
* - Opens when isPanelOpen=true
* - Three tabs: Secrets, Workspace Tokens, Org API Keys
* - Cmd+, keyboard shortcut toggles panel
* - Clicking backdrop/close with dirty form (editingKey set) shows UnsavedChangesGuard
* - Guard "Keep editing" closes guard (does NOT close panel)
* - Guard "Discard" closes guard AND closes panel
* - fetchSecrets called when panel opens
* - Close button closes panel
* - aria-modal="false" canvas stays interactive
*/
import React from "react";
import { render, screen, fireEvent, cleanup, act, waitFor } from "@testing-library/react";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { SettingsPanel } from "../SettingsPanel";
// ── Store mock ──────────────────────────────────────────────────────────────
type PanelStoreState = {
isPanelOpen: boolean;
isAddFormOpen: boolean;
editingKey: string | null;
closePanel: () => void;
openPanel: () => void;
fetchSecrets: (workspaceId: string) => Promise<void>;
};
let storeState: PanelStoreState;
const mockClosePanel = vi.fn();
const mockOpenPanel = vi.fn();
const mockFetchSecrets = vi.fn();
storeState = {
isPanelOpen: false,
isAddFormOpen: false,
editingKey: null,
closePanel: mockClosePanel,
openPanel: mockOpenPanel,
fetchSecrets: mockFetchSecrets,
};
vi.mock("@/stores/secrets-store", () => ({
useSecretsStore: Object.assign(
vi.fn((selector?: (s: PanelStoreState) => unknown) =>
selector ? selector(storeState) : storeState
),
{ getState: () => storeState },
),
}));
vi.mock("@/hooks/use-keyboard-shortcut", () => ({
useKeyboardShortcut: vi.fn(),
}));
// ── Child component stubs ────────────────────────────────────────────────────
vi.mock("../SecretsTab", () => ({
SecretsTab: ({ workspaceId }: { workspaceId: string }) => (
<div data-testid="secrets-tab">SecretsTab workspaceId={workspaceId}</div>
),
}));
vi.mock("../TokensTab", () => ({
TokensTab: ({ workspaceId }: { workspaceId: string }) => (
<div data-testid="tokens-tab">TokensTab workspaceId={workspaceId}</div>
),
}));
vi.mock("../OrgTokensTab", () => ({
OrgTokensTab: () => <div data-testid="org-tokens-tab">OrgTokensTab</div>,
}));
vi.mock("../UnsavedChangesGuard", () => ({
UnsavedChangesGuard: ({ open, onKeepEditing, onDiscard }: {
open: boolean;
onKeepEditing: () => void;
onDiscard: () => void;
}) =>
open ? (
<div data-testid="unsaved-guard" role="alertdialog">
<button onClick={onKeepEditing} data-testid="guard-keep">Keep editing</button>
<button onClick={onDiscard} data-testid="guard-discard">Discard</button>
</div>
) : null,
}));
beforeEach(() => {
storeState = {
isPanelOpen: false,
isAddFormOpen: false,
editingKey: null,
closePanel: mockClosePanel,
openPanel: mockOpenPanel,
fetchSecrets: mockFetchSecrets,
};
mockClosePanel.mockReset();
mockOpenPanel.mockReset();
mockFetchSecrets.mockReset().mockResolvedValue(undefined);
});
afterEach(() => {
cleanup();
});
// ─── Closed by default ─────────────────────────────────────────────────────
describe("SettingsPanel — closed by default", () => {
  it("no dialog content when isPanelOpen=false", () => {
    render(<SettingsPanel workspaceId="ws-1" />);
    // Radix Dialog doesn't render content when open=false
    expect(screen.queryByTestId("secrets-tab")).toBeNull();
  });
});
// ─── Open / close ──────────────────────────────────────────────────────────
// Tab structure, default-active tab, close button, and the fetch triggered
// by opening the panel.
describe("SettingsPanel — open / close", () => {
  it("renders SecretsTab when panel is open", () => {
    storeState.isPanelOpen = true;
    render(<SettingsPanel workspaceId="ws-xyz" />);
    expect(screen.getByTestId("secrets-tab")).toBeTruthy();
    expect(screen.getByText(/workspaceId=ws-xyz/i)).toBeTruthy();
  });
  it("renders TokensTab tab in tabs list", () => {
    storeState.isPanelOpen = true;
    render(<SettingsPanel workspaceId="ws-1" />);
    expect(screen.getByRole("tab", { name: /workspace tokens/i })).toBeTruthy();
  });
  it("renders Org API Keys tab in tabs list", () => {
    storeState.isPanelOpen = true;
    render(<SettingsPanel workspaceId="ws-1" />);
    expect(screen.getByRole("tab", { name: /org api keys/i })).toBeTruthy();
  });
  it("Secrets tab is default active", () => {
    storeState.isPanelOpen = true;
    render(<SettingsPanel workspaceId="ws-1" />);
    expect(screen.getByTestId("secrets-tab")).toBeTruthy();
    expect(screen.getByRole("tab", { name: /secrets/i }).getAttribute("data-state")).toBe("active");
  });
  it("Tokens tab trigger exists with correct aria attributes", () => {
    storeState.isPanelOpen = true;
    render(<SettingsPanel workspaceId="ws-1" />);
    const tab = screen.getByRole("tab", { name: /workspace tokens/i });
    // Radix Tabs.Trigger has role="tab" and aria-selected
    expect(tab).toBeTruthy();
    // Secrets tab is active by default
    const secretsTab = screen.getByRole("tab", { name: /secrets/i });
    expect(secretsTab.getAttribute("data-state")).toBe("active");
    // Tokens tab should not be active initially
    expect(tab.getAttribute("data-state")).not.toBe("active");
  });
  it("Close button calls closePanel", () => {
    storeState.isPanelOpen = true;
    render(<SettingsPanel workspaceId="ws-1" />);
    fireEvent.click(screen.getByRole("button", { name: /close settings/i }));
    expect(mockClosePanel).toHaveBeenCalled();
  });
  it("calls fetchSecrets(workspaceId) when panel opens", () => {
    storeState.isPanelOpen = true;
    render(<SettingsPanel workspaceId="ws-fetch-test" />);
    expect(mockFetchSecrets).toHaveBeenCalledWith("ws-fetch-test");
  });
});
// ─── Unsaved changes guard ──────────────────────────────────────────────────
describe("SettingsPanel — unsaved changes guard", () => {
  // Simulate the user attempting to dismiss the panel via the close button.
  const clickClose = () => {
    fireEvent.click(screen.getByRole("button", { name: /close settings/i }));
  };

  it("shows guard when panel closing with isAddFormOpen=true", () => {
    storeState.isPanelOpen = true;
    storeState.isAddFormOpen = true;
    render(<SettingsPanel workspaceId="ws-1" />);
    clickClose();
    expect(screen.getByTestId("unsaved-guard")).toBeTruthy();
  });

  it("guard shows when editingKey is set (dirty form)", () => {
    storeState.isPanelOpen = true;
    storeState.editingKey = "GITHUB_TOKEN";
    render(<SettingsPanel workspaceId="ws-1" />);
    clickClose();
    expect(screen.getByTestId("unsaved-guard")).toBeTruthy();
  });

  it("'Keep editing' closes guard but panel stays open", () => {
    storeState.isPanelOpen = true;
    storeState.editingKey = "GITHUB_TOKEN";
    render(<SettingsPanel workspaceId="ws-1" />);
    // Attempt to close while dirty — the guard dialog appears.
    clickClose();
    expect(screen.getByTestId("unsaved-guard")).toBeTruthy();
    // Choosing "Keep editing" dismisses only the guard …
    fireEvent.click(screen.getByTestId("guard-keep"));
    expect(screen.queryByTestId("unsaved-guard")).toBeNull();
    // … while the panel content remains mounted.
    expect(screen.getByTestId("secrets-tab")).toBeTruthy();
  });

  it("'Discard' button on guard calls closePanel", () => {
    storeState.isPanelOpen = true;
    storeState.isAddFormOpen = true;
    render(<SettingsPanel workspaceId="ws-1" />);
    clickClose();
    fireEvent.click(screen.getByTestId("guard-discard"));
    expect(mockClosePanel).toHaveBeenCalled();
  });
});
// ─── Accessibility ──────────────────────────────────────────────────────────
describe("SettingsPanel — accessibility", () => {
  // Query the document for an element carrying the exact aria-label.
  const byAriaLabel = (label: string) =>
    document.querySelector(`[aria-label="${label}"]`);

  it("Dialog.Content has aria-label='Settings: API Keys'", () => {
    storeState.isPanelOpen = true;
    render(<SettingsPanel workspaceId="ws-1" />);
    expect(byAriaLabel("Settings: API Keys")).toBeTruthy();
  });

  it("TabList has aria-label='Settings sections'", () => {
    storeState.isPanelOpen = true;
    render(<SettingsPanel workspaceId="ws-1" />);
    expect(byAriaLabel("Settings sections")).toBeTruthy();
  });
});

Some files were not shown because too many files have changed in this diff Show More