forked from molecule-ai/molecule-core
chore: apply code-review round-6 suggestions
All 5 suggestions from the latest review pass are applied.
## tests/e2e/_extract_token.py (new)
Extracted the 14-line inline `python3 -c` snippet from _lib.sh into a real
Python file. Easier to edit, fewer escaping traps, same behavior.
Shell helper now just shells out to it.
## tests/e2e/_lib.sh
- Replaced inline python with: python3 "$(dirname "${BASH_SOURCE[0]}")/_extract_token.py"
- Removed redundant sys.exit(0) as part of the extraction
## Shellcheck-clean scripts (new CI job enforces)
- Removed dead captures: BEFORE_COUNT (test_activity_e2e.sh), ORIG_SKILLS,
REIMPORT_SKILLS (test_api.sh), QA_TOKEN (test_comprehensive_e2e.sh)
- Renamed unused loop vars `i`, `j` -> `_` in 5 sites
- Added `# shellcheck disable=SC2046` on the two intentional word-splits
in test_claude_code_e2e.sh (docker stop/rm of multiple container IDs)
- Removed a useless re-register of QA mid-script (was done in Section 2)
## CI (.github/workflows/ci.yml)
- Replaced `sudo apt-get install postgresql-client` + psql with a direct
`docker exec` into the existing postgres:16 service container. Saves
~10-20s per CI run.
- Added new `shellcheck` job that lints tests/e2e/*.sh on every PR.
Local: shellcheck --severity=warning returns 0 across all 5 scripts.
## Verification
- go test -race ./internal/handlers/... : pass
- mcp-server: 96/96 jest
- canvas: 357/357 vitest + clean build
- tests/e2e/test_api.sh: 62/62
- tests/e2e/test_comprehensive_e2e.sh: 67/67
- shellcheck tests/e2e/*.sh : clean
- CI YAML: valid
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
1f1b2d731b
commit
f8ba8a2847
20
.github/workflows/ci.yml
vendored
20
.github/workflows/ci.yml
vendored
@ -137,9 +137,15 @@ jobs:
|
||||
# Migrations auto-run at platform boot. Fail fast if they silently
|
||||
# didn't — catches future migration-author mistakes (e.g. a new
|
||||
# privileged op Postgres "dev" can't execute) before the E2E run.
|
||||
# Uses docker exec into the service container's own psql — avoids
|
||||
# a 10-20s apt-install step in the runner.
|
||||
run: |
|
||||
sudo apt-get install -y -qq postgresql-client > /dev/null
|
||||
tables=$(PGPASSWORD=dev psql -h localhost -U dev -d molecule -tAc "SELECT count(*) FROM information_schema.tables WHERE table_schema='public' AND table_name='workspaces'")
|
||||
pg_container=$(docker ps --filter "ancestor=postgres:16" --format "{{.ID}}" | head -1)
|
||||
if [ -z "$pg_container" ]; then
|
||||
echo "::error::Could not find postgres service container"
|
||||
exit 1
|
||||
fi
|
||||
tables=$(docker exec "$pg_container" psql -U dev -d molecule -tAc "SELECT count(*) FROM information_schema.tables WHERE table_schema='public' AND table_name='workspaces'")
|
||||
if [ "$tables" != "1" ]; then
|
||||
echo "::error::Migrations did not apply — 'workspaces' table missing"
|
||||
cat platform/platform.log || true
|
||||
@ -158,6 +164,16 @@ jobs:
|
||||
kill "$(cat platform/platform.pid)" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
shellcheck:
|
||||
name: Shellcheck (E2E scripts)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Run shellcheck on tests/e2e/*.sh
|
||||
run: |
|
||||
sudo apt-get update -qq && sudo apt-get install -y -qq shellcheck
|
||||
shellcheck --severity=warning tests/e2e/*.sh
|
||||
|
||||
python-lint:
|
||||
name: Python Lint & Test
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
24
tests/e2e/_extract_token.py
Executable file
24
tests/e2e/_extract_token.py
Executable file
@ -0,0 +1,24 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Stdin: JSON response from POST /registry/register.
|
||||
Stdout: the auth_token value, or empty string.
|
||||
Stderr: diagnostic when the response is unparseable or missing a token.
|
||||
|
||||
Exit code is always 0 — the empty string on stdout is how callers
|
||||
distinguish "no token issued" (legitimate on re-registration) from
|
||||
success. The warning on stderr surfaces the no-token case so it
|
||||
stops masking downstream "missing workspace auth token" 401s.
|
||||
"""
|
||||
import json
|
||||
import sys
|
||||
|
||||
try:
|
||||
data = json.load(sys.stdin)
|
||||
except Exception as e:
|
||||
sys.stderr.write(f"e2e_extract_token: invalid JSON response ({e})\n")
|
||||
print("")
|
||||
raise SystemExit(0)
|
||||
|
||||
token = data.get("auth_token", "")
|
||||
if not token:
|
||||
sys.stderr.write("e2e_extract_token: response contained no auth_token field\n")
|
||||
print(token)
|
||||
@ -12,24 +12,9 @@
|
||||
export BASE
|
||||
|
||||
# Emit the auth_token from a /registry/register response on stdout.
|
||||
# Logs a warning to stderr when the JSON parse fails or the token is
|
||||
# missing — silent empty strings masked real failures as the
|
||||
# downstream "missing workspace auth token" 401. Return value is
|
||||
# still empty-string-on-failure so grandfather-path callers work.
|
||||
# See _extract_token.py for the exact semantics.
|
||||
e2e_extract_token() {
|
||||
python3 -c "
|
||||
import sys, json
|
||||
try:
|
||||
data = json.load(sys.stdin)
|
||||
except Exception as e:
|
||||
sys.stderr.write(f'e2e_extract_token: invalid JSON response ({e})\n')
|
||||
print('')
|
||||
sys.exit(0)
|
||||
tok = data.get('auth_token', '')
|
||||
if not tok:
|
||||
sys.stderr.write('e2e_extract_token: response contained no auth_token field\n')
|
||||
print(tok)
|
||||
"
|
||||
python3 "$(dirname "${BASH_SOURCE[0]}")/_extract_token.py"
|
||||
}
|
||||
|
||||
# Delete every workspace currently on the platform. Use at the top of a
|
||||
|
||||
@ -76,9 +76,6 @@ AGENT_TOKEN=$(echo "$RREG" | e2e_extract_token)
|
||||
# ---------- A2A Communication Logging ----------
|
||||
echo "--- A2A Communication Logging ---"
|
||||
|
||||
# Clear any existing activity by noting the count
|
||||
BEFORE_COUNT=$(curl -s "$BASE/workspaces/$AGENT_ID/activity" | python3 -c "import sys,json; print(len(json.load(sys.stdin)))")
|
||||
|
||||
# Test 1: Send A2A message and verify activity is logged
|
||||
R=$(curl -s --max-time "$TIMEOUT" -X POST "$BASE/workspaces/$AGENT_ID/a2a" \
|
||||
-H "Content-Type: application/json" \
|
||||
@ -96,7 +93,7 @@ check "A2A message/send returns response" 'result' "$R"
|
||||
# Test 2: Activity log should have a new a2a_receive entry
|
||||
# Retry up to 3s for the async LogActivity goroutine to complete
|
||||
AFTER_COUNT=0
|
||||
for i in 1 2 3 4 5 6; do
|
||||
for _ in 1 2 3 4 5 6; do
|
||||
R=$(curl -s "$BASE/workspaces/$AGENT_ID/activity?type=a2a_receive")
|
||||
AFTER_COUNT=$(echo "$R" | python3 -c "import sys,json; print(len(json.load(sys.stdin)))")
|
||||
[ "$AFTER_COUNT" -gt "0" ] && break
|
||||
|
||||
@ -259,7 +259,6 @@ check "GET /bundles/export/:id" '"name":"Summarizer Agent"' "$BUNDLE"
|
||||
# Capture original config for comparison
|
||||
ORIG_NAME=$(echo "$BUNDLE" | python3 -c "import sys,json; print(json.load(sys.stdin)['name'])")
|
||||
ORIG_TIER=$(echo "$BUNDLE" | python3 -c "import sys,json; print(json.load(sys.stdin)['tier'])")
|
||||
ORIG_SKILLS=$(echo "$BUNDLE" | python3 -c "import sys,json; b=json.load(sys.stdin); card=b.get('agent_card') or {}; skills=card.get('skills',[]); print(','.join(s.get('name','') for s in skills))")
|
||||
|
||||
# Delete the workspace
|
||||
R=$(curl -s -X DELETE "$BASE/workspaces/$SUM_ID")
|
||||
@ -311,9 +310,8 @@ R=$(curl -s -X POST "$BASE/registry/register" -H "Content-Type: application/json
|
||||
-d "{\"id\":\"$NEW_ID\",\"url\":\"http://localhost:8002\",\"agent_card\":{\"name\":\"Summarizer\",\"skills\":[{\"id\":\"summarize\",\"name\":\"Summarize\"}]}}")
|
||||
check "Register re-imported workspace" '"status":"registered"' "$R"
|
||||
|
||||
# Re-export and compare skills survived the round-trip
|
||||
# Re-export and verify agent_card survives the round-trip
|
||||
REBUNDLE=$(curl -s "$BASE/bundles/export/$NEW_ID")
|
||||
REIMPORT_SKILLS=$(echo "$REBUNDLE" | python3 -c "import sys,json; b=json.load(sys.stdin); card=b.get('agent_card') or {}; skills=card.get('skills',[]); print(','.join(s.get('name','') for s in skills))")
|
||||
check "Re-exported bundle has agent_card" '"agent_card"' "$REBUNDLE"
|
||||
|
||||
# Clean up
|
||||
|
||||
@ -41,7 +41,9 @@ fi
|
||||
for id in $(curl -s $PLATFORM/workspaces | python3 -c "import sys,json; [print(w['id']) for w in json.load(sys.stdin)]" 2>/dev/null); do
|
||||
curl -s -X DELETE "$PLATFORM/workspaces/$id" > /dev/null
|
||||
done
|
||||
# shellcheck disable=SC2046 # Intentional word-split over container IDs
|
||||
docker stop $(docker ps -q --filter "name=ws-") 2>/dev/null || true
|
||||
# shellcheck disable=SC2046
|
||||
docker rm $(docker ps -aq --filter "name=ws-") 2>/dev/null || true
|
||||
|
||||
# --- Create Org Chart ---
|
||||
|
||||
@ -15,7 +15,6 @@ SKIP=0
|
||||
# heartbeat, update-card, discover, and peers calls.
|
||||
PM_TOKEN=""
|
||||
DEV_TOKEN=""
|
||||
QA_TOKEN=""
|
||||
|
||||
e2e_cleanup_all_workspaces
|
||||
|
||||
@ -99,9 +98,8 @@ R=$(curl -s -X POST "$BASE/workspaces" -H "Content-Type: application/json" \
|
||||
-d "{\"name\":\"Test QA\",\"role\":\"QA\",\"tier\":1,\"parent_id\":\"$PM_ID\"}")
|
||||
check "Create QA (sibling of Dev)" '"status":"provisioning"' "$R"
|
||||
QA_ID=$(echo "$R" | jq_extract "['id']")
|
||||
RR=$(curl -s -X POST "$BASE/registry/register" -H "Content-Type: application/json" \
|
||||
-d "{\"id\":\"$QA_ID\",\"url\":\"http://localhost:9002\",\"agent_card\":{\"name\":\"QA\",\"skills\":[]}}")
|
||||
QA_TOKEN=$(echo "$RR" | e2e_extract_token)
|
||||
curl -s -X POST "$BASE/registry/register" -H "Content-Type: application/json" \
|
||||
-d "{\"id\":\"$QA_ID\",\"url\":\"http://localhost:9002\",\"agent_card\":{\"name\":\"QA\",\"skills\":[]}}" > /dev/null
|
||||
|
||||
# Create unrelated workspace
|
||||
R=$(curl -s -X POST "$BASE/workspaces" -H "Content-Type: application/json" \
|
||||
@ -151,7 +149,7 @@ RT_CR_ID=$(echo "$R" | jq_extract "['id']")
|
||||
# Wait for containers to start (poll up to 30s for first one to appear)
|
||||
if command -v docker &>/dev/null; then
|
||||
short_cc="${RT_CC_ID:0:12}"
|
||||
for i in 1 2 3 4 5 6; do
|
||||
for _ in 1 2 3 4 5 6; do
|
||||
sleep 5
|
||||
if docker inspect "ws-${short_cc}" >/dev/null 2>&1; then break; fi
|
||||
done
|
||||
@ -161,7 +159,7 @@ if command -v docker &>/dev/null; then
|
||||
local short_id="${ws_id:0:12}"
|
||||
# Poll up to 30s for image to appear
|
||||
local actual_image="NOT_FOUND"
|
||||
for j in 1 2 3 4 5 6; do
|
||||
for _ in 1 2 3 4 5 6; do
|
||||
actual_image=$(docker inspect "ws-${short_id}" --format '{{.Config.Image}}' 2>/dev/null || echo "NOT_FOUND")
|
||||
if echo "$actual_image" | grep -qF "$expected_tag"; then break; fi
|
||||
sleep 5
|
||||
@ -214,7 +212,7 @@ if echo "$R" | grep -qF "saved"; then
|
||||
# Poll up to 30s for the new container image to appear (restart can take a while)
|
||||
if command -v docker &>/dev/null; then
|
||||
short_id="${RT_LG_ID:0:12}"
|
||||
for i in 1 2 3 4 5 6; do
|
||||
for _ in 1 2 3 4 5 6; do
|
||||
sleep 5
|
||||
actual=$(docker inspect "ws-${short_id}" --format '{{.Config.Image}}' 2>/dev/null || echo "")
|
||||
if echo "$actual" | grep -qF "deepagents"; then break; fi
|
||||
@ -293,9 +291,6 @@ R=$(curl -s -H "X-Workspace-ID: $PM_ID" -H "Authorization: Bearer $PM_TOKEN" "$B
|
||||
check "PM discovers Dev (parent→child)" "$DEV_ID" "$R"
|
||||
|
||||
# Dev discovers QA (siblings: allowed) — QA was registered in Section 2
|
||||
RQA=$(curl -s -X POST "$BASE/registry/register" -H "Content-Type: application/json" \
|
||||
-d "{\"id\":\"$QA_ID\",\"url\":\"http://localhost:9002\",\"agent_card\":{\"name\":\"QA\",\"skills\":[]}}")
|
||||
QA_TOKEN=$(echo "$RQA" | e2e_extract_token)
|
||||
R=$(curl -s -H "X-Workspace-ID: $DEV_ID" -H "Authorization: Bearer $DEV_TOKEN" "$BASE/registry/discover/$QA_ID")
|
||||
check "Dev discovers QA (siblings)" "$QA_ID" "$R"
|
||||
|
||||
@ -573,7 +568,7 @@ for w in ws:
|
||||
" 2>/dev/null
|
||||
|
||||
# Poll for clean state up to 30s — DB cascade + container stop is async on busy systems
|
||||
for i in 1 2 3 4 5 6; do
|
||||
for _ in 1 2 3 4 5 6; do
|
||||
sleep 5
|
||||
R=$(curl -s "$BASE/workspaces")
|
||||
if [ "$R" = "[]" ]; then break; fi
|
||||
|
||||
Loading…
Reference in New Issue
Block a user