fix(sweep-cf-tunnels): buffer pages to disk to avoid argv ARG_MAX

The page-merge loop passed the entire accumulating tunnel JSON to
python3 -c via argv on every iteration. On a busy account (verified
2026-05-02: 672 tunnels, 14 pages on the Hongmingwangrabbit account)
the accumulated string blows past Linux's 128 KiB per-argument cap
(MAX_ARG_STRLEN; on the GH Ubuntu runner that cap binds long before
the ~2 MiB combined argv+envp limit, ARG_MAX) and the exec fails with
`python3: Argument list too long` at exit 126. The workflow had been
silently failing this way since the first run that hit a real account,
masked earlier by runs bailing out at the missing-CF_ACCOUNT_ID secret
check.
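
The failure reproduces in isolation; a minimal sketch (the 200000-byte
size is arbitrary, anything past the 128 KiB per-argument cap trips it):

    getconf ARG_MAX                          # total argv+envp budget, ~2 MiB on the runner
    big=$(python3 -c 'print("x" * 200000)')  # a single ~200 KB argument
    python3 -c pass "$big"                   # bash: python3: Argument list too long; exit 126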

Buffer each page response to a file under a temp dir and merge from
disk once pagination finishes. Also bump the page cap from 20 to 40
(1000 → 2000 tunnel ceiling) so the existing soft-cap warning has
headroom; the disk merge touches each tunnel record once (O(n) in
tunnel count) where the argv shape re-serialized the whole accumulator
every page (O(n^2)), so the larger ceiling is cheap.
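
Rough numbers behind the O(n) vs O(n^2) claim, as illustrative
arithmetic only (counted in page volumes, assuming full 50-tunnel
pages):

    # argv shape: page k re-serializes pages 1..k  -> 1+2+...+14 = 105 page-volumes copied
    # disk shape: each page written once, read once -> 2 * 14 = 28 page-volumes
    python3 -c 'p = 14; print(p * (p + 1) // 2, 2 * p)'   # -> 105 28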

Verified locally against the live account (672 tunnels): the script
now runs cleanly through pagination to the existing MAX_DELETE_PCT
safety gate, which trips (99% > 90%) as designed and surfaces the
actual orphan backlog for operator-driven cleanup.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
Hongming Wang 2026-05-02 00:42:25 -07:00
parent cdbf54beed
commit a117a60eed

@@ -94,27 +94,37 @@ log " staging orgs: $(echo "$STAGING_SLUGS" | wc -w | tr -d ' ')"
 log "Fetching Cloudflare tunnels..."
 # The cfd_tunnel list endpoint is paginated; per_page max is 50.
 # Walk all pages so we don't silently miss orphans on busy accounts.
+#
+# Pages are buffered to a temp dir and merged at the end. The earlier
+# shape passed the accumulating JSON on argv every iteration, which on
+# a busy account (700+ tunnels = 14+ pages) blows past Linux's 128 KiB
+# per-argument cap (MAX_ARG_STRLEN) on the GH Ubuntu runner and dies
+# with `python3: Argument list too long`. Disk-buffering also makes
+# the accumulator O(n) instead of O(n^2).
+PAGES_DIR=$(mktemp -d -t cf-tunnels-XXXXXX)
+trap 'rm -rf "$PAGES_DIR"' EXIT
 PAGE=1
-TUNNEL_JSON='{"result":[]}'
 while :; do
-  page_json=$(curl -sS -m 15 -H "Authorization: Bearer $CF_API_TOKEN" \
-    "https://api.cloudflare.com/client/v4/accounts/$CF_ACCOUNT_ID/cfd_tunnel?per_page=50&page=$PAGE&is_deleted=false")
-  page_count=$(echo "$page_json" | python3 -c "import json,sys; print(len(json.load(sys.stdin).get('result') or []))")
-  if [ "$page_count" = "0" ]; then break; fi
-  # Merge pages
-  TUNNEL_JSON=$(python3 -c "
-import json, sys
-acc = json.loads(sys.argv[1])
-new = json.loads(sys.argv[2])
-acc['result'].extend(new.get('result') or [])
-print(json.dumps(acc))
-" "$TUNNEL_JSON" "$page_json")
+  page_file="$PAGES_DIR/page-$(printf '%05d' "$PAGE").json"
+  curl -sS -m 15 -H "Authorization: Bearer $CF_API_TOKEN" \
+    "https://api.cloudflare.com/client/v4/accounts/$CF_ACCOUNT_ID/cfd_tunnel?per_page=50&page=$PAGE&is_deleted=false" \
+    > "$page_file"
+  page_count=$(python3 -c "import json,sys; print(len(json.load(open(sys.argv[1])).get('result') or []))" "$page_file")
+  if [ "$page_count" = "0" ]; then rm -f "$page_file"; break; fi
   PAGE=$((PAGE + 1))
-  if [ "$PAGE" -gt 20 ]; then
-    log "::warning::stopping pagination at page 20 (1000 tunnels) — re-run if more"
+  if [ "$PAGE" -gt 40 ]; then
+    log "::warning::stopping pagination at page 40 (2000 tunnels) — re-run if more"
     break
   fi
 done
+TUNNEL_JSON=$(python3 -c '
+import glob, json, os, sys
+acc = {"result": []}
+for f in sorted(glob.glob(os.path.join(sys.argv[1], "page-*.json"))):
+    with open(f) as fh:
+        acc["result"].extend(json.load(fh).get("result") or [])
+print(json.dumps(acc))
+' "$PAGES_DIR")
 TOTAL_TUNNELS=$(echo "$TUNNEL_JSON" | python3 -c "import json,sys; print(len(json.load(sys.stdin)['result']))")
 log " total tunnels: $TOTAL_TUNNELS"