# cf-proxy — Cloudflare-tunnel-shape reverse proxy for the local harness.
#
# Production path: agent → CF tunnel → AWS LB → tenant container.
# This config replays the same header rewrites the CF tunnel does so
# the tenant sees the same Host + X-Forwarded-* it would in production.
#
# Multi-tenant: nginx routes by Host header to the right tenant
# container — exactly the same way the production CF tunnel does
# (URL is the public CF endpoint, Host carries the tenant identity).
#
# How tests reach it (no /etc/hosts required):
#   curl -H 'Host: harness-tenant-alpha.localhost' http://localhost:8080/health
#   curl -H 'Host: harness-tenant-beta.localhost'  http://localhost:8080/health
#
# Backwards-compat: harness-tenant.localhost (no -alpha/-beta suffix) maps
# to alpha for legacy single-tenant replays.

worker_processes 1;

events {
    worker_connections 256;
}

http {
    # Docker's embedded DNS at 127.0.0.11. Required because the
    # `proxy_pass http://$tenant_upstream:8080` below uses a variable —
    # nginx needs an explicit resolver to do per-request DNS lookups
    # (literal hostnames are resolved once at startup; variables are
    # resolved per request). Without this, nginx fails closed with
    # "no resolver defined" + 502.
    #
    # `valid=30s` caps cache life so a tenant container restart picks
    # up a new IP within 30 seconds. `ipv6=off` skips AAAA lookups that
    # Docker DNS doesn't always serve cleanly.
    resolver 127.0.0.11 valid=30s ipv6=off;

    # One Host → upstream map feeding a single shared server block, so
    # the header rewrites + buffering settings stay centralised and
    # alpha/beta can't drift apart as the harness grows; each tenant
    # only contributes its routing line here.
    map $host $tenant_upstream {
        default                         tenant-alpha;
        harness-tenant.localhost        tenant-alpha;
        harness-tenant-alpha.localhost  tenant-alpha;
        harness-tenant-beta.localhost   tenant-beta;
    }

    # WebSocket upgrade passthrough: forward the client's Upgrade header
    # and send "Connection: upgrade" only when an upgrade was actually
    # requested; plain requests keep an empty Connection header so
    # upstream keepalive still works.
    map $http_upgrade $connection_upgrade {
        default upgrade;
        ''      "";
    }

    # Reject Host headers we don't recognise. server_name alone can't do
    # this here (with a single server block on the port, every Host would
    # land in it anyway, and the map's default would silently route it to
    # alpha, masking cross-tenant routing bugs in test output), so this
    # catch-all default_server answers 421 Misdirected Request instead.
    server {
        listen 8080 default_server;
        return 421;
    }

    server {
        listen 8080;
        server_name harness-tenant.localhost
                    harness-tenant-alpha.localhost
                    harness-tenant-beta.localhost
                    localhost;

        # Cap upload at 50MB to mirror the staging tenant nginx limit;
        # chat upload tests will fail closed if the platform handler
        # ever silently expands its limit (catches the failure mode
        # opposite of the chat-files lazy-heal incident).
        client_max_body_size 50m;

        location / {
            # The map above resolves $tenant_upstream to the right
            # container based on the Host header — production CF tunnel
            # behavior in one line.
            proxy_pass http://$tenant_upstream:8080;

            # Header parity with CF tunnel + AWS LB. Production CF sets
            # X-Forwarded-Proto=https; we keep http here because TLS
            # termination in compose is unnecessary for testing the
            # tenant logic — TLS is a CF concern, not a tenant bug
            # surface. If TLS-specific bugs ever bite, add cert-manager
            # + listen 8443 ssl here.
            proxy_set_header Host              $host;
            proxy_set_header X-Real-IP         $remote_addr;
            proxy_set_header X-Forwarded-For   $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Host  $host;
            proxy_set_header X-Forwarded-Proto $scheme;

            # Streamable HTTP / SSE / WebSocket — the tenant exposes /ws
            # and /events/stream + MCP /mcp/stream. Disabling buffering
            # reproduces the CF tunnel's pass-through streaming semantics
            # (CF tunnel = no buffering by default; nginx's default IS
            # buffering, which would mask issue #2397-class streaming
            # bugs by accumulating output until the client disconnects).
            proxy_buffering off;
            proxy_request_buffering off;
            proxy_http_version 1.1;

            # Upgrade/Connection pair driven by the $connection_upgrade
            # map above, so /ws handshakes actually reach the tenant.
            proxy_set_header Upgrade    $http_upgrade;
            proxy_set_header Connection $connection_upgrade;

            # Read timeout — the CF tunnel default is 100s. Matching it
            # catches the "long agent run finishes after the proxy
            # already closed the upstream" failure mode.
            proxy_read_timeout 100s;
        }
    }
}
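
# Quick streaming smoke test (assumes the tenant really serves
# /events/stream as described above; adjust the path if your harness
# differs). `curl -N` disables curl's own output buffering, so with
# proxy_buffering off the events should render as the tenant emits
# them rather than arriving in one burst when the connection closes:
#
#   curl -N -H 'Host: harness-tenant-alpha.localhost' \
#        http://localhost:8080/events/stream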