Closes #93 and #105.

#93 — add research/plugins/template/channels entries to the org.yaml category_routing defaults. Without them, evolution crons firing with these categories found no target and their audit summaries were silently dropped at the PM. Each category now routes back to the role that generated it, so the author acts on their own findings.

#105 — emit X-RateLimit-Limit / -Remaining / -Reset on every response (allowed and throttled) and Retry-After on 429s per RFC 6585. Two tests cover both paths. Clients and monitoring tools can now back off proactively instead of polling into 429 walls.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
commit a435dd3055 (parent a60477ed1e)
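To illustrate the proactive back-off the commit message describes, a consumer can read the X-RateLimit-* headers on any response and sleep before it ever hits a 429. The sketch below is illustrative only and is not part of this commit; the helper name, the base URL, and the "one token left" threshold are assumptions.

package main

import (
    "fmt"
    "net/http"
    "strconv"
    "time"
)

// backoffAwareGet (hypothetical helper) issues a GET, then inspects the
// rate-limit headers added in #105. On a 429 it sleeps for Retry-After;
// when the bucket is nearly empty it waits out X-RateLimit-Reset instead
// of polling into the 429 wall.
func backoffAwareGet(url string) (*http.Response, error) {
    resp, err := http.Get(url)
    if err != nil {
        return nil, err
    }
    if resp.StatusCode == http.StatusTooManyRequests {
        if secs, convErr := strconv.Atoi(resp.Header.Get("Retry-After")); convErr == nil {
            time.Sleep(time.Duration(secs) * time.Second)
        }
        return resp, nil
    }
    remaining, _ := strconv.Atoi(resp.Header.Get("X-RateLimit-Remaining"))
    reset, _ := strconv.Atoi(resp.Header.Get("X-RateLimit-Reset"))
    if remaining <= 1 && reset > 0 {
        // Back off proactively: wait for the bucket to refill.
        time.Sleep(time.Duration(reset) * time.Second)
    }
    return resp, nil
}

func main() {
    // Base URL is illustrative; any endpoint behind the middleware works.
    resp, err := backoffAwareGet("http://localhost:8080/health")
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status, "remaining:", resp.Header.Get("X-RateLimit-Remaining"))
}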
org.yaml
@@ -53,6 +53,16 @@ defaults:
     performance: [Backend Engineer]
     docs: [Documentation Specialist]
     mixed: [Dev Lead]
+    # Evolution-cron categories (#93): these four are fired by hourly
+    # self-review schedules (Research Lead, Technical Researcher, Dev Lead,
+    # DevOps Engineer). Routing them to the same role that generated them
+    # is a safe default — it converts the summary into a delegation back
+    # to the author so they act on their own findings. Override per-org
+    # if you want a different fan-out.
+    research: [Research Lead]
+    plugins: [Technical Researcher]
+    template: [Dev Lead]
+    channels: [DevOps Engineer]
 
 # workspace_dir: not set by default — each agent gets an isolated Docker volume
 # Set per-workspace to bind-mount a host directory as /workspace
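The "override per-org" comment above is only a pointer; as a rough sketch (the keys and their placement are assumptions about how an org-level file would mirror these defaults, not something this commit adds), a different fan-out could look like:

# Hypothetical per-org override, placed wherever org-level settings live
# in your setup: send plugin findings to two roles instead of back to the
# Technical Researcher alone.
category_routing:
  plugins: [Technical Researcher, Dev Lead]
  channels: [DevOps Engineer, Dev Lead]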
platform/internal/middleware/ratelimit.go
@@ -4,6 +4,7 @@ package middleware
 import (
     "context"
     "net/http"
+    "strconv"
     "sync"
     "time"
 
@@ -71,11 +72,33 @@ func (rl *RateLimiter) Middleware() gin.HandlerFunc {
             b.lastReset = time.Now()
         }
 
+        // Issue #105 — advertise the current bucket state so clients and
+        // monitoring tools can back off proactively. Headers are set on every
+        // response (both allowed and throttled) so they're observable against
+        // any endpoint — /health, /metrics, and every /workspaces/* route.
+        //
+        // The `reset` value is seconds until the current bucket refills,
+        // matching the RFC 6585 Retry-After spec for 429 responses and the
+        // de-facto X-RateLimit-Reset convention (GitHub, Stripe, etc.).
+        remaining := b.tokens - 1
+        if remaining < 0 {
+            remaining = 0
+        }
+        resetSeconds := int(time.Until(b.lastReset.Add(rl.interval)).Seconds())
+        if resetSeconds < 0 {
+            resetSeconds = 0
+        }
+        c.Header("X-RateLimit-Limit", strconv.Itoa(rl.rate))
+        c.Header("X-RateLimit-Remaining", strconv.Itoa(remaining))
+        c.Header("X-RateLimit-Reset", strconv.Itoa(resetSeconds))
+
         if b.tokens <= 0 {
             rl.mu.Unlock()
+            // Retry-After is the canonical 429 signal per RFC 6585.
+            c.Header("Retry-After", strconv.Itoa(resetSeconds))
             c.JSON(http.StatusTooManyRequests, gin.H{
                 "error":       "rate limit exceeded",
-                "retry_after": rl.interval.Seconds(),
+                "retry_after": resetSeconds,
             })
             c.Abort()
             return
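Put together, a throttled response from this middleware should look roughly like the following; the status line, header names, and JSON keys follow from the code above, while the numeric values are illustrative for a 2-request/5-second limiter:

HTTP/1.1 429 Too Many Requests
Content-Type: application/json; charset=utf-8
X-RateLimit-Limit: 2
X-RateLimit-Remaining: 0
X-RateLimit-Reset: 4
Retry-After: 4

{"error":"rate limit exceeded","retry_after":4}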
platform/internal/middleware/ratelimit_test.go (new file, 72 lines)

package middleware

import (
    "context"
    "net/http"
    "net/http/httptest"
    "strconv"
    "testing"
    "time"

    "github.com/gin-gonic/gin"
)

// newTestLimiter spins up a tiny limiter with a 2-token/5s budget so tests can
// exhaust + recover without real-time delays.
func newTestLimiter(t *testing.T) (*RateLimiter, *gin.Engine) {
    t.Helper()
    gin.SetMode(gin.TestMode)
    ctx, cancel := context.WithCancel(context.Background())
    t.Cleanup(cancel)
    rl := NewRateLimiter(2, 5*time.Second, ctx)
    r := gin.New()
    r.Use(rl.Middleware())
    r.GET("/x", func(c *gin.Context) { c.String(http.StatusOK, "ok") })
    return rl, r
}

// TestRateLimit_HeadersPresentOnAllowedRequest covers issue #105 — every
// response (not just 429s) must carry the X-RateLimit-* triplet so clients
// can back off proactively.
func TestRateLimit_HeadersPresentOnAllowedRequest(t *testing.T) {
    _, r := newTestLimiter(t)
    w := httptest.NewRecorder()
    r.ServeHTTP(w, httptest.NewRequest(http.MethodGet, "/x", nil))

    if got := w.Header().Get("X-RateLimit-Limit"); got != "2" {
        t.Errorf("X-RateLimit-Limit = %q, want 2", got)
    }
    if got := w.Header().Get("X-RateLimit-Remaining"); got != "1" {
        t.Errorf("X-RateLimit-Remaining = %q, want 1", got)
    }
    reset, err := strconv.Atoi(w.Header().Get("X-RateLimit-Reset"))
    if err != nil || reset < 0 || reset > 5 {
        t.Errorf("X-RateLimit-Reset = %q, want 0-5", w.Header().Get("X-RateLimit-Reset"))
    }
}

// TestRateLimit_RetryAfterOn429 — throttled responses must carry Retry-After
// per RFC 6585, so curl/fetch clients back off the exact required window.
func TestRateLimit_RetryAfterOn429(t *testing.T) {
    _, r := newTestLimiter(t)
    // Burn through both tokens.
    for i := 0; i < 2; i++ {
        w := httptest.NewRecorder()
        r.ServeHTTP(w, httptest.NewRequest(http.MethodGet, "/x", nil))
        if w.Code != http.StatusOK {
            t.Fatalf("request %d: want 200, got %d", i+1, w.Code)
        }
    }
    // Third should 429.
    w := httptest.NewRecorder()
    r.ServeHTTP(w, httptest.NewRequest(http.MethodGet, "/x", nil))
    if w.Code != http.StatusTooManyRequests {
        t.Fatalf("3rd request: want 429, got %d", w.Code)
    }
    if got := w.Header().Get("Retry-After"); got == "" {
        t.Error("missing Retry-After header on 429")
    }
    if got := w.Header().Get("X-RateLimit-Remaining"); got != "0" {
        t.Errorf("X-RateLimit-Remaining = %q on 429, want 0", got)
    }
}
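Assuming the Go module root is the platform/ directory, the two tests above can be run on their own with something like:

go test ./internal/middleware/ -run TestRateLimit_ -v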