test(ci): kill absolute wall-clock perf assertions that false-red CI under host load #1381

Open
infra-sre wants to merge 4 commits from ci/timing-test-hygiene-host-load-internal into main
2 changed files with 58 additions and 24 deletions
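
In short: every timing assertion below now compares elapsed wall time against a budget the test itself configures (the wait timeout, the serial sleep sum, or the worker block window) instead of a hard-coded wall-clock deadline. A minimal before/after sketch of the idiom, with illustrative names only (do_work stands in for whichever call is under test):

    import time

    BUDGET_SECS = 5.0  # whatever budget the test already configures

    start = time.monotonic()
    do_work(timeout_secs=BUDGET_SECS)  # hypothetical call under test
    elapsed = time.monotonic() - start

    # Before: absolute deadline. It silently encodes "the CI host is
    # idle" and false-reds under load:
    #     assert elapsed < 0.5
    # After: structural ratio. Only a run that actually consumes the
    # budget (~1.0x it) trips the assert, however loaded the host is:
    assert elapsed < BUDGET_SECS * 0.2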

@@ -192,11 +192,22 @@ def test_pop_missing_id_returns_none(state: inbox.InboxState):
 def test_wait_returns_existing_head_immediately(state: inbox.InboxState):
     state.record(_msg("a"))
+    wait_timeout = 5.0
     start = time.monotonic()
-    msg = state.wait(timeout_secs=5.0)
+    msg = state.wait(timeout_secs=wait_timeout)
     elapsed = time.monotonic() - start
     assert msg is not None and msg.activity_id == "a"
-    assert elapsed < 0.5, f"wait should not block when queue non-empty (took {elapsed:.2f}s)"
+    # Structural assertion (NOT an absolute deadline): with a non-empty
+    # queue wait() returns its head without ever entering the timed
+    # block, so it consumes a tiny fraction of timeout_secs. Comparing
+    # against the timeout (rather than a magic "< 0.5s") keeps this
+    # robust on a CPU-starved CI host: only an implementation that
+    # actually blocks for the timeout (≈1.0×) trips it. 0.2× = wide
+    # jitter margin for a scheduler under contention.
+    assert elapsed < wait_timeout * 0.2, (
+        f"wait should not block when queue non-empty: took {elapsed:.2f}s "
+        f"({elapsed / wait_timeout:.2f}× the {wait_timeout:.0f}s timeout)"
+    )
 
 
 def test_wait_blocks_until_message_arrives(state: inbox.InboxState):

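For reference, the same ratio idiom carries over to the blocking path exercised by test_wait_blocks_until_message_arrives. A sketch only, not part of this diff, reusing the file's existing state fixture and _msg helper; the timer-driven producer and the 0.5× margin are illustrative assumptions:

    import threading
    import time

    def test_wait_blocks_until_message_arrives_sketch(state: inbox.InboxState):
        wait_timeout = 5.0
        # Hypothetical producer: record a message shortly after wait() starts.
        threading.Timer(0.05, lambda: state.record(_msg("b"))).start()
        start = time.monotonic()
        msg = state.wait(timeout_secs=wait_timeout)
        elapsed = time.monotonic() - start
        assert msg is not None and msg.activity_id == "b"
        # Structural: wait() woke on the message's arrival rather than by
        # exhausting the timeout; no absolute deadline involved.
        assert elapsed < wait_timeout * 0.5
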
@@ -829,16 +829,26 @@ def _stub_client_for_batch(get_responses: dict[str, MagicMock]) -> MagicMock:
 def test_batch_fetcher_runs_submitted_rows_concurrently():
-    # Three rows whose .get() blocks for ~120ms each. With 4 workers the
-    # batch should complete in ~120ms (parallel), not ~360ms (serial).
-    # The 250ms ceiling accommodates CI scheduler jitter while still
-    # discriminating concurrent (~120ms) from serial (~360ms).
+    # Three rows whose .get() blocks for ~PER_ROW each. With >=3 workers
+    # the batch should overlap the sleeps (~1× PER_ROW wall time), not
+    # run them back-to-back (~3× PER_ROW).
+    #
+    # NOTE: we assert a *structural* property — observed wall time is
+    # meaningfully below the serial sum — NOT an absolute deadline. An
+    # absolute "< 250ms" ceiling silently encodes "the runner host is
+    # idle"; under CI contention (load >100) the same concurrent run
+    # measures >250ms and false-reds an unrelated PR (motivating
+    # incident: #190 / PR#1348, a 1.6ms overshoot on a load-107 host).
+    # Host load scales serial and concurrent timing together, so the
+    # ratio between them stays a reliable concurrency discriminator.
     import time
 
-    barrier_start = [0.0]
+    PER_ROW = 0.12
+    N_ROWS = 3
+    serial_total = PER_ROW * N_ROWS
 
     def _slow_get(url: str, headers: dict[str, str] | None = None) -> MagicMock:
-        time.sleep(0.12)
+        time.sleep(PER_ROW)
         for fid in ("a", "b", "c"):
             if f"/pending-uploads/{fid}/content" in url:
                 return _make_resp(200, content=b"X", content_type="text/plain")
@@ -855,16 +865,22 @@ def test_batch_fetcher_runs_submitted_rows_concurrently():
         client=client,
         max_workers=4,
     )
-    barrier_start[0] = time.time()
+    start = time.monotonic()
     for fid in ("a", "b", "c"):
         bf.submit(_row_with_id(f"act-{fid}", fid))
     bf.wait_all()
-    elapsed = time.time() - barrier_start[0]
+    elapsed = time.monotonic() - start
     bf.close()
-    assert elapsed < 0.25, (
-        f"3 rows × 120ms with 4 workers should finish in <250ms; got {elapsed:.3f}s "
-        "(suggests serial execution — Phase 5b regression)"
+    # Concurrent execution overlaps the three sleeps, so wall time is far
+    # below the serial sum. 0.6× leaves generous headroom for scheduler
+    # jitter while still failing loudly if the rows ran serially (which
+    # would land at ~1.0× serial_total regardless of host load).
+    assert elapsed < serial_total * 0.6, (
+        f"3 rows × {PER_ROW}s with 4 workers ran in {elapsed:.3f}s; expected "
+        f"well under the {serial_total:.2f}s serial sum (got "
+        f"{elapsed / serial_total:.2f}× — suggests serial execution, "
+        "Phase 5b regression)"
     )
     assert client.get.call_count == 3
     assert client.post.call_count == 3
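
To make the NOTE's load argument concrete, under the proportional-slowdown model it describes: if contention stretches every sleep by a factor k, the concurrent run measures about k × 0.12 s and a serial run about k × 0.36 s. The old absolute ceiling fails once k × 0.12 s exceeds 0.25 s, i.e. k above roughly 2, while elapsed / serial_total stays near 0.33 for any k when the rows overlap and near 1.0 when they serialize, so the 0.6× cutoff keeps discriminating at any load.
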
@@ -1050,13 +1066,14 @@ def test_batch_fetcher_close_after_timeout_does_not_block_on_running_workers():
     import threading
     import time
 
+    BLOCK_SECS = 5.0  # the leaked worker's max self-unblock window
     blocker = threading.Event()  # never set — workers stay running
 
     def _hang_get(url, headers=None):
-        # Wait at most ~5s so a buggy implementation eventually unblocks
-        # the test instead of timing out the whole pytest run, but
-        # nothing legitimate should reach this fallback.
-        blocker.wait(timeout=5.0)
+        # Wait at most BLOCK_SECS so a buggy implementation eventually
+        # unblocks the test instead of timing out the whole pytest run,
+        # but nothing legitimate should reach this fallback.
+        blocker.wait(timeout=BLOCK_SECS)
         return _make_resp(200, content=b"x", content_type="text/plain")
 
     client = MagicMock()
@@ -1073,17 +1090,23 @@ def test_batch_fetcher_close_after_timeout_does_not_block_on_running_workers():
     bf.submit(_row_with_id("act-a", "a"))
     # Tiny timeout — wait_all must report the future as not_done.
     bf.wait_all(timeout=0.05)
-    t0 = time.time()
+    start = time.monotonic()
     bf.close()
-    elapsed = time.time() - t0
+    elapsed = time.monotonic() - start
 
     # Unblock the lingering worker so it doesn't pollute later tests.
     blocker.set()
 
-    # Without the cancel-on-timeout fix, close() would block until
-    # blocker.set() — i.e., the full ~5s. With the fix it returns
-    # immediately because shutdown(wait=False) doesn't drain.
-    assert elapsed < 1.0, (
-        f"close() blocked for {elapsed:.2f}s after wait_all timeout — "
+    # Structural assertion (NOT an absolute deadline): without the
+    # cancel-on-timeout fix, close() drains the leaked worker and blocks
+    # the FULL BLOCK_SECS; with the fix shutdown(wait=False) returns
+    # without draining, so close() finishes a small fraction of that.
+    # We compare against BLOCK_SECS rather than a magic "< 1.0s" so a
+    # CPU-starved CI host (which inflates both the drain and the
+    # non-drain path together) cannot false-fail this — only genuinely
+    # draining (≈1.0× BLOCK_SECS) trips it. 0.3× = wide jitter margin.
+    assert elapsed < BLOCK_SECS * 0.3, (
+        f"close() took {elapsed:.2f}s after wait_all timeout "
+        f"({elapsed / BLOCK_SECS:.2f}× the {BLOCK_SECS:.0f}s worker block) — "
         "cancel-on-timeout regression: close() is draining instead of bailing"
     )
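
If more suites adopt this idiom, the ratio check could be factored into a single helper so the margin and the failure message stay consistent. A sketch only; assert_well_under is a hypothetical name, not part of this PR:

    def assert_well_under(elapsed: float, budget: float, frac: float, what: str) -> None:
        # Structural timing check: fail only when elapsed approaches the
        # budget itself (~1.0x), never on host-load jitter alone.
        assert elapsed < budget * frac, (
            f"{what}: took {elapsed:.2f}s "
            f"({elapsed / budget:.2f}x the {budget:.2f}s budget; cutoff {frac:.1f}x)"
        )

    # e.g. the close() check above would read:
    # assert_well_under(elapsed, BLOCK_SECS, 0.3, "close() after wait_all timeout")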