From 1380bf090766ad27317d69689ba6b183072f3e64 Mon Sep 17 00:00:00 2001
From: Molecule AI Infra-SRE
Date: Mon, 11 May 2026 16:59:54 +0000
Subject: [PATCH] fix(a2a): add cache-first check to
 enrich_peer_metadata_nonblocking
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

enrich_peer_metadata_nonblocking (a2a_client.py) never checked the
_peer_metadata cache before scheduling a background fetch — it always
returned None and always fired the executor thread pool. The docstring
promised "cache hit: return the cached record" but the code did not
implement it.

Fix: add the same TTL check that enrich_peer_metadata uses before
scheduling the worker. On a warm cache hit the function now returns
immediately without touching the in-flight set or the executor.

Closes the remaining 5 test failures in test_a2a_mcp_server.py on main
that were not covered by PR #508's test-assertions fix.

Co-Authored-By: Claude Opus 4.7
---
 workspace/a2a_client.py | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)

diff --git a/workspace/a2a_client.py b/workspace/a2a_client.py
index 7cc79b5f..c6567ff4 100644
--- a/workspace/a2a_client.py
+++ b/workspace/a2a_client.py
@@ -187,11 +187,27 @@ def enrich_peer_metadata_nonblocking(
     canon = _validate_peer_id(peer_id)
     if canon is None:
         return None
-    # Schedule background fetch unless one is already in flight for this
-    # peer. The synchronous version atomically reads-then-writes; the
-    # async version splits that into "schedule fetch" + "fetch fills
-    # cache later." The in-flight set keeps a flurry of pushes from
-    # one peer (e.g., a chatty agent) from spawning N parallel GETs.
+
+    # Cache-first: return immediately on warm hit (same TTL logic as the
+    # sync path). This is the hot-path optimisation — every push from a
+    # warm peer must return the record without touching the in-flight set
+    # or the executor. A background fetch that races to fill the cache
+    # will find the entry already present when it calls
+    # enrich_peer_metadata (which does its own fresh-TTL check), so it
+    # exits as a no-op with no extra network traffic.
+    current = time.monotonic()
+    cached = _peer_metadata_get(canon)
+    if cached is not None:
+        fetched_at, record = cached
+        if current - fetched_at < _PEER_METADATA_TTL_SECONDS:
+            return record
+
+    # Cache miss or TTL expired: schedule background fetch unless one is
+    # already in flight for this peer. The synchronous version atomically
+    # reads-then-writes; the async version splits that into "schedule
+    # fetch" + "fetch fills cache later." The in-flight set keeps a
+    # flurry of pushes from one peer (e.g., a chatty agent) from
+    # spawning N parallel GETs.
     with _enrich_in_flight_lock:
         if canon in _enrich_in_flight:
             return None
--
2.45.2
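
Note for reviewers: below is a minimal, self-contained sketch of the patched
control flow in one place, since the diff only shows the changed hunk. Only
the identifiers that appear in the diff (_validate_peer_id, _peer_metadata,
_peer_metadata_get, _PEER_METADATA_TTL_SECONDS, _enrich_in_flight,
_enrich_in_flight_lock, enrich_peer_metadata_nonblocking) come from the
patch; the dict-backed cache, its lock, the worker body, the executor size,
and the TTL value are assumptions made so the sketch runs stand-alone, not
the project's actual implementation.

    import threading
    import time
    from concurrent.futures import ThreadPoolExecutor
    from typing import Optional, Tuple

    _PEER_METADATA_TTL_SECONDS = 300.0      # assumed TTL; real value not in the diff
    _peer_metadata: dict = {}               # canon -> (fetched_at, record); assumed shape
    _peer_metadata_lock = threading.Lock()  # assumed; the diff implies thread safety
    _enrich_in_flight: set = set()
    _enrich_in_flight_lock = threading.Lock()
    _executor = ThreadPoolExecutor(max_workers=4)  # assumed pool size

    def _validate_peer_id(peer_id: str) -> Optional[str]:
        # Stand-in canonicaliser; the real one is not shown in the patch.
        canon = peer_id.strip().lower()
        return canon or None

    def _peer_metadata_get(canon: str) -> Optional[Tuple[float, dict]]:
        with _peer_metadata_lock:
            return _peer_metadata.get(canon)

    def _fetch_and_fill(canon: str) -> None:
        # Placeholder for the network GET that fills the cache, then clears
        # the in-flight marker so a later miss can schedule a new fetch.
        try:
            record = {"peer": canon}  # assumed fetch result
            with _peer_metadata_lock:
                _peer_metadata[canon] = (time.monotonic(), record)
        finally:
            with _enrich_in_flight_lock:
                _enrich_in_flight.discard(canon)

    def enrich_peer_metadata_nonblocking(peer_id: str) -> Optional[dict]:
        canon = _validate_peer_id(peer_id)
        if canon is None:
            return None
        # Cache-first check added by this patch: a warm hit returns
        # without touching the in-flight set or the executor.
        cached = _peer_metadata_get(canon)
        if cached is not None:
            fetched_at, record = cached
            if time.monotonic() - fetched_at < _PEER_METADATA_TTL_SECONDS:
                return record
        # Miss or stale: dedup via the in-flight set, then schedule a fetch.
        with _enrich_in_flight_lock:
            if canon in _enrich_in_flight:
                return None
            _enrich_in_flight.add(canon)
        _executor.submit(_fetch_and_fill, canon)
        return None

A quick way to exercise the fixed behaviour: call the function twice with the
same peer id, sleeping briefly between calls so the worker can fill the
cache; the first call returns None and schedules the fetch, the second
returns the cached record without submitting anything to the executor.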