refactor(workspace): extract messaging tools from a2a_tools.py to a2a_tools_messaging.py (RFC #2873 iter 4d)

Fourth slice of the a2a_tools.py split (stacked on iter 4c). Owns the
four human-and-peer messaging MCP tools + the chat-upload helper:

  * _upload_chat_files — stage local paths to /chat/uploads
  * tool_send_message_to_user — push canvas-chat via /notify
  * tool_list_peers — discover peers across registered workspaces
  * tool_get_workspace_info — JSON-encode workspace info
  * tool_chat_history — fetch prior conversation rows with a peer

a2a_tools.py shrinks from 508 → 213 LOC (−295). The remaining 213
is just report_activity + back-compat re-exports. Inbox tools
(tool_inbox_peek/pop/wait_for_message) deferred to iter 4e.

Layered architecture: messaging depends on a2a_tools_rbac (iter 4a),
a2a_client, platform_auth — NOT on kitchen-sink a2a_tools. An
import-contract test pins this so future refactors that add
`from a2a_tools import …` fail in CI.

Tests:
  * 28 patch sites in TestToolSendMessageToUser + TestToolListPeers +
    TestToolGetWorkspaceInfo + TestChatHistory retargeted from
    `a2a_tools.{httpx, get_peers_*, get_workspace_info,
    _upload_chat_files, _peer_*, list_registered_workspaces}` to
    `a2a_tools_messaging.…` because the call sites moved.
  * test_a2a_tools_messaging.py adds 7 new tests:
    - 5 alias drift gates
    - 2 import-contract tests (no top-level a2a_tools dep + a2a_tools
      surfaces every messaging symbol)

137 tests total in the a2a_tools suite, all green.

Refs RFC #2873.
This commit is contained in:
Hongming Wang 2026-05-05 05:16:11 -07:00
parent 210a26d31a
commit 3e0d2e650a
5 changed files with 458 additions and 336 deletions

View File

@ -57,6 +57,7 @@ TOP_LEVEL_MODULES = {
"a2a_tools",
"a2a_tools_delegation",
"a2a_tools_memory",
"a2a_tools_messaging",
"a2a_tools_rbac",
"adapter_base",
"agent",

View File

@ -129,200 +129,19 @@ from a2a_tools_delegation import ( # noqa: E402 (import after the from-a2a_cli
)
async def _upload_chat_files(
    client: httpx.AsyncClient,
    paths: list[str],
    workspace_id: str | None = None,
) -> tuple[list[dict], str | None]:
    """Upload local file paths through /workspaces/<self>/chat/uploads.

    The platform stages each upload under /workspace/.molecule/chat-uploads
    (an "allowed root" the canvas knows how to render via the Download
    endpoint) and returns metadata the broadcast payload references.

    Why we route through upload instead of just passing the agent's path:
    the canvas's allowed-root list is /configs, /workspace, /home, /plugins —
    files at /tmp or /root would be unreachable. Uploading copies the
    bytes into an allowed root regardless of where the agent wrote them.

    Returns (attachments, error). On any failure the caller should NOT
    fire the notify — a partial attach would surface a half-rendered chip.
    """
    if not paths:
        return [], None
    files_payload: list[tuple[str, tuple[str, bytes, str]]] = []
    for p in paths:
        # Validate and read each path up front — any single failure aborts
        # the whole batch (see docstring: no partial attachments).
        if not isinstance(p, str) or not p:
            return [], f"Error: invalid attachment path {p!r}"
        if not os.path.isfile(p):
            return [], f"Error: attachment not found: {p}"
        try:
            with open(p, "rb") as fh:
                data = fh.read()
        except OSError as e:
            return [], f"Error reading {p}: {e}"
        # Sniff mime from filename so the canvas can pick the right
        # icon / preview / inline-image renderer. Pre-fix this was
        # hardcoded application/octet-stream and chat_files.go's
        # Upload trusts whatever Content-Type the multipart part
        # carries — `mt := fh.Header.Get("Content-Type")` only falls
        # back to extension-sniffing when the header is empty. So a
        # hardcoded octet-stream meant every attachment lost its
        # real type forever, breaking the canvas chip's icon logic.
        mime_type, _ = mimetypes.guess_type(p)
        if not mime_type:
            mime_type = "application/octet-stream"
        files_payload.append(("files", (os.path.basename(p), data, mime_type)))
    # Explicit workspace_id wins; blank/None falls back to the module default.
    target_workspace_id = (workspace_id or "").strip() or WORKSPACE_ID
    try:
        resp = await client.post(
            f"{PLATFORM_URL}/workspaces/{target_workspace_id}/chat/uploads",
            files=files_payload,
            headers=_auth_headers_for_heartbeat(target_workspace_id),
        )
    except Exception as e:
        return [], f"Error uploading attachments: {e}"
    if resp.status_code != 200:
        return [], f"Error: chat/uploads returned {resp.status_code}: {resp.text[:200]}"
    try:
        body = resp.json()
    except Exception as e:
        return [], f"Error parsing upload response: {e}"
    uploaded = body.get("files") or []
    # The server must echo exactly one metadata entry per file we sent;
    # any mismatch means a partially-staged batch, which we reject whole.
    if not isinstance(uploaded, list) or len(uploaded) != len(paths):
        return [], f"Error: upload returned {len(uploaded) if isinstance(uploaded, list) else 'invalid'} entries for {len(paths)} files"
    return uploaded, None
async def tool_send_message_to_user(
    message: str,
    attachments: list[str] | None = None,
    workspace_id: str | None = None,
) -> str:
    """Send a message directly to the user's canvas chat via WebSocket.

    Args:
        message: The text to display in the user's chat. Required even
            when sending attachments — set to a short caption like
            "Here's the build output:" or "Done — see attached."
        attachments: Optional list of absolute file paths inside this
            container. Each is uploaded to the platform and rendered
            in the canvas as a clickable download chip. Use this
            instead of pasting paths in the message text — paths
            render as plain text and the user can't click them.
            Examples:
                attachments=["/tmp/build-output.zip"]
                attachments=["/workspace/report.pdf", "/workspace/data.csv"]
        workspace_id: Optional. When the agent is registered in MULTIPLE
            workspaces (external multi-workspace MCP path), this
            selects which workspace's chat to deliver the message to —
            should match the ``arrival_workspace_id`` of the inbound
            message you're replying to so the user sees the reply in
            the same canvas they typed in. Single-workspace agents
            omit this; the message routes to the only registered
            workspace.

    Returns:
        A human-readable success string, or a string starting with
        "Error" on any failure (upload, HTTP, or network).
    """
    if not message:
        return "Error: message is required"
    target_workspace_id = (workspace_id or "").strip() or WORKSPACE_ID
    try:
        async with httpx.AsyncClient(timeout=60.0) as client:
            # Stage attachments first — if that fails we must not notify,
            # or the canvas would render a half-broken attachment chip.
            uploaded, upload_err = await _upload_chat_files(
                client, attachments or [], workspace_id=target_workspace_id,
            )
            if upload_err:
                return upload_err
            payload: dict = {"message": message}
            if uploaded:
                payload["attachments"] = uploaded
            resp = await client.post(
                f"{PLATFORM_URL}/workspaces/{target_workspace_id}/notify",
                json=payload,
                headers=_auth_headers_for_heartbeat(target_workspace_id),
            )
            if resp.status_code == 200:
                if uploaded:
                    return f"Message sent to user with {len(uploaded)} attachment(s)"
                return "Message sent to user"
            return f"Error: platform returned {resp.status_code}"
    except Exception as e:
        return f"Error sending message: {e}"
async def tool_list_peers(source_workspace_id: str | None = None) -> str:
    """List all workspaces this agent can communicate with.

    Behavior:
      - ``source_workspace_id`` set — list peers of that one workspace.
      - Unset, single-workspace mode — list peers of WORKSPACE_ID
        (the legacy path, unchanged).
      - Unset, multi-workspace mode (MOLECULE_WORKSPACES populated) —
        aggregate across every registered workspace, prefixing each
        peer with its source so the agent / user can see the full peer
        surface in one call.

    Side-effect: populates ``_peer_to_source`` so subsequent
    ``tool_delegate_task(target)`` auto-routes through the correct
    sending workspace without the agent needing ``source_workspace_id``.
    """
    sources: list[str]
    aggregate = False
    if source_workspace_id:
        sources = [source_workspace_id]
    else:
        registered = list_registered_workspaces()
        if len(registered) > 1:
            sources = registered
            aggregate = True
        else:
            sources = [WORKSPACE_ID]
    all_peers: list[tuple[str, dict]] = []  # (source, peer_record)
    diagnostics: list[tuple[str, str]] = []  # (source, diagnostic)
    for src in sources:
        peers, diagnostic = await get_peers_with_diagnostic(source_workspace_id=src)
        if peers:
            for p in peers:
                all_peers.append((src, p))
        elif diagnostic is not None:
            diagnostics.append((src, diagnostic))
    if not all_peers:
        # Distinguish "discovery failed" (surface the diagnostics) from
        # "discovery succeeded but the registry is genuinely empty".
        if diagnostics:
            joined = "; ".join(f"[{src[:8]}] {d}" for src, d in diagnostics)
            return f"No peers found. {joined}"
        return (
            "You have no peers in the platform registry. "
            "(No parent, no children, no siblings registered.)"
        )
    lines = []
    for src, p in all_peers:
        status = p.get("status", "unknown")
        role = p.get("role", "")
        peer_id = p["id"]
        # Cache name for use in delegate_task
        _peer_names[peer_id] = p["name"]
        # Cache the source workspace so tool_delegate_task auto-routes
        _peer_to_source[peer_id] = src
        if aggregate:
            lines.append(
                f"- {p['name']} (ID: {peer_id}, status: {status}, role: {role}, via: {src[:8]})"
            )
        else:
            lines.append(f"- {p['name']} (ID: {peer_id}, status: {status}, role: {role})")
    return "\n".join(lines)
async def tool_get_workspace_info(source_workspace_id: str | None = None) -> str:
    """Get this workspace's own info, JSON-encoded for the agent.

    ``source_workspace_id`` selects which registered workspace to
    introspect when the agent is registered into multiple workspaces.
    Unset falls back to module-level WORKSPACE_ID.
    """
    info = await get_workspace_info(source_workspace_id=source_workspace_id)
    return json.dumps(info, indent=2)
# Messaging tool handlers — extracted to a2a_tools_messaging
# (RFC #2873 iter 4d). Re-imported here so call sites + tests that
# reference ``a2a_tools.tool_send_message_to_user`` /
# ``tool_list_peers`` / ``tool_get_workspace_info`` /
# ``tool_chat_history`` / ``_upload_chat_files`` keep resolving
# identically.
from a2a_tools_messaging import ( # noqa: E402 (import after the top-of-module imports)
_upload_chat_files,
tool_chat_history,
tool_get_workspace_info,
tool_list_peers,
tool_send_message_to_user,
)
# Memory tool handlers — extracted to a2a_tools_memory (RFC #2873 iter 4c).
@ -335,120 +154,6 @@ from a2a_tools_memory import ( # noqa: E402 (import after the top-of-module im
)
# ---------------------------------------------------------------------------
# Inbox tools — inbound delivery for the standalone molecule-mcp path.
# ---------------------------------------------------------------------------
#
# The InboxState singleton is set by mcp_cli before the MCP server starts
# (see workspace/inbox.py for the rationale). In-container runtimes never
# call ``inbox.activate(...)``, so ``inbox.get_state()`` returns None and
# these tools surface an informational error rather than raising.
#
# When-to-use guidance (mirrored in platform_tools/registry.py): agents
# in standalone-runtime mode should call ``wait_for_message`` to block
# on the next inbound message after they've emitted a reply, forming
# the loop ``wait → respond → wait``. ``inbox_peek`` is for inspecting
# the queue without consuming; ``inbox_pop`` removes a handled message.
# Shared error string returned by every inbox tool when the InboxState
# singleton was never activated — i.e. any runtime other than the
# standalone molecule-mcp wrapper (see the rationale block above).
_INBOX_NOT_ENABLED_MSG = (
    "Error: inbox polling is not enabled in this runtime. The standalone "
    "molecule-mcp wrapper activates it; in-container runtimes receive "
    "messages via push delivery and do not need these tools."
)
async def tool_chat_history(
    peer_id: str,
    limit: int = 20,
    before_ts: str = "",
    source_workspace_id: str | None = None,
) -> str:
    """Fetch the prior conversation with one peer.

    Hits ``/workspaces/<self>/activity?peer_id=<peer>&limit=<N>``
    against the workspace-server, which returns activity rows where
    the peer is either the sender (``source_id=peer`` — they sent us
    the message) or the recipient (``target_id=peer`` — we sent to
    them) of an A2A turn — both sides of the conversation in
    chronological order.

    Args:
        peer_id: The other workspace's UUID. Same value the agent
            sees as ``peer_id`` on a peer_agent push or ``workspace_id``
            on a delegate_task call.
        limit: Maximum rows to return; capped server-side at 500. The
            default of 20 covers "most recent context for this peer"
            without flooding the agent's context window.
        before_ts: Optional RFC3339 timestamp; only rows strictly
            older are returned. Used to page backward through long
            histories — pass the oldest ``ts`` from the previous
            response. Empty (default) returns the most recent ``limit``
            rows.
        source_workspace_id: Which registered workspace's activity log
            to query. Auto-routes via ``_peer_to_source`` cache when
            unset (the workspace this peer was discovered through);
            falls back to module-level WORKSPACE_ID for single-workspace
            operators.

    Returns a JSON-encoded list of activity rows (or an error string
    starting with ``Error:`` so the agent can branch). Each row carries
    ``activity_type``, ``source_id``, ``target_id``, ``method``,
    ``summary``, ``request_body``, ``response_body``, ``status``,
    ``created_at`` — same shape ``inbox_peek`` and the canvas chat
    loader already see.
    """
    if not peer_id or not isinstance(peer_id, str):
        return "Error: peer_id is required"
    # Silently normalize nonsense limits instead of erroring — the agent
    # just wants "some recent rows"; mirror the server's 500 cap locally.
    if not isinstance(limit, int) or limit <= 0:
        limit = 20
    if limit > 500:
        limit = 500
    src = source_workspace_id or _peer_to_source.get(peer_id) or WORKSPACE_ID
    params: dict[str, str] = {
        "peer_id": peer_id,
        "limit": str(limit),
    }
    # Forward verbatim — the server route validates as RFC3339 at the
    # trust boundary and translates into a `created_at < $X` clause.
    if before_ts:
        params["before_ts"] = before_ts
    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            resp = await client.get(
                f"{PLATFORM_URL}/workspaces/{src}/activity",
                params=params,
                headers=_auth_headers_for_heartbeat(src),
            )
    except Exception as exc:  # noqa: BLE001
        return f"Error: chat_history request failed: {exc}"
    if resp.status_code == 400:
        # Trust-boundary rejection (malformed peer_id, etc.) — surface
        # the server's reason verbatim so the agent can correct itself.
        try:
            err = resp.json().get("error", "bad request")
        except Exception:  # noqa: BLE001
            err = "bad request"
        return f"Error: {err}"
    if resp.status_code >= 400:
        return f"Error: chat_history returned HTTP {resp.status_code}"
    try:
        rows = resp.json()
    except Exception:  # noqa: BLE001
        return "Error: chat_history response was not JSON"
    if not isinstance(rows, list):
        return "Error: chat_history response was not a list"
    # Server returns DESC (most recent first); reverse to chronological
    # so the agent reads the conversation top-down like a chat log.
    rows.reverse()
    return json.dumps(rows)
async def tool_inbox_peek(limit: int = 10) -> str:
"""Return up to ``limit`` pending inbound messages without removing them."""
import inbox # local import — avoids a circular dep at module load

View File

@ -0,0 +1,324 @@
"""Messaging tool handlers — single-concern slice of the a2a_tools surface.
Extracted from ``a2a_tools.py`` (RFC #2873 iter 4d). Owns the four
human-and-peer messaging MCP tools + the chat-upload helper they share:
* ``tool_send_message_to_user`` push a canvas-chat message via the
platform's ``/notify`` endpoint.
* ``tool_list_peers`` discover peers across one or many registered
workspaces, with side-effect of populating ``_peer_to_source`` for
delegate-task auto-routing.
* ``tool_get_workspace_info`` JSON-encode the workspace's own info.
* ``tool_chat_history`` fetch prior conversation rows with a peer.
* ``_upload_chat_files`` internal helper for the message-attachments
code path; routes local file paths through the platform's
``/chat/uploads`` so the canvas can render them as download chips.
Imports the auth-header primitive from ``a2a_tools_rbac`` (iter 4a).
"""
from __future__ import annotations
import json
import mimetypes
import os
import httpx
from a2a_client import (
PLATFORM_URL,
WORKSPACE_ID,
_peer_names,
_peer_to_source,
get_peers_with_diagnostic,
get_workspace_info,
)
from a2a_tools_rbac import auth_headers_for_heartbeat as _auth_headers_for_heartbeat
from platform_auth import list_registered_workspaces
async def _upload_chat_files(
    client: httpx.AsyncClient,
    paths: list[str],
    workspace_id: str | None = None,
) -> tuple[list[dict], str | None]:
    """Stage local files through /workspaces/<self>/chat/uploads.

    The platform copies each upload under /workspace/.molecule/chat-uploads
    (an "allowed root" the canvas can serve via its Download endpoint) and
    returns the metadata the broadcast payload references.

    Why upload instead of passing the agent's local path: the canvas only
    serves files under /configs, /workspace, /home, /plugins — anything the
    agent wrote to /tmp or /root would be unreachable. Uploading copies the
    bytes into an allowed root regardless of where they live.

    Returns ``(attachments, error)``. On any failure the caller should NOT
    fire the notify — a partial attach would surface a half-rendered chip.
    """
    if not paths:
        return [], None

    multipart: list[tuple[str, tuple[str, bytes, str]]] = []
    for path in paths:
        if not isinstance(path, str) or not path:
            return [], f"Error: invalid attachment path {path!r}"
        if not os.path.isfile(path):
            return [], f"Error: attachment not found: {path}"
        try:
            with open(path, "rb") as handle:
                contents = handle.read()
        except OSError as e:
            return [], f"Error reading {path}: {e}"
        # Sniff the mime type from the filename so the canvas picks the
        # right icon / preview / inline-image renderer. The server-side
        # Upload trusts whatever Content-Type the multipart part carries
        # (`mt := fh.Header.Get("Content-Type")`) and only falls back to
        # extension-sniffing when the header is empty — so a hardcoded
        # octet-stream here would lose the real type forever and break
        # the canvas chip's icon logic.
        guessed, _ = mimetypes.guess_type(path)
        multipart.append(
            (
                "files",
                (os.path.basename(path), contents, guessed or "application/octet-stream"),
            )
        )

    target = (workspace_id or "").strip() or WORKSPACE_ID
    try:
        resp = await client.post(
            f"{PLATFORM_URL}/workspaces/{target}/chat/uploads",
            files=multipart,
            headers=_auth_headers_for_heartbeat(target),
        )
    except Exception as e:
        return [], f"Error uploading attachments: {e}"
    if resp.status_code != 200:
        return [], f"Error: chat/uploads returned {resp.status_code}: {resp.text[:200]}"
    try:
        body = resp.json()
    except Exception as e:
        return [], f"Error parsing upload response: {e}"

    uploaded = body.get("files") or []
    # One metadata entry per file sent, or the batch is rejected whole.
    if not isinstance(uploaded, list) or len(uploaded) != len(paths):
        return [], f"Error: upload returned {len(uploaded) if isinstance(uploaded, list) else 'invalid'} entries for {len(paths)} files"
    return uploaded, None
async def tool_send_message_to_user(
    message: str,
    attachments: list[str] | None = None,
    workspace_id: str | None = None,
) -> str:
    """Send a message directly to the user's canvas chat via WebSocket.

    Args:
        message: Text shown in the user's chat. Required even when only
            sending attachments — use a short caption like "Here's the
            build output:" or "Done — see attached."
        attachments: Optional absolute file paths inside this container.
            Each one is uploaded to the platform and rendered in the
            canvas as a clickable download chip. Prefer this over pasting
            paths into the message text — raw paths render as plain text
            the user cannot click. Examples:
                attachments=["/tmp/build-output.zip"]
                attachments=["/workspace/report.pdf", "/workspace/data.csv"]
        workspace_id: Optional. When the agent is registered in MULTIPLE
            workspaces (external multi-workspace MCP path), this selects
            which workspace's chat receives the message — match the
            ``arrival_workspace_id`` of the inbound message you are
            answering so the reply lands in the same canvas the user
            typed in. Single-workspace agents omit it; the message routes
            to the only registered workspace.

    Returns:
        A success string, or a string starting with "Error" on failure.
    """
    if not message:
        return "Error: message is required"
    target = (workspace_id or "").strip() or WORKSPACE_ID
    try:
        async with httpx.AsyncClient(timeout=60.0) as client:
            # Stage attachments first: if the upload fails we skip the
            # notify entirely rather than show a half-rendered chip.
            uploaded, err = await _upload_chat_files(
                client, attachments or [], workspace_id=target,
            )
            if err:
                return err
            body: dict = {"message": message}
            if uploaded:
                body["attachments"] = uploaded
            resp = await client.post(
                f"{PLATFORM_URL}/workspaces/{target}/notify",
                json=body,
                headers=_auth_headers_for_heartbeat(target),
            )
            if resp.status_code != 200:
                return f"Error: platform returned {resp.status_code}"
            if uploaded:
                return f"Message sent to user with {len(uploaded)} attachment(s)"
            return "Message sent to user"
    except Exception as e:
        return f"Error sending message: {e}"
async def tool_list_peers(source_workspace_id: str | None = None) -> str:
    """List all workspaces this agent can communicate with.

    Routing:
      - ``source_workspace_id`` given — list peers of that one workspace.
      - Omitted, single-workspace mode — list peers of WORKSPACE_ID
        (the legacy path, unchanged).
      - Omitted, multi-workspace mode (MOLECULE_WORKSPACES populated) —
        aggregate over every registered workspace, tagging each peer with
        its source so the full peer surface is visible in one call.

    Side-effect: fills ``_peer_to_source`` so a later
    ``tool_delegate_task(target)`` auto-routes through the right sending
    workspace without the agent supplying ``source_workspace_id``.
    """
    if source_workspace_id:
        sources, aggregate = [source_workspace_id], False
    else:
        registered = list_registered_workspaces()
        aggregate = len(registered) > 1
        sources = registered if aggregate else [WORKSPACE_ID]

    found: list[tuple[str, dict]] = []       # (source, peer_record)
    problems: list[tuple[str, str]] = []     # (source, diagnostic)
    for src in sources:
        peers, diagnostic = await get_peers_with_diagnostic(source_workspace_id=src)
        if peers:
            found.extend((src, peer) for peer in peers)
        elif diagnostic is not None:
            problems.append((src, diagnostic))

    if not found:
        # Failed discovery (surface diagnostics) vs. genuinely empty registry.
        if problems:
            joined = "; ".join(f"[{src[:8]}] {d}" for src, d in problems)
            return f"No peers found. {joined}"
        return (
            "You have no peers in the platform registry. "
            "(No parent, no children, no siblings registered.)"
        )

    rendered = []
    for src, peer in found:
        peer_id = peer["id"]
        status = peer.get("status", "unknown")
        role = peer.get("role", "")
        # Cache name + source workspace for delegate_task auto-routing.
        _peer_names[peer_id] = peer["name"]
        _peer_to_source[peer_id] = src
        suffix = f", via: {src[:8]}" if aggregate else ""
        rendered.append(
            f"- {peer['name']} (ID: {peer_id}, status: {status}, role: {role}{suffix})"
        )
    return "\n".join(rendered)
async def tool_get_workspace_info(source_workspace_id: str | None = None) -> str:
    """Return this workspace's own info as pretty-printed JSON.

    ``source_workspace_id`` picks which registered workspace to introspect
    when the agent is registered into multiple workspaces; when unset the
    module-level WORKSPACE_ID is used.
    """
    return json.dumps(
        await get_workspace_info(source_workspace_id=source_workspace_id),
        indent=2,
    )
async def tool_chat_history(
    peer_id: str,
    limit: int = 20,
    before_ts: str = "",
    source_workspace_id: str | None = None,
) -> str:
    """Fetch the prior conversation with one peer.

    Queries ``/workspaces/<self>/activity?peer_id=<peer>&limit=<N>`` on
    the workspace-server, which returns activity rows where the peer is
    either the sender (``source_id=peer`` — they messaged us) or the
    recipient (``target_id=peer`` — we messaged them) of an A2A turn —
    both sides of the conversation.

    Args:
        peer_id: The other workspace's UUID — the same value seen as
            ``peer_id`` on a peer_agent push or ``workspace_id`` on a
            delegate_task call.
        limit: Maximum rows to return; capped server-side at 500. The
            default of 20 gives recent context without flooding the
            agent's context window.
        before_ts: Optional RFC3339 timestamp; only strictly-older rows
            are returned. Page backward through long histories by passing
            the oldest ``ts`` from the previous response. Empty (default)
            returns the most recent ``limit`` rows.
        source_workspace_id: Which registered workspace's activity log to
            query. When unset, auto-routes via the ``_peer_to_source``
            cache (the workspace this peer was discovered through), then
            falls back to module-level WORKSPACE_ID.

    Returns:
        A JSON-encoded list of activity rows, or a string starting with
        ``Error:`` so the agent can branch. Each row carries
        ``activity_type``, ``source_id``, ``target_id``, ``method``,
        ``summary``, ``request_body``, ``response_body``, ``status``,
        ``created_at`` — the same shape ``inbox_peek`` and the canvas
        chat loader already see.
    """
    if not peer_id or not isinstance(peer_id, str):
        return "Error: peer_id is required"
    # Normalize nonsense limits quietly and mirror the server's 500 cap.
    if not isinstance(limit, int) or limit <= 0:
        limit = 20
    limit = min(limit, 500)

    src = source_workspace_id or _peer_to_source.get(peer_id) or WORKSPACE_ID
    query: dict[str, str] = {"peer_id": peer_id, "limit": str(limit)}
    if before_ts:
        # Forwarded verbatim — the server route validates RFC3339 at the
        # trust boundary and turns it into a `created_at < $X` clause.
        query["before_ts"] = before_ts

    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            resp = await client.get(
                f"{PLATFORM_URL}/workspaces/{src}/activity",
                params=query,
                headers=_auth_headers_for_heartbeat(src),
            )
    except Exception as exc:  # noqa: BLE001
        return f"Error: chat_history request failed: {exc}"

    if resp.status_code == 400:
        # Trust-boundary rejection (malformed peer_id, etc.) — echo the
        # server's reason verbatim so the agent can correct itself.
        try:
            reason = resp.json().get("error", "bad request")
        except Exception:  # noqa: BLE001
            reason = "bad request"
        return f"Error: {reason}"
    if resp.status_code >= 400:
        return f"Error: chat_history returned HTTP {resp.status_code}"

    try:
        rows = resp.json()
    except Exception:  # noqa: BLE001
        return "Error: chat_history response was not JSON"
    if not isinstance(rows, list):
        return "Error: chat_history response was not a list"
    # Server sends DESC (newest first); flip to chronological order so the
    # agent reads the conversation top-down like a chat log.
    return json.dumps(rows[::-1])

View File

@ -453,14 +453,14 @@ class TestToolSendMessageToUser:
async def test_success_200_returns_sent_message(self):
import a2a_tools
mc = _make_http_mock(post_resp=_resp(200, {}))
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
result = await a2a_tools.tool_send_message_to_user("Hello user!")
assert result == "Message sent to user"
async def test_non_200_returns_status_code_in_error(self):
import a2a_tools
mc = _make_http_mock(post_resp=_resp(503, {}))
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
result = await a2a_tools.tool_send_message_to_user("Hello user!")
assert "503" in result
assert "Error" in result
@ -468,7 +468,7 @@ class TestToolSendMessageToUser:
async def test_exception_returns_error_message(self):
import a2a_tools
mc = _make_http_mock(post_exc=RuntimeError("platform unreachable"))
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
result = await a2a_tools.tool_send_message_to_user("Hi!")
assert "Error sending message" in result
assert "platform unreachable" in result
@ -495,7 +495,7 @@ class TestToolSendMessageToUser:
mc = _make_http_mock(post_resp=notify_resp)
mc.post = AsyncMock(side_effect=[upload_resp, notify_resp])
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
result = await a2a_tools.tool_send_message_to_user(
"Done — see attached.",
attachments=[str(f)],
@ -523,7 +523,7 @@ class TestToolSendMessageToUser:
# with a half-rendered attachment chip.
import a2a_tools
mc = _make_http_mock()
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
result = await a2a_tools.tool_send_message_to_user(
"Hi", attachments=["/no/such/file.zip"],
)
@ -541,7 +541,7 @@ class TestToolSendMessageToUser:
mc = _make_http_mock()
mc.post = AsyncMock(return_value=upload_resp)
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
result = await a2a_tools.tool_send_message_to_user(
"Hi", attachments=[str(f)],
)
@ -555,7 +555,7 @@ class TestToolSendMessageToUser:
# an `attachments` field added to the notify body.
import a2a_tools
mc = _make_http_mock(post_resp=_resp(200, {}))
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
await a2a_tools.tool_send_message_to_user("plain text")
body = mc.post.await_args.kwargs.get("json") or {}
assert body == {"message": "plain text"}
@ -570,7 +570,7 @@ class TestToolListPeers:
async def test_true_empty_returns_no_peers_message_without_diagnostic(self):
"""200 + empty list → 'no peers in the platform registry' (no failure)."""
import a2a_tools
with patch("a2a_tools.get_peers_with_diagnostic", return_value=([], None)):
with patch("a2a_tools_messaging.get_peers_with_diagnostic", return_value=([], None)):
result = await a2a_tools.tool_list_peers()
# The new wording explicitly says no peers exist (no parent/sibling/child).
# Avoids the misleading "may be isolated" hint when discovery succeeded.
@ -582,7 +582,7 @@ class TestToolListPeers:
"""401/403 → tool_list_peers must surface the auth failure + restart hint, not 'isolated'."""
import a2a_tools
diag = "Authentication to platform failed (HTTP 401). Restart the workspace to re-mint."
with patch("a2a_tools.get_peers_with_diagnostic", return_value=([], diag)):
with patch("a2a_tools_messaging.get_peers_with_diagnostic", return_value=([], diag)):
result = await a2a_tools.tool_list_peers()
assert "401" in result
assert "Authentication" in result
@ -593,7 +593,7 @@ class TestToolListPeers:
"""404 → tool_list_peers tells the user re-registration is needed."""
import a2a_tools
diag = "Workspace ID ws-test is not registered with the platform (HTTP 404). Re-register."
with patch("a2a_tools.get_peers_with_diagnostic", return_value=([], diag)):
with patch("a2a_tools_messaging.get_peers_with_diagnostic", return_value=([], diag)):
result = await a2a_tools.tool_list_peers()
assert "404" in result
assert "registered" in result.lower()
@ -602,7 +602,7 @@ class TestToolListPeers:
"""5xx → 'Platform error' surfaced; agent / user can correctly route to oncall."""
import a2a_tools
diag = "Platform error: HTTP 503."
with patch("a2a_tools.get_peers_with_diagnostic", return_value=([], diag)):
with patch("a2a_tools_messaging.get_peers_with_diagnostic", return_value=([], diag)):
result = await a2a_tools.tool_list_peers()
assert "503" in result
assert "Platform error" in result
@ -611,7 +611,7 @@ class TestToolListPeers:
"""Network error → operator can tell that the workspace can't reach the platform at all."""
import a2a_tools
diag = "Cannot reach platform at http://platform.example: timed out"
with patch("a2a_tools.get_peers_with_diagnostic", return_value=([], diag)):
with patch("a2a_tools_messaging.get_peers_with_diagnostic", return_value=([], diag)):
result = await a2a_tools.tool_list_peers()
assert "Cannot reach platform" in result
assert "timed out" in result
@ -624,7 +624,7 @@ class TestToolListPeers:
{"id": "ws-1", "name": "Alpha", "status": "online", "role": "worker"},
{"id": "ws-2", "name": "Beta", "status": "idle", "role": "analyst"},
]
with patch("a2a_tools.get_peers_with_diagnostic", return_value=(peers, None)):
with patch("a2a_tools_messaging.get_peers_with_diagnostic", return_value=(peers, None)):
result = await a2a_tools.tool_list_peers()
assert "Alpha" in result
@ -641,7 +641,7 @@ class TestToolListPeers:
# Clear any prior cache entries for these IDs
a2a_tools._peer_names.pop("ws-cache-test", None)
peers = [{"id": "ws-cache-test", "name": "CacheMe", "status": "online", "role": "w"}]
with patch("a2a_tools.get_peers_with_diagnostic", return_value=(peers, None)):
with patch("a2a_tools_messaging.get_peers_with_diagnostic", return_value=(peers, None)):
await a2a_tools.tool_list_peers()
assert a2a_tools._peer_names.get("ws-cache-test") == "CacheMe"
@ -651,7 +651,7 @@ class TestToolListPeers:
import a2a_tools
peers = [{"id": "ws-3", "name": "Gamma"}] # no status, no role
with patch("a2a_tools.get_peers_with_diagnostic", return_value=(peers, None)):
with patch("a2a_tools_messaging.get_peers_with_diagnostic", return_value=(peers, None)):
result = await a2a_tools.tool_list_peers()
assert "Gamma" in result
@ -669,7 +669,7 @@ class TestToolGetWorkspaceInfo:
import a2a_tools
info = {"id": "ws-test", "name": "My Workspace", "status": "online"}
with patch("a2a_tools.get_workspace_info", return_value=info):
with patch("a2a_tools_messaging.get_workspace_info", return_value=info):
result = await a2a_tools.tool_get_workspace_info()
parsed = json.loads(result)
@ -678,7 +678,7 @@ class TestToolGetWorkspaceInfo:
async def test_returns_error_dict_as_json(self):
import a2a_tools
with patch("a2a_tools.get_workspace_info", return_value={"error": "not found"}):
with patch("a2a_tools_messaging.get_workspace_info", return_value={"error": "not found"}):
result = await a2a_tools.tool_get_workspace_info()
parsed = json.loads(result)
@ -994,7 +994,7 @@ class TestChatHistory:
import a2a_tools
mc = _make_http_mock()
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
result = await a2a_tools.tool_chat_history(peer_id="")
mc.get.assert_not_called()
@ -1006,7 +1006,7 @@ class TestChatHistory:
import a2a_tools
mc = _make_http_mock(get_resp=_resp(200, []))
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
await a2a_tools.tool_chat_history(peer_id=_PEER)
url, kwargs = mc.get.call_args.args[0], mc.get.call_args.kwargs
@ -1023,7 +1023,7 @@ class TestChatHistory:
import a2a_tools
mc = _make_http_mock(get_resp=_resp(200, []))
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
await a2a_tools.tool_chat_history(peer_id=_PEER, limit=10000)
params = mc.get.call_args.kwargs["params"]
@ -1035,7 +1035,7 @@ class TestChatHistory:
import a2a_tools
mc = _make_http_mock(get_resp=_resp(200, []))
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
await a2a_tools.tool_chat_history(peer_id=_PEER, limit=0)
assert mc.get.call_args.kwargs["params"]["limit"] == "20"
@ -1044,7 +1044,7 @@ class TestChatHistory:
import a2a_tools
mc = _make_http_mock(get_resp=_resp(200, []))
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
await a2a_tools.tool_chat_history(
peer_id=_PEER, before_ts="2026-05-01T00:00:00Z",
)
@ -1063,7 +1063,7 @@ class TestChatHistory:
import a2a_tools
mc = _make_http_mock(get_resp=_resp(200, []))
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
result = await a2a_tools.tool_chat_history(peer_id=_PEER)
# Exact-equality on the JSON literal (per assert-exact memory) —
@ -1084,7 +1084,7 @@ class TestChatHistory:
{"id": "act-1", "created_at": "2026-05-01T00:01:00Z"},
]
mc = _make_http_mock(get_resp=_resp(200, rows))
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
result = await a2a_tools.tool_chat_history(peer_id=_PEER)
out = json.loads(result)
@ -1097,7 +1097,7 @@ class TestChatHistory:
import a2a_tools
mc = _make_http_mock(get_resp=_resp(400, {"error": "peer_id must be a UUID"}))
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
result = await a2a_tools.tool_chat_history(peer_id="bad")
assert "peer_id must be a UUID" in result
@ -1108,7 +1108,7 @@ class TestChatHistory:
import a2a_tools
mc = _make_http_mock(get_resp=_resp(500, {"error": "internal"}))
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
result = await a2a_tools.tool_chat_history(peer_id=_PEER)
assert result.startswith("Error:")
@ -1121,7 +1121,7 @@ class TestChatHistory:
import a2a_tools
mc = _make_http_mock(get_exc=httpx.ConnectError("network down"))
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
result = await a2a_tools.tool_chat_history(peer_id=_PEER)
assert result.startswith("Error:")
@ -1135,7 +1135,7 @@ class TestChatHistory:
import a2a_tools
mc = _make_http_mock(get_resp=_resp(200, {"unexpected": "shape"}))
with patch("a2a_tools.httpx.AsyncClient", return_value=mc):
with patch("a2a_tools_messaging.httpx.AsyncClient", return_value=mc):
result = await a2a_tools.tool_chat_history(peer_id=_PEER)
assert result.startswith("Error:")

View File

@ -0,0 +1,92 @@
"""Drift gate + smoke tests for ``a2a_tools_messaging`` (RFC #2873 iter 4d).
The full behavior matrix lives in ``test_a2a_tools_impl.py``
TestToolSendMessageToUser + TestToolListPeers + TestToolGetWorkspaceInfo
+ TestChatHistory all patch ``a2a_tools_messaging.foo`` after the iter
4d retarget.
This file pins:
1. **Drift gate** — every previously-public symbol on ``a2a_tools``
is the EXACT same callable / value as ``a2a_tools_messaging.foo``.
Wrapper functions would silently lose existing test coverage; this
gate fails fast on that drift.
2. **Import contract** — ``a2a_tools_messaging`` does NOT pull in
``a2a_tools`` at module-load time (the layered architecture: it
depends on ``a2a_tools_rbac`` + ``a2a_client`` + ``platform_auth``,
never the kitchen-sink module).
"""
from __future__ import annotations
import sys
import pytest
@pytest.fixture(autouse=True)
def _require_workspace_id(monkeypatch):
    """Pin the env vars the messaging tools read so every test in this
    module runs against deterministic, obviously-fake config.

    ``monkeypatch`` restores the originals on teardown, so nothing
    leaks between tests.
    """
    for key, value in (
        ("WORKSPACE_ID", "00000000-0000-0000-0000-000000000000"),
        ("PLATFORM_URL", "http://test.invalid"),
    ):
        monkeypatch.setenv(key, value)
    yield
# ============== Drift gate ==============
class TestBackCompatAliases:
    """Each messaging symbol re-exported by ``a2a_tools`` must be the
    exact same object (``is``-identity, not mere equality) as the one
    defined in ``a2a_tools_messaging``, so patching either module hits
    the single real callable.
    """

    @staticmethod
    def _assert_identical(symbol):
        # Shared identity check between the facade and the impl module.
        import a2a_tools
        import a2a_tools_messaging

        assert getattr(a2a_tools, symbol) is getattr(a2a_tools_messaging, symbol)

    def test_tool_send_message_to_user_alias(self):
        self._assert_identical("tool_send_message_to_user")

    def test_tool_list_peers_alias(self):
        self._assert_identical("tool_list_peers")

    def test_tool_get_workspace_info_alias(self):
        self._assert_identical("tool_get_workspace_info")

    def test_tool_chat_history_alias(self):
        self._assert_identical("tool_chat_history")

    def test_upload_chat_files_alias(self):
        self._assert_identical("_upload_chat_files")
# ============== Import contract ==============
class TestImportContract:
    """Pin the layered architecture in both directions: the messaging
    impl module never loads the ``a2a_tools`` facade, and the facade
    surfaces every messaging symbol."""

    def test_messaging_module_does_not_load_a2a_tools(self, monkeypatch):
        """`a2a_tools_messaging` must depend on `a2a_tools_rbac` (the
        layered architecture), `a2a_client`, and `platform_auth` but
        NEVER on the kitchen-sink `a2a_tools`. Top-level
        `from a2a_tools import ...` would re-introduce the circular
        dependency that motivated the lazy-import contract for the
        delegation module."""
        # Drop both modules so the import below is a genuinely fresh load.
        for m in ("a2a_tools", "a2a_tools_messaging"):
            sys.modules.pop(m, None)
        import a2a_tools_messaging  # noqa: F401

        assert "a2a_tools_messaging" in sys.modules
        # The actual contract: a fresh import of the messaging module must
        # NOT drag the kitchen-sink module back into sys.modules.  Without
        # this negative assertion the test passes even if
        # a2a_tools_messaging does `import a2a_tools` at module load.
        assert "a2a_tools" not in sys.modules

    def test_a2a_tools_re_exports_messaging_handlers(self):
        """Opposite direction: a2a_tools surfaces every messaging
        symbol so existing call sites + tests work unchanged."""
        import a2a_tools

        for symbol in (
            "tool_send_message_to_user",
            "tool_list_peers",
            "tool_get_workspace_info",
            "tool_chat_history",
            "_upload_chat_files",
        ):
            assert hasattr(a2a_tools, symbol), symbol