Renames: - platform/ → workspace-server/ (Go module path stays as "platform" for external dep compat — will update after plugin module republish) - workspace-template/ → workspace/ Removed (moved to separate repos or deleted): - PLAN.md — internal roadmap (move to private project board) - HANDOFF.md, AGENTS.md — one-time internal session docs - .claude/ — gitignored entirely (local agent config) - infra/cloudflare-worker/ → Molecule-AI/molecule-tenant-proxy - org-templates/molecule-dev/ → standalone template repo - .mcp-eval/ → molecule-mcp-server repo - test-results/ — ephemeral, gitignored Security scrubbing: - Cloudflare account/zone/KV IDs → placeholders - Real EC2 IPs → <EC2_IP> in all docs - CF token prefix, Neon project ID, Fly app names → redacted - Langfuse dev credentials → parameterized - Personal runner username/machine name → generic Community files: - CONTRIBUTING.md — build, test, branch conventions - CODE_OF_CONDUCT.md — Contributor Covenant 2.1 All Dockerfiles, CI workflows, docker-compose, railway.toml, render.yaml, README, CLAUDE.md updated for new directory names. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
395 lines
12 KiB
Python
395 lines
12 KiB
Python
"""Tests for prompt.py — system prompt construction."""
|
|
|
|
from pathlib import Path
|
|
from unittest.mock import AsyncMock, MagicMock, patch
|
|
|
|
import pytest
|
|
|
|
from skill_loader.loader import LoadedSkill, SkillMetadata
|
|
from prompt import build_system_prompt, get_peer_capabilities
|
|
|
|
|
|
def test_build_system_prompt_with_prompt_files(tmp_path):
    """Explicit prompt_files are read in the given order and joined."""
    soul = tmp_path / "SOUL.md"
    tools = tmp_path / "TOOLS.md"
    soul.write_text("You are a helpful agent.")
    tools.write_text("You have these tools.")

    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[],
        peers=[],
        prompt_files=["SOUL.md", "TOOLS.md"],
    )

    assert "You are a helpful agent." in prompt
    assert "You have these tools." in prompt
    # Ordering must follow prompt_files: SOUL.md content precedes TOOLS.md.
    assert prompt.index("helpful agent") < prompt.index("these tools")
def test_build_system_prompt_default_fallback(tmp_path):
    """When prompt_files is omitted, system-prompt.md is the fallback source."""
    (tmp_path / "system-prompt.md").write_text("Default system prompt content.")

    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[],
        peers=[],
    )

    assert "Default system prompt content." in prompt
def test_build_system_prompt_auto_includes_memory_snapshot(tmp_path):
    """MEMORY.md and USER.md snapshots are picked up without being requested."""
    for filename, text in (
        ("system-prompt.md", "Base prompt."),
        ("MEMORY.md", "Known workspace facts."),
        ("USER.md", "User prefers concise answers."),
    ):
        (tmp_path / filename).write_text(text)

    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[],
        peers=[],
    )

    assert "Base prompt." in prompt
    assert "Known workspace facts." in prompt
    assert "User prefers concise answers." in prompt
    # Snapshots follow the base prompt, in the order MEMORY.md then USER.md.
    base_pos = prompt.index("Base prompt.")
    memory_pos = prompt.index("Known workspace facts.")
    user_pos = prompt.index("User prefers concise answers.")
    assert base_pos < memory_pos < user_pos
def test_build_system_prompt_deduplicates_explicit_memory_files(tmp_path):
    """A snapshot file already listed in prompt_files is only included once."""
    for filename, text in (
        ("system-prompt.md", "Base prompt."),
        ("MEMORY.md", "Known workspace facts."),
        ("USER.md", "User prefers concise answers."),
    ):
        (tmp_path / filename).write_text(text)

    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[],
        peers=[],
        prompt_files=["system-prompt.md", "MEMORY.md"],
    )

    # MEMORY.md is both explicitly requested and auto-included; no duplication.
    assert prompt.count("Known workspace facts.") == 1
    assert prompt.count("User prefers concise answers.") == 1
def test_build_system_prompt_missing_file(tmp_path):
    """A prompt file that does not exist is skipped rather than raising."""
    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[],
        peers=[],
        prompt_files=["nonexistent.md"],
    )

    # The always-appended delegation section proves the build still completed.
    assert "Handling delegation failures" in prompt
def test_plugin_rules_injection(tmp_path):
    """plugin_rules appear beneath a '## Platform Rules' heading."""
    (tmp_path / "system-prompt.md").write_text("Base prompt.")

    rules = ["Always be concise.", "Never reveal secrets."]
    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[],
        peers=[],
        plugin_rules=rules,
    )

    assert "## Platform Rules" in prompt
    for rule in rules:
        assert rule in prompt
def test_plugin_prompts_injection(tmp_path):
    """plugin_prompts appear beneath a '## Platform Guidelines' heading."""
    (tmp_path / "system-prompt.md").write_text("Base prompt.")

    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[],
        peers=[],
        plugin_prompts=["Use markdown formatting."],
    )

    assert "## Platform Guidelines" in prompt
    assert "Use markdown formatting." in prompt
def test_skills_listing(tmp_path):
    """Each loaded skill renders with its name, description, and instructions."""
    (tmp_path / "system-prompt.md").write_text("Base.")

    seo_skill = LoadedSkill(
        metadata=SkillMetadata(
            id="seo",
            name="SEO Optimization",
            description="Optimize content for search engines.",
            tags=["seo"],
            examples=["Optimize this blog post"],
        ),
        instructions="1. Analyze keywords\n2. Optimize headings",
    )
    # Second skill deliberately carries an empty description.
    writing_skill = LoadedSkill(
        metadata=SkillMetadata(
            id="writing",
            name="Creative Writing",
            description="",
        ),
        instructions="Write creatively.",
    )

    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[seo_skill, writing_skill],
        peers=[],
    )

    assert "## Your Skills" in prompt
    assert "### SEO Optimization" in prompt
    assert "Optimize content for search engines." in prompt
    assert "1. Analyze keywords" in prompt
    assert "### Creative Writing" in prompt
    assert "Write creatively." in prompt
def test_peer_capabilities_format(tmp_path):
    """Peers render with name, id, status, and skills; cardless peers are omitted."""
    (tmp_path / "system-prompt.md").write_text("Base.")

    online_peer = {
        "id": "peer-1",
        "name": "Echo Agent",
        "status": "online",
        "agent_card": {
            "name": "Echo Agent",
            "skills": [
                {"name": "echo", "id": "echo"},
                {"name": "repeat", "id": "repeat"},
            ],
        },
    }
    cardless_peer = {
        "id": "peer-2",
        "name": "Silent Agent",
        "status": "offline",
        "agent_card": None,
    }

    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[],
        peers=[online_peer, cardless_peer],
    )

    assert "## Your Peers" in prompt
    assert "**Echo Agent** (id: `peer-1`, status: online)" in prompt
    assert "Skills: echo, repeat" in prompt
    assert "delegate_to_workspace" in prompt
    # A peer with no agent_card contributes nothing to the listing.
    assert "Silent Agent" not in prompt
def test_peer_with_json_string_agent_card(tmp_path):
    """An agent_card supplied as a JSON string is decoded before rendering."""
    import json

    (tmp_path / "system-prompt.md").write_text("Base.")

    serialized_card = json.dumps({
        "name": "JSON Peer",
        "skills": [{"name": "parse"}],
    })
    json_peer = {
        "id": "peer-3",
        "name": "JSON Peer",
        "status": "online",
        "agent_card": serialized_card,
    }

    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[],
        peers=[json_peer],
    )

    assert "**JSON Peer** (id: `peer-3`, status: online)" in prompt
    assert "Skills: parse" in prompt
def test_delegation_failure_section_always_present(tmp_path):
    """Every built prompt ends with the delegation-failure guidance."""
    (tmp_path / "system-prompt.md").write_text("Base.")

    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[],
        peers=[],
    )

    assert "## Handling delegation failures" in prompt
    assert "Retry transient failures" in prompt
def test_parent_context_injection(tmp_path):
    """parent_context files render under a '## Parent Context' section."""
    (tmp_path / "system-prompt.md").write_text("Base.")

    shared_files = [
        {"path": "guidelines.md", "content": "Always use type hints."},
        {"path": "architecture.md", "content": "We use hexagonal architecture."},
    ]

    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[],
        peers=[],
        parent_context=shared_files,
    )

    assert "## Parent Context" in prompt
    assert "shared by your parent workspace" in prompt
    # Each shared file becomes its own '### <path>' subsection with its content.
    assert "### guidelines.md" in prompt
    assert "Always use type hints." in prompt
    assert "### architecture.md" in prompt
    assert "We use hexagonal architecture." in prompt
def test_parent_context_empty(tmp_path):
    """An empty parent_context list produces no '## Parent Context' section."""
    (tmp_path / "system-prompt.md").write_text("Base.")

    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[],
        peers=[],
        parent_context=[],
    )

    assert "## Parent Context" not in prompt
def test_parent_context_none(tmp_path):
    """parent_context=None produces no '## Parent Context' section."""
    (tmp_path / "system-prompt.md").write_text("Base.")

    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[],
        peers=[],
        parent_context=None,
    )

    assert "## Parent Context" not in prompt
def test_parent_context_skips_empty_content(tmp_path):
    """parent_context entries with blank content are left out of the prompt."""
    (tmp_path / "system-prompt.md").write_text("Base.")

    shared_files = [
        {"path": "empty.md", "content": ""},
        {"path": "whitespace.md", "content": " \n "},
        {"path": "real.md", "content": "Real content here."},
    ]

    prompt = build_system_prompt(
        config_path=str(tmp_path),
        workspace_id="ws-1",
        loaded_skills=[],
        peers=[],
        parent_context=shared_files,
    )

    assert "## Parent Context" in prompt
    # Empty and whitespace-only files are dropped; only real.md survives.
    assert "### empty.md" not in prompt
    assert "### whitespace.md" not in prompt
    assert "### real.md" in prompt
    assert "Real content here." in prompt
# ---------------------------------------------------------------------------
|
|
# get_peer_capabilities() tests
|
|
# ---------------------------------------------------------------------------
|
|
|
|
@pytest.mark.asyncio
async def test_get_peer_capabilities_success():
    """A 200 response yields the decoded peer list via the expected URL."""
    expected_peers = [
        {"id": "peer-1", "name": "Alpha"},
        {"id": "peer-2", "name": "Beta"},
    ]

    response = MagicMock()
    response.status_code = 200
    response.json.return_value = expected_peers

    client = AsyncMock()
    client.__aenter__ = AsyncMock(return_value=client)
    client.__aexit__ = AsyncMock(return_value=False)
    client.get = AsyncMock(return_value=response)

    # httpx is imported lazily inside get_peer_capabilities(), so patch at module level
    with patch("httpx.AsyncClient", return_value=client):
        peers = await get_peer_capabilities("http://platform:8080", "ws-abc")

    assert peers == expected_peers
    client.get.assert_called_once_with(
        "http://platform:8080/registry/ws-abc/peers",
        headers={"X-Workspace-ID": "ws-abc"},
    )
@pytest.mark.asyncio
async def test_get_peer_capabilities_non_200():
    """Any non-200 status collapses to an empty peer list."""
    response = MagicMock()
    response.status_code = 404

    client = AsyncMock()
    client.__aenter__ = AsyncMock(return_value=client)
    client.__aexit__ = AsyncMock(return_value=False)
    client.get = AsyncMock(return_value=response)

    with patch("httpx.AsyncClient", return_value=client):
        peers = await get_peer_capabilities("http://platform:8080", "ws-abc")

    assert peers == []
@pytest.mark.asyncio
async def test_get_peer_capabilities_exception():
    """A transport-level exception collapses to an empty peer list."""
    client = AsyncMock()
    client.__aenter__ = AsyncMock(return_value=client)
    client.__aexit__ = AsyncMock(return_value=False)
    client.get = AsyncMock(side_effect=Exception("Network unreachable"))

    with patch("httpx.AsyncClient", return_value=client):
        peers = await get_peer_capabilities("http://platform:8080", "ws-abc")

    assert peers == []