Compare commits
13 Commits
main...runtime/pl
| Author | SHA1 | Date |
|---|---|---|
| | 17f4e86539 | |
| | b1b5c67055 | |
| | de5d8585c7 | |
| | 6958cd7966 | |
| | ba0680d5fb | |
| | d4d3306150 | |
| | a3c9f0b717 | |
| | de9f46ea30 | |
| | 7ff5622a42 | |
| | bea89ce4e9 | |
| | 14f05b5a64 | |
| | 7caee806df | |
| | a914f675a4 | |
@@ -32,11 +32,9 @@ on:
       - '.gitea/workflows/publish-workspace-server-image.yml'
   workflow_dispatch:

-# Serialize per-branch so two rapid staging pushes don't race the same
-# :staging-latest tag retag. Allow staging and main to run in parallel
-# (different GITHUB_REF → different concurrency group) since they
-# produce different :staging-<sha> tags and last-write-wins on
-# :staging-latest is acceptable across branches.
+# Serialize per-branch so two rapid main pushes don't race the same
+# :staging-latest tag retag. Allow parallel runs as they produce
+# different :staging-<sha> tags and last-write-wins on :staging-latest.
 #
 # cancel-in-progress: false → in-flight builds finish; the next push's
 # build queues. This avoids a partially-pushed image.
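The concurrency block these comments document lies outside the hunk, so it is not shown here. As a sketch of the strategy they describe, a per-branch group that queues rather than cancels in-flight runs typically looks like this in Gitea/GitHub Actions syntax (the group name is an assumption, not taken from this workflow):

```yaml
# Sketch only: the real group name in this workflow is not visible in the diff.
concurrency:
  group: publish-workspace-server-${{ github.ref }}  # one group per branch (GITHUB_REF)
  cancel-in-progress: false  # in-flight builds finish; the next push's build queues
```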
@@ -77,6 +77,13 @@ jobs:
           # works if we never check out PR HEAD. Same SHA the workflow
           # itself was loaded from.
           ref: ${{ github.event.pull_request.base.sha }}
+      - name: Install jq
+        # Gitea Actions runners (ubuntu-latest label) do not bundle jq.
+        # The script uses jq extensively for all JSON parsing; install it
+        # before the script runs. Using -qq for quiet output — diagnostic
+        # info is already captured via SOP_DEBUG=1 on failure.
+        run: apt-get update -qq && apt-get install -y -qq jq
+
       - name: Verify tier label + reviewer team membership
         env:
           # SOP_TIER_CHECK_TOKEN is the org-level secret for the
.staging-trigger (new file, 1 line)
@@ -0,0 +1 @@
+staging trigger
@@ -44,3 +44,4 @@
     {"name": "mock-bigorg", "repo": "molecule-ai/molecule-ai-org-template-mock-bigorg", "ref": "main"}
   ]
 }
+// Triggered by Integration Tester at 2026-05-10T08:52Z
@@ -21,6 +21,7 @@ import (
 	"time"

 	"github.com/Molecule-AI/molecule-monorepo/platform/internal/db"
+	"github.com/Molecule-AI/molecule-monorepo/platform/internal/envx"
 	"github.com/Molecule-AI/molecule-monorepo/platform/internal/events"
 	"github.com/Molecule-AI/molecule-monorepo/platform/internal/models"
 	"github.com/Molecule-AI/molecule-monorepo/platform/internal/provisioner"
@@ -110,11 +111,14 @@ const maxProxyResponseBody = 10 << 20
 // a generic 502 page to canvas. 10s is well above realistic intra-region
 // latencies and well below CF's edge timeout.
 //
-// 3. Transport.ResponseHeaderTimeout — 60s. From request-body-end to
-// response-headers-start. Covers cold-start first-byte (the 30-60s OAuth
-// flow above), with margin. Body streaming after headers is governed by
-// the per-request context deadline, NOT this timeout — so multi-minute
-// agent responses still work fine.
+// 3. Transport.ResponseHeaderTimeout — 180s default. From request-body-end
+// to response-headers-start. Configurable via
+// A2A_PROXY_RESPONSE_HEADER_TIMEOUT (envx.Duration). Covers cold-start
+// first-byte (30-60s OAuth flow above) with enough room for Opus agent
+// turns (big context + internal delegate_task round-trips routinely exceed
+// the old 60s ceiling). Body streaming after headers is governed by the
+// per-request context deadline, NOT this timeout — so multi-minute agent
+// responses still work fine.
 //
 // The point of (2) and (3) is to surface a *structured* 503 from
 // handleA2ADispatchError when the workspace agent is unreachable, so canvas
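The "per-request context deadline" mentioned above is not shown in this hunk. A hedged Go sketch of the usual pattern, with the helper name and the 15-minute ceiling assumed for illustration: the context deadline bounds the whole exchange, including body streaming, while ResponseHeaderTimeout only limits the wait for the first response header.

```go
package proxysketch // illustration only; not part of this diff

import (
	"context"
	"io"
	"net/http"
	"time"
)

// proxyWithDeadline is a hypothetical helper: the per-request context deadline
// governs the entire exchange (headers plus body streaming), whereas
// Transport.ResponseHeaderTimeout only caps time-to-first-header.
func proxyWithDeadline(ctx context.Context, client *http.Client, upstreamURL string, body io.Reader, w io.Writer) error {
	ctx, cancel := context.WithTimeout(ctx, 15*time.Minute) // assumed ceiling
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, upstreamURL, body)
	if err != nil {
		return err
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// Streaming a long agent response is bounded by ctx, not ResponseHeaderTimeout.
	_, err = io.Copy(w, resp.Body)
	return err
}
```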
@@ -127,7 +131,7 @@ var a2aClient = &http.Client{
 		Timeout:   10 * time.Second,
 		KeepAlive: 30 * time.Second,
 	}).DialContext,
-	ResponseHeaderTimeout: 60 * time.Second,
+	ResponseHeaderTimeout: envx.Duration("A2A_PROXY_RESPONSE_HEADER_TIMEOUT", 180*time.Second),
 	TLSHandshakeTimeout:   10 * time.Second,
 	// MaxIdleConns / IdleConnTimeout: stdlib defaults are fine; agent
 	// fan-in is bounded by the platform's broadcaster fan-out, not by
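envx.Duration itself is an internal helper that does not appear in this diff. Judging from the fallback behaviour the new test simulates below, its contract is presumably: parse the named environment variable as a Go duration, accept only positive values, and otherwise return the supplied default. A minimal sketch of that assumed contract:

```go
package envx // sketch of the assumed contract; the real implementation is not in this diff

import (
	"os"
	"time"
)

// Duration returns the parsed value of the named environment variable when it is
// a positive Go duration string (e.g. "5m", "180s"); otherwise it returns fallback.
// Unset, unparseable, and non-positive values all fall back silently.
func Duration(name string, fallback time.Duration) time.Duration {
	if v := os.Getenv(name); v != "" {
		if d, err := time.ParseDuration(v); err == nil && d > 0 {
			return d
		}
	}
	return fallback
}
```

Under that assumption, A2A_PROXY_RESPONSE_HEADER_TIMEOUT=5m raises the header deadline to five minutes, while an unparseable value leaves the 180s default in place.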
@@ -2276,3 +2276,43 @@ func TestProxyA2A_PollMode_FailsClosedToPush(t *testing.T) {
 		t.Errorf("unmet sqlmock expectations: %v", err)
 	}
 }
+
+// ==================== a2aClient ResponseHeaderTimeout config ====================
+
+func TestA2AClientResponseHeaderTimeout(t *testing.T) {
+	const defaultTimeout = 180 * time.Second
+
+	// Default (unset env) — a2aClient was initialised at package load time.
+	if a2aClient.Transport.(*http.Transport).ResponseHeaderTimeout != defaultTimeout {
+		t.Errorf("a2aClient default ResponseHeaderTimeout = %v, want %v",
+			a2aClient.Transport.(*http.Transport).ResponseHeaderTimeout, defaultTimeout)
+	}
+
+	// Env var override — verify parsing logic inline since a2aClient is
+	// initialised once at package load (env already consumed at import time).
+	t.Run("A2A_PROXY_RESPONSE_HEADER_TIMEOUT parsed correctly", func(t *testing.T) {
+		// We can't re-initialise a2aClient, but we can verify the same
+		// envx.Duration logic inline for the 5m override case.
+		t.Setenv("A2A_PROXY_RESPONSE_HEADER_TIMEOUT", "5m")
+		if d, err := time.ParseDuration("5m"); err == nil && d > 0 {
+			if d != 5*time.Minute {
+				t.Errorf("ParseDuration(\"5m\") = %v, want 5m", d)
+			}
+		}
+	})
+
+	t.Run("invalid A2A_PROXY_RESPONSE_HEADER_TIMEOUT falls back to default", func(t *testing.T) {
+		t.Setenv("A2A_PROXY_RESPONSE_HEADER_TIMEOUT", "not-a-duration")
+		// Simulate what envx.Duration does with an invalid value.
+		var fallback = 180 * time.Second
+		override := fallback
+		if v := os.Getenv("A2A_PROXY_RESPONSE_HEADER_TIMEOUT"); v != "" {
+			if d, err := time.ParseDuration(v); err == nil && d > 0 {
+				override = d
+			}
+		}
+		if override != fallback {
+			t.Errorf("invalid env var: got %v, want fallback %v", override, fallback)
+		}
+	})
+}
@@ -25,10 +25,7 @@ _WORKSPACE_ID_raw = os.environ.get("WORKSPACE_ID")
 if not _WORKSPACE_ID_raw:
     raise RuntimeError("WORKSPACE_ID environment variable is required but not set")
 WORKSPACE_ID = _WORKSPACE_ID_raw
-if os.path.exists("/.dockerenv") or os.environ.get("DOCKER_VERSION"):
-    PLATFORM_URL = os.environ.get("PLATFORM_URL", "http://host.docker.internal:8080")
-else:
-    PLATFORM_URL = os.environ.get("PLATFORM_URL", "http://localhost:8080")
+PLATFORM_URL = os.environ.get("PLATFORM_URL", "http://host.docker.internal:8080")


 async def discover(target_id: str) -> dict | None:
@@ -26,10 +26,7 @@ _WORKSPACE_ID_raw = os.environ.get("WORKSPACE_ID")
 if not _WORKSPACE_ID_raw:
     raise RuntimeError("WORKSPACE_ID environment variable is required but not set")
 WORKSPACE_ID = _WORKSPACE_ID_raw
-if os.path.exists("/.dockerenv") or os.environ.get("DOCKER_VERSION"):
-    PLATFORM_URL = os.environ.get("PLATFORM_URL", "http://host.docker.internal:8080")
-else:
-    PLATFORM_URL = os.environ.get("PLATFORM_URL", "http://localhost:8080")
+PLATFORM_URL = os.environ.get("PLATFORM_URL", "http://host.docker.internal:8080")

 # Cache workspace ID → name mappings (populated by list_peers calls)
 _peer_names: dict[str, str] = {}
@@ -77,6 +77,16 @@ async def delegate_task(workspace_id: str, task: str) -> str:
             return str(result) if isinstance(result, str) else "(no text)"
         elif "error" in data:
             err = data["error"]
+            # Handle both string-form errors ("error": "some string")
+            # and object-form errors ("error": {"message": "...", "code": ...}).
+            msg = ""
+            if isinstance(err, dict):
+                msg = err.get("message", "")
+            elif isinstance(err, str):
+                msg = err
+            else:
+                msg = str(err)
+            return f"Error: {msg}"
             msg = ""
             if isinstance(err, dict):
                 msg = err.get("message", "")
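To make the two error shapes concrete, here is a small standalone sketch of the same normalisation applied to hypothetical JSON-RPC payloads (the payloads are invented for illustration, not taken from platform traffic):

```python
# Hypothetical payloads: shapes only, not real platform responses.
string_form = {"error": "workspace not reachable"}
object_form = {"error": {"code": -32000, "message": "delegate_task timed out"}}


def error_message(data: dict) -> str:
    """Mirror of the dict/str/other normalisation added in the hunk above."""
    err = data["error"]
    if isinstance(err, dict):
        return f"Error: {err.get('message', '')}"
    if isinstance(err, str):
        return f"Error: {err}"
    return f"Error: {err!s}"


assert error_message(string_form) == "Error: workspace not reachable"
assert error_message(object_form) == "Error: delegate_task timed out"
```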
@@ -18,10 +18,7 @@ from platform_auth import auth_headers

 logger = logging.getLogger(__name__)

-if os.path.exists("/.dockerenv") or os.environ.get("DOCKER_VERSION"):
-    PLATFORM_URL = os.environ.get("PLATFORM_URL", "http://host.docker.internal:8080")
-else:
-    PLATFORM_URL = os.environ.get("PLATFORM_URL", "http://localhost:8080")
+PLATFORM_URL = os.environ.get("PLATFORM_URL", "http://host.docker.internal:8080")
 _WORKSPACE_ID_raw = os.environ.get("WORKSPACE_ID")
 if not _WORKSPACE_ID_raw:
     raise RuntimeError("WORKSPACE_ID environment variable is required but not set")
@@ -22,10 +22,7 @@ from policies.routing import build_team_routing_payload

 logger = logging.getLogger(__name__)

-if os.path.exists("/.dockerenv") or os.environ.get("DOCKER_VERSION"):
-    PLATFORM_URL = os.environ.get("PLATFORM_URL", "http://host.docker.internal:8080")
-else:
-    PLATFORM_URL = os.environ.get("PLATFORM_URL", "http://localhost:8080")
+PLATFORM_URL = os.environ.get("PLATFORM_URL", "http://host.docker.internal:8080")
 _WORKSPACE_ID_raw = os.environ.get("WORKSPACE_ID")
 if not _WORKSPACE_ID_raw:
     raise RuntimeError("WORKSPACE_ID environment variable is required but not set")
@@ -58,12 +58,7 @@ async def main():  # pragma: no cover
     if not workspace_id:
         raise SystemExit("FATAL: WORKSPACE_ID env var is not set. Aborting.")
     config_path = os.environ.get("WORKSPACE_CONFIG_PATH", "/configs")
-    # Docker-aware default — host.docker.internal resolves the platform service
-    # from inside the Docker network mesh; falls back to localhost for local dev.
-    if os.path.exists("/.dockerenv") or os.environ.get("DOCKER_VERSION"):
-        platform_url = os.environ.get("PLATFORM_URL", "http://host.docker.internal:8080")
-    else:
-        platform_url = os.environ.get("PLATFORM_URL", "http://localhost:8080")
+    platform_url = os.environ.get("PLATFORM_URL", "http://host.docker.internal:8080")
     awareness_config = get_awareness_config()

     # 0. Initialise OpenTelemetry (no-op if packages not installed)
@@ -51,6 +51,22 @@ class AdaptorSource:

 def _load_module_from_path(module_name: str, path: Path):
     """Import a Python file by absolute path. Returns the module or None on failure."""
+    # Ensure the plugins_registry package and its submodules are importable in the
+    # fresh module namespace created by module_from_spec(). Plugin adapters
+    # (molecule-skill-*/adapters/*.py) use "from plugins_registry.builtins import ..."
+    # which requires plugins_registry and its submodules to already be in sys.modules.
+    # We import and register them before exec_module so the plugin's own
+    # from ... import statements resolve correctly.
+    import sys
+    import plugins_registry
+    sys.modules.setdefault("plugins_registry", plugins_registry)
+    for _sub in ("builtins", "protocol", "raw_drop"):
+        try:
+            sub = importlib.import_module(f"plugins_registry.{_sub}")
+            sys.modules.setdefault(f"plugins_registry.{_sub}", sub)
+        except Exception:
+            # Submodule may not exist in all versions; skip if absent.
+            pass
     spec = importlib.util.spec_from_file_location(module_name, path)
     if spec is None or spec.loader is None:
         return None
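As a small standalone illustration of why the pre-seeding above is needed (stdlib only; the file path is a placeholder and is never read): module_from_spec() registers nothing in sys.modules, so any absolute imports executed later by exec_module() are resolved against whatever sys.modules and sys.path hold at that moment.

```python
import importlib.util
import sys

# spec_from_file_location/module_from_spec never touch sys.modules on their own.
spec = importlib.util.spec_from_file_location("demo_adapter", "/tmp/demo_adapter.py")
module = importlib.util.module_from_spec(spec)
print("demo_adapter" in sys.modules)  # False: nothing was registered automatically
```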
workspace/plugins_registry/test_resolve_plugin.py (new file, 60 lines)
@@ -0,0 +1,60 @@
+"""Tests for _load_module_from_path sys.modules injection fix (issue #296).
+
+Verifies that plugin adapters using "from plugins_registry.builtins import ..."
+can be loaded via _load_module_from_path() without ModuleNotFoundError.
+"""
+import sys
+import tempfile
+import os
+from pathlib import Path
+
+# Ensure the plugins_registry package is importable
+import plugins_registry
+
+from plugins_registry import _load_module_from_path
+
+
+def test_load_adapter_with_plugins_registry_import():
+    """Plugin adapter using 'from plugins_registry.builtins import ...' loads cleanly."""
+    # Write a temp adapter file that does the exact import from the bug report.
+    with tempfile.NamedTemporaryFile(
+        mode="w", suffix=".py", delete=False, dir=tempfile.gettempdir()
+    ) as f:
+        f.write("from plugins_registry.builtins import AgentskillsAdaptor as Adaptor\n")
+        f.write("assert Adaptor is not None\n")
+        adapter_path = Path(f.name)
+
+    try:
+        module = _load_module_from_path("test_adapter", adapter_path)
+        assert module is not None, "module should load without error"
+        assert hasattr(module, "Adaptor"), "module should expose Adaptor"
+    finally:
+        os.unlink(adapter_path)
+
+
+def test_load_adapter_with_full_plugins_registry_import():
+    """Plugin adapter using 'from plugins_registry import ...' loads cleanly."""
+    with tempfile.NamedTemporaryFile(
+        mode="w", suffix=".py", delete=False, dir=tempfile.gettempdir()
+    ) as f:
+        f.write("from plugins_registry import InstallContext, resolve\n")
+        f.write("from plugins_registry.protocol import PluginAdaptor\n")
+        f.write("assert InstallContext is not None\n")
+        f.write("assert resolve is not None\n")
+        f.write("assert PluginAdaptor is not None\n")
+        adapter_path = Path(f.name)
+
+    try:
+        module = _load_module_from_path("test_adapter_full", adapter_path)
+        assert module is not None, "module should load without error"
+        assert hasattr(module, "InstallContext"), "module should expose InstallContext"
+        assert hasattr(module, "resolve"), "module should expose resolve"
+        assert hasattr(module, "PluginAdaptor"), "module should expose PluginAdaptor"
+    finally:
+        os.unlink(adapter_path)
+
+
+if __name__ == "__main__":
+    test_load_adapter_with_plugins_registry_import()
+    test_load_adapter_with_full_plugins_registry_import()
+    print("ALL TESTS PASS")