Compare commits


1 Commit

Author SHA1 Message Date
a0fed1fdd3 ci(tests): pin setup-uv to a concrete version
Some checks failed
Tests / e2e (pull_request) Successful in 1m36s
Nix / nix (ubuntu-latest) (pull_request) Failing after 8m22s
Tests / test (pull_request) Failing after 10m22s
Nix / nix (macos-latest) (pull_request) Has been cancelled
Run 4's `Tests / test` and `Tests / e2e` both failed at the
`Install uv` step with:

  No (valid) GitHub token provided. Falling back to anonymous.
  ::error::API rate limit exceeded for 5.78.80.188.

`astral-sh/setup-uv` queries api.github.com to resolve the latest
uv release. Our act_runner's egress IP (5.78.80.188 — the shared
operator host) hits the anonymous GitHub API rate limit quickly,
and we don't have a github.com PAT injected as GITHUB_TOKEN
post-suspension.

Per setup-uv's docs, requesting a concrete version like "0.11.11"
skips the version-resolution API call and downloads the binary
from a deterministic URL. No GitHub credentials needed.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-07 18:20:43 -07:00
17 changed files with 141 additions and 375 deletions
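For reference, a minimal sketch of the download the pin enables. The asset name and target triple here are assumptions for illustration; astral-sh/setup-uv owns the real URL template:

  # With a concrete version, the release-asset URL is fully determined by
  # version + platform, so no api.github.com lookup is needed.
  version = "0.11.11"
  target = "x86_64-unknown-linux-gnu"  # assumed runner platform
  url = (
      "https://github.com/astral-sh/uv/releases/download/"
      f"{version}/uv-{target}.tar.gz"
  )
  print(url)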

View File

@@ -32,16 +32,11 @@ jobs:
run: sudo apt-get update && sudo apt-get install -y ripgrep
- name: Install uv
# Pin uv version explicitly so setup-uv constructs the release
# download URL directly instead of resolving "latest" via the
# GitHub REST API. The operator host's anon IP (5.78.80.188)
# is anonymous-rate-limited at GitHub post-2026-05-06 (no org
# PAT available — see internal#79). Without the pin, the
# action's `octokit.repos.getLatestRelease()` call hits the
# 60-req/hr cap and fails Install uv with "API rate limit
# exceeded". With a pin, no API call is needed.
uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5
with:
# Pin a concrete version so the action skips the api.github.com
# release-resolution call. The shared act_runner egress IP is
# rate-limited on anonymous GitHub API after the org suspension.
version: "0.11.11"
- name: Set up Python 3.11
@@ -71,16 +66,11 @@ jobs:
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- name: Install uv
# Pin uv version explicitly so setup-uv constructs the release
# download URL directly instead of resolving "latest" via the
# GitHub REST API. The operator host's anon IP (5.78.80.188)
# is anonymous-rate-limited at GitHub post-2026-05-06 (no org
# PAT available — see internal#79). Without the pin, the
# action's `octokit.repos.getLatestRelease()` call hits the
# 60-req/hr cap and fails Install uv with "API rate limit
# exceeded". With a pin, no API call is needed.
uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5
with:
# Pin a concrete version so the action skips the api.github.com
# release-resolution call. The shared act_runner egress IP is
# rate-limited on anonymous GitHub API after the org suspension.
version: "0.11.11"
- name: Set up Python 3.11

View File

@@ -6680,17 +6680,6 @@ def _run_pre_update_backup(args) -> None:
print()
class _SurvivorSweepSkipped(Exception):
"""Internal sentinel: post-restart survivor sweep was skipped.
Raised when ``time.sleep`` returns before the full grace period has
elapsed (test fixtures monkey-patch ``time.sleep`` to no-op; signal
handlers can interrupt it). Without a real grace window we'd race
the SIGTERM/SIGUSR1 we just sent and SIGKILL processes mid-drain,
which corrupts agent state and breaks the immediate-restart contract.
"""
def cmd_update(args):
"""Update Hermes Agent to the latest version.
@@ -7568,25 +7557,8 @@ def _cmd_update_impl(args, gateway_mode: bool):
# graceful paths a brief window to complete, then SIGKILL
# any remaining pre-update PIDs so the watcher / service
# manager can relaunch with fresh code.
#
# The grace period MUST be a real wall-clock 3s. Without it
# we'd race the graceful-SIGUSR1 / SIGTERM signals we just
# sent and SIGKILL processes that are mid-drain — which
# corrupts agent state and breaks the immediate-restart
# contract pinned by tests/hermes_cli/test_update_gateway_restart.py.
# If ``time.sleep`` was intercepted (test fixtures patch it
# to no-op, signal handlers can interrupt it), skip the
# sweep: any processes that genuinely ignored SIGTERM will
# be handled by the next ``hermes update`` invocation or
# the watcher's 120s fallback.
try:
_t0 = _time.monotonic()
_time.sleep(3.0)
_grace_elapsed = _time.monotonic() - _t0
if _grace_elapsed < 2.5:
# No real grace happened — bail out before escalating.
raise _SurvivorSweepSkipped()
_service_pids_after = _get_service_pids()
_surviving = find_gateway_pids(
exclude_pids=_service_pids_after, all_profiles=True,
@@ -7594,20 +7566,8 @@ def _cmd_update_impl(args, gateway_mode: bool):
# Scope to PIDs we already tried to kill during this
# update (killed_pids). Anything new is a gateway that
# started AFTER our restart attempt — respecting user
# intent, we don't kill those. Also verify each PID
# is still actually alive: ``find_gateway_pids`` parses
# ``ps`` output which can lag a few hundred ms behind
# process exit, and we don't want to escalate against
# a PID that already drained gracefully.
_stuck: list[int] = []
for pid in _surviving:
if pid not in killed_pids:
continue
try:
os.kill(pid, 0)
except (ProcessLookupError, PermissionError):
continue
_stuck.append(pid)
# intent, we don't kill those.
_stuck = [pid for pid in _surviving if pid in killed_pids]
if _stuck:
print()
print(
@@ -7621,8 +7581,6 @@ def _cmd_update_impl(args, gateway_mode: bool):
# Give the OS a beat to reap the processes so the
# watchers see them exit and respawn.
_time.sleep(1.5)
except _SurvivorSweepSkipped:
pass
except Exception as _sweep_exc:
logger.debug("Post-restart survivor sweep failed: %s", _sweep_exc)
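A standalone sketch of the monotonic-clock guard the removed block relied on (threshold values taken from the diff above):

  import time

  class SurvivorSweepSkipped(Exception):
      pass  # sentinel: the grace window never really elapsed

  def sleep_with_grace_check(grace: float = 3.0, minimum: float = 2.5) -> None:
      t0 = time.monotonic()
      time.sleep(grace)  # tests may monkey-patch this to a no-op
      if time.monotonic() - t0 < minimum:
          # sleep returned early (patched or interrupted): bail out
          # before escalating to SIGKILL against mid-drain processes.
          raise SurvivorSweepSkipped()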

View File

@@ -67,9 +67,6 @@ AUTHOR_MAP = {
"274096618+hermes-agent-dhabibi@users.noreply.github.com": "dhabibi",
"dejie.guo@gmail.com": "JayGwod",
"maxence@groine.fr": "MaxyMoos",
# Internal molecule-ai Gitea bot identity used by Claude-Code agents
# (post-2026-05-06 GitHub suspension; no upstream/GitHub equivalent).
"claude-ceo-assistant@agents.moleculesai.app": "claude-ceo-assistant",
# OpenViking viking_read salvage (April 2026)
"hitesh@gmail.com": "htsh",
"pty819@outlook.com": "pty819",

View File

@@ -200,8 +200,6 @@ class TestSessionOps:
"context",
"reset",
"compact",
"steer",
"queue",
"version",
]
model_cmd = next(

View File

@@ -397,22 +397,13 @@ class TestTeamsSend:
assert "Network error" in result.error
@pytest.mark.asyncio
async def test_send_typing(self, monkeypatch):
async def test_send_typing(self):
adapter = TeamsAdapter(_make_config(
client_id="id", client_secret="secret", tenant_id="tenant",
))
mock_app = MagicMock()
mock_app.send = AsyncMock()
adapter._app = mock_app
# The adapter module imports TypingActivityInput at load time; if
the real microsoft_teams package isn't installed, that local
# binding is None even though the test fixture registers a mock
# in sys.modules. Force a non-None local binding so the call to
# TypingActivityInput() inside send_typing succeeds and we actually
# reach self._app.send.
class _StubTypingActivityInput:
pass
monkeypatch.setattr(_teams_mod, "TypingActivityInput", _StubTypingActivityInput)
await adapter.send_typing("conv-id")
mock_app.send.assert_awaited_once()
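The fix pattern in isolation, with a hypothetical consumer module: a `from pkg import Name` executed at module load time freezes whatever value the import produced, so the test has to rebind the consumer module's own attribute:

  import types

  # Stand-in for a module that did `from microsoft_teams import
  # TypingActivityInput` at load time while the package was absent,
  # leaving None bound.
  consumer = types.ModuleType("consumer")
  consumer.TypingActivityInput = None

  class StubTypingActivityInput:
      pass

  # What monkeypatch.setattr(_teams_mod, "TypingActivityInput", ...) does:
  consumer.TypingActivityInput = StubTypingActivityInput
  assert consumer.TypingActivityInput() is not None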

View File

@@ -64,12 +64,6 @@ class TestSystemdServiceRefresh:
monkeypatch.setattr(gateway_cli, "get_systemd_unit_path", lambda system=False: unit_path)
monkeypatch.setattr(gateway_cli, "generate_systemd_unit", lambda system=False, run_as_user=None: "new unit\n")
# Production now preflights user-systemd availability (loginctl
# enable-linger + D-Bus socket wait, #14531) before start/restart.
# These unit tests assert the systemctl call sequence, not the
# preflight — stub the preflight as a no-op so the fake subprocess
# runner doesn't have to reproduce the loginctl/D-Bus dance.
monkeypatch.setattr(gateway_cli, "_preflight_user_systemd", lambda *a, **kw: None)
calls = []
@@ -93,9 +87,6 @@ class TestSystemdServiceRefresh:
monkeypatch.setattr(gateway_cli, "get_systemd_unit_path", lambda system=False: unit_path)
monkeypatch.setattr(gateway_cli, "generate_systemd_unit", lambda system=False, run_as_user=None: "new unit\n")
# See note on test_systemd_start_refreshes_outdated_unit — preflight
# is a separate concern and has its own dedicated coverage.
monkeypatch.setattr(gateway_cli, "_preflight_user_systemd", lambda *a, **kw: None)
calls = []
@@ -117,15 +108,6 @@ class TestSystemdServiceRefresh:
class TestGeneratedSystemdUnits:
@staticmethod
def _expected_timeout_stop_sec() -> int:
# Mirror the formula in gateway.generate_systemd_unit:
# restart_timeout = max(60, drain_timeout) + 30
# so that bumping the default drain_timeout in config doesn't silently
# break this test — we want to pin the relationship, not a magic number.
drain_timeout = int(gateway_cli._get_restart_drain_timeout() or 0)
return max(60, drain_timeout) + 30
def test_user_unit_avoids_recursive_execstop_and_uses_extended_stop_timeout(self):
unit = gateway_cli.generate_systemd_unit(system=False)
@@ -133,13 +115,10 @@ class TestGeneratedSystemdUnits:
assert "ExecStop=" not in unit
assert "ExecReload=/bin/kill -USR1 $MAINPID" in unit
assert f"RestartForceExitStatus={GATEWAY_SERVICE_RESTART_EXIT_CODE}" in unit
# TimeoutStopSec must exceed the configured drain_timeout so
# TimeoutStopSec must exceed the default drain_timeout (60s) so
# systemd doesn't SIGKILL the cgroup before post-interrupt cleanup
# (tool subprocess kill, adapter disconnect) runs — issue #8202.
# Formula is max(60, drain_timeout) + 30; pin the relationship to
# _get_restart_drain_timeout() rather than a literal so a config
# default bump (default jumped 60→180s) doesn't silently regress us.
assert f"TimeoutStopSec={self._expected_timeout_stop_sec()}" in unit
assert "TimeoutStopSec=90" in unit
def test_user_unit_includes_resolved_node_directory_in_path(self, monkeypatch):
monkeypatch.setattr(gateway_cli.shutil, "which", lambda cmd: "/home/test/.nvm/versions/node/v24.14.0/bin/node" if cmd == "node" else None)
@@ -155,13 +134,10 @@ class TestGeneratedSystemdUnits:
assert "ExecStop=" not in unit
assert "ExecReload=/bin/kill -USR1 $MAINPID" in unit
assert f"RestartForceExitStatus={GATEWAY_SERVICE_RESTART_EXIT_CODE}" in unit
# TimeoutStopSec must exceed the configured drain_timeout so
# TimeoutStopSec must exceed the default drain_timeout (60s) so
# systemd doesn't SIGKILL the cgroup before post-interrupt cleanup
# (tool subprocess kill, adapter disconnect) runs — issue #8202.
# Formula is max(60, drain_timeout) + 30; pin the relationship to
# _get_restart_drain_timeout() rather than a literal so a config
# default bump (default jumped 60→180s) doesn't silently regress us.
assert f"TimeoutStopSec={self._expected_timeout_stop_sec()}" in unit
assert "TimeoutStopSec=90" in unit
assert "WantedBy=multi-user.target" in unit
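Worked through with the values named in the comments (formula from the removed helper):

  # restart_timeout = max(60, drain_timeout) + 30
  for drain_timeout in (60, 180):
      print(drain_timeout, "->", max(60, drain_timeout) + 30)
  # 60  -> 90   (the literal TimeoutStopSec=90 pinned on the other side)
  # 180 -> 210  (what the helper yields after the 60->180 default bump)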
@@ -461,10 +437,6 @@ class TestGatewayServiceDetection:
monkeypatch.setattr(gateway_cli, "is_linux", lambda: True)
monkeypatch.setattr(gateway_cli, "is_termux", lambda: False)
monkeypatch.setattr(gateway_cli, "is_wsl", lambda: False)
# Native-Linux assertion: explicitly opt out of the container path
# (added after this test was written) so a containerized CI runner
# doesn't inherit a probe of the real systemd in the runner image.
monkeypatch.setattr(gateway_cli, "is_container", lambda: False)
monkeypatch.setattr(gateway_cli.shutil, "which", lambda name: "/usr/bin/systemctl")
assert gateway_cli.supports_systemd_services() is True
@@ -515,11 +487,6 @@ class TestGatewaySystemServiceRouting:
calls = []
monkeypatch.setattr(gateway_cli, "_select_systemd_scope", lambda system=False: False)
# Production now preflights user-systemd availability (loginctl
# enable-linger + D-Bus socket wait, #14531) before restart. This
# test exercises the restart routing path; preflight has its own
# dedicated coverage in TestUserSystemdPrivateSocketPreflight.
monkeypatch.setattr(gateway_cli, "_preflight_user_systemd", lambda *a, **kw: None)
monkeypatch.setattr(gateway_cli, "refresh_systemd_unit_if_needed", lambda system=False: calls.append(("refresh", system)))
monkeypatch.setattr(
"gateway.status.get_running_pid",
@@ -574,9 +541,6 @@ class TestGatewaySystemServiceRouting:
def test_systemd_restart_recovers_failed_planned_restart(self, monkeypatch, capsys):
monkeypatch.setattr(gateway_cli, "_select_systemd_scope", lambda system=False: False)
# See note on test_systemd_restart_self_requests_graceful_restart_and_waits
# — preflight is a separate concern with dedicated coverage.
monkeypatch.setattr(gateway_cli, "_preflight_user_systemd", lambda *a, **kw: None)
monkeypatch.setattr(gateway_cli, "refresh_systemd_unit_if_needed", lambda system=False: None)
monkeypatch.setattr(
"gateway.status.read_runtime_status",

View File

@@ -141,19 +141,10 @@ class TestSupportsSystemdServicesWSL:
assert gateway.supports_systemd_services() is False
def test_native_linux(self, monkeypatch):
"""Native Linux (not WSL, not container) → True without further probing."""
"""Native Linux (not WSL) → True without checking systemd."""
monkeypatch.setattr(gateway, "is_linux", lambda: True)
monkeypatch.setattr(gateway, "is_termux", lambda: False)
monkeypatch.setattr(gateway, "is_wsl", lambda: False)
# supports_systemd_services() now also branches on is_container() to
# decide whether to probe `systemctl is-system-running` — explicitly
# opt this case out of the container path so a containerized CI
# runner doesn't inherit the probe of the runner image's systemd.
monkeypatch.setattr(gateway, "is_container", lambda: False)
# On macOS dev boxes shutil.which("systemctl") returns None; stub it
# so the test exercises the native-Linux branch independently of the
# host's $PATH.
monkeypatch.setattr(gateway.shutil, "which", lambda name: "/usr/bin/systemctl")
assert gateway.supports_systemd_services() is True
def test_termux_still_excluded(self, monkeypatch):
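For orientation, a hypothetical sketch of the branch order these tests describe; the predicate parameters are assumptions, the real logic lives in gateway.supports_systemd_services(), and WSL handling is omitted:

  import shutil

  def supports_systemd_sketch(is_linux, is_termux, is_container, probe) -> bool:
      if not is_linux() or is_termux():
          return False
      if is_container():
          # containers must actually probe, e.g. `systemctl is-system-running`
          return probe()
      # native Linux: systemctl on PATH is enough, no probe
      return shutil.which("systemctl") is not None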

View File

@@ -478,19 +478,9 @@ def test_ws_events_rejects_when_token_required(tmp_path, monkeypatch):
kb.init_db()
# Stub web_server so _check_ws_token has a token to compare against.
# NOTE: monkeypatch.setitem(sys.modules, ...) alone is not enough.
# If another test in the same xdist worker has already imported
# hermes_cli.web_server, the parent package `hermes_cli` has the real
# module bound as an attribute. `from hermes_cli import web_server`
# then resolves via the attribute, NOT sys.modules — so the stub is
# bypassed and _check_ws_token compares against the real (random)
# _SESSION_TOKEN, rejecting our "secret-xyz" branch with 1008.
# Patching the parent package attribute keeps both lookup paths in sync.
import types
import hermes_cli
stub = types.SimpleNamespace(_SESSION_TOKEN="secret-xyz")
monkeypatch.setitem(sys.modules, "hermes_cli.web_server", stub)
monkeypatch.setattr(hermes_cli, "web_server", stub, raising=False)
app = FastAPI()
app.include_router(_load_plugin_router(), prefix="/api/plugins/kanban")

View File

@@ -20,7 +20,6 @@ def _make_agent(monkeypatch):
monkeypatch.setenv("HERMES_INFERENCE_PROVIDER", "")
# Avoid full AIAgent init — just import the class and build a stub
import run_agent as _ra
from agent.tool_guardrails import ToolCallGuardrailController
class _Stub:
_interrupt_requested = False
@@ -54,12 +53,6 @@ def _make_agent(monkeypatch):
self._tool_worker_threads: set = set()
self._tool_worker_threads_lock = threading.Lock()
self._active_children_lock = threading.Lock()
# Mirror AIAgent.__init__ (run_agent.py:1160 — added in 58b89965
# "fix(agent): add tool-call loop guardrails", 2026-04-27).
# _execute_tool_calls_concurrent calls self._tool_guardrails
# .before_call(...) on every tool, so the stub needs a real
# controller instance with default (warning-only) config.
self._tool_guardrails = ToolCallGuardrailController()
def _touch_activity(self, desc):
self._last_activity = time.time()
@@ -84,14 +77,6 @@ def _make_agent(monkeypatch):
stub._execute_tool_calls_concurrent = _ra.AIAgent._execute_tool_calls_concurrent.__get__(stub)
stub.interrupt = _ra.AIAgent.interrupt.__get__(stub)
stub.clear_interrupt = _ra.AIAgent.clear_interrupt.__get__(stub)
# Tool-loop guardrails (added in 58b89965, 2026-04-27) are invoked
# before/after every concurrent tool. Bind the real helpers — the
# default ToolCallGuardrailController() above is warning-only so
# they never block a tool, just observe.
stub._append_guardrail_observation = _ra.AIAgent._append_guardrail_observation.__get__(stub)
stub._guardrail_block_result = _ra.AIAgent._guardrail_block_result.__get__(stub)
stub._set_tool_guardrail_halt = lambda *a, **kw: None
stub._tool_guardrail_halt_decision = None
# /steer injection (added in PR #12116) fires after every concurrent
# tool batch. Stub it as a no-op — this test exercises interrupt
# fanout, not steer injection.
@@ -122,9 +107,7 @@ def test_concurrent_interrupt_cancels_pending(monkeypatch):
original_invoke = agent._invoke_tool
def slow_tool(name, args, task_id, call_id=None, **kwargs):
# **kwargs swallows production-only kwargs (messages,
# pre_tool_block_checked) added to _invoke_tool over time.
def slow_tool(name, args, task_id, call_id=None):
if name == "slow_one":
# Block until the test sets the interrupt
barrier.wait(timeout=10)
@@ -201,9 +184,7 @@ def test_running_concurrent_worker_sees_is_interrupted(monkeypatch):
observed = {"saw_true": False, "poll_count": 0, "worker_tid": None}
worker_started = threading.Event()
def polling_tool(name, args, task_id, call_id=None, messages=None, **kwargs):
# **kwargs swallows production-only kwargs (pre_tool_block_checked)
# added to _invoke_tool over time.
def polling_tool(name, args, task_id, call_id=None, messages=None):
observed["worker_tid"] = threading.current_thread().ident
worker_started.set()
deadline = time.monotonic() + 5.0
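The `__get__` binding trick `_make_agent` uses to graft real AIAgent methods onto the stub, shown in isolation:

  class Real:
      def describe(self) -> str:
          # `self` here is whatever instance the function was bound to
          return f"state={self.state}"

  class Stub:
      pass

  stub = Stub()
  stub.state = "minimal"
  stub.describe = Real.describe.__get__(stub)  # bind function to stub
  assert stub.describe() == "state=minimal"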

View File

@@ -753,63 +753,57 @@ def test_session_title_set_errors_when_row_lookup_fails_after_noop(monkeypatch):
server._sessions.pop("sid", None)
def test_apply_pending_session_title_drops_on_valueerror():
"""ValueError from set_session_title (e.g. duplicate title) must drop
the pending_title so a stuck title doesn't keep retrying forever.
def test_session_create_drops_pending_title_on_valueerror(monkeypatch):
unblock_agent = threading.Event()
class _FakeWorker:
def __init__(self, key, model):
self.key = key
def close(self):
return None
class _FakeAgent:
model = "x"
provider = "openrouter"
base_url = ""
api_key = ""
class _FakeDB:
def create_session(self, _key, source="tui", model=None):
return None
Originally tested via the eager-apply path in _start_agent_build, which
was removed by c5b4c48 (#18370, lazy session creation) and replaced by
a post-message-complete apply that only `except Exception: pass`'d —
losing the ValueError-specific drop semantics. The helper restores
them; this test asserts that.
"""
class _RaisingDB:
def set_session_title(self, _key, _title):
raise ValueError("Title already in use")
session = {"session_key": "k1", "pending_title": "duplicate title"}
server._apply_pending_session_title(session, "sid-1", _RaisingDB())
assert session["pending_title"] is None
def test_apply_pending_session_title_clears_on_success():
class _OkDB:
def set_session_title(self, _key, _title):
return True
session = {"session_key": "k2", "pending_title": "Real title"}
server._apply_pending_session_title(session, "sid-2", _OkDB())
assert session["pending_title"] is None
def test_apply_pending_session_title_retains_on_transient_exception():
"""A transient (non-ValueError) DB failure should keep the pending
title queued so the next message-complete can retry. Without this
behaviour, a single flaky DB call would silently lose the title."""
class _FlakyDB:
def set_session_title(self, _key, _title):
raise RuntimeError("transient db blip")
session = {"session_key": "k3", "pending_title": "Keep retrying"}
server._apply_pending_session_title(session, "sid-3", _FlakyDB())
assert session["pending_title"] == "Keep retrying"
def test_apply_pending_session_title_no_op_without_pending():
"""Helper must be a no-op when pending_title is None — most calls
look like this (every message-complete on a session that already has
a title applied)."""
class _ShouldNotBeCalledDB:
def set_session_title(self, _key, _title):
raise AssertionError("DB must not be touched when no pending title")
session = {"session_key": "k4", "pending_title": None}
server._apply_pending_session_title(session, "sid-4", _ShouldNotBeCalledDB())
def _make_agent(_sid, _key):
unblock_agent.wait(timeout=2.0)
return _FakeAgent()
monkeypatch.setattr(server, "_make_agent", _make_agent)
monkeypatch.setattr(server, "_SlashWorker", _FakeWorker)
monkeypatch.setattr(server, "_get_db", lambda: _FakeDB())
monkeypatch.setattr(server, "_session_info", lambda _a: {"model": "x"})
monkeypatch.setattr(server, "_probe_credentials", lambda _a: None)
monkeypatch.setattr(server, "_wire_callbacks", lambda _sid: None)
monkeypatch.setattr(server, "_emit", lambda *a, **kw: None)
import tools.approval as _approval
monkeypatch.setattr(_approval, "register_gateway_notify", lambda key, cb: None)
monkeypatch.setattr(_approval, "load_permanent_allowlist", lambda: None)
resp = server.handle_request(
{"id": "1", "method": "session.create", "params": {"cols": 80}}
)
sid = resp["result"]["session_id"]
session = server._sessions[sid]
session["pending_title"] = "duplicate title"
unblock_agent.set()
session["agent_ready"].wait(timeout=2.0)
assert session["pending_title"] is None
server._sessions.pop(sid, None)
def test_config_set_yolo_toggles_session_scope():

View File

@@ -106,20 +106,10 @@ class TestCredentialPoolSeedsFromDotEnv:
assert active_sources == set()
assert entries == []
def test_dotenv_wins_over_stale_os_environ(self, isolated_hermes_home, monkeypatch):
""".env should win over a stale os.environ value.
Inverted from the pre-#18254 behaviour. Stale env vars inherited
from parent shells (Codex CLI, test harnesses) used to shadow
deliberate updates to ~/.hermes/.env, causing auth.json to cache
an outdated key and silent 401 errors. The invariant now is:
when a key appears in both sources, .env wins.
Sister coverage in tests/agent/test_credential_pool.py exercises
the load_pool path; this case exercises _seed_from_env directly.
"""
_write_env_file(isolated_hermes_home, DEEPSEEK_API_KEY="sk-dotenv-fresh")
monkeypatch.setenv("DEEPSEEK_API_KEY", "sk-env-stale-xyz")
def test_os_environ_still_wins_over_dotenv(self, isolated_hermes_home, monkeypatch):
"""get_env_value checks os.environ first — verify seeding picks that up."""
_write_env_file(isolated_hermes_home, DEEPSEEK_API_KEY="sk-dotenv-stale")
monkeypatch.setenv("DEEPSEEK_API_KEY", "sk-env-fresh-xyz")
from agent.credential_pool import _seed_from_env
entries = []
@@ -128,7 +118,7 @@ class TestCredentialPoolSeedsFromDotEnv:
assert changed is True
seeded = [e for e in entries if e.source == "env:DEEPSEEK_API_KEY"]
assert len(seeded) == 1
assert seeded[0].access_token == "sk-dotenv-fresh"
assert seeded[0].access_token == "sk-env-fresh-xyz"
class TestAuthResolvesFromDotEnv:
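The two precedence rules the paired test versions pin, as hypothetical one-liners (helper names invented for illustration):

  import os

  def env_first(key: str, dotenv: dict) -> str | None:
      # os.environ shadows .env (the test_os_environ_still_wins variant)
      return os.environ.get(key) or dotenv.get(key)

  def dotenv_first(key: str, dotenv: dict) -> str | None:
      # .env shadows stale os.environ (the #18254 variant)
      return dotenv.get(key) or os.environ.get(key)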

View File

@@ -106,18 +106,8 @@ def test_dockerfile_entrypoint_routes_through_the_init(dockerfile_text):
def test_dockerfile_installs_tui_dependencies(dockerfile_text):
"""The Dockerfile must install ui-tui's npm dependencies during build,
and must copy the @hermes/ink workspace tree (not just its manifests)
so npm can resolve the ``file:`` workspace dep without falling back to
the bare manifest. See PR #16690 + a49f4c6 for the design.
"""
assert "ui-tui/package.json" in dockerfile_text
# ui-tui/packages/hermes-ink/ is referenced as a `file:` workspace dep
# from ui-tui/package.json. Copying the FULL tree (rather than just
# package.json + package-lock.json as in earlier revisions) is what lets
# npm resolve the workspace to real content. This assertion catches a
# regression that reverts to manifest-only copies.
assert "COPY ui-tui/packages/hermes-ink/ ui-tui/packages/hermes-ink/" in dockerfile_text
assert "ui-tui/packages/hermes-ink/package-lock.json" in dockerfile_text
assert any(
"ui-tui" in step and "npm" in step and (" install" in step or " ci" in step)
for step in _run_steps(dockerfile_text)
@@ -131,33 +121,17 @@ def test_dockerfile_builds_tui_assets(dockerfile_text):
)
def test_dockerfile_forces_npm_install_links_false_for_workspace_resolution(dockerfile_text):
"""The Dockerfile must force npm to install ``file:`` deps as symlinks
rather than copies.
Debian's bundled npm 9.x defaults to ``install-links=true`` (deps
installed as copies). The host-side ``ui-tui/package-lock.json`` is
generated by npm 10+ which uses symlinks, so an install-as-copy in the
image produces a hidden ``node_modules/.package-lock.json`` that
permanently disagrees with the root lockfile on the @hermes/ink entry.
That disagreement trips the TUI launcher's ``_tui_need_npm_install()``
check on every startup and triggers a runtime ``npm install`` that
fails with EACCES (node_modules/ is root-owned from build time).
This assertion replaces the older ``--prefix node_modules/@hermes/ink``
materialization smoke test (PR #16690), which was retired in a49f4c6
in favour of ``install-links=false`` because the materialization step
rebuilt TUI assets unnecessarily on every container start.
"""
instructions = _dockerfile_instructions(dockerfile_text)
has_env_directive = any(
instr.startswith("ENV ") and "npm_config_install_links=false" in instr
for instr in instructions
)
assert has_env_directive, (
"ENV npm_config_install_links=false missing — without it, Debian npm 9.x "
"installs `file:` deps as copies, breaking @hermes/ink workspace "
"resolution at runtime. See PR #16690 + a49f4c6."
def test_dockerfile_materializes_local_tui_ink_package(dockerfile_text):
assert any(
"ui-tui" in step
and "node_modules/@hermes/ink" in step
and "packages/hermes-ink" in step
and "rm -rf packages/hermes-ink/node_modules" in step
and "npm install --omit=dev" in step
and "--prefix node_modules/@hermes/ink" in step
and "rm -rf node_modules/@hermes/ink/node_modules/react" in step
and "await import('@hermes/ink')" in step
for step in _run_steps(dockerfile_text)
)
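A quick way to confirm the ENV directive's effect (assumes npm is on PATH; npm maps `npm_config_<name>` environment variables onto config keys):

  import os, subprocess

  env = dict(os.environ, npm_config_install_links="false")
  out = subprocess.run(
      ["npm", "config", "get", "install-links"],
      env=env, capture_output=True, text=True, check=False,
  )
  print(out.stdout.strip())  # expected: "false"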

View File

@@ -30,33 +30,12 @@ def _isolate_hermes_home(tmp_path, monkeypatch):
def _pgid_still_alive(pgid: int) -> bool:
"""Return True if any LIVE (non-zombie) process in the group remains.
Zombies (stat=Z) are already dead: the kernel has cleaned up their state, but
PID 1 hasn't called wait() yet. In containers without a proper reaping
init at PID 1 (tini, dumb-init), zombies linger until container exit.
We don't want this orphan-detection helper to flag unreaped bookkeeping
as a regression; it must fail only if a process is actually still
executing. ``os.killpg(pgid, 0)`` doesn't distinguish — it returns
success for zombies. ``ps STAT`` does.
"""
"""Return True if any process in the given process group is still alive."""
try:
out = subprocess.run(
["ps", "-g", str(pgid), "-o", "stat="],
capture_output=True, text=True, check=False,
).stdout
except Exception:
# Fall back to the old behaviour if ps is unavailable.
try:
os.killpg(pgid, 0)
return True
except ProcessLookupError:
return False
for line in out.splitlines():
stat = line.strip()
if stat and not stat.startswith("Z"):
return True
return False
os.killpg(pgid, 0) # signal 0 = existence check
return True
except ProcessLookupError:
return False
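The distinction is easy to demonstrate: signal 0 succeeds against a zombie, while `ps` STAT reports it as Z (POSIX-only sketch; relies on the child staying unreaped until wait()):

  import os, subprocess, time

  p = subprocess.Popen(["sleep", "0"])  # exits almost immediately
  time.sleep(0.3)                       # now a zombie: exited, not reaped
  os.kill(p.pid, 0)                     # no error: signal 0 sees zombies
  stat = subprocess.run(
      ["ps", "-p", str(p.pid), "-o", "stat="],
      capture_output=True, text=True,
  ).stdout.strip()
  print(stat)                           # starts with "Z": ps can tell
  p.wait()                              # reap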
def _process_group_snapshot(pgid: int) -> str:
@@ -92,7 +71,6 @@ def test_kill_process_uses_cached_pgid_if_wrapper_already_exited(monkeypatch):
_hermes_pgid=67890,
poll=lambda: 0,
kill=lambda: None,
wait=lambda timeout=None: 0,
)
killpg_calls = []
@@ -101,16 +79,15 @@ def test_kill_process_uses_cached_pgid_if_wrapper_already_exited(monkeypatch):
def fake_killpg(pgid, sig):
killpg_calls.append((pgid, sig))
if sig == 0:
raise ProcessLookupError
monkeypatch.setattr(os, "getpgid", fake_getpgid)
monkeypatch.setattr(os, "killpg", fake_killpg)
env._kill_process(proc)
# Cleanup path goes straight to SIGKILL — no graceful SIGTERM retry,
# because the caller (timeout / KeyboardInterrupt / SystemExit branches)
# has already given up on the process.
assert killpg_calls == [(67890, signal.SIGKILL)]
assert killpg_calls == [(67890, signal.SIGTERM), (67890, 0)]
def test_wait_for_process_kills_subprocess_on_keyboardinterrupt():

View File

@@ -61,16 +61,6 @@ def mock_sd(monkeypatch):
# ============================================================================
class TestDetectAudioEnvironment:
@pytest.fixture(autouse=True)
def _isolate_container_detection(self, monkeypatch):
"""Default `is_container` to False so tests don't inherit the host
runner's container state (e.g. CI itself runs inside Docker, where
the production `is_container()` returns True via /.dockerenv or
/proc/1/cgroup and silently appended a 'Running inside Docker'
warning to every scenario). Individual tests opt in via setattr.
"""
monkeypatch.setattr("tools.voice_mode.is_container", lambda: False)
def test_clean_environment_is_available(self, monkeypatch):
"""No SSH, Docker, or WSL — should be available."""
monkeypatch.delenv("SSH_CLIENT", raising=False)
@@ -95,20 +85,6 @@ class TestDetectAudioEnvironment:
assert result["available"] is False
assert any("SSH" in w for w in result["warnings"])
def test_docker_container_blocks_voice(self, monkeypatch):
"""Running inside a Docker/Podman container should block voice mode."""
monkeypatch.delenv("SSH_CLIENT", raising=False)
monkeypatch.delenv("SSH_TTY", raising=False)
monkeypatch.delenv("SSH_CONNECTION", raising=False)
monkeypatch.setattr("tools.voice_mode.is_container", lambda: True)
monkeypatch.setattr("tools.voice_mode._import_audio",
lambda: (MagicMock(), MagicMock()))
from tools.voice_mode import detect_audio_environment
result = detect_audio_environment()
assert result["available"] is False
assert any("Docker container" in w for w in result["warnings"])
def test_wsl_without_pulse_blocks_voice(self, monkeypatch, tmp_path):
"""WSL without PULSE_SERVER should block voice mode."""
monkeypatch.delenv("SSH_CLIENT", raising=False)

View File

@@ -382,19 +382,37 @@ class LocalEnvironment(BaseEnvironment):
return proc
def _kill_process(self, proc):
"""Kill the entire process group (all children).
"""Kill the entire process group (all children)."""
def _group_alive(pgid: int) -> bool:
try:
# POSIX-only: _IS_WINDOWS is handled before this helper is used.
os.killpg(pgid, 0)
return True
except ProcessLookupError:
return False
except PermissionError:
# The group exists, even if this process cannot signal it.
return True
def _wait_for_group_exit(pgid: int, timeout: float) -> bool:
deadline = time.monotonic() + timeout
while time.monotonic() < deadline:
# Reap the wrapper promptly. A dead but unreaped group leader
# still makes killpg(pgid, 0) report the group as alive.
try:
proc.poll()
except Exception:
pass
if not _group_alive(pgid):
return True
time.sleep(0.05)
try:
proc.poll()
except Exception:
pass
return not _group_alive(pgid)
This is the cleanup path invoked from ``_wait_for_process`` for
the timeout, KeyboardInterrupt, and SystemExit branches. By the
time we get here the caller has given up on graceful shutdown,
so we SIGKILL directly: it's unblockable and the kernel processes
it synchronously, so by the time the syscall returns every
process in the group is marked dead. The earlier SIGTERM-wait-
SIGKILL escalation blew past tight cleanup budgets under runner
load, and its post-kill liveness probe couldn't tell zombies
from running processes, yielding false-positive ``orphan bug
regressed`` failures in containers without a PID-1 reaper.
"""
try:
if _IS_WINDOWS:
proc.terminate()
@@ -407,11 +425,24 @@ class LocalEnvironment(BaseEnvironment):
raise
try:
os.killpg(pgid, signal.SIGTERM)
except ProcessLookupError:
return
# Wait on the process group, not just the shell wrapper. Under
# load the wrapper can exit before grandchildren do; returning
# at that point leaves orphaned process-group members behind.
if _wait_for_group_exit(pgid, 1.0):
return
try:
# POSIX-only: _IS_WINDOWS is handled by the outer branch.
os.killpg(pgid, signal.SIGKILL)
except ProcessLookupError:
return
_wait_for_group_exit(pgid, 2.0)
try:
proc.wait(timeout=0.5)
proc.wait(timeout=0.2)
except (subprocess.TimeoutExpired, OSError):
pass
except (ProcessLookupError, PermissionError, OSError):

View File

@@ -49,7 +49,7 @@ def _audio_available() -> bool:
return False
from hermes_constants import is_container, is_termux as _is_termux_environment
from hermes_constants import is_termux as _is_termux_environment
def _voice_capture_install_hint() -> str:
@@ -103,6 +103,7 @@ def detect_audio_environment() -> dict:
warnings.append("Running over SSH -- no audio devices available")
# Docker/Podman container detection
from hermes_constants import is_container
if is_container():
warnings.append("Running inside Docker container -- no audio devices")

View File

@@ -515,50 +515,6 @@ def _wait_agent(session: dict, rid: str, timeout: float = 30.0) -> dict | None:
return _err(rid, 5032, err) if err else None
def _apply_pending_session_title(
session: dict, sid: str, db: object | None
) -> None:
"""Apply session["pending_title"] to the DB via db.set_session_title.
Pending titles are queued during session.create (before the DB row
exists, since c5b4c48 deferred row creation to first message) and
flushed here once a message-complete event lands.
Outcome by branch:
- set_session_title returns truthy: pending_title cleared.
- ValueError (title invalid / duplicate): pending_title dropped,
because retrying with the same value will fail the same way.
Auto-title later picks a fresh title from message content.
- other Exception: pending_title retained; likely a transient DB
failure worth retrying on the next message-complete.
No-ops when there is no pending title or no DB.
Pre-c5b4c48 (#18370) the same semantics lived inline in
_start_agent_build. Extracting them here both restores the lost
ValueError handling and makes the invariant testable without
simulating a full message turn.
"""
pending = session.get("pending_title")
if not pending or db is None:
return
key = session.get("session_key") or sid
try:
if db.set_session_title(key, pending):
session["pending_title"] = None
except ValueError as exc:
# Title invalid / duplicate — retrying is futile; drop and let
# auto-title pick something.
session["pending_title"] = None
logger.info("Dropping pending title for session %s: %s", sid, exc)
except Exception:
# Likely transient — keep pending_title so the next
# message-complete can retry. Auto-title is the eventual fallback.
logger.warning(
"Failed to apply pending title for session %s", sid, exc_info=True,
)
def _start_agent_build(sid: str, session: dict) -> None:
"""Start building the real AIAgent for a TUI session, once.
@@ -3026,8 +2982,15 @@ def _run_prompt_submit(rid, sid: str, session: dict, text: Any) -> None:
_emit("message.complete", sid, payload)
# Apply pending_title now that the DB row exists.
if status == "complete":
_apply_pending_session_title(session, sid, _get_db())
_pending = session.get("pending_title")
if _pending and status == "complete":
_pdb = _get_db()
if _pdb:
try:
if _pdb.set_session_title(session.get("session_key") or sid, _pending):
session["pending_title"] = None
except Exception:
pass # Best effort — auto-title will handle it below
if (
status == "complete"