chore(salvage): strip duplicated/merge-corrupted blocks from PR #17664

Removes drive-by duplication that accumulated during the contributor
branch's multiple rebases. All runtime-benign (dict last-wins,
redefinition last-wins) but left dead source that would confuse
reviewers and maintainers.

Surgical in-place de-duplication (kept PR's intentional additions,
removed only the doubled copy):

* hermes_cli/auth.py: duplicate "gmi" + "azure-foundry" ProviderConfig
* hermes_cli/models.py: duplicate "gmi" entry in _PROVIDER_MODELS
* hermes_cli/config.py: duplicate NOTION/LINEAR/AIRTABLE/TENOR skill env
  block + duplicate get_custom_provider_context_length definition
* hermes_cli/gateway.py: duplicate _setup_yuanbao
* gateway/platforms/base.py: duplicate is_host_excluded_by_no_proxy
* gateway/platforms/telegram.py: duplicate delete_message
* gateway/stream_consumer.py: duplicate _should_send_fresh_final and
  _try_fresh_final
* gateway/run.py: duplicate _parse_reasoning_command_args /
  _resolve_session_reasoning_config / _set_session_reasoning_override,
  duplicate "Drain silently when interrupted" interrupt check
* run_agent.py: duplicate HERMES_AGENT_HELP_GUIDANCE append, duplicate
  codex_message_items capture, duplicate custom_providers resolution
* tools/approval.py: duplicate HARDLINE_PATTERNS section and duplicate
  hardline call in check_dangerous_command
* tools/mcp_tool.py: duplicate _orphan_stdio_pids module-level decl
* cron/scheduler.py: duplicate "not configured/enabled" check — kept
  the new early-rejection, removed the stale late-path copy

Full-file resets to origin/main (all PR additions were duplicates of
content already on main):

* ui-tui/packages/hermes-ink/index.d.ts
* ui-tui/packages/hermes-ink/src/entry-exports.ts
* ui-tui/packages/hermes-ink/src/ink/selection.ts
* ui-tui/src/app/interfaces.ts
* ui-tui/src/app/slash/commands/core.ts
* ui-tui/src/components/thinking.tsx
* ui-tui/src/lib/memoryMonitor.ts
* ui-tui/src/types.ts
* ui-tui/src/types/hermes-ink.d.ts
* tests/hermes_cli/test_doctor.py
* tests/hermes_cli/test_api_key_providers.py
* tests/hermes_cli/test_model_validation.py
* tests/plugins/memory/test_hindsight_provider.py
* tests/run_agent/test_run_agent.py
* tests/gateway/test_email.py
* tests/tools/test_dockerfile_pid1_reaping.py
* hermes_cli/commands.py (slack_native_slashes block — full duplicate)
This commit is contained in:
Teknium 2026-04-29 20:33:29 -07:00
parent 868bc1c242
commit 71c8ca17dc
29 changed files with 12 additions and 928 deletions

View File

@ -456,13 +456,6 @@ def _deliver_result(job: dict, content: str, adapters=None, loop=None) -> Option
)
if not delivered:
pconfig = config.platforms.get(platform)
if not pconfig or not pconfig.enabled:
msg = f"platform '{platform_name}' not configured/enabled"
logger.warning("Job '%s': %s", job["id"], msg)
delivery_errors.append(msg)
continue
# Standalone path: run the async send in a fresh event loop (safe from any thread)
coro = _send_to_platform(platform, pconfig, chat_id, cleaned_delivery_content, thread_id=thread_id, media_files=media_files)
try:

View File

@ -374,39 +374,6 @@ def is_host_excluded_by_no_proxy(hostname: str, no_proxy_value: str | None = Non
return False
def is_host_excluded_by_no_proxy(hostname: str, no_proxy_value: str | None = None) -> bool:
"""Return True when ``hostname`` matches a ``NO_PROXY`` entry.
Supports comma- or whitespace-separated entries with optional leading dots
and ``*.`` wildcards, which match both the apex domain and subdomains.
"""
raw = no_proxy_value
if raw is None:
raw = os.environ.get("NO_PROXY") or os.environ.get("no_proxy") or ""
raw = raw.strip()
if not raw:
return False
lower_hostname = hostname.lower()
for entry in re.split(r"[\s,]+", raw):
normalized = entry.strip().lower()
if not normalized:
continue
if normalized == "*":
return True
if normalized.startswith("*."):
normalized = normalized[2:]
elif normalized.startswith("."):
normalized = normalized[1:]
if lower_hostname == normalized or lower_hostname.endswith(f".{normalized}"):
return True
return False
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path

View File

@ -1318,31 +1318,6 @@ class TelegramAdapter(BasePlatformAdapter):
)
return False
async def delete_message(self, chat_id: str, message_id: str) -> bool:
"""Delete a previously sent Telegram message.
Used by the stream consumer's fresh-final cleanup path (ported
from openclaw/openclaw#72038) to remove long-lived preview
messages after sending the completed reply as a fresh message.
Telegram's Bot API ``deleteMessage`` works for bot-posted
messages in the last 48 hours. Failures are non-fatal — the
caller leaves the preview in place and logs at debug level.
"""
if not self._bot:
return False
try:
await self._bot.delete_message(
chat_id=int(chat_id),
message_id=int(message_id),
)
return True
except Exception as e:
logger.debug(
"[%s] Failed to delete Telegram message %s: %s",
self.name, message_id, e,
)
return False
async def send_update_prompt(
self, chat_id: str, prompt: str, default: str = "",
session_key: str = "",

View File

@ -1688,66 +1688,6 @@ class GatewayRunner:
else:
self._session_reasoning_overrides[session_key] = dict(reasoning_config)
@staticmethod
def _parse_reasoning_command_args(raw_args: str) -> tuple[str, bool]:
"""Parse `/reasoning` args into `(value, persist_global)`.
`/reasoning <level>` is session-scoped by default. `--global` may be
supplied in any position to persist the change to config.yaml.
"""
import shlex
text = str(raw_args or "").strip().replace("—", "--")
if not text:
return "", False
try:
tokens = shlex.split(text)
except ValueError:
tokens = text.split()
persist_global = False
value_tokens = []
for token in tokens:
if token == "--global":
persist_global = True
else:
value_tokens.append(token)
return " ".join(value_tokens).strip().lower(), persist_global
def _resolve_session_reasoning_config(
self,
*,
source: Optional[SessionSource] = None,
session_key: Optional[str] = None,
) -> dict | None:
"""Resolve reasoning effort for a session, honoring session overrides."""
resolved_session_key = session_key
if not resolved_session_key and source is not None:
try:
resolved_session_key = self._session_key_for_source(source)
except Exception:
resolved_session_key = None
overrides = getattr(self, "_session_reasoning_overrides", {}) or {}
if resolved_session_key and resolved_session_key in overrides:
return overrides[resolved_session_key]
return self._load_reasoning_config()
def _set_session_reasoning_override(
self,
session_key: str,
reasoning_config: Optional[dict],
) -> None:
"""Set or clear the session-scoped reasoning override."""
if not session_key:
return
if not hasattr(self, "_session_reasoning_overrides"):
self._session_reasoning_overrides = {}
if reasoning_config is None:
self._session_reasoning_overrides.pop(session_key, None)
else:
self._session_reasoning_overrides[session_key] = dict(reasoning_config)
@staticmethod
def _load_service_tier() -> str | None:
"""Load Priority Processing setting from config.yaml.
@ -10778,22 +10718,6 @@ class GatewayRunner:
raw = progress_queue.get_nowait()
# Drain silently when interrupted: events queued in the
# window between tool parse and interrupt processing
# should not render as bubbles. The "⚡ Interrupting
# current task" message is sent separately and is the
# last progress-flavored bubble the user should see.
try:
_agent_for_interrupt = agent_holder[0] if agent_holder else None
if _agent_for_interrupt is not None and getattr(
_agent_for_interrupt, "is_interrupted", False
):
# Drop this event and continue draining.
await asyncio.sleep(0)
continue
except Exception:
pass
# Drain silently when interrupted: events queued in the
# window between tool parse and interrupt processing
# should not render as bubbles. The "⚡ Interrupting

View File

@ -854,81 +854,6 @@ class GatewayStreamConsumer:
self._final_response_sent = True
return True
def _should_send_fresh_final(self) -> bool:
"""Return True when a long-lived preview should be replaced with a
fresh final message instead of an edit.
Conditions:
- Fresh-final is enabled (``fresh_final_after_seconds > 0``).
- We have a real preview message id (not the ``__no_edit__`` sentinel
and not ``None``).
- The preview has been visible for at least the configured threshold.
Ported from openclaw/openclaw#72038.
"""
threshold = getattr(self.cfg, "fresh_final_after_seconds", 0.0) or 0.0
if threshold <= 0:
return False
if not self._message_id or self._message_id == "__no_edit__":
return False
if self._message_created_ts is None:
return False
age = time.monotonic() - self._message_created_ts
return age >= threshold
async def _try_fresh_final(self, text: str) -> bool:
"""Send ``text`` as a brand-new message (best-effort delete the old
preview) so the platform's visible timestamp reflects completion
time. Returns True on successful delivery, False on any failure so
the caller falls back to the normal edit path.
Ported from openclaw/openclaw#72038.
"""
old_message_id = self._message_id
try:
result = await self.adapter.send(
chat_id=self.chat_id,
content=text,
metadata=self.metadata,
)
except Exception as e:
logger.debug("Fresh-final send failed, falling back to edit: %s", e)
return False
if not getattr(result, "success", False):
return False
# Successful fresh send — try to delete the stale preview so the
# user doesn't see the old edit-stuck message underneath. Cleanup
# is best-effort; platforms that don't implement ``delete_message``
# just leave the preview behind (still an acceptable outcome —
# the visible final timestamp is the important part).
if old_message_id and old_message_id != "__no_edit__":
delete_fn = getattr(self.adapter, "delete_message", None)
if delete_fn is not None:
try:
await delete_fn(self.chat_id, old_message_id)
except Exception as e:
logger.debug(
"Fresh-final preview cleanup failed (%s): %s",
old_message_id, e,
)
# Adopt the new message id as the current message so subsequent
# callers (e.g. overflow split loops, finalize retries) see a
# consistent state.
new_message_id = getattr(result, "message_id", None)
if new_message_id:
self._message_id = new_message_id
self._message_created_ts = time.monotonic()
else:
# Send succeeded but platform didn't return an id — treat the
# delivery as final-only and fall back to "__no_edit__" so we
# don't try to edit something we can't address.
self._message_id = "__no_edit__"
self._message_created_ts = None
self._already_sent = True
self._last_sent_text = text
self._final_response_sent = True
return True
async def _send_or_edit(self, text: str, *, finalize: bool = False) -> bool:
"""Send or edit the streaming message.

View File

@ -255,14 +255,6 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
api_key_env_vars=("GMI_API_KEY",),
base_url_env_var="GMI_BASE_URL",
),
"gmi": ProviderConfig(
id="gmi",
name="GMI Cloud",
auth_type="api_key",
inference_base_url="https://api.gmi-serving.com/v1",
api_key_env_vars=("GMI_API_KEY",),
base_url_env_var="GMI_BASE_URL",
),
"minimax": ProviderConfig(
id="minimax",
name="MiniMax",
@ -422,14 +414,6 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
api_key_env_vars=("AZURE_FOUNDRY_API_KEY",),
base_url_env_var="AZURE_FOUNDRY_BASE_URL",
),
"azure-foundry": ProviderConfig(
id="azure-foundry",
name="Azure Foundry",
auth_type="api_key",
inference_base_url="", # User-provided endpoint
api_key_env_vars=("AZURE_FOUNDRY_API_KEY",),
base_url_env_var="AZURE_FOUNDRY_BASE_URL",
),
}

View File

@ -831,114 +831,6 @@ _SLACK_NAME_LIMIT = 32
_SLACK_INVALID_CHARS = re.compile(r"[^a-z0-9_\-]")
def _sanitize_slack_name(raw: str) -> str:
"""Convert a command name to a valid Slack slash command name.
Slack allows lowercase a-z, digits, hyphens, and underscores. Max 32
chars. Uppercase is lowercased; invalid chars are stripped.
"""
name = raw.lower()
name = _SLACK_INVALID_CHARS.sub("", name)
name = name.strip("-_")
return name[:_SLACK_NAME_LIMIT]
def slack_native_slashes() -> list[tuple[str, str, str]]:
"""Return (slash_name, description, usage_hint) triples for Slack.
Every gateway-available command in ``COMMAND_REGISTRY`` is surfaced as
a standalone Slack slash command (e.g. ``/btw``, ``/stop``, ``/model``),
matching Discord's and Telegram's model where every command is a
first-class slash and not a ``/hermes <verb>`` subcommand.
Both canonical names and aliases are included so users can type any
documented form (e.g. ``/background``, ``/bg``, and ``/btw`` all work).
Plugin-registered slash commands are included too.
Results are clamped to Slack's 50-command limit with duplicate-name
avoidance. ``/hermes`` is always reserved as the first entry so the
legacy ``/hermes <subcommand>`` form keeps working for anything that
gets dropped by the clamp or for free-form questions.
"""
overrides = _resolve_config_gates()
entries: list[tuple[str, str, str]] = []
seen: set[str] = set()
# Reserve /hermes as the catch-all top-level command.
entries.append(("hermes", "Talk to Hermes or run a subcommand", "[subcommand] [args]"))
seen.add("hermes")
def _add(name: str, desc: str, hint: str) -> None:
slack_name = _sanitize_slack_name(name)
if not slack_name or slack_name in seen:
return
if len(entries) >= _SLACK_MAX_SLASH_COMMANDS:
return
# Slack description cap is 2000 chars; keep it short.
entries.append((slack_name, desc[:140], hint[:100]))
seen.add(slack_name)
# First pass: canonical names (so they win slots if we hit the cap).
for cmd in COMMAND_REGISTRY:
if not _is_gateway_available(cmd, overrides):
continue
_add(cmd.name, cmd.description, cmd.args_hint or "")
# Second pass: aliases.
for cmd in COMMAND_REGISTRY:
if not _is_gateway_available(cmd, overrides):
continue
for alias in cmd.aliases:
# Skip aliases that only differ from canonical by case/punctuation
# normalization (already covered by _add dedup).
_add(alias, f"Alias for /{cmd.name} — {cmd.description}", cmd.args_hint or "")
# Third pass: plugin commands.
for name, description, args_hint in _iter_plugin_command_entries():
_add(name, description, args_hint or "")
return entries
def slack_app_manifest(request_url: str = "https://hermes-agent.local/slack/commands") -> dict[str, Any]:
"""Generate a Slack app manifest with all gateway commands as slashes.
``request_url`` is required by Slack's manifest schema for every slash
command, but in Socket Mode (which we use) Slack ignores it and routes
the command event through the WebSocket. A placeholder URL is fine.
The returned dict is the ``features.slash_commands`` portion only —
callers compose it into a full manifest (or merge into an existing
one). Keeping it narrow avoids coupling us to the rest of the manifest
schema (display_information, oauth_config, settings, etc.) which users
set up once in the Slack UI and rarely change.
"""
slashes = []
for name, desc, usage in slack_native_slashes():
entry = {
"command": f"/{name}",
"description": desc or f"Run /{name}",
"should_escape": False,
"url": request_url,
}
if usage:
entry["usage_hint"] = usage
slashes.append(entry)
return {"features": {"slash_commands": slashes}}
# ---------------------------------------------------------------------------
# Slack native slash commands
# ---------------------------------------------------------------------------
# Slack slash command name constraints: lowercase a-z, 0-9, hyphens,
# underscores. Max 32 chars. Slack app manifest accepts up to 50 slash
# commands per app.
_SLACK_MAX_SLASH_COMMANDS = 50
_SLACK_NAME_LIMIT = 32
_SLACK_INVALID_CHARS = re.compile(r"[^a-z0-9_\-]")
def _sanitize_slack_name(raw: str) -> str:
"""Convert a command name to a valid Slack slash command name.

View File

@ -1808,44 +1808,6 @@ OPTIONAL_ENV_VARS = {
"advanced": True,
},
# ── Bundled skills (opt-in: only needed if the user uses that skill) ──
# These use category="skill" (distinct from "tool") so the sandbox
# env blocklist in tools/environments/local.py does NOT rewrite them —
# skills legitimately need these passed through to curl via
# tools/env_passthrough.py when the user's skill calls out.
"NOTION_API_KEY": {
"description": "Notion integration token (used by the `notion` skill)",
"prompt": "Notion API key",
"url": "https://www.notion.so/my-integrations",
"password": True,
"category": "skill",
"advanced": True,
},
"LINEAR_API_KEY": {
"description": "Linear personal API key (used by the `linear` skill)",
"prompt": "Linear API key",
"url": "https://linear.app/settings/api",
"password": True,
"category": "skill",
"advanced": True,
},
"AIRTABLE_API_KEY": {
"description": "Airtable personal access token (used by the `airtable` skill)",
"prompt": "Airtable API key",
"url": "https://airtable.com/create/tokens",
"password": True,
"category": "skill",
"advanced": True,
},
"TENOR_API_KEY": {
"description": "Tenor API key for GIF search (used by the `gif-search` skill)",
"prompt": "Tenor API key",
"url": "https://developers.google.com/tenor/guides/quickstart",
"password": True,
"category": "skill",
"advanced": True,
},
# ── Honcho ──
"HONCHO_API_KEY": {
"description": "Honcho API key for AI-native persistent memory",
@ -2655,71 +2617,6 @@ def get_custom_provider_context_length(
return None
def get_custom_provider_context_length(
model: str,
base_url: str,
custom_providers: Optional[List[Dict[str, Any]]] = None,
config: Optional[Dict[str, Any]] = None,
) -> Optional[int]:
"""Look up a per-model ``context_length`` override from ``custom_providers``.
Matches any entry whose ``base_url`` equals ``base_url`` (trailing-slash
insensitive) and returns ``custom_providers[i].models.<model>.context_length``
if present and valid. Returns ``None`` when no override applies.
This is the single source of truth for custom-provider context overrides,
used by:
* ``AIAgent.__init__`` (startup resolution)
* ``AIAgent.switch_model`` (mid-session ``/model`` switch)
* ``hermes_cli.model_switch.resolve_display_context_length`` (``/model`` confirmation display)
* ``gateway.run._format_session_info`` (``/info`` display)
* ``agent.model_metadata.get_model_context_length`` (when custom_providers is threaded through)
Before this helper existed, the lookup was duplicated in ``run_agent.py``'s
startup path only; every other path (notably ``/model`` switch) fell back
to the 128K default. See #15779.
"""
if not model or not base_url:
return None
if custom_providers is None:
try:
custom_providers = get_compatible_custom_providers(config)
except Exception:
if config is None:
return None
raw = config.get("custom_providers")
custom_providers = raw if isinstance(raw, list) else []
if not isinstance(custom_providers, list):
return None
target_url = (base_url or "").rstrip("/")
if not target_url:
return None
for entry in custom_providers:
if not isinstance(entry, dict):
continue
entry_url = (entry.get("base_url") or "").rstrip("/")
if not entry_url or entry_url != target_url:
continue
models = entry.get("models")
if not isinstance(models, dict):
continue
model_cfg = models.get(model)
if not isinstance(model_cfg, dict):
continue
raw_ctx = model_cfg.get("context_length")
if raw_ctx is None:
continue
try:
ctx = int(raw_ctx)
except (TypeError, ValueError):
continue
if ctx > 0:
return ctx
return None
def check_config_version() -> Tuple[int, int]:
"""
Check config version.

View File

@ -3295,12 +3295,6 @@ def _setup_yuanbao():
_setup_standard_platform(yuanbao_platform)
def _setup_yuanbao():
"""Configure Yuanbao via the standard platform setup."""
yuanbao_platform = next(p for p in _PLATFORMS if p["key"] == "yuanbao")
_setup_standard_platform(yuanbao_platform)
def _is_service_installed() -> bool:
"""Check if the gateway is installed as a system service."""
if supports_systemd_services():

View File

@ -337,14 +337,6 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"anthropic/claude-sonnet-4.6",
"openai/gpt-5.4",
],
"gmi": [
"zai-org/GLM-5.1-FP8",
"deepseek-ai/DeepSeek-V3.2",
"moonshotai/Kimi-K2.5",
"google/gemini-3.1-flash-lite-preview",
"anthropic/claude-sonnet-4.6",
"openai/gpt-5.4",
],
"opencode-zen": [
"kimi-k2.5",
"gpt-5.4-pro",

View File

@ -1855,16 +1855,6 @@ class AIAgent:
if not isinstance(_custom_providers, list):
_custom_providers = []
# Resolve custom_providers list once for reuse below (startup
# context-length override and plugin context-engine init).
try:
from hermes_cli.config import get_compatible_custom_providers
_custom_providers = get_compatible_custom_providers(_agent_cfg)
except Exception:
_custom_providers = _agent_cfg.get("custom_providers")
if not isinstance(_custom_providers, list):
_custom_providers = []
# Check custom_providers per-model context_length
if _config_context_length is None and _custom_providers:
try:
@ -4788,9 +4778,6 @@ class AIAgent:
# Pointer to the hermes-agent skill + docs for user questions about Hermes itself.
prompt_parts.append(HERMES_AGENT_HELP_GUIDANCE)
# Pointer to the hermes-agent skill + docs for user questions about Hermes itself.
prompt_parts.append(HERMES_AGENT_HELP_GUIDANCE)
# Tool-aware behavioral guidance: only inject when the tools are loaded
tool_guidance = []
if "memory" in self.valid_tool_names:
@ -8586,13 +8573,6 @@ class AIAgent:
if codex_message_items:
msg["codex_message_items"] = codex_message_items
# Codex Responses API: preserve exact assistant message items (with
# id/phase) so follow-up turns can replay structured items instead of
# flattening to plain text. This is required for prefix cache hits.
codex_message_items = getattr(assistant_message, "codex_message_items", None)
if codex_message_items:
msg["codex_message_items"] = codex_message_items
if assistant_message.tool_calls:
tool_calls = []
for tool_call in assistant_message.tool_calls:

View File

@ -235,17 +235,6 @@ class TestExtractAttachments(unittest.TestCase):
mock_cache.assert_called_once()
class TestCronDelivery(unittest.TestCase):
"""Verify email in cron scheduler platform_map."""
def test_email_resolves_for_cron(self):
"""Email platform resolves via Platform() for cron delivery."""
from gateway.config import Platform
p = Platform("email")
self.assertEqual(p, Platform.EMAIL)
self.assertEqual(p.value, "email")
class TestDispatchMessage(unittest.TestCase):
"""Test email message dispatch logic."""

View File

@ -552,19 +552,6 @@ class TestResolveApiKeyProviderCredentials:
creds = resolve_api_key_provider_credentials("gmi")
assert creds["base_url"] == "https://custom.gmi.example/v1"
def test_resolve_gmi_with_key(self, monkeypatch):
monkeypatch.setenv("GMI_API_KEY", "gmi-secret-key")
creds = resolve_api_key_provider_credentials("gmi")
assert creds["provider"] == "gmi"
assert creds["api_key"] == "gmi-secret-key"
assert creds["base_url"] == "https://api.gmi-serving.com/v1"
def test_resolve_gmi_custom_base_url(self, monkeypatch):
monkeypatch.setenv("GMI_API_KEY", "gmi-key")
monkeypatch.setenv("GMI_BASE_URL", "https://custom.gmi.example/v1")
creds = resolve_api_key_provider_credentials("gmi")
assert creds["base_url"] == "https://custom.gmi.example/v1"
def test_resolve_kilocode_custom_base_url(self, monkeypatch):
monkeypatch.setenv("KILOCODE_API_KEY", "kilo-key")
monkeypatch.setenv("KILOCODE_BASE_URL", "https://custom.kilo.example/v1")

View File

@ -430,43 +430,6 @@ def test_run_doctor_accepts_hermes_provider_ids_that_catalog_aliases(
)
def test_run_doctor_accepts_bare_custom_provider(monkeypatch, tmp_path):
home = tmp_path / ".hermes"
home.mkdir(parents=True, exist_ok=True)
(home / "config.yaml").write_text(
"model:\n"
" provider: custom\n"
" default: local-model\n"
" base_url: http://localhost:8000/v1\n",
encoding="utf-8",
)
monkeypatch.setattr(doctor_mod, "HERMES_HOME", home)
monkeypatch.setattr(doctor_mod, "PROJECT_ROOT", tmp_path / "project")
monkeypatch.setattr(doctor_mod, "_DHH", str(home))
(tmp_path / "project").mkdir(exist_ok=True)
fake_model_tools = types.SimpleNamespace(
check_tool_availability=lambda *a, **kw: ([], []),
TOOLSET_REQUIREMENTS={},
)
monkeypatch.setitem(sys.modules, "model_tools", fake_model_tools)
try:
from hermes_cli import auth as _auth_mod
monkeypatch.setattr(_auth_mod, "get_nous_auth_status", lambda: {})
monkeypatch.setattr(_auth_mod, "get_codex_auth_status", lambda: {})
except Exception:
pass
buf = io.StringIO()
with contextlib.redirect_stdout(buf):
doctor_mod.run_doctor(Namespace(fix=False))
out = buf.getvalue()
assert "model.provider 'custom' is not a recognised provider" not in out
def test_run_doctor_termux_does_not_mark_browser_available_without_agent_browser(monkeypatch, tmp_path):
home = tmp_path / ".hermes"
home.mkdir(parents=True, exist_ok=True)

View File

@ -479,69 +479,6 @@ class TestAzureFoundryModelApiMode:
assert azure_foundry_model_api_mode("Codex-Mini") == "codex_responses"
class TestAzureFoundryModelApiMode:
"""Azure Foundry deploys GPT-5.x / codex / o-series as Responses-API-only.
Azure returns ``400 "The requested operation is unsupported."`` when
/chat/completions is called against these deployments. Verified in the
wild by a user debug bundle on 2026-04-26: gpt-5.3-codex failed with
that exact payload while gpt-4o-pure worked on the same endpoint.
"""
def test_gpt5_family_uses_responses(self):
assert azure_foundry_model_api_mode("gpt-5") == "codex_responses"
assert azure_foundry_model_api_mode("gpt-5.3") == "codex_responses"
assert azure_foundry_model_api_mode("gpt-5.4") == "codex_responses"
assert azure_foundry_model_api_mode("gpt-5-codex") == "codex_responses"
assert azure_foundry_model_api_mode("gpt-5.3-codex") == "codex_responses"
# gpt-5-mini exceptions are Copilot-specific; Azure deploys the whole
# gpt-5 family on Responses API uniformly.
assert azure_foundry_model_api_mode("gpt-5-mini") == "codex_responses"
def test_codex_family_uses_responses(self):
assert azure_foundry_model_api_mode("codex") == "codex_responses"
assert azure_foundry_model_api_mode("codex-mini") == "codex_responses"
def test_o_series_reasoning_uses_responses(self):
assert azure_foundry_model_api_mode("o1") == "codex_responses"
assert azure_foundry_model_api_mode("o1-preview") == "codex_responses"
assert azure_foundry_model_api_mode("o1-mini") == "codex_responses"
assert azure_foundry_model_api_mode("o3") == "codex_responses"
assert azure_foundry_model_api_mode("o3-mini") == "codex_responses"
assert azure_foundry_model_api_mode("o4-mini") == "codex_responses"
def test_gpt4_family_returns_none(self):
"""GPT-4, GPT-4o, etc. speak chat completions on Azure."""
assert azure_foundry_model_api_mode("gpt-4") is None
assert azure_foundry_model_api_mode("gpt-4o") is None
assert azure_foundry_model_api_mode("gpt-4o-pure") is None
assert azure_foundry_model_api_mode("gpt-4o-mini") is None
assert azure_foundry_model_api_mode("gpt-4-turbo") is None
assert azure_foundry_model_api_mode("gpt-4.1") is None
assert azure_foundry_model_api_mode("gpt-3.5-turbo") is None
def test_non_openai_deployments_return_none(self):
"""Llama, Mistral, Grok, etc. keep the default chat completions."""
assert azure_foundry_model_api_mode("llama-3.1-70b") is None
assert azure_foundry_model_api_mode("mistral-large") is None
assert azure_foundry_model_api_mode("grok-4") is None
assert azure_foundry_model_api_mode("phi-3-medium") is None
def test_vendor_prefix_stripped(self):
"""Users who copy-paste ``openai/gpt-5.3-codex`` should still match."""
assert azure_foundry_model_api_mode("openai/gpt-5.3-codex") == "codex_responses"
assert azure_foundry_model_api_mode("openai/gpt-4o") is None
def test_empty_and_none_return_none(self):
assert azure_foundry_model_api_mode(None) is None
assert azure_foundry_model_api_mode("") is None
assert azure_foundry_model_api_mode(" ") is None
def test_case_insensitive(self):
assert azure_foundry_model_api_mode("GPT-5.3-Codex") == "codex_responses"
assert azure_foundry_model_api_mode("Codex-Mini") == "codex_responses"
# -- validate — format checks -----------------------------------------------
class TestValidateFormatChecks:

View File

@ -569,28 +569,6 @@ class TestToolHandlers:
first_client.arecall.assert_called_once()
second_client.arecall.assert_called_once()
def test_local_embedded_recall_reconnects_after_idle_shutdown(self, provider, monkeypatch):
first_client = _make_mock_client()
first_client.arecall.side_effect = RuntimeError("Cannot connect to host 127.0.0.1:8888")
second_client = _make_mock_client()
second_client.arecall.return_value = SimpleNamespace(
results=[SimpleNamespace(text="Recovered memory")]
)
clients = iter([first_client, second_client])
provider._mode = "local_embedded"
provider._client = first_client
monkeypatch.setattr(provider, "_get_client", lambda: next(clients))
result = json.loads(provider.handle_tool_call(
"hindsight_recall", {"query": "test"}
))
assert result["result"] == "1. Recovered memory"
assert provider._client is second_client
first_client.arecall.assert_called_once()
second_client.arecall.assert_called_once()
# ---------------------------------------------------------------------------
# Prefetch tests

View File

@ -1535,24 +1535,6 @@ class TestBuildAssistantMessage:
assert "<memory-context>" in result["content"]
assert "Visible answer" in result["content"]
def test_memory_context_in_stored_content_is_preserved(self, agent):
"""`_build_assistant_message` must not silently mutate model output
containing literal <memory-context> markers that's legitimate text
(e.g. documentation, code) that the model may emit. Streaming-path
leak prevention is handled by StreamingContextScrubber upstream."""
original = (
"<memory-context>\n"
"[System note: The following is recalled memory context, NOT new user input. Treat as informational background data.]\n\n"
"## Honcho Context\n"
"stale memory\n"
"</memory-context>\n\n"
"Visible answer"
)
msg = _mock_assistant_msg(content=original)
result = agent._build_assistant_message(msg, "stop")
assert "<memory-context>" in result["content"]
assert "Visible answer" in result["content"]
def test_unterminated_think_block_stripped(self, agent):
"""Unterminated <think> block (MiniMax / NIM dropped close tag) is
fully stripped from stored content."""

View File

@ -57,32 +57,6 @@ def _run_steps(dockerfile_text: str) -> list[str]:
]
def _dockerfile_instructions(dockerfile_text: str) -> list[str]:
instructions: list[str] = []
current = ""
for raw_line in dockerfile_text.splitlines():
line = raw_line.strip()
if not line or line.startswith("#"):
continue
continued = line.removesuffix("\\").strip()
current = f"{current} {continued}".strip()
if not line.endswith("\\"):
instructions.append(current)
current = ""
return instructions
def _run_steps(dockerfile_text: str) -> list[str]:
return [
instruction
for instruction in _dockerfile_instructions(dockerfile_text)
if instruction.startswith("RUN ")
]
def test_dockerfile_installs_an_init_for_zombie_reaping(dockerfile_text):
"""Some init (tini, dumb-init, catatonit) must be installed.
@ -131,26 +105,6 @@ def test_dockerfile_entrypoint_routes_through_the_init(dockerfile_text):
)
def test_dockerfile_installs_tui_dependencies(dockerfile_text):
assert "ui-tui/package.json" in dockerfile_text
assert "ui-tui/packages/hermes-ink/package-lock.json" in dockerfile_text
assert any(
"ui-tui" in step
and "npm" in step
and (" install" in step or " ci" in step)
for step in _run_steps(dockerfile_text)
)
def test_dockerfile_builds_tui_assets(dockerfile_text):
assert any(
"ui-tui" in step
and "npm" in step
and "run build" in step
for step in _run_steps(dockerfile_text)
)
def test_dockerfile_installs_tui_dependencies(dockerfile_text):
assert "ui-tui/package.json" in dockerfile_text
assert "ui-tui/packages/hermes-ink/package-lock.json" in dockerfile_text

View File

@ -207,101 +207,6 @@ def _hardline_block_result(description: str) -> dict:
}
# =========================================================================
# Hardline (unconditional) blocklist
# =========================================================================
#
# Commands so catastrophic they should NEVER run via the agent, regardless
# of --yolo, /yolo, approvals.mode=off, or cron approve mode. This is a
# floor below yolo: opting into yolo is the user trusting the agent with
# their files and services, not trusting it to wipe the disk or power the
# box off.
#
# Hardline only applies to environments that can actually damage the host
# (local, ssh, container-host cron). Containerized backends (docker,
# singularity, modal, daytona) already bypass the dangerous-command layer
# because nothing they do can touch the host, so we leave that behavior
# alone.
#
# The list is deliberately tiny — only things with no recovery path:
# filesystem destruction rooted at /, raw block device overwrites, kernel
# shutdown/reboot, and denial-of-service commands that take the host down.
# Recoverable-but-costly operations (git reset --hard, rm -rf /tmp/x,
# chmod -R 777, curl|sh) stay in DANGEROUS_PATTERNS where yolo can pass
# them through — that's what yolo is for.
#
# Inspired by Mercury Agent's permission-hardened blocklist
# (https://github.com/cosmicstack-labs/mercury-agent).
# Regex fragment matching the *start* of a command (i.e. positions where
# a shell would begin parsing a new command). Used by shutdown/reboot
# patterns so they don't fire on "echo reboot" or "grep 'shutdown' log".
# Matches: start of string, after command separators (; && || | newline),
# after subshell openers ( `$(` or backtick ), optionally consuming
# leading wrapper commands (sudo, env VAR=VAL, exec, nohup, setsid, time).
_CMDPOS = (
    r'(?:^|[;&|\n`]|\$\()' # start position: string start, separator, or subshell opener
    r'\s*' # optional whitespace
    r'(?:sudo\s+(?:-[^\s]+\s+)*)?' # optional sudo with flags
    r'(?:env\s+(?:\w+=\S*\s+)*)?' # optional env with VAR=VAL pairs
    r'(?:(?:exec|nohup|setsid|time)\s+)*' # optional wrapper commands (repeatable)
    r'\s*'
)
# (regex, human-readable description) pairs. detect_hardline_command scans
# them in order with re.IGNORECASE | re.DOTALL against a normalized,
# lowercased command and reports the first match's description.
HARDLINE_PATTERNS = [
    # rm aimed at the root filesystem or protected roots. Note the pattern
    # accepts any flag list (including none), so a plain "rm /" matches too.
    (r'\brm\s+(-[^\s]*\s+)*(/|/\*|/ \*)(\s|$)', "recursive delete of root filesystem"),
    (r'\brm\s+(-[^\s]*\s+)*(/home|/home/\*|/root|/root/\*|/etc|/etc/\*|/usr|/usr/\*|/var|/var/\*|/bin|/bin/\*|/sbin|/sbin/\*|/boot|/boot/\*|/lib|/lib/\*)(\s|$)', "recursive delete of system directory"),
    (r'\brm\s+(-[^\s]*\s+)*(~|\$HOME)(/?|/\*)?(\s|$)', "recursive delete of home directory"),
    # Filesystem format
    (r'\bmkfs(\.[a-z0-9]+)?\b', "format filesystem (mkfs)"),
    # Raw block device overwrites (dd + redirection)
    (r'\bdd\b[^\n]*\bof=/dev/(sd|nvme|hd|mmcblk|vd|xvd)[a-z0-9]*', "dd to raw block device"),
    (r'>\s*/dev/(sd|nvme|hd|mmcblk|vd|xvd)[a-z0-9]*\b', "redirect to raw block device"),
    # Fork bomb (classic shell form)
    (r':\(\)\s*\{\s*:\s*\|\s*:\s*&\s*\}\s*;\s*:', "fork bomb"),
    # Kill every process on the system
    (r'\bkill\s+(-[^\s]+\s+)*-1\b', "kill all processes"),
    # System shutdown / reboot — anchor to command position (start of line,
    # after a command separator, or after sudo/env wrappers) so we don't
    # false-positive on "echo reboot" or "grep 'shutdown' logs".
    # _CMDPOS matches start-of-command positions.
    (_CMDPOS + r'(shutdown|reboot|halt|poweroff)\b', "system shutdown/reboot"),
    (_CMDPOS + r'init\s+[06]\b', "init 0/6 (shutdown/reboot)"),
    (_CMDPOS + r'systemctl\s+(poweroff|reboot|halt|kexec)\b', "systemctl poweroff/reboot"),
    (_CMDPOS + r'telinit\s+[06]\b', "telinit 0/6 (shutdown/reboot)"),
]
def detect_hardline_command(command: str) -> tuple:
    """Match *command* against the unconditional hardline blocklist.

    Returns:
        ``(True, description)`` for the first matching pattern in
        ``HARDLINE_PATTERNS``, otherwise ``(False, None)``.
    """
    haystack = _normalize_command_for_detection(command).lower()
    flags = re.IGNORECASE | re.DOTALL
    hit = next(
        (desc for pat, desc in HARDLINE_PATTERNS if re.search(pat, haystack, flags)),
        None,
    )
    return (hit is not None, hit)
def _hardline_block_result(description: str) -> dict:
"""Build the standard block result for a hardline match."""
return {
"approved": False,
"hardline": True,
"message": (
f"BLOCKED (hardline): {description}. "
"This command is on the unconditional blocklist and cannot "
"be executed via the agent — not even with --yolo, /yolo, "
"approvals.mode=off, or cron approve mode. If you genuinely "
"need to run it, run it yourself in a terminal outside the "
"agent."
),
}
# =========================================================================
# Dangerous command patterns
# =========================================================================
@ -890,16 +795,6 @@ def check_dangerous_command(command: str, env_type: str,
logger.warning("Hardline block: %s (command: %s)", hardline_desc, command[:200])
return _hardline_block_result(hardline_desc)
# Hardline floor: commands with no recovery path (rm -rf /, mkfs, dd
# to raw device, shutdown/reboot, fork bomb, kill -1) are blocked
# unconditionally, BEFORE the yolo bypass. Opting into yolo is
# trusting the agent with your files and services, not trusting it
# to wipe the disk or power the box off.
is_hardline, hardline_desc = detect_hardline_command(command)
if is_hardline:
logger.warning("Hardline block: %s (command: %s)", hardline_desc, command[:200])
return _hardline_block_result(hardline_desc)
# --yolo: bypass all approval prompts. Gateway /yolo is session-scoped;
# CLI --yolo remains process-scoped via the env var for local use.
if os.getenv("HERMES_YOLO_MODE") or is_current_session_yolo_enabled():

View File

@ -1794,13 +1794,6 @@ _stdio_pids: Dict[int, str] = {} # pid -> server_name
# sessions (e.g. concurrent cron jobs or live user chats).
_orphan_stdio_pids: set = set()
# PIDs that survived their session context exit (SDK teardown failed to
# terminate them). These are detected in _run_stdio's finally block and
# can be cleaned up asynchronously by _kill_orphaned_mcp_children().
# Separate from _stdio_pids so cleanup sweeps never race with active
# sessions (e.g. concurrent cron jobs or live user chats).
_orphan_stdio_pids: set = set()
def _snapshot_child_pids() -> set:
"""Return a set of current child process PIDs.

View File

@ -30,7 +30,7 @@ export { useTerminalFocus } from './src/ink/hooks/use-terminal-focus.ts'
export { useTerminalTitle } from './src/ink/hooks/use-terminal-title.ts'
export { useTerminalViewport } from './src/ink/hooks/use-terminal-viewport.ts'
export { default as measureElement } from './src/ink/measure-element.ts'
export { createRoot, forceRedraw, default as render, renderSync } from './src/ink/root.ts'
export type { Instance, RenderOptions, Root } from './src/ink/root.ts'
export { stringWidth } from './src/ink/stringWidth.ts'
export { default as TextInput, UncontrolledTextInput } from 'ink-text-input'

View File

@ -23,7 +23,7 @@ export { useTerminalTitle } from './ink/hooks/use-terminal-title.js'
export { useTerminalViewport } from './ink/hooks/use-terminal-viewport.js'
export { default as measureElement } from './ink/measure-element.js'
export { scrollFastPathStats, type ScrollFastPathStats } from './ink/render-node-to-output.js'
export { createRoot, forceRedraw, default as render, renderSync } from './ink/root.js'
export { stringWidth } from './ink/stringWidth.js'
export { isXtermJs } from './ink/terminal.js'
export { default as TextInput, UncontrolledTextInput } from 'ink-text-input'

View File

@ -893,43 +893,6 @@ function selectionContentBounds(
return { first, last }
}
/**
 * True when the cell at (row, col) can take part in a text selection:
 * not flagged no-select, actually written, present, and not a wide-glyph
 * spacer cell (head or tail).
 */
function selectableCell(screen: Screen, row: number, col: number): boolean {
  const cell = cellAt(screen, col, row)
  const noSelect = screen.noSelect[row * screen.width + col] === 1
  if (noSelect || !isWrittenCellAt(screen, col, row) || !cell) {
    return false
  }
  return cell.width !== CellWidth.SpacerTail && cell.width !== CellWidth.SpacerHead
}
/**
 * Narrow the inclusive column range [start, end] on one screen row down
 * to the span of selectable cells, scanning inward from both edges.
 * Returns null when no cell in the range is selectable.
 */
function selectionContentBounds(
  screen: Screen,
  row: number,
  start: number,
  end: number
): { first: number; last: number } | null {
  let lo = start
  while (lo <= end && !selectableCell(screen, row, lo)) {
    lo += 1
  }
  if (lo > end) {
    // Nothing selectable anywhere in the range.
    return null
  }
  let hi = end
  while (hi >= lo && !selectableCell(screen, row, hi)) {
    hi -= 1
  }
  return { first: lo, last: hi }
}
/** Extract text from one screen row. When the next row is a soft-wrap
* continuation (screen.softWrap[row+1]>0), clamp to that content-end
* column and skip the trailing trim so the word-separator space survives

View File

@ -41,10 +41,10 @@ export interface SelectionApi {
captureScrolledRows: (firstRow: number, lastRow: number, side: 'above' | 'below') => void
clearSelection: () => void
copySelection: () => Promise<string>
copySelectionNoClear: () => Promise<string>
getState: () => unknown
version: () => number
shiftAnchor: (dRow: number, minRow: number, maxRow: number) => void
shiftSelection: (dRow: number, minRow: number, maxRow: number) => void
}

View File

@ -437,39 +437,6 @@ export const coreCommands: SlashCommand[] = [
}
},
{
help: 'save the current transcript to JSON',
name: 'save',
run: (_arg, ctx) => {
const hasConversation = ctx.local
.getHistoryItems()
.some(m => m.role === 'user' || m.role === 'assistant' || m.role === 'tool')
if (!hasConversation) {
return ctx.transcript.sys('no conversation yet')
}
if (!ctx.sid) {
return ctx.transcript.sys('no active session — nothing to save')
}
ctx.gateway
.rpc<SessionSaveResponse>('session.save', { session_id: ctx.sid })
.then(
ctx.guarded<SessionSaveResponse>(r => {
const file = r?.file
if (file) {
ctx.transcript.sys(`conversation saved to: ${file}`)
} else {
ctx.transcript.sys('failed to save')
}
})
)
.catch(ctx.guardedErr)
}
},
{
aliases: ['sb'],
help: 'status bar position (on|off|top|bottom)',

View File

@ -904,21 +904,6 @@ export const ToolTrail = memo(function ToolTrail({
)
}
// Render a tool group's label. When splitToolDuration finds a trailing
// duration suffix in the content, show it dimmed after the label;
// otherwise fall back to the raw group content unchanged.
const toolLabel = (group: Group) => {
  const { duration, label } = splitToolDuration(String(group.content))
  return duration ? (
    <>
      {label}
      <Text color={t.color.statusFg} dim>
        {duration}
      </Text>
    </>
  ) : (
    group.content
  )
}
// ── Backstop: floating alerts when every panel is hidden ─────────
//
// Per-section overrides win over the global details_mode (they're computed

View File

@ -1,5 +1,3 @@
import { evictInkCaches } from '@hermes/ink'
import { type HeapDumpResult, performHeapDump } from './memory.js'
export type MemoryLevel = 'critical' | 'high' | 'normal'
@ -73,10 +71,6 @@ export function startMemoryMonitor({
return
}
// Prune Ink content caches before dump/exit — half on 'high' (recoverable),
// full on 'critical' (post-dump RSS reduction, keeps user running).
evictInkCaches(level === 'critical' ? 'all' : 'half')
if (dumped.has(level) || inFlight.has(level)) {
return
}

View File

@ -142,12 +142,12 @@ export interface McpServerStatus {
export interface SessionInfo {
cwd?: string
fast?: boolean
lazy?: boolean
mcp_servers?: McpServerStatus[]
model: string
reasoning_effort?: string
release_date?: string
service_tier?: string
skills: Record<string, string[]>
tools: Record<string, string[]>

View File

@ -59,32 +59,6 @@ declare module '@hermes/ink' {
}>
}
/**
 * Per-frame render telemetry: the total frame duration, optional
 * per-phase timing/size counters, and a list of flicker events carrying
 * the heights and reason involved (presumably height-mismatch redraws —
 * confirm against the renderer that emits these).
 */
export type FrameEvent = {
  readonly durationMs: number
  readonly phases?: {
    readonly renderer: number
    readonly diff: number
    readonly optimize: number
    readonly write: number
    readonly patches: number
    readonly optimizedPatches: number
    readonly writeBytes: number
    readonly backpressure: boolean
    readonly prevFrameDrainMs: number
    readonly yoga: number
    readonly commit: number
    readonly yogaVisited: number
    readonly yogaMeasured: number
    readonly yogaCacheHits: number
    readonly yogaLive: number
  }
  readonly flickers: ReadonlyArray<{
    readonly desiredHeight: number
    readonly availableHeight: number
    readonly reason: 'resize' | 'offscreen' | 'clear'
  }>
}
export type RenderOptions = {
readonly stdin?: NodeJS.ReadStream
readonly stdout?: NodeJS.WriteStream