Merge pull request #15926 from NousResearch/bb/tui-long-session-perf

perf(tui): stabilize long-session scrolling
This commit is contained in:
brooklyn! 2026-04-26 23:10:08 -05:00 committed by GitHub
commit e63929d4f3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
100 changed files with 6824 additions and 662 deletions

View File

@ -1043,6 +1043,7 @@ def _launch_tui(
)
env.setdefault("HERMES_PYTHON", sys.executable)
env.setdefault("HERMES_CWD", os.getcwd())
env.setdefault("NODE_ENV", "development" if tui_dev else "production")
if model:
env["HERMES_MODEL"] = model
env["HERMES_INFERENCE_MODEL"] = model

View File

@ -2327,16 +2327,14 @@ def _resolve_chat_argv(
from hermes_cli.main import PROJECT_ROOT, _make_tui_argv
argv, cwd = _make_tui_argv(PROJECT_ROOT / "ui-tui", tui_dev=False)
env: Optional[dict] = None
env = os.environ.copy()
env.setdefault("NODE_ENV", "production")
if resume or sidecar_url:
env = os.environ.copy()
if resume:
env["HERMES_TUI_RESUME"] = resume
if resume:
env["HERMES_TUI_RESUME"] = resume
if sidecar_url:
env["HERMES_TUI_SIDECAR_URL"] = sidecar_url
if sidecar_url:
env["HERMES_TUI_SIDECAR_URL"] = sidecar_url
return list(argv), str(cwd) if cwd else None, env

View File

@ -1132,20 +1132,27 @@ class SessionDB:
current = child_id
return session_id
def get_messages_as_conversation(self, session_id: str) -> List[Dict[str, Any]]:
def get_messages_as_conversation(
self, session_id: str, include_ancestors: bool = False
) -> List[Dict[str, Any]]:
"""
Load messages in the OpenAI conversation format (role + content dicts).
Used by the gateway to restore conversation history.
"""
session_ids = [session_id]
if include_ancestors:
session_ids = self._session_lineage_root_to_tip(session_id)
with self._lock:
cursor = self._conn.execute(
placeholders = ",".join("?" for _ in session_ids)
rows = self._conn.execute(
"SELECT role, content, tool_call_id, tool_calls, tool_name, "
"reasoning, reasoning_content, reasoning_details, codex_reasoning_items, "
"codex_message_items "
"FROM messages WHERE session_id = ? ORDER BY timestamp, id",
(session_id,),
)
rows = cursor.fetchall()
f"FROM messages WHERE session_id IN ({placeholders}) ORDER BY timestamp, id",
tuple(session_ids),
).fetchall()
messages = []
for row in rows:
msg = {"role": row["role"], "content": row["content"]}
@ -1185,9 +1192,47 @@ class SessionDB:
except (json.JSONDecodeError, TypeError):
logger.warning("Failed to deserialize codex_message_items, falling back to None")
msg["codex_message_items"] = None
if include_ancestors and self._is_duplicate_replayed_user_message(messages, msg):
continue
messages.append(msg)
return messages
def _session_lineage_root_to_tip(self, session_id: str) -> List[str]:
if not session_id:
return [session_id]
chain = []
current = session_id
seen = set()
with self._lock:
for _ in range(100):
if not current or current in seen:
break
seen.add(current)
chain.append(current)
row = self._conn.execute(
"SELECT parent_session_id FROM sessions WHERE id = ?",
(current,),
).fetchone()
if row is None:
break
current = row["parent_session_id"] if hasattr(row, "keys") else row[0]
return list(reversed(chain)) or [session_id]
@staticmethod
def _is_duplicate_replayed_user_message(messages: List[Dict[str, Any]], msg: Dict[str, Any]) -> bool:
if msg.get("role") != "user":
return False
content = msg.get("content")
if not isinstance(content, str) or not content:
return False
for prev in reversed(messages):
if prev.get("role") == "user" and prev.get("content") == content:
return True
if prev.get("role") == "assistant" and (prev.get("content") or prev.get("tool_calls")):
return False
return False
# =========================================================================
# Search
# =========================================================================

View File

@ -4,7 +4,7 @@ let
src = ../ui-tui;
npmDeps = pkgs.fetchNpmDeps {
inherit src;
hash = "sha256-RU4qSHgJPMyfRSEJDzkG4+MReDZDc6QbTD2wisa5QE0=";
hash = "sha256-Chz+NW9NXqboXHOa6PKwf5bhAkkcFtKNhvKWwg2XSPc=";
};
npm = hermesNpmLib.mkNpmPassthru { folder = "ui-tui"; attr = "tui"; pname = "hermes-tui"; };

614
scripts/profile-tui.py Executable file
View File

@ -0,0 +1,614 @@
#!/usr/bin/env python3
"""Drive the Hermes TUI under HERMES_DEV_PERF and summarize the pipeline.
Usage:
scripts/profile-tui.py [--session SID] [--hold KEY] [--seconds N] [--rate HZ]
Defaults: picks the session with the most messages, holds PageUp for 8s at
~30 Hz (matching xterm key-repeat), summarizes ~/.hermes/perf.log on exit.
The --tui build must exist (run `npm run build` in ui-tui first). This script
launches `node dist/entry.js` directly with HERMES_TUI_RESUME set so it
bypasses the hermes_cli wrapper we want repeatable timing, not the CLI's
session-picker flow.
Environment overrides:
HERMES_PERF_LOG (default ~/.hermes/perf.log)
HERMES_PERF_NODE (default node from $PATH)
HERMES_TUI_DIR (default /home/bb/hermes-agent/ui-tui)
Exit code is 0 if the harness ran and parsed results, 2 if the TUI crashed
or produced no perf data (suggests HERMES_DEV_PERF wiring is broken).
"""
from __future__ import annotations
import argparse
import json
import os
import pty
import select
import signal
import sqlite3
import sys
import time
from pathlib import Path
from typing import Any
# Harness defaults; each is overridable via the environment variables
# listed in the module docstring.
DEFAULT_TUI_DIR = Path(os.environ.get("HERMES_TUI_DIR", "/home/bb/hermes-agent/ui-tui"))
DEFAULT_LOG = Path(os.environ.get("HERMES_PERF_LOG", str(Path.home() / ".hermes" / "perf.log")))
# SQLite database where the CLI stores session/message history.
DEFAULT_STATE_DB = Path.home() / ".hermes" / "state.db"
# Keystroke escape sequences. Matches what xterm/VT220 send when the
# terminal has bracketed-paste disabled and the key-repeat handler fires.
KEYS = {
    "page_up": b"\x1b[5~",
    "page_down": b"\x1b[6~",
    "wheel_up": b"\x1b[M`!!",  # mouse wheel up (SGR-less) — best-effort
    "shift_up": b"\x1b[1;2A",
    "shift_down": b"\x1b[1;2B",
}
def pick_longest_session(db: Path) -> str:
    """Return the id of the session with the most messages in *db*.

    Exits the process with an error message when the database contains no
    sessions at all.
    """
    conn = sqlite3.connect(db)
    try:
        row = conn.execute(
            "SELECT id FROM sessions s ORDER BY "
            "(SELECT COUNT(*) FROM messages m WHERE m.session_id = s.id) DESC LIMIT 1"
        ).fetchone()
    finally:
        # Close explicitly — the original leaked the connection (and sys.exit
        # below would otherwise leave it to GC).
        conn.close()
    if not row:
        sys.exit(f"no sessions in {db}")
    return row[0]
def drain(fd: int, timeout: float) -> bytes:
    """Collect whatever bytes become readable on *fd* within *timeout* seconds.

    Stops early when the descriptor stops producing data (select timeout,
    EOF, or a read error) and returns everything read, concatenated.
    """
    deadline = time.monotonic() + timeout
    collected = bytearray()
    while True:
        remaining = deadline - time.monotonic()
        if remaining <= 0:
            break
        readable, _, _ = select.select([fd], [], [], max(0.0, remaining))
        if not readable:
            break
        try:
            piece = os.read(fd, 4096)
        except OSError:
            break
        if not piece:
            break
        collected.extend(piece)
    return bytes(collected)
def hold_key(fd: int, seq: bytes, seconds: float, rate_hz: int) -> int:
    """Repeatedly write *seq* to *fd* at roughly *rate_hz* writes per second
    for *seconds*. Returns how many keystrokes were actually written."""
    pause = 1.0 / max(1, rate_hz)
    deadline = time.monotonic() + seconds
    keystrokes = 0
    while time.monotonic() < deadline:
        try:
            os.write(fd, seq)
        except OSError:
            break
        keystrokes += 1
        # Keep the PTY's output side flowing so the child never blocks on a
        # full buffer; the drained bytes themselves are discarded.
        drain(fd, 0)
        time.sleep(pause)
    return keystrokes
def summarize(log: Path, since_ts_ms: int) -> dict[str, Any]:
    """Parse the JSON-lines perf log at *log* and bucket events by source.

    Events older than *since_ts_ms* (strictly smaller ``ts``), blank lines,
    and unparseable lines are skipped. Returns ``{"react": [...], "frame":
    [...]}``, or an error dict with empty buckets when the log is missing.
    """
    if not log.exists():
        return {"error": f"no log at {log}", "react": [], "frame": []}
    buckets: dict[str, list[dict[str, Any]]] = {"react": [], "frame": []}
    for raw in log.read_text().splitlines():
        raw = raw.strip()
        if not raw:
            continue
        try:
            event = json.loads(raw)
        except json.JSONDecodeError:
            continue
        if int(event.get("ts", 0)) < since_ts_ms:
            continue
        source = event.get("src")
        if source in buckets:
            buckets[source].append(event)
    return {"react": buckets["react"], "frame": buckets["frame"]}
def pct(values: list[float], p: float) -> float:
    """Nearest-rank percentile: element at index floor(len * p) of the sorted
    values, clamped to the last element. Returns 0.0 for empty input."""
    if not values:
        return 0.0
    ordered = sorted(values)
    rank = int(len(ordered) * p)
    return ordered[min(rank, len(ordered) - 1)]
def format_report(data: dict[str, Any]) -> str:
    """Render the summarize() output as a human-readable multi-section report.

    Sections: React profiler timings per pane (worst p99 first), then the Ink
    render pipeline — frame durations, throughput, per-phase breakdown, Yoga
    counters, patch/write stats, backpressure, and flicker diagnostics. Empty
    sections print a hint about which wiring might be broken.
    """
    react = data.get("react") or []
    frames = data.get("frame") or []
    out = []
    out.append("═══ React Profiler ═══")
    if not react:
        out.append(" (no react events — HERMES_DEV_PERF wired? threshold too high?)")
    else:
        # Group render timings by profiler id (one per pane).
        by_id: dict[str, list[float]] = {}
        for r in react:
            by_id.setdefault(r["id"], []).append(r["actualMs"])
        out.append(f" {'pane':<14} {'count':>6} {'p50':>8} {'p95':>8} {'p99':>8} {'max':>8}")
        for pid, ms in sorted(by_id.items(), key=lambda kv: -pct(kv[1], 0.99)):
            out.append(
                f" {pid:<14} {len(ms):>6} {pct(ms,0.50):>8.2f} {pct(ms,0.95):>8.2f} "
                f"{pct(ms,0.99):>8.2f} {max(ms):>8.2f}"
            )
    out.append("")
    out.append("═══ Ink pipeline ═══")
    if not frames:
        out.append(" (no frame events — onFrame wiring broken?)")
    else:
        dur = [f["durationMs"] for f in frames]
        # Not every frame event carries a per-phase breakdown; gate on it.
        phases_present = any(f.get("phases") for f in frames)
        out.append(f" frames captured: {len(frames)}")
        out.append(
            f" durationMs p50={pct(dur,0.50):.2f} p95={pct(dur,0.95):.2f} "
            f"p99={pct(dur,0.99):.2f} max={max(dur):.2f}"
        )
        # Effective FPS during the run: frames / elapsed seconds.
        ts = sorted(f["ts"] for f in frames)
        if len(ts) >= 2:
            elapsed_s = (ts[-1] - ts[0]) / 1000.0
            fps = len(frames) / elapsed_s if elapsed_s > 0 else float("inf")
            out.append(f" throughput: {len(frames)} frames / {elapsed_s:.2f}s = {fps:.1f} fps")
        if phases_present:
            fields = ["yoga", "renderer", "diff", "optimize", "write", "commit"]
            out.append("")
            out.append(f" {'phase':<10} {'p50':>8} {'p95':>8} {'p99':>8} {'max':>8} (ms)")
            for field in fields:
                vals = [f["phases"][field] for f in frames if f.get("phases")]
                if vals:
                    out.append(
                        f" {field:<10} {pct(vals,0.50):>8.2f} {pct(vals,0.95):>8.2f} "
                        f"{pct(vals,0.99):>8.2f} {max(vals):>8.2f}"
                    )
            # Derived: sum of phases vs durationMs (reveals hidden time).
            sum_ps = [
                sum(f["phases"][k] for k in fields)
                for f in frames if f.get("phases")
            ]
            if sum_ps:
                dur_match = [f["durationMs"] for f in frames if f.get("phases")]
                deltas = [d - s for d, s in zip(dur_match, sum_ps)]
                out.append(
                    f" {'dur-Σphases':<10} {pct(deltas,0.50):>8.2f} {pct(deltas,0.95):>8.2f} "
                    f"{pct(deltas,0.99):>8.2f} {max(deltas):>8.2f} (unaccounted-for time)"
                )
            # Yoga counters
            visited = [f["phases"]["yogaVisited"] for f in frames if f.get("phases")]
            measured = [f["phases"]["yogaMeasured"] for f in frames if f.get("phases")]
            cache_hits = [f["phases"]["yogaCacheHits"] for f in frames if f.get("phases")]
            live = [f["phases"]["yogaLive"] for f in frames if f.get("phases")]
            out.append("")
            out.append(" Yoga counters (per frame):")
            for name, vals in (
                ("visited", visited),
                ("measured", measured),
                ("cacheHits", cache_hits),
                ("live", live),
            ):
                if vals:
                    out.append(f" {name:<11} p50={pct(vals,0.5):.0f} p99={pct(vals,0.99):.0f} max={max(vals)}")
            # Patch counts — proxy for "how much changed each frame"
            patches = [f["phases"]["patches"] for f in frames if f.get("phases")]
            if patches:
                out.append(
                    f" patches p50={pct(patches,0.5):.0f} p99={pct(patches,0.99):.0f} "
                    f"max={max(patches)} total={sum(patches)}"
                )
            optimized = [
                f["phases"].get("optimizedPatches", 0)
                for f in frames if f.get("phases")
            ]
            if any(optimized):
                out.append(
                    f" optimized p50={pct(optimized,0.5):.0f} p99={pct(optimized,0.99):.0f} "
                    f"max={max(optimized)} total={sum(optimized)}"
                    f" (ratio: {sum(optimized)/max(1,sum(patches)):.2f})"
                )
            # Write bytes + drain telemetry — the outer-terminal bottleneck gauge.
            bytes_written = [
                f["phases"].get("writeBytes", 0)
                for f in frames if f.get("phases")
            ]
            if any(bytes_written):
                total_b = sum(bytes_written)
                kb = total_b / 1024
                out.append(
                    f" writeBytes p50={pct(bytes_written,0.5):.0f}B p99={pct(bytes_written,0.99):.0f}B "
                    f"max={max(bytes_written)}B total={kb:.1f}KB"
                )
            drains = [
                f["phases"].get("prevFrameDrainMs", 0)
                for f in frames if f.get("phases")
            ]
            if any(d > 0 for d in drains):
                nonzero = [d for d in drains if d > 0]
                out.append(
                    f" drainMs p50={pct(nonzero,0.5):.2f} p95={pct(nonzero,0.95):.2f} "
                    f"p99={pct(nonzero,0.99):.2f} max={max(nonzero):.2f} (terminal flush latency)"
                )
            backpressure = sum(1 for f in frames if f.get("phases", {}).get("backpressure"))
            if backpressure:
                out.append(
                    f" backpressure: {backpressure}/{len(frames)} frames "
                    f"({100*backpressure/len(frames):.0f}%) (Node stdout buffer full — terminal slow)"
                )
        # Flickers
        flicker_frames = [f for f in frames if f.get("flickers")]
        if flicker_frames:
            out.append("")
            out.append(f" ⚠ flickers detected in {len(flicker_frames)} frames")
            reasons: dict[str, int] = {}
            for f in flicker_frames:
                for fl in f["flickers"]:
                    reasons[fl["reason"]] = reasons.get(fl["reason"], 0) + 1
            for reason, n in sorted(reasons.items(), key=lambda kv: -kv[1]):
                out.append(f" {reason}: {n}")
    return "\n".join(out)
def key_metrics(data: dict[str, Any]) -> dict[str, float]:
    """Flatten a summarize() result into scalar metrics for A/B diffing."""
    out: dict[str, float] = {}
    frames = data.get("frame") or []
    react = data.get("react") or []
    if frames:
        durations = [f["durationMs"] for f in frames]
        out["frames"] = len(frames)
        out["dur_p50"] = pct(durations, 0.50)
        out["dur_p95"] = pct(durations, 0.95)
        out["dur_p99"] = pct(durations, 0.99)
        out["dur_max"] = max(durations)
        timestamps = sorted(f["ts"] for f in frames)
        if len(timestamps) >= 2:
            span_s = (timestamps[-1] - timestamps[0]) / 1000.0
            out["fps_throughput"] = len(frames) / span_s if span_s > 0 else 0.0
            # Inter-frame gap distribution — complementary view to throughput.
            gaps = [later - earlier for earlier, later in zip(timestamps, timestamps[1:])]
            if gaps:
                out["gap_p50_ms"] = pct(gaps, 0.50)
                out["gap_p99_ms"] = pct(gaps, 0.99)
                out["gaps_under_16ms"] = sum(1 for g in gaps if g < 16)
                out["gaps_over_200ms"] = sum(1 for g in gaps if g >= 200)
        phased = [f for f in frames if f.get("phases")]
        for phase in ("renderer", "yoga", "diff", "write"):
            phase_vals = [f["phases"][phase] for f in phased]
            if phase_vals:
                out[f"{phase}_p99"] = pct(phase_vals, 0.99)
                out[f"{phase}_max"] = max(phase_vals)
        patch_counts = [f["phases"]["patches"] for f in phased]
        if patch_counts:
            out["patches_total"] = sum(patch_counts)
            out["patches_p99"] = pct(patch_counts, 0.99)
        optimized = [f["phases"].get("optimizedPatches", 0) for f in phased]
        if any(optimized):
            out["optimized_total"] = sum(optimized)
        write_bytes = [f["phases"].get("writeBytes", 0) for f in phased]
        if any(write_bytes):
            out["writeBytes_total"] = sum(write_bytes)
        drain_vals = [f["phases"].get("prevFrameDrainMs", 0) for f in phased]
        positive_drains = [d for d in drain_vals if d > 0]
        if positive_drains:
            out["drain_p99"] = pct(positive_drains, 0.99)
            out["drain_max"] = max(positive_drains)
        out["backpressure_frames"] = sum(
            1 for f in frames if f.get("phases", {}).get("backpressure")
        )
    if react:
        for pane_id in {e["id"] for e in react}:
            pane_ms = [e["actualMs"] for e in react if e["id"] == pane_id]
            out[f"react_{pane_id}_p99"] = pct(pane_ms, 0.99)
            out[f"react_{pane_id}_max"] = max(pane_ms)
    return out
def format_diff(before: dict[str, float], after: dict[str, float]) -> str:
    """Render a side-by-side A/B comparison table of two key_metrics() dicts.

    Each row shows before/after values, absolute delta, percent change, and a
    direction mark: "✓" for an improvement, "✗" for a regression, "~" for a
    change on a metric with no known better-direction. Metrics missing on one
    side default to 0.0.
    """
    keys = sorted(set(before) | set(after))
    lines = [f"{'metric':<28} {'before':>12} {'after':>12} {'delta':>12} {'%':>6}"]
    # Separator rule under the header. (Previously `"" * 76` — an empty
    # string repeated, which rendered a blank line instead of a rule.)
    lines.append("─" * 76)
    for k in keys:
        b = before.get(k, 0.0)
        a = after.get(k, 0.0)
        d = a - b
        # Percent change is undefined for a zero baseline; flag 0 → x as
        # "new" rather than dividing by zero.
        pct_change = ((a / b) - 1) * 100 if b not in (0, 0.0) else float("inf") if a else 0
        # Flag improvements vs regressions. For _p99 / _max / _total / gaps_over /
        # patches / writeBytes / backpressure, LOWER is better. For fps / gaps_under,
        # HIGHER is better.
        lower_is_better = any(
            token in k
            for token in (
                "p50",
                "p95",
                "p99",
                "_max",
                "_total",
                "gaps_over",
                "backpressure",
                "drain",
            )
        )
        higher_is_better = "fps_" in k or "gaps_under" in k
        mark = ""
        if d and not (lower_is_better or higher_is_better):
            mark = "~"  # changed, but no known good direction
        elif d < 0 and lower_is_better:
            mark = "✓"
        elif d > 0 and higher_is_better:
            mark = "✓"
        elif d > 0 and lower_is_better:
            mark = "✗"  # regression
        elif d < 0 and higher_is_better:
            mark = "✗"  # regression
        pct_str = "   new" if pct_change == float("inf") else f"{pct_change:+6.1f}%"
        lines.append(
            f"{k:<28} {b:>12.2f} {a:>12.2f} {d:>+12.2f} {pct_str} {mark}"
        )
    return "\n".join(lines)
def run_once(args: argparse.Namespace) -> dict[str, Any]:
    """Launch the TUI once under a PTY, drive it with key repeats, and return
    the parsed perf summary (see summarize()).

    Exits the process early when dist/entry.js is missing. The child is
    always torn down (SIGTERM, then SIGKILL after ~1s) before returning.
    """
    tui_dir = Path(args.tui_dir).resolve()
    entry = tui_dir / "dist" / "entry.js"
    if not entry.exists():
        sys.exit(f"{entry} missing — run `npm run build` in {tui_dir} first")
    sid = args.session or pick_longest_session(DEFAULT_STATE_DB)
    print(f"• session: {sid}")
    print(f"• hold: {args.hold} x {args.rate}Hz for {args.seconds}s after {args.warmup}s warmup")
    print(f"• terminal: {args.cols}x{args.rows}")
    log = Path(args.log)
    if not args.keep_log and log.exists():
        log.unlink()
    since_ms = int(time.time() * 1000)
    env = os.environ.copy()
    env["HERMES_DEV_PERF"] = "1"
    env["HERMES_DEV_PERF_MS"] = str(args.threshold_ms)
    env["HERMES_DEV_PERF_LOG"] = str(log)
    env["HERMES_TUI_RESUME"] = sid
    env["COLUMNS"] = str(args.cols)
    env["LINES"] = str(args.rows)
    env["TERM"] = env.get("TERM", "xterm-256color")
    # Pass through extra flags the TUI wrapper recognizes (e.g. --no-fullscreen).
    # Stored on args as `extra_flags` list.
    node = os.environ.get("HERMES_PERF_NODE", "node")
    node_args = [node, str(entry), *getattr(args, "extra_flags", [])]
    pid, fd = pty.fork()
    if pid == 0:
        # Child process. If exec fails (e.g. node not on PATH), we must NOT
        # fall through into the parent's code below — previously an OSError
        # here left a second copy of the harness running. Bail out hard.
        try:
            os.execvpe(node, node_args, env)
        except OSError:
            os._exit(127)
    try:
        import fcntl
        import struct
        import termios
        winsize = struct.pack("HHHH", args.rows, args.cols, 0, 0)
        fcntl.ioctl(fd, termios.TIOCSWINSZ, winsize)
        print(f"• pid: {pid} fd: {fd}")
        print(f"• warmup {args.warmup}s (drain startup output)…")
        drain(fd, args.warmup)
        print(f"• holding {args.hold}")
        sent = hold_key(fd, KEYS[args.hold], args.seconds, args.rate)
        print(f" sent {sent} keystrokes")
        drain(fd, 0.5)
    finally:
        # Best-effort teardown: SIGTERM, poll up to ~1s, then SIGKILL.
        try:
            os.kill(pid, signal.SIGTERM)
            for _ in range(10):
                pid_done, _ = os.waitpid(pid, os.WNOHANG)
                if pid_done == pid:
                    break
                time.sleep(0.1)
            else:
                os.kill(pid, signal.SIGKILL)
                os.waitpid(pid, 0)
        except (ProcessLookupError, ChildProcessError):
            pass
        try:
            os.close(fd)
        except OSError:
            pass
    # Give the TUI a beat to flush its final perf-log lines before parsing.
    time.sleep(0.2)
    return summarize(log, since_ms)
def main() -> int:
    """CLI entry point: parse flags, run the harness (single-shot or --loop),
    print the report, and handle --save/--compare baselines.

    Returns 0 on success, 2 when the run captured no perf events at all.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--session", help="session id to resume (default: longest in db)")
    parser.add_argument("--hold", default="page_up", choices=sorted(KEYS.keys()), help="key to hold")
    parser.add_argument("--seconds", type=float, default=8.0, help="how long to hold the key")
    parser.add_argument("--rate", type=int, default=30, help="keystrokes per second")
    parser.add_argument("--warmup", type=float, default=3.0, help="seconds to wait after launch before input")
    parser.add_argument("--threshold-ms", type=float, default=0.0, help="HERMES_DEV_PERF_MS (0 = capture all)")
    parser.add_argument("--cols", type=int, default=120)
    parser.add_argument("--rows", type=int, default=40)
    parser.add_argument("--keep-log", action="store_true", help="don't wipe perf.log before run")
    parser.add_argument("--tui-dir", default=str(DEFAULT_TUI_DIR))
    parser.add_argument("--log", default=str(DEFAULT_LOG))
    parser.add_argument("--save", metavar="LABEL",
                        help="save the final metrics as /tmp/perf-<LABEL>.json for later --compare")
    parser.add_argument("--compare", metavar="LABEL",
                        help="diff against /tmp/perf-<LABEL>.json after running")
    parser.add_argument("--loop", action="store_true",
                        help="watch for source changes, rebuild, rerun, and diff vs previous run")
    parser.add_argument("--extra-flag", dest="extra_flags", action="append", default=[],
                        help="pass through to node dist/entry.js (repeatable)")
    args = parser.parse_args()
    if args.loop:
        return loop_mode(args)
    # Single-shot path: one measured run, then report + optional save/compare.
    data = run_once(args)
    print()
    print(format_report(data))
    metrics = key_metrics(data)
    if args.save:
        save_path = Path(f"/tmp/perf-{args.save}.json")
        save_path.write_text(json.dumps(metrics, indent=2))
        print(f"\n• saved: {save_path}")
    if args.compare:
        baseline_path = Path(f"/tmp/perf-{args.compare}.json")
        if not baseline_path.exists():
            print(f"\n⚠ no baseline at {baseline_path} — run with --save {args.compare} first")
        else:
            baseline = json.loads(baseline_path.read_text())
            print(f"\n═══ A/B diff vs /tmp/perf-{args.compare}.json ═══")
            print(format_diff(baseline, metrics))
    return 2 if not data["react"] and not data["frame"] else 0
def loop_mode(args: argparse.Namespace) -> int:
    """Watch source files, rebuild, rerun, print A/B diff against previous run.

    Keeps a rolling 'previous run' baseline in memory so each iteration
    reports its delta vs the last one — visibility into whether the last
    edit moved the needle. Press Ctrl+C to stop.
    """
    import subprocess
    tui_dir = Path(args.tui_dir).resolve()
    src_root = tui_dir / "src"
    pkg_root = tui_dir / "packages" / "hermes-ink" / "src"
    def collect_mtimes() -> dict[str, float]:
        # Snapshot mtimes of every non-test TS/TSX file under both roots.
        mtimes: dict[str, float] = {}
        for root in (src_root, pkg_root):
            if not root.exists():
                continue
            for path in root.rglob("*"):
                if path.suffix in {".ts", ".tsx"} and "__tests__" not in str(path):
                    try:
                        mtimes[str(path)] = path.stat().st_mtime
                    except OSError:
                        pass
        return mtimes
    previous_metrics: dict[str, float] | None = None
    previous_mtimes = collect_mtimes()
    iteration = 0
    print(f"• loop mode — watching {src_root} + {pkg_root} for *.ts(x) changes")
    print("• edit any TS file, the harness rebuilds + reruns automatically")
    print("• Ctrl+C to stop\n")
    try:
        while True:
            iteration += 1
            # Iteration banner. (These rules previously multiplied the EMPTY
            # string by 76, printing blank lines instead of separators.)
            print(f"\n{'═' * 76}")
            print(f"Iteration {iteration} @ {time.strftime('%H:%M:%S')}")
            print("═" * 76)
            if iteration > 1:
                print("• rebuilding…")
                result = subprocess.run(
                    ["npm", "run", "build"],
                    cwd=tui_dir,
                    capture_output=True,
                    text=True,
                )
                if result.returncode != 0:
                    # Show the tail of the build output, then wait for the
                    # next edit instead of rerunning a stale build.
                    print("✗ build failed:")
                    print(result.stdout[-2000:])
                    print(result.stderr[-2000:])
                    print("\n• waiting for source changes to retry…")
                    previous_mtimes = wait_for_change(previous_mtimes, collect_mtimes)
                    continue
                print("✓ build ok")
            data = run_once(args)
            metrics = key_metrics(data)
            print()
            print(format_report(data))
            if previous_metrics is not None:
                print(f"\n═══ A/B diff vs iteration {iteration - 1} ═══")
                print(format_diff(previous_metrics, metrics))
            previous_metrics = metrics
            print("\n• waiting for source changes…")
            previous_mtimes = wait_for_change(previous_mtimes, collect_mtimes)
    except KeyboardInterrupt:
        print("\n• loop stopped")
        return 0
def wait_for_change(prev: dict[str, float], collect) -> dict[str, float]:
    """Block until the mtime snapshot from *collect* differs from *prev*.

    Polls once per second; when any watched file's mtime changed, prints the
    first few changed paths, waits 500ms for editor save bursts to settle,
    and returns a fresh snapshot.
    """
    while True:
        time.sleep(1)
        snapshot = collect()
        touched = [p for p, mtime in snapshot.items() if prev.get(p) != mtime]
        if not touched:
            continue
        print(f"{len(touched)} file(s) changed:")
        for p in touched[:5]:
            print(f" {p}")
        # Debounce — editor save bursts can take ~500ms to settle
        time.sleep(0.5)
        return collect()
# Script entry point: propagate main()'s exit status (0 ok, 2 no perf data)
# to the shell.
if __name__ == "__main__":
    sys.exit(main())

View File

@ -0,0 +1,151 @@
---
name: debugging-hermes-tui-commands
description: Use when debugging or adding Hermes TUI slash commands across the Python backend (hermes_cli/commands.py), the tui_gateway bridge, and the TypeScript/Ink frontend. Covers autocomplete gaps, gateway dispatch issues, and live UI-state wiring.
version: 1.0.0
author: Hermes Agent
license: MIT
metadata:
hermes:
tags: [debugging, hermes-agent, tui, slash-commands, typescript, python]
related_skills: [python-debugpy, node-inspect-debugger, systematic-debugging]
---
# Debugging Hermes TUI Slash Commands
## Overview
Hermes slash commands span three layers — Python command registry, tui_gateway JSON-RPC bridge, and the Ink/TypeScript frontend. When a command misbehaves (missing from autocomplete, works in CLI but not TUI, config persists but UI doesn't update), the bug is almost always one layer being out of sync with another.
Use this skill when you encounter issues with slash commands in the Hermes TUI, particularly when commands aren't showing in autocomplete, aren't working properly in the TUI, or need to be added/updated.
## When to Use
- A slash command exists in one part of the codebase but doesn't work fully
- A command needs to be added to both backend and frontend
- Command autocomplete isn't working for specific commands
- Command behavior is inconsistent between CLI and TUI
- A command persists config but doesn't apply live in the TUI
## Architecture Overview
```
Python backend (hermes_cli/commands.py) <- canonical COMMAND_REGISTRY
TUI gateway (tui_gateway/server.py) <- slash.exec / command.dispatch
TUI frontend (ui-tui/src/app/slash/) <- local handlers + fallthrough
```
Command definitions must be registered consistently across Python and TypeScript to work properly. The Python `COMMAND_REGISTRY` is the source of truth for: CLI dispatch, gateway help, Telegram BotCommand menu, Slack subcommand map, and autocomplete data shipped to Ink.
## Investigation Steps
1. **Check if the command exists in the TUI frontend:**
```bash
search_files --pattern "/commandname" --file_glob "*.ts" --path ui-tui/
search_files --pattern "/commandname" --file_glob "*.tsx" --path ui-tui/
```
2. **Examine the TUI command definition:**
```bash
read_file ui-tui/src/app/slash/commands/core.ts
# If not there:
search_files --pattern "commandname" --path ui-tui/src/app/slash/commands --target files
```
3. **Check if the command exists in the Python backend:**
```bash
search_files --pattern "CommandDef" --file_glob "*.py" --path hermes_cli/
search_files --pattern "commandname" --path hermes_cli/commands.py --context 3
```
4. **Examine the gateway implementation:**
```bash
search_files --pattern "complete.slash|slash.exec" --path tui_gateway/
```
## Fix: Missing Command Autocomplete
If a command exists in the TUI but doesn't show in autocomplete:
1. Add a `CommandDef` entry to `COMMAND_REGISTRY` in `hermes_cli/commands.py`:
```python
CommandDef("commandname", "Description of the command", "Session",
cli_only=True, aliases=("alias",),
args_hint="[arg1|arg2|arg3]",
subcommands=("arg1", "arg2", "arg3")),
```
2. Pick `cli_only` vs gateway availability carefully:
- `cli_only=True` — only in the interactive CLI/TUI
- `gateway_only=True` — only in messaging platforms
- neither — available everywhere
- `gateway_config_gate="display.foo"` — config-gated availability in the gateway
3. Ensure `subcommands` matches the expected tab-completion options shown by the TUI.
4. If the command runs server-side, add a handler in `HermesCLI.process_command()` in `cli.py`:
```python
elif canonical == "commandname":
self._handle_commandname(cmd_original)
```
5. For gateway-available commands, add a handler in `gateway/run.py`:
```python
if canonical == "commandname":
return await self._handle_commandname(event)
```
## Common Issues
1. **Command shows in TUI but not in autocomplete.** The command is defined in the TUI codebase but missing from `COMMAND_REGISTRY` in `hermes_cli/commands.py`. Autocomplete data ships from Python.
2. **Command shows in autocomplete but doesn't work.** Check the command handler in `tui_gateway/server.py` and the frontend handler in `ui-tui/src/app/createSlashHandler.ts`. If the command is local-only in Ink, it must be handled in `app.tsx` built-in branch; otherwise it falls through to `slash.exec` and must have a Python handler.
3. **Command behavior differs between CLI and TUI.** The command might have different implementations. Check both `cli.py::process_command` and the TUI's local handler. Local TUI handlers take precedence over gateway dispatch.
4. **Command persists config but doesn't apply live.** For TUI-local commands, updating `config.set` is not enough. Also patch the relevant nanostore state immediately (usually `patchUiState(...)`) and pass any new state through rendering components. Example: `/details collapsed` must update live detail visibility, not just save `details_mode`; in-session global `/details <mode>` may need a separate command-override flag so live commands can override built-in section defaults while startup/config sync preserves default-expanded thinking/tools behavior.
5. **Gateway dispatch silently ignores the command.** The gateway only dispatches commands it knows about. Check `GATEWAY_KNOWN_COMMANDS` (derived from `COMMAND_REGISTRY` automatically) includes the canonical name. If the command is `cli_only` with a `gateway_config_gate`, verify the gated config value is truthy.
## Debugging Tactics
When surface-level inspection doesn't reveal the bug:
- **Python side hangs or misbehaves:** use the `python-debugpy` skill to break inside `_SlashWorker.exec` or the command handler. `remote-pdb` set at the handler entry is the fastest path.
- **Ink side not reacting:** use the `node-inspect-debugger` skill to break in `app.tsx`'s slash dispatch or the local command branch. `sb('dist/app.js', <line>)` after `npm run build`.
- **Registry mismatch / unclear which side is wrong:** compare the canonical `COMMAND_REGISTRY` entry against the TUI's local command list side-by-side.
## Pitfalls
- Don't forget to set the appropriate category for the command in `CommandDef` (e.g., "Session", "Configuration", "Tools & Skills", "Info", "Exit")
- Make sure any aliases are properly registered in the `aliases` tuple — no other file changes are needed, everything downstream (Telegram menu, Slack mapping, autocomplete, help) derives from it
- For commands with subcommands, ensure the `subcommands` tuple in `CommandDef` matches what's in the TUI code
- `cli_only=True` commands won't work in gateway/messaging platforms — unless you add a `gateway_config_gate` and the gate is truthy
- After adding live UI state, search every consumer of the old prop/helper and thread the new state through all render paths, not just the active streaming path. TUI detail rendering has at least two important paths: live `StreamingAssistant`/`ToolTrail` and transcript/pending `MessageLine` rows. A `/clean` pass should explicitly check both.
- Rebuild the TUI (`npm --prefix ui-tui run build`) before testing — tsx watch mode may lag on first launch
## Verification
After fixing:
1. Rebuild the TUI:
```bash
cd /home/bb/hermes-agent && npm --prefix ui-tui run build
```
2. Run the TUI and test the command:
```bash
hermes --tui
```
3. Type `/` and verify the command appears in autocomplete suggestions with the expected description and args hint.
4. Execute the command and confirm:
- Expected behavior fires
- Any persisted config updates correctly (`read_file ~/.hermes/config.yaml`)
- Live UI state reflects the change immediately (not just after restart)
5. If the command is also gateway-available, test it from at least one messaging platform (or run the gateway tests: `scripts/run_tests.sh tests/gateway/`).

View File

@ -0,0 +1,164 @@
---
name: hermes-agent-skill-authoring
description: Use when authoring or updating a SKILL.md inside the hermes-agent repo itself (skills/ tree, committed to a branch). Covers required frontmatter, validator limits, peer-matching structure, and the write_file-vs-skill_manage distinction for in-repo skills.
version: 1.0.0
author: Hermes Agent
license: MIT
metadata:
hermes:
tags: [skills, authoring, hermes-agent, conventions, skill-md]
related_skills: [writing-plans, requesting-code-review]
---
# Authoring Hermes-Agent Skills (in-repo)
## Overview
There are two places a SKILL.md can live:
1. **User-local:** `~/.hermes/skills/<maybe-category>/<name>/SKILL.md` — personal, not shared. Created via `skill_manage(action='create')`.
2. **In-repo (this skill is about this case):** `/home/bb/hermes-agent/skills/<category>/<name>/SKILL.md` — committed, shipped with the package. Use `write_file` + `git add`. `skill_manage(action='create')` does NOT target this tree.
## When to Use
- User asks you to add a skill "in this branch / repo / commit"
- You're committing a reusable workflow that should ship with hermes-agent
- You're editing an existing skill under `/home/bb/hermes-agent/skills/` (use `patch` for small edits, `write_file` for rewrites; `skill_manage` still works for patch on in-repo skills, but not for `create`)
## Required Frontmatter
Source of truth: `tools/skill_manager_tool.py::_validate_frontmatter`. Hard requirements:
- Starts with `---` as the first bytes (no leading blank line).
- Closes with `\n---\n` before the body.
- Parses as a YAML mapping.
- `name` field present.
- `description` field present, ≤ **1024 chars** (`MAX_DESCRIPTION_LENGTH`).
- Non-empty body after the closing `---`.
Peer-matched shape used by every skill under `skills/software-development/`:
```yaml
---
name: my-skill-name # lowercase, hyphens, ≤64 chars (MAX_NAME_LENGTH)
description: Use when <trigger>. <one-line behavior>.
version: 1.0.0
author: Hermes Agent
license: MIT
metadata:
hermes:
tags: [short, descriptive, tags]
related_skills: [other-skill, another-skill]
---
```
`version` / `author` / `license` / `metadata` are NOT enforced by the validator, but every peer has them — omit and your skill sticks out.
## Size Limits
- Description: ≤ 1024 chars (enforced).
- Full SKILL.md: ≤ 100,000 chars (enforced as `MAX_SKILL_CONTENT_CHARS`, ~36k tokens).
- Peer skills in `software-development/` sit at **8-14k chars**. Aim for that range. If you're pushing past 20k, split into `references/*.md` and reference them from SKILL.md.
## Peer-Matched Structure
Every in-repo skill follows roughly:
```
# <Title>
## Overview
One or two paragraphs: what and why.
## When to Use
- Bulleted triggers
- "Don't use for:" counter-triggers
## <Topic sections specific to the skill>
- Quick-reference tables are common
- Code blocks with exact commands
- Hermes-specific recipes (tests via scripts/run_tests.sh, ui-tui paths, etc.)
## Common Pitfalls
Numbered list of mistakes and their fixes.
## Verification Checklist
- [ ] Checkbox list of post-action verifications
## One-Shot Recipes (optional)
Named scenarios → concrete command sequences.
```
Not every section is mandatory, but `Overview` + `When to Use` + actionable body + pitfalls are the minimum for the skill to feel like a peer.
## Directory Placement
```
skills/<category>/<skill-name>/SKILL.md
```
Categories currently in repo (confirm with `ls skills/`): `autonomous-ai-agents`, `creative`, `data-science`, `devops`, `dogfood`, `email`, `gaming`, `github`, `leisure`, `mcp`, `media`, `mlops/*`, `note-taking`, `productivity`, `red-teaming`, `research`, `smart-home`, `social-media`, `software-development`.
Pick the closest existing category. Don't invent new top-level categories casually.
## Workflow
1. **Survey peers** in the target category:
```
ls skills/<category>/
```
Read 2-3 peer SKILL.md files to match tone and structure.
2. **Check validator constraints** in `tools/skill_manager_tool.py` if unsure.
3. **Draft** with `write_file` to `skills/<category>/<name>/SKILL.md`.
4. **Validate locally**:
```python
import yaml, re, pathlib
content = pathlib.Path("skills/<category>/<name>/SKILL.md").read_text()
assert content.startswith("---")
m = re.search(r'\n---\s*\n', content[3:])
fm = yaml.safe_load(content[3:m.start()+3])
assert "name" in fm and "description" in fm
assert len(fm["description"]) <= 1024
assert len(content) <= 100_000
```
5. **Git add + commit** on the active branch.
6. **Note:** the CURRENT session's skill loader is cached — `skill_view` / `skills_list` will not see the new skill until a new session. This is expected, not a bug.
## Cross-Referencing Other Skills
`metadata.hermes.related_skills` unions both trees (`skills/` in-repo and `~/.hermes/skills/`) at load time. You CAN reference a user-local skill from an in-repo skill, but it won't resolve for other users who clone the repo fresh. Prefer referencing only in-repo skills from in-repo skills. If a frequently-referenced skill lives only in `~/.hermes/skills/`, consider promoting it to the repo.
## Editing Existing In-Repo Skills
- **Small fix (typo, added pitfall, tightened trigger):** `skill_manage(action='patch', name=..., old_string=..., new_string=...)` works fine on in-repo skills.
- **Major rewrite:** `write_file` the whole SKILL.md. `skill_manage(action='edit')` also works but requires supplying the full new content.
- **Adding supporting files:** `write_file` to `skills/<category>/<name>/references/<file>.md`, `templates/<file>`, or `scripts/<file>`. `skill_manage(action='write_file')` also works and enforces the references/templates/scripts/assets subdir allowlist.
- **Always commit** the edit — in-repo skills are source, not runtime state.
## Common Pitfalls
1. **Using `skill_manage(action='create')` for an in-repo skill.** It writes to `~/.hermes/skills/`, not the repo tree. Use `write_file` for in-repo creation.
2. **Leading whitespace before `---`.** The validator checks `content.startswith("---")`; any leading blank line or BOM fails validation.
3. **Description too generic.** Peer descriptions start with "Use when ..." and describe the *trigger class*, not the one task. "Use when debugging X" > "Debug X".
4. **Forgetting the author/license/metadata block.** Not validator-enforced, but every peer has it; omitting makes the skill look half-finished.
5. **Writing a skill that duplicates a peer.** Before creating, `ls skills/<category>/` and open 2-3 peers. Prefer extending an existing skill to creating a narrow sibling.
6. **Expecting the current session to see the new skill.** It won't. The skill loader is initialized at session start. Verify in a fresh session or via `skill_view` using the exact path.
7. **Linking to skills that don't exist in-repo.** `related_skills: [some-user-local-skill]` works for you but breaks for other clones. Prefer only in-repo links.
## Verification Checklist
- [ ] File is at `skills/<category>/<name>/SKILL.md` (not in `~/.hermes/skills/`)
- [ ] Frontmatter starts at byte 0 with `---`, closes with `\n---\n`
- [ ] `name`, `description`, `version`, `author`, `license`, `metadata.hermes.{tags, related_skills}` all present
- [ ] Name ≤ 64 chars, lowercase + hyphens
- [ ] Description ≤ 1024 chars and starts with "Use when ..."
- [ ] Total file ≤ 100,000 chars (aim for 8-15k)
- [ ] Structure: `# Title` → `## Overview` → `## When to Use` → body → `## Common Pitfalls` → `## Verification Checklist`
- [ ] `related_skills` references resolve in-repo (or are explicitly OK to be user-local)
- [ ] `git add skills/<category>/<name>/ && git commit` completed on the intended branch

View File

@ -0,0 +1,318 @@
---
name: node-inspect-debugger
description: Use when debugging Node.js code (ui-tui, tui_gateway child processes, any Node script/test) with real breakpoints, stepping, scope inspection, and expression evaluation. Drives `node --inspect` via the Chrome DevTools Protocol from the terminal — no browser required.
version: 1.0.0
author: Hermes Agent
license: MIT
metadata:
hermes:
tags: [debugging, nodejs, node-inspect, cdp, breakpoints, ui-tui]
related_skills: [systematic-debugging, python-debugpy, debugging-hermes-tui-commands]
---
# Node.js Inspect Debugger
## Overview
When `console.log` isn't enough, drive Node's built-in V8 inspector programmatically from the terminal. You get real breakpoints, step in/over/out, call-stack walking, local/closure scope dumps, and arbitrary expression evaluation in the paused frame.
Two tools, pick one:
- **`node inspect`** — built-in, zero install, CLI REPL. Best for quick poking.
- **`ndb` / CDP via `chrome-remote-interface`** — scriptable from Node/Python; best when you want to automate many breakpoints, collect state across runs, or debug non-interactively from an agent loop.
**Prefer `node inspect` first.** It's always available and the REPL is fast.
## When to Use
- A Node test fails and you need to see intermediate state
- ui-tui crashes or behaves wrong and you want to inspect React/Ink state pre-render
- tui_gateway child processes (`_SlashWorker`, PTY bridge workers) misbehave
- You need to inspect a value in a closure that `console.log` can't reach without patching
- Perf: attach to a running process to capture a CPU profile or heap snapshot
**Don't use for:** things `console.log` solves in under a minute. Breakpoint-driven debugging is heavier; use it when the payoff is real.
## Quick Reference: `node inspect` REPL
Launch paused on first line:
```bash
node inspect path/to/script.js
# or with tsx
node --inspect-brk $(which tsx) path/to/script.ts
```
The `debug>` prompt accepts:
| Command | Action |
|---|---|
| `c` or `cont` | continue |
| `n` or `next` | step over |
| `s` or `step` | step into |
| `o` or `out` | step out |
| `pause` | pause running code |
| `sb('file.js', 42)` | set breakpoint at file.js line 42 |
| `sb(42)` | set breakpoint at line 42 of current file |
| `sb('functionName')` | break when function is called |
| `cb('file.js', 42)` | clear breakpoint |
| `breakpoints` | list all breakpoints |
| `bt` | backtrace (call stack) |
| `list(5)` | show 5 lines of source around current position |
| `watch('expr')` | evaluate expr on every pause |
| `watchers` | show watched expressions |
| `repl` | drop into REPL in current scope (Ctrl+C to exit REPL) |
| `exec expr` | evaluate expression once |
| `restart` | restart script |
| `kill` | kill the script |
| `.exit` | quit debugger |
**In the `repl` sub-mode:** type any JS expression, including access to locals/closure variables. `Ctrl+C` exits back to `debug>`.
## Attaching to a Running Process
When the process is already running (e.g. a long-lived dev server or the TUI gateway):
```bash
# 1. Send SIGUSR1 to enable the inspector on an existing process
kill -SIGUSR1 <pid>
# Node prints: Debugger listening on ws://127.0.0.1:9229/<uuid>
# 2. Attach the debugger CLI
node inspect -p <pid>
# or by URL
node inspect ws://127.0.0.1:9229/<uuid>
```
To start a process with the inspector from the beginning:
```bash
node --inspect script.js # listen on 127.0.0.1:9229, keep running
node --inspect-brk script.js # listen AND pause on first line
node --inspect=0.0.0.0:9230 script.js # custom host:port
```
For TypeScript via tsx:
```bash
node --inspect-brk --import tsx script.ts
# or older tsx
node --inspect-brk -r tsx/cjs script.ts
```
## Programmatic CDP (scripting from terminal)
When you want to automate — set many breakpoints, capture scope state, script a repro — use `chrome-remote-interface`:
```bash
npm i -g chrome-remote-interface # or project-local
# Start your target:
node --inspect-brk=9229 target.js &
```
Driver script (save as `/tmp/cdp-debug.js`):
```javascript
const CDP = require('chrome-remote-interface');
(async () => {
const client = await CDP({ port: 9229 });
const { Debugger, Runtime } = client;
Debugger.paused(async ({ callFrames, reason }) => {
const top = callFrames[0];
console.log(`PAUSED: ${reason} @ ${top.url}:${top.location.lineNumber + 1}`);
// Walk scopes for locals
for (const scope of top.scopeChain) {
if (scope.type === 'local' || scope.type === 'closure') {
const { result } = await Runtime.getProperties({
objectId: scope.object.objectId,
ownProperties: true,
});
for (const p of result) {
console.log(` ${scope.type}.${p.name} =`, p.value?.value ?? p.value?.description);
}
}
}
// Evaluate an expression in the paused frame
const { result } = await Debugger.evaluateOnCallFrame({
callFrameId: top.callFrameId,
expression: 'typeof state !== "undefined" ? JSON.stringify(state) : "n/a"',
});
console.log('state =', result.value ?? result.description);
await Debugger.resume();
});
await Runtime.enable();
await Debugger.enable();
// Set a breakpoint by URL regex + line
await Debugger.setBreakpointByUrl({
urlRegex: '.*app\\.tsx$',
lineNumber: 119, // 0-indexed
columnNumber: 0,
});
await Runtime.runIfWaitingForDebugger();
})();
```
Run it:
```bash
node /tmp/cdp-debug.js
```
Hermes-specific note: `chrome-remote-interface` is NOT in `ui-tui/package.json`. Install it to a throwaway location if you don't want to dirty the project:
```bash
mkdir -p /tmp/cdp-tools && cd /tmp/cdp-tools && npm i chrome-remote-interface
NODE_PATH=/tmp/cdp-tools/node_modules node /tmp/cdp-debug.js
```
## Debugging Hermes ui-tui
The TUI is built with Ink + tsx. Two common scenarios:
### Debugging a single Ink component under dev
`ui-tui/package.json` has `npm run dev` (tsx --watch). Add `--inspect-brk` by running tsx directly:
```bash
cd /home/bb/hermes-agent/ui-tui
npm run build # produce dist/ once so transpile isn't needed on first load
node --inspect-brk dist/entry.js
# In another terminal:
node inspect -p <node pid>
```
Then inside `debug>`:
```
sb('dist/app.js', 220) # or wherever the suspect render is
cont
```
When it pauses, `repl` → inspect `props`, state refs, `useInput` handler values, etc.
### Debugging a running `hermes --tui`
The TUI spawns Node from the Python CLI. Easiest path:
```bash
# 1. Launch TUI
hermes --tui &
TUI_PID=$(pgrep -f 'ui-tui/dist/entry' | head -1)
# 2. Enable inspector on that Node PID
kill -SIGUSR1 "$TUI_PID"
# 3. Find the WS URL
curl -s http://127.0.0.1:9229/json/list | jq -r '.[0].webSocketDebuggerUrl'
# 4. Attach
node inspect ws://127.0.0.1:9229/<uuid>
```
Interacting with the TUI (typing in its window) continues to advance execution; your debugger can pause it on a breakpoint at any `sb(...)`.
### Debugging `_SlashWorker` / PTY child processes
Those are Python, not Node — use the `python-debugpy` skill for them. Only Node portions (Ink UI, tui_gateway client, tsx-run tests under `ui-tui/`) use this skill.
## Running Vitest Tests Under the Debugger
```bash
cd /home/bb/hermes-agent/ui-tui
# Run a single test file paused on entry
node --inspect-brk ./node_modules/vitest/vitest.mjs run --no-file-parallelism src/app/foo.test.tsx
```
In another terminal: `node inspect -p <pid>`, then `sb('src/app/foo.tsx', 42)`, `cont`.
Use `--no-file-parallelism` (vitest) or `--runInBand` (jest) so only one worker exists — debugging a pool is painful.
## Heap Snapshots & CPU Profiles (Non-interactive)
From the CDP driver above, swap Debugger for `HeapProfiler` / `Profiler`:
```javascript
// CPU profile for 5 seconds
await client.Profiler.enable();
await client.Profiler.start();
await new Promise(r => setTimeout(r, 5000));
const { profile } = await client.Profiler.stop();
require('fs').writeFileSync('/tmp/cpu.cpuprofile', JSON.stringify(profile));
// Open /tmp/cpu.cpuprofile in Chrome DevTools → Performance tab
```
```javascript
// Heap snapshot
await client.HeapProfiler.enable();
const chunks = [];
client.HeapProfiler.addHeapSnapshotChunk(({ chunk }) => chunks.push(chunk));
await client.HeapProfiler.takeHeapSnapshot({ reportProgress: false });
require('fs').writeFileSync('/tmp/heap.heapsnapshot', chunks.join(''));
```
## Common Pitfalls
1. **Wrong line numbers in TS source.** Breakpoints hit the emitted JS, not the `.ts`. Either (a) break in the built `dist/*.js`, or (b) enable sourcemaps (`node --enable-source-maps`) and use `sb('src/app.tsx', N)` — but only with CDP clients that follow sourcemaps. `node inspect` CLI does not.
2. **`--inspect` vs `--inspect-brk`.** `--inspect` starts the inspector but doesn't pause; your script races past your first breakpoint if you attach too late. Use `--inspect-brk` when you need to set breakpoints before any code runs.
3. **Port collisions.** Default is `9229`. If multiple Node processes are inspecting, pass `--inspect=0` (random port) and read the actual URL from `/json/list`:
```bash
curl -s http://127.0.0.1:9229/json/list # lists all inspectable targets on the host
```
4. **Child processes.** `--inspect` on a parent does NOT inspect its children. Use `NODE_OPTIONS='--inspect-brk' node parent.js` to propagate to every child; be aware they all need unique ports (Node auto-increments when `NODE_OPTIONS='--inspect'` is inherited).
5. **Background kills.** If you `Ctrl+C` out of `node inspect` while the target is paused, the target stays paused. Either `cont` first, or `kill` the target explicitly.
6. **Running `node inspect` through an agent terminal.** It's a PTY-friendly REPL. In Hermes, launch it with `terminal(pty=true)` or `background=true` + `process(action='submit', data='...')`. Non-PTY foreground mode will work for one-shot commands but not for interactive stepping.
7. **Security.** `--inspect=0.0.0.0:9229` exposes arbitrary code execution. Always bind to `127.0.0.1` (the default) unless you have an isolated network.
## Verification Checklist
After setting up a debug session, verify:
- [ ] `curl -s http://127.0.0.1:9229/json/list` returns exactly the target you expect
- [ ] First breakpoint actually hits (if it doesn't, you likely missed `--inspect-brk` or attached after execution completed)
- [ ] Source listing at pause shows the right file (mismatch = sourcemap issue, see pitfall 1)
- [ ] `exec process.pid` in `repl` returns the PID you meant to attach to
## One-Shot Recipes
**"Why is this variable undefined at line X?"**
```bash
node --inspect-brk script.js &
node inspect -p $!
# debug>
sb('script.js', X)
cont
# paused. Now:
repl
> myVariable
> Object.keys(this)
```
**"What's the call path into this function?"**
```
debug> sb('suspectFn')
debug> cont
# paused on entry
debug> bt
```
**"This async chain hangs — where?"**
```
# Start with --inspect (no -brk), let it run to the hang, then:
debug> pause
debug> bt
# Now you see the stuck frame
```

View File

@ -0,0 +1,374 @@
---
name: python-debugpy
description: Use when debugging Python code (run_agent.py, cli.py, tui_gateway, tests, scripts) with real breakpoints, stepping, scope inspection, and post-mortem analysis. Covers `pdb` for interactive REPL debugging and `debugpy` for remote/headless DAP-driven sessions.
version: 1.0.0
author: Hermes Agent
license: MIT
metadata:
hermes:
tags: [debugging, python, pdb, debugpy, breakpoints, dap, post-mortem]
related_skills: [systematic-debugging, node-inspect-debugger, debugging-hermes-tui-commands]
---
# Python Debugger (pdb + debugpy)
## Overview
Three tools, picked by situation:
| Tool | When |
|---|---|
| **`breakpoint()` + pdb** | Local, interactive, simplest. Add `breakpoint()` in the source, run normally, get a REPL at that line. |
| **`python -m pdb`** | Launch an existing script under pdb with no source edits. Useful for quick poking. |
| **`debugpy`** | Remote / headless / "attach to already-running process." Talks DAP, scriptable from terminal, works for long-lived processes (gateway, daemon, PTY children). |
**Start with `breakpoint()`.** It's the cheapest thing that works.
## When to Use
- A test fails and the traceback doesn't reveal why a value is wrong
- You need to step through a function and watch a collection mutate
- A long-running process (hermes gateway, tui_gateway) misbehaves and you can't restart it
- Post-mortem: an exception fired in prod-ish code and you want to inspect locals at the crash site
- A subprocess / child (Python `_SlashWorker`, PTY bridge worker) is the actual bug site
**Don't use for:** things `print()` / `logging.debug` solve in under a minute, or things `pytest -vv --tb=long --showlocals` already reveals.
## pdb Quick Reference
Inside any pdb prompt (`(Pdb)`):
| Command | Action |
|---|---|
| `h` / `h cmd` | help |
| `n` | next line (step over) |
| `s` | step into |
| `r` | return from current function |
| `c` | continue |
| `unt N` | continue until line N |
| `j N` | jump to line N (same function only) |
| `l` / `ll` | list source around current line / full function |
| `w` | where (stack trace) |
| `u` / `d` | move up / down in the stack |
| `a` | print args of the current function |
| `p expr` / `pp expr` | print / pretty-print expression |
| `display expr` | auto-print expr on every stop |
| `b file:line` | set breakpoint |
| `b func` | break on function entry |
| `b file:line, cond` | conditional breakpoint |
| `cl N` | clear breakpoint N |
| `tbreak file:line` | one-shot breakpoint |
| `!stmt` | execute arbitrary Python (assignments included) |
| `interact` | drop into full Python REPL in current scope (Ctrl+D to exit) |
| `q` | quit |
The `interact` command is the most powerful — you can import anything, inspect complex objects, even call methods that mutate state. Locals are read-only by default; use `!x = 42` from the `(Pdb)` prompt to mutate.
## Recipe 1: Local breakpoint
Easiest. Edit the file:
```python
def compute(x, y):
result = some_helper(x)
breakpoint() # <-- drops into pdb here
return result + y
```
Run the code normally. You land at the `breakpoint()` line with full access to locals.
**Don't forget to remove `breakpoint()` before committing.** Use `git diff` or a pre-commit grep:
```bash
rg -n 'breakpoint\(\)' --type py
```
## Recipe 2: Launch a script under pdb (no source edits)
```bash
python -m pdb path/to/script.py arg1 arg2
# Lands at first line of script
(Pdb) b path/to/script.py:42
(Pdb) c
```
## Recipe 3: Debug a pytest test
The hermes test runner and pytest both support this:
```bash
# Drop to pdb on failure (or on any raised exception):
scripts/run_tests.sh tests/path/to/test_file.py::test_name --pdb
# Drop to pdb at the START of the test:
scripts/run_tests.sh tests/path/to/test_file.py::test_name --trace
# Show locals in tracebacks without pdb:
scripts/run_tests.sh tests/path/to/test_file.py --showlocals --tb=long
```
Note: `scripts/run_tests.sh` uses xdist (`-n 4`) by default, and pdb does NOT work under xdist. Add `-p no:xdist` or run a single test with `-n 0`:
```bash
scripts/run_tests.sh tests/foo_test.py::test_bar --pdb -p no:xdist
# or
source .venv/bin/activate
python -m pytest tests/foo_test.py::test_bar --pdb
```
This bypasses the hermetic-env guarantees — fine for debugging, but re-run under the wrapper to confirm before pushing.
## Recipe 4: Post-mortem on any exception
```python
import pdb, sys
try:
run_the_thing()
except Exception:
pdb.post_mortem(sys.exc_info()[2])
```
Or wrap a whole script:
```bash
python -m pdb -c continue script.py
# When it crashes, pdb catches it and you're in the frame of the exception
```
Or set a global hook in a repl/jupyter:
```python
import sys
def excepthook(etype, value, tb):
import pdb; pdb.post_mortem(tb)
sys.excepthook = excepthook
```
## Recipe 5: Remote debug with debugpy (attach to running process)
For long-lived processes: Hermes gateway, tui_gateway, a daemon, a process that's already misbehaving and can't be restarted clean.
### Setup
```bash
source /home/bb/hermes-agent/.venv/bin/activate
pip install debugpy
```
### Pattern A: Source-edit — process waits for debugger at launch
Add near the top of the entry point (or inside the function you want to debug):
```python
import debugpy
debugpy.listen(("127.0.0.1", 5678))
print("debugpy listening on 5678, waiting for client...", flush=True)
debugpy.wait_for_client()
debugpy.breakpoint() # optional: pause immediately once attached
```
Start the process; it blocks on `wait_for_client()`.
### Pattern B: No source edit — launch with `-m debugpy`
```bash
python -m debugpy --listen 127.0.0.1:5678 --wait-for-client your_script.py arg1
```
Equivalent for module entry:
```bash
python -m debugpy --listen 127.0.0.1:5678 --wait-for-client -m your.module
```
### Pattern C: Attach to an already-running process
Needs the PID and debugpy preinstalled in the target's environment:
```bash
python -m debugpy --listen 127.0.0.1:5678 --pid <pid>
# debugpy injects itself into the process. Then attach a client as below.
```
Some kernels/security configs block the ptrace-based injection (`/proc/sys/kernel/yama/ptrace_scope`). Fix with:
```bash
echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope
```
### Connecting a client from the terminal
The easiest terminal-side DAP client is VS Code CLI or a small script. From inside Hermes you have two practical options:
**Option 1: `debugpy`'s own CLI REPL** — not an official feature, but a tiny DAP client script:
```python
# /tmp/dap_client.py
import socket, json, itertools, time, sys
HOST, PORT = "127.0.0.1", 5678
s = socket.create_connection((HOST, PORT))
seq = itertools.count(1)
def send(msg):
msg["seq"] = next(seq)
body = json.dumps(msg).encode()
s.sendall(f"Content-Length: {len(body)}\r\n\r\n".encode() + body)
def recv():
header = b""
while b"\r\n\r\n" not in header:
header += s.recv(1)
length = int(header.decode().split("Content-Length:")[1].split("\r\n")[0].strip())
body = b""
while len(body) < length:
body += s.recv(length - len(body))
return json.loads(body)
send({"type": "request", "command": "initialize", "arguments": {"adapterID": "python"}})
print(recv())
send({"type": "request", "command": "attach", "arguments": {}})
print(recv())
send({"type": "request", "command": "setBreakpoints",
"arguments": {"source": {"path": sys.argv[1]},
"breakpoints": [{"line": int(sys.argv[2])}]}})
print(recv())
send({"type": "request", "command": "configurationDone"})
# ... loop reading events and sending continue/stepIn/etc.
```
This is fine for one-off automation but painful as an interactive UX.
**Option 2: Attach from VS Code / Cursor / Zed** — if the user has one open, they can add a `launch.json`:
```json
{
"name": "Attach to Hermes",
"type": "debugpy",
"request": "attach",
"connect": { "host": "127.0.0.1", "port": 5678 },
"justMyCode": false,
"pathMappings": [
{ "localRoot": "${workspaceFolder}", "remoteRoot": "/home/bb/hermes-agent" }
]
}
```
**Option 3: Ditch DAP, use `remote-pdb`** — usually what you actually want from a terminal agent:
```bash
pip install remote-pdb
```
In your code:
```python
from remote_pdb import set_trace
set_trace(host="127.0.0.1", port=4444) # blocks until connection
```
Then from the terminal:
```bash
nc 127.0.0.1 4444
# You get a (Pdb) prompt exactly as if debugging locally.
```
`remote-pdb` is the cleanest agent-friendly choice when `debugpy`'s DAP protocol is overkill. Use `debugpy` only when you actually need IDE integration.
## Debugging Hermes-specific Processes
### Tests
See Recipe 3. Always add `-p no:xdist` or run single tests without xdist.
### `run_agent.py` / CLI — one-shot
Easiest: add `breakpoint()` near the suspect line, then run `hermes` normally. Control returns to your terminal at the pause point.
### `tui_gateway` subprocess (spawned by `hermes --tui`)
The gateway runs as a child of the Node TUI. Options:
**A. Source-edit the gateway:**
```python
# tui_gateway/server.py near the top of serve()
import debugpy
debugpy.listen(("127.0.0.1", 5678))
debugpy.wait_for_client()
```
Start `hermes --tui`. The TUI will appear frozen (its backend is waiting). Attach a client; execution resumes when you `continue`.
**B. Use `remote-pdb` at a specific handler:**
```python
from remote_pdb import set_trace
set_trace(host="127.0.0.1", port=4444) # in the RPC handler you want to trap
```
Trigger the matching slash command from the TUI, then `nc 127.0.0.1 4444` in another terminal.
### `_SlashWorker` subprocess
Same pattern — `remote-pdb` with `set_trace()` inside the worker's `exec` path. The worker is persistent across slash commands, so the first trigger blocks until you connect; subsequent slash commands pass through normally unless you re-arm.
### Gateway (`gateway/run.py`)
Long-lived. Use `remote-pdb` at a handler, or `debugpy` with `--wait-for-client` if you're restarting the gateway anyway.
## Common Pitfalls
1. **pdb under pytest-xdist silently does nothing.** You won't see the prompt, the test just hangs. Always use `-p no:xdist` or `-n 0`.
2. **`breakpoint()` in CI / non-TTY contexts hangs the process.** Safe locally; never commit it. Add a pre-commit grep as a safety net.
3. **`PYTHONBREAKPOINT=0`** disables all `breakpoint()` calls. Check the env if your breakpoint isn't hitting:
```bash
echo $PYTHONBREAKPOINT
```
4. **`debugpy.listen` blocks only if you also call `wait_for_client()`.** Without it, execution continues and your first breakpoint may fire before the client is attached.
5. **Attach to PID fails on hardened kernels.** `ptrace_scope=1` (Ubuntu default) allows only same-user ptrace of child processes. Workaround: `echo 0 > /proc/sys/kernel/yama/ptrace_scope` (needs root) or launch under `debugpy` from the start.
6. **Threads.** `pdb` only debugs the current thread. For multithreaded code, use `debugpy` (thread-aware DAP) or set `threading.settrace()` per thread.
7. **asyncio.** `pdb` works in coroutines but `await` inside pdb requires Python 3.13+ or `await` from `interact` mode on older versions. For 3.11/3.12, use `asyncio.run_coroutine_threadsafe` tricks or `!stmt`-based awaits via `asyncio.ensure_future`.
8. **`scripts/run_tests.sh` strips credentials and sets `HOME=<tmpdir>`.** If your bug depends on user config or real API keys, it won't reproduce under the wrapper. Debug with raw `pytest` first to repro, then re-confirm under the wrapper.
9. **Forking / multiprocessing.** pdb does not follow forks. Each child needs its own `breakpoint()` or `set_trace()`. For Hermes subagents, debug one process at a time.
## Verification Checklist
- [ ] After `pip install debugpy`, confirm: `python -c "import debugpy; print(debugpy.__version__)"`
- [ ] For remote debug, confirm the port is actually listening: `ss -tlnp | grep 5678`
- [ ] First breakpoint actually hits (if it doesn't, you likely have `PYTHONBREAKPOINT=0`, you're under xdist, or execution finished before attach)
- [ ] `where` / `w` shows the expected call stack
- [ ] Post-debug cleanup: no stray `breakpoint()` / `set_trace()` in committed code
```bash
rg -n 'breakpoint\(\)|set_trace\(|debugpy\.listen' --type py
```
## One-Shot Recipes
**"Why is this dict missing a key?"**
```python
# add above the KeyError site
breakpoint()
# then in pdb:
(Pdb) pp d
(Pdb) pp list(d.keys())
(Pdb) w # how did we get here
```
**"This test passes in isolation but fails in the suite."**
```bash
scripts/run_tests.sh tests/the_test.py --pdb -p no:xdist
# But if it only fails WITH other tests:
source .venv/bin/activate
python -m pytest tests/ -x --pdb -p no:xdist
# Now it pdb-traps at the exact failing test after state accumulated.
```
**"My async handler deadlocks."**
```python
# Add at handler entry
import remote_pdb; remote_pdb.set_trace(host="127.0.0.1", port=4444)
```
Trigger the handler. `nc 127.0.0.1 4444`, then `w` to see the suspended frame, `!import asyncio; asyncio.all_tasks()` to see what else is pending.
**"Post-mortem on a crash in an Ink child process / subprocess."**
```bash
PYTHONFAULTHANDLER=1 python -m pdb -c continue path/to/entrypoint.py
# On crash, pdb lands at the frame of the exception with full locals
```

View File

@ -145,6 +145,7 @@ def test_launch_tui_exports_model_and_provider(monkeypatch, main_mod):
assert env["HERMES_INFERENCE_MODEL"] == "nous/hermes-test"
assert env["HERMES_TUI_PROVIDER"] == "nous"
assert env["HERMES_INFERENCE_PROVIDER"] == "nous"
assert env["NODE_ENV"] == "production"
def test_print_tui_exit_summary_includes_resume_and_token_totals(monkeypatch, capsys):

View File

@ -222,6 +222,35 @@ class TestMessageStorage:
assert conv[0] == {"role": "user", "content": "Hello"}
assert conv[1] == {"role": "assistant", "content": "Hi!"}
def test_get_messages_as_conversation_includes_ancestor_chain(self, db):
    """With include_ancestors=True the child session's conversation starts
    with the parent session's messages, in root-to-tip order."""
    db.create_session("root", "tui")
    for role, content in (("user", "first prompt"), ("assistant", "first answer")):
        db.append_message("root", role=role, content=content)

    db.create_session("child", "tui", parent_session_id="root")
    for role, content in (("user", "second prompt"), ("assistant", "second answer")):
        db.append_message("child", role=role, content=content)

    conversation = db.get_messages_as_conversation("child", include_ancestors=True)

    assert [message["content"] for message in conversation] == [
        "first prompt",
        "first answer",
        "second prompt",
        "second answer",
    ]
def test_get_messages_as_conversation_avoids_repeated_resume_prompts_from_ancestors(self, db):
    """A user prompt duplicated inside the ancestor chain shows up once in
    the reconstructed conversation, followed by the child's own prompt."""
    db.create_session("root", "tui")
    db.append_message("root", role="user", content="same prompt")
    db.append_message("root", role="user", content="same prompt")
    db.append_message("root", role="assistant", content="answer")

    db.create_session("child", "tui", parent_session_id="root")
    db.append_message("child", role="user", content="next prompt")

    conversation = db.get_messages_as_conversation("child", include_ancestors=True)

    user_prompts = [
        message["content"] for message in conversation if message["role"] == "user"
    ]
    assert user_prompts == ["same prompt", "next prompt"]
def test_finish_reason_stored(self, db):
db.create_session(session_id="s1", source="cli")
db.append_message("s1", role="assistant", content="Done", finish_reason="stop")

View File

@ -59,6 +59,69 @@ def test_write_json_returns_false_on_broken_pipe(monkeypatch):
assert server.write_json({"ok": True}) is False
def test_history_to_messages_preserves_tool_calls_for_resume_display():
    """Converting stored history for resume display keeps a compact tool
    entry (name + resume context) in place of the raw tool-call turns."""
    tool_call = {
        "id": "call_1",
        "function": {
            "name": "search_files",
            "arguments": json.dumps({"pattern": "resume"}),
        },
    }
    history = [
        {"role": "user", "content": "first prompt"},
        {"role": "assistant", "content": "", "tool_calls": [tool_call]},
        {"role": "tool", "content": "{}", "tool_call_id": "call_1"},
        {"role": "assistant", "content": "first answer"},
        {"role": "user", "content": "second prompt"},
    ]

    expected = [
        {"role": "user", "text": "first prompt"},
        {"context": "resume", "name": "search_files", "role": "tool"},
        {"role": "assistant", "text": "first answer"},
        {"role": "user", "text": "second prompt"},
    ]
    assert server._history_to_messages(history) == expected
def test_session_resume_uses_parent_lineage_for_display(monkeypatch):
    """session.resume should fetch history twice — once without ancestors
    (agent context) and once with ancestors (display) — and show the
    ancestor-inclusive messages to the user."""
    captured = {}

    class FakeDB:
        def get_session(self, target):
            return {"id": target}

        def reopen_session(self, target):
            captured["reopened"] = target

        def get_messages_as_conversation(self, target, include_ancestors=False):
            captured.setdefault("history_calls", []).append((target, include_ancestors))
            if include_ancestors:
                return [
                    {"role": "user", "content": "root prompt"},
                    {"role": "assistant", "content": "root answer"},
                ]
            return [{"role": "user", "content": "tip prompt"}]

    monkeypatch.setattr(server, "_get_db", lambda: FakeDB())
    monkeypatch.setattr(server, "_enable_gateway_prompts", lambda: None)
    monkeypatch.setattr(server, "_set_session_context", lambda target: [])
    monkeypatch.setattr(server, "_clear_session_context", lambda tokens: None)
    monkeypatch.setattr(
        server, "_make_agent", lambda *args, **kwargs: types.SimpleNamespace(model="test")
    )
    monkeypatch.setattr(
        server, "_session_info", lambda agent: {"model": "test", "tools": {}, "skills": {}}
    )
    monkeypatch.setattr(
        server, "_init_session", lambda sid, key, agent, history, cols=80: None
    )

    resp = server.handle_request(
        {"id": "1", "method": "session.resume", "params": {"session_id": "tip"}}
    )

    assert resp["result"]["messages"] == [
        {"role": "user", "text": "root prompt"},
        {"role": "assistant", "text": "root answer"},
    ]
    assert captured["history_calls"] == [("tip", False), ("tip", True)]
def test_status_callback_emits_kind_and_text():
with patch("tui_gateway.server._emit") as emit:
cb = server._agent_cbs("sid")["status_callback"]
@ -347,6 +410,35 @@ def test_complete_slash_includes_provider_alias():
assert any(item["text"] == "provider" for item in resp["result"]["items"])
def test_complete_slash_includes_tui_details_command():
    """Typing '/det' offers the /details command among slash completions."""
    resp = server.handle_request(
        {"id": "1", "method": "complete.slash", "params": {"text": "/det"}}
    )
    completion_texts = [item["text"] for item in resp["result"]["items"]]
    assert "/details" in completion_texts
def test_complete_slash_details_args():
    """/details completions: root offers space-prefixed items, a partial
    word offers sections, and 'section <prefix>' offers matching modes."""

    def complete(req_id, text):
        return server.handle_request(
            {"id": req_id, "method": "complete.slash", "params": {"text": text}}
        )

    resp_root = complete("0", "/details")
    resp_section = complete("1", "/details t")
    resp_mode = complete("2", "/details thinking e")

    assert resp_root["result"]["replace_from"] == len("/details")
    root_texts = {item["text"] for item in resp_root["result"]["items"]}
    assert " thinking" in root_texts
    section_texts = {item["text"] for item in resp_section["result"]["items"]}
    assert "thinking" in section_texts
    mode_texts = {item["text"] for item in resp_mode["result"]["items"]}
    assert "expanded" in mode_texts
def test_config_set_reasoning_updates_live_session_and_agent(tmp_path, monkeypatch):
monkeypatch.setattr(server, "_hermes_home", tmp_path)
agent = types.SimpleNamespace(reasoning_config=None)

View File

@ -124,6 +124,7 @@ _stdout_lock = threading.Lock()
_cfg_lock = threading.Lock()
_cfg_cache: dict | None = None
_cfg_mtime: float | None = None
_cfg_path = None
_SLASH_WORKER_TIMEOUT_S = max(
5.0, float(os.environ.get("HERMES_TUI_SLASH_TIMEOUT_S", "45") or 45)
)
@ -443,14 +444,14 @@ def _normalize_completion_path(path_part: str) -> str:
def _load_cfg() -> dict:
global _cfg_cache, _cfg_mtime
global _cfg_cache, _cfg_mtime, _cfg_path
try:
import yaml
p = _hermes_home / "config.yaml"
mtime = p.stat().st_mtime if p.exists() else None
with _cfg_lock:
if _cfg_cache is not None and _cfg_mtime == mtime:
if _cfg_cache is not None and _cfg_mtime == mtime and _cfg_path == p:
return copy.deepcopy(_cfg_cache)
if p.exists():
with open(p) as f:
@ -460,6 +461,7 @@ def _load_cfg() -> dict:
with _cfg_lock:
_cfg_cache = copy.deepcopy(data)
_cfg_mtime = mtime
_cfg_path = p
return data
except Exception:
pass
@ -467,7 +469,7 @@ def _load_cfg() -> dict:
def _save_cfg(cfg: dict):
global _cfg_cache, _cfg_mtime
global _cfg_cache, _cfg_mtime, _cfg_path
import yaml
path = _hermes_home / "config.yaml"
@ -475,6 +477,7 @@ def _save_cfg(cfg: dict):
yaml.safe_dump(cfg, f)
with _cfg_lock:
_cfg_cache = copy.deepcopy(cfg)
_cfg_path = path
try:
_cfg_mtime = path.stat().st_mtime
except Exception:
@ -913,8 +916,16 @@ def _probe_config_health(cfg: dict) -> str:
def _session_info(agent) -> dict:
reasoning_config = getattr(agent, "reasoning_config", None)
reasoning_effort = ""
if isinstance(reasoning_config, dict) and reasoning_config.get("enabled") is not False:
reasoning_effort = str(reasoning_config.get("effort", "") or "")
service_tier = getattr(agent, "service_tier", None) or ""
info: dict = {
"model": getattr(agent, "model", ""),
"reasoning_effort": reasoning_effort,
"service_tier": service_tier,
"fast": service_tier == "priority",
"tools": {},
"skills": {},
"cwd": os.getcwd(),
@ -1013,7 +1024,7 @@ def _tool_summary(name: str, result: str, duration_s: float | None) -> str | Non
if n is not None:
text = f"Extracted {n} {'page' if n == 1 else 'pages'}"
return f"{text or 'Completed'}{suffix}" if (text or dur) else None
return f"{text}{suffix}" if text else None
def _on_tool_start(sid: str, tool_call_id: str, name: str, args: dict):
@ -1029,11 +1040,9 @@ def _on_tool_start(sid: str, tool_call_id: str, name: str, args: dict):
pass
session.setdefault("tool_started_at", {})[tool_call_id] = time.time()
if _tool_progress_enabled(sid):
_emit(
"tool.start",
sid,
{"tool_id": tool_call_id, "name": name, "context": _tool_ctx(name, args)},
)
# tool.complete is the source of truth for todos (full list from the
# tool result). args.todos here may be a partial merge update.
_emit("tool.start", sid, {"tool_id": tool_call_id, "name": name, "context": _tool_ctx(name, args)})
def _on_tool_complete(sid: str, tool_call_id: str, name: str, args: dict, result: str):
@ -1050,6 +1059,13 @@ def _on_tool_complete(sid: str, tool_call_id: str, name: str, args: dict, result
summary = _tool_summary(name, result, duration_s)
if summary:
payload["summary"] = summary
if name == "todo":
try:
data = json.loads(result)
if isinstance(data, dict) and isinstance(data.get("todos"), list):
payload["todos"] = data.get("todos")
except Exception:
pass
try:
from agent.display import render_edit_diff_with_delta
@ -1690,7 +1706,8 @@ def _(rid, params: dict) -> dict:
try:
db.reopen_session(target)
history = db.get_messages_as_conversation(target)
messages = _history_to_messages(history)
display_history = db.get_messages_as_conversation(target, include_ancestors=True)
messages = _history_to_messages(display_history)
tokens = _set_session_context(target)
try:
agent = _make_agent(sid, target, session_id=target)
@ -1738,11 +1755,20 @@ def _(rid, params: dict) -> dict:
@method("session.history")
def _(rid, params: dict) -> dict:
session, err = _sess(params, rid)
return err or _ok(
if err:
return err
history = list(session.get("history", []))
db = _get_db()
if db is not None and session.get("session_key"):
try:
history = db.get_messages_as_conversation(session["session_key"], include_ancestors=True)
except Exception:
pass
return _ok(
rid,
{
"count": len(session.get("history", [])),
"messages": _history_to_messages(list(session.get("history", []))),
"count": len(history),
"messages": _history_to_messages(history),
},
)
@ -3680,6 +3706,84 @@ def _(rid, params: dict) -> dict:
return _ok(rid, {"items": items})
def _details_completion_item(value: str, meta: str = "") -> dict:
return {"text": value, "display": value, "meta": meta}
def _details_root_completion_item(value: str, meta: str, needs_leading_space: bool) -> dict:
return _details_completion_item(
f" {value}" if needs_leading_space else value,
meta,
)
def _details_completions(text: str) -> list[dict] | None:
if not text.lower().startswith("/details"):
return None
stripped = text.strip()
if stripped and not "/details".startswith(stripped.lower().split()[0]):
return None
body = text[len("/details"):]
if body.startswith(" "):
body = body[1:]
parts = body.split()
has_trailing_space = text.endswith(" ")
sections = ("thinking", "tools", "subagents", "activity")
modes = ("hidden", "collapsed", "expanded")
if not body or (len(parts) == 0 and has_trailing_space):
return [
*[
_details_root_completion_item(mode, "global mode", not has_trailing_space)
for mode in modes
],
_details_root_completion_item("cycle", "cycle global mode", not has_trailing_space),
*[
_details_root_completion_item(section, "section override", not has_trailing_space)
for section in sections
],
]
if len(parts) == 1 and not has_trailing_space:
prefix = parts[0].lower()
candidates = [*modes, "cycle", *sections]
return [
_details_completion_item(
candidate,
(
"section override"
if candidate in sections
else "cycle global mode"
if candidate == "cycle"
else "global mode"
),
)
for candidate in candidates
if candidate.startswith(prefix) and candidate != prefix
]
if len(parts) == 1 and has_trailing_space and parts[0].lower() in sections:
return [
*[_details_completion_item(mode, f"set {parts[0].lower()}") for mode in modes],
_details_completion_item("reset", f"clear {parts[0].lower()} override"),
]
if len(parts) == 2 and not has_trailing_space and parts[0].lower() in sections:
prefix = parts[1].lower()
return [
_details_completion_item(
candidate,
f"clear {parts[0].lower()} override" if candidate == "reset" else f"set {parts[0].lower()}",
)
for candidate in (*modes, "reset")
if candidate.startswith(prefix) and candidate != prefix
]
return []
@method("complete.slash")
def _(rid, params: dict) -> dict:
text = params.get("text", "")
@ -3712,6 +3816,11 @@ def _(rid, params: dict) -> dict:
"display": "/compact",
"meta": "Toggle compact display mode",
},
{
"text": "/details",
"display": "/details",
"meta": "Control agent detail visibility",
},
{
"text": "/logs",
"display": "/logs",
@ -3723,6 +3832,17 @@ def _(rid, params: dict) -> dict:
item["text"] == extra["text"] for item in items
):
items.append(extra)
details_items = _details_completions(text)
if details_items is not None:
return _ok(
rid,
{
"items": details_items,
"replace_from": text.rfind(" ") + 1 if " " in text else len(text),
},
)
return _ok(
rid,
{"items": items, "replace_from": text.rfind(" ") + 1 if " " in text else 1},

View File

@ -0,0 +1,15 @@
module.exports = {
assumptions: {
setPublicClassFields: true
},
plugins: [
[
'babel-plugin-react-compiler',
{
target: '19',
sources: filename => Boolean(filename && !filename.includes('node_modules'))
}
]
],
babelrc: false
}

View File

@ -3,6 +3,7 @@ import typescriptEslint from '@typescript-eslint/eslint-plugin'
import typescriptParser from '@typescript-eslint/parser'
import perfectionist from 'eslint-plugin-perfectionist'
import reactPlugin from 'eslint-plugin-react'
import reactCompiler from 'eslint-plugin-react-compiler'
import hooksPlugin from 'eslint-plugin-react-hooks'
import unusedImports from 'eslint-plugin-unused-imports'
import globals from 'globals'
@ -43,6 +44,7 @@ export default [
'custom-rules': customRules,
perfectionist,
react: reactPlugin,
'react-compiler': reactCompiler,
'react-hooks': hooksPlugin,
'unused-imports': unusedImports
},
@ -53,6 +55,7 @@ export default [
'@typescript-eslint/no-unused-vars': 'off',
'no-undef': 'off',
'no-unused-vars': 'off',
'react-compiler/react-compiler': 'warn',
'padding-line-between-statements': [
1,
{ blankLine: 'always', next: ['block-like', 'block', 'return', 'if', 'class', 'continue', 'debugger', 'break', 'multiline-const', 'multiline-let'], prev: '*' },
@ -89,6 +92,7 @@ export default [
'no-constant-condition': 'off',
'no-empty': 'off',
'no-redeclare': 'off',
'react-compiler/react-compiler': 'off',
'react-hooks/exhaustive-deps': 'off'
}
},

601
ui-tui/package-lock.json generated
View File

@ -16,14 +16,19 @@
"unicode-animations": "^1.0.3"
},
"devDependencies": {
"@babel/cli": "^7.28.6",
"@babel/core": "^7.29.0",
"@babel/plugin-syntax-jsx": "^7.28.6",
"@eslint/js": "^9",
"@types/node": "^25.5.0",
"@types/react": "^19.2.14",
"@typescript-eslint/eslint-plugin": "^8",
"@typescript-eslint/parser": "^8",
"babel-plugin-react-compiler": "^1.0.0",
"eslint": "^9",
"eslint-plugin-perfectionist": "^5",
"eslint-plugin-react": "^7",
"eslint-plugin-react-compiler": "^19.1.0-rc.2",
"eslint-plugin-react-hooks": "^7",
"eslint-plugin-unused-imports": "^4",
"globals": "^16",
@ -58,6 +63,36 @@
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/@babel/cli": {
"version": "7.28.6",
"resolved": "https://registry.npmjs.org/@babel/cli/-/cli-7.28.6.tgz",
"integrity": "sha512-6EUNcuBbNkj08Oj4gAZ+BUU8yLCgKzgVX4gaTh09Ya2C8ICM4P+G30g4m3akRxSYAp3A/gnWchrNst7px4/nUQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jridgewell/trace-mapping": "^0.3.28",
"commander": "^6.2.0",
"convert-source-map": "^2.0.0",
"fs-readdir-recursive": "^1.1.0",
"glob": "^7.2.0",
"make-dir": "^2.1.0",
"slash": "^2.0.0"
},
"bin": {
"babel": "bin/babel.js",
"babel-external-helpers": "bin/babel-external-helpers.js"
},
"engines": {
"node": ">=6.9.0"
},
"optionalDependencies": {
"@nicolo-ribaudo/chokidar-2": "2.1.8-no-fsevents.3",
"chokidar": "^3.6.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/code-frame": {
"version": "7.29.0",
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz",
@ -141,6 +176,19 @@
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-annotate-as-pure": {
"version": "7.27.3",
"resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.27.3.tgz",
"integrity": "sha512-fXSwMQqitTGeHLBC08Eq5yXz2m37E4pJX1qAU1+2cNedz/ifv/bVXft90VeSav5nFO61EcNgwr0aJxbyPaWBPg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/types": "^7.27.3"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-compilation-targets": {
"version": "7.28.6",
"resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz",
@ -168,6 +216,38 @@
"semver": "bin/semver.js"
}
},
"node_modules/@babel/helper-create-class-features-plugin": {
"version": "7.28.6",
"resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.28.6.tgz",
"integrity": "sha512-dTOdvsjnG3xNT9Y0AUg1wAl38y+4Rl4sf9caSQZOXdNqVn+H+HbbJ4IyyHaIqNR6SW9oJpA/RuRjsjCw2IdIow==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-annotate-as-pure": "^7.27.3",
"@babel/helper-member-expression-to-functions": "^7.28.5",
"@babel/helper-optimise-call-expression": "^7.27.1",
"@babel/helper-replace-supers": "^7.28.6",
"@babel/helper-skip-transparent-expression-wrappers": "^7.27.1",
"@babel/traverse": "^7.28.6",
"semver": "^6.3.1"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0"
}
},
"node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"dev": true,
"license": "ISC",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/@babel/helper-globals": {
"version": "7.28.0",
"resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz",
@ -178,6 +258,20 @@
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-member-expression-to-functions": {
"version": "7.28.5",
"resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.28.5.tgz",
"integrity": "sha512-cwM7SBRZcPCLgl8a7cY0soT1SptSzAlMH39vwiRpOQkJlh53r5hdHwLSCZpQdVLT39sZt+CRpNwYG4Y2v77atg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/traverse": "^7.28.5",
"@babel/types": "^7.28.5"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-module-imports": {
"version": "7.28.6",
"resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz",
@ -210,6 +304,61 @@
"@babel/core": "^7.0.0"
}
},
"node_modules/@babel/helper-optimise-call-expression": {
"version": "7.27.1",
"resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.27.1.tgz",
"integrity": "sha512-URMGH08NzYFhubNSGJrpUEphGKQwMQYBySzat5cAByY1/YgIRkULnIy3tAMeszlL/so2HbeilYloUmSpd7GdVw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/types": "^7.27.1"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-plugin-utils": {
"version": "7.28.6",
"resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz",
"integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-replace-supers": {
"version": "7.28.6",
"resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.28.6.tgz",
"integrity": "sha512-mq8e+laIk94/yFec3DxSjCRD2Z0TAjhVbEJY3UQrlwVo15Lmt7C2wAUbK4bjnTs4APkwsYLTahXRraQXhb1WCg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-member-expression-to-functions": "^7.28.5",
"@babel/helper-optimise-call-expression": "^7.27.1",
"@babel/traverse": "^7.28.6"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0"
}
},
"node_modules/@babel/helper-skip-transparent-expression-wrappers": {
"version": "7.27.1",
"resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.27.1.tgz",
"integrity": "sha512-Tub4ZKEXqbPjXgWLl2+3JpQAYBJ8+ikpQ2Ocj/q/r0LwE3UhENh7EUabyHjz2kCEsrRY83ew2DQdHluuiDQFzg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/traverse": "^7.27.1",
"@babel/types": "^7.27.1"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/helper-string-parser": {
"version": "7.27.1",
"resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
@ -270,6 +419,40 @@
"node": ">=6.0.0"
}
},
"node_modules/@babel/plugin-proposal-private-methods": {
"version": "7.18.6",
"resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.18.6.tgz",
"integrity": "sha512-nutsvktDItsNn4rpGItSNV2sz1XwS+nfU0Rg8aCx3W3NOKVzdMjJRu0O5OkgDp3ZGICSTbgRpxZoWsxoKRvbeA==",
"deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-private-methods instead.",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-create-class-features-plugin": "^7.18.6",
"@babel/helper-plugin-utils": "^7.18.6"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/plugin-syntax-jsx": {
"version": "7.28.6",
"resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.28.6.tgz",
"integrity": "sha512-wgEmr06G6sIpqr8YDwA2dSRTE3bJ+V0IfpzfSY3Lfgd7YWOaAdlykvJi13ZKBt8cZHfgH1IXN+CL656W3uUa4w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-plugin-utils": "^7.28.6"
},
"engines": {
"node": ">=6.9.0"
},
"peerDependencies": {
"@babel/core": "^7.0.0-0"
}
},
"node_modules/@babel/template": {
"version": "7.28.6",
"resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz",
@ -1156,6 +1339,14 @@
"@emnapi/runtime": "^1.7.1"
}
},
"node_modules/@nicolo-ribaudo/chokidar-2": {
"version": "2.1.8-no-fsevents.3",
"resolved": "https://registry.npmjs.org/@nicolo-ribaudo/chokidar-2/-/chokidar-2-2.1.8-no-fsevents.3.tgz",
"integrity": "sha512-s88O1aVtXftvp5bCPB7WnmXc5IwOZZ7YPuwNPt+GtOOXpPvad1LfbmjYv+qII7zP6RU2QGnqve27dnLycEnyEQ==",
"dev": true,
"license": "MIT",
"optional": true
},
"node_modules/@oxc-project/types": {
"version": "0.124.0",
"resolved": "https://registry.npmjs.org/@oxc-project/types/-/types-0.124.0.tgz",
@ -1952,6 +2143,35 @@
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/anymatch": {
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
"integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
"dev": true,
"license": "ISC",
"optional": true,
"dependencies": {
"normalize-path": "^3.0.0",
"picomatch": "^2.0.4"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/anymatch/node_modules/picomatch": {
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
"integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
"dev": true,
"license": "MIT",
"optional": true,
"engines": {
"node": ">=8.6"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/argparse": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
@ -2145,6 +2365,16 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/babel-plugin-react-compiler": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/babel-plugin-react-compiler/-/babel-plugin-react-compiler-1.0.0.tgz",
"integrity": "sha512-Ixm8tFfoKKIPYdCCKYTsqv+Fd4IJ0DQqMyEimo+pxUOMUR9cVPlwTrFt9Avu+3cb6Zp3mAzl+t1MrG2fxxKsxw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/types": "^7.26.0"
}
},
"node_modules/balanced-match": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz",
@ -2177,6 +2407,20 @@
"require-from-string": "^2.0.2"
}
},
"node_modules/binary-extensions": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz",
"integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==",
"dev": true,
"license": "MIT",
"optional": true,
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/brace-expansion": {
"version": "5.0.5",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.5.tgz",
@ -2190,6 +2434,20 @@
"node": "18 || 20 || >=22"
}
},
"node_modules/braces": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
"integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
"dev": true,
"license": "MIT",
"optional": true,
"dependencies": {
"fill-range": "^7.1.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/browserslist": {
"version": "4.28.2",
"resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.2.tgz",
@ -2332,6 +2590,46 @@
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
"node_modules/chokidar": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz",
"integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==",
"dev": true,
"license": "MIT",
"optional": true,
"dependencies": {
"anymatch": "~3.1.2",
"braces": "~3.0.2",
"glob-parent": "~5.1.2",
"is-binary-path": "~2.1.0",
"is-glob": "~4.0.1",
"normalize-path": "~3.0.0",
"readdirp": "~3.6.0"
},
"engines": {
"node": ">= 8.10.0"
},
"funding": {
"url": "https://paulmillr.com/funding/"
},
"optionalDependencies": {
"fsevents": "~2.3.2"
}
},
"node_modules/chokidar/node_modules/glob-parent": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"dev": true,
"license": "ISC",
"optional": true,
"dependencies": {
"is-glob": "^4.0.1"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/cli-boxes": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz",
@ -2407,6 +2705,16 @@
"dev": true,
"license": "MIT"
},
"node_modules/commander": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/commander/-/commander-6.2.1.tgz",
"integrity": "sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">= 6"
}
},
"node_modules/concat-map": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
@ -2999,6 +3307,50 @@
"eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8 || ^9.7"
}
},
"node_modules/eslint-plugin-react-compiler": {
"version": "19.1.0-rc.2",
"resolved": "https://registry.npmjs.org/eslint-plugin-react-compiler/-/eslint-plugin-react-compiler-19.1.0-rc.2.tgz",
"integrity": "sha512-oKalwDGcD+RX9mf3NEO4zOoUMeLvjSvcbbEOpquzmzqEEM2MQdp7/FY/Hx9NzmUwFzH1W9SKTz5fihfMldpEYw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/core": "^7.24.4",
"@babel/parser": "^7.24.4",
"@babel/plugin-proposal-private-methods": "^7.18.6",
"hermes-parser": "^0.25.1",
"zod": "^3.22.4",
"zod-validation-error": "^3.0.3"
},
"engines": {
"node": "^14.17.0 || ^16.0.0 || >= 18.0.0"
},
"peerDependencies": {
"eslint": ">=7"
}
},
"node_modules/eslint-plugin-react-compiler/node_modules/zod": {
"version": "3.25.76",
"resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz",
"integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==",
"dev": true,
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
},
"node_modules/eslint-plugin-react-compiler/node_modules/zod-validation-error": {
"version": "3.5.4",
"resolved": "https://registry.npmjs.org/zod-validation-error/-/zod-validation-error-3.5.4.tgz",
"integrity": "sha512-+hEiRIiPobgyuFlEojnqjJnhFvg4r/i3cqgcm67eehZf/WBaK3g6cD02YU9mtdVxZjv8CzCA9n/Rhrs3yAAvAw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=18.0.0"
},
"peerDependencies": {
"zod": "^3.24.4"
}
},
"node_modules/eslint-plugin-react-hooks": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-7.0.1.tgz",
@ -3309,6 +3661,20 @@
"node": ">=16.0.0"
}
},
"node_modules/fill-range": {
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
"integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
"dev": true,
"license": "MIT",
"optional": true,
"dependencies": {
"to-regex-range": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/find-up": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz",
@ -3363,6 +3729,20 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/fs-readdir-recursive": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/fs-readdir-recursive/-/fs-readdir-recursive-1.1.0.tgz",
"integrity": "sha512-GNanXlVr2pf02+sPN40XN8HG+ePaNcvM0q5mZBd668Obwb0yD5GiUbZOFgwn8kGMY6I3mdyDJzieUy3PTYyTRA==",
"dev": true,
"license": "MIT"
},
"node_modules/fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
"dev": true,
"license": "ISC"
},
"node_modules/fsevents": {
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
@ -3521,6 +3901,28 @@
"url": "https://github.com/privatenumber/get-tsconfig?sponsor=1"
}
},
"node_modules/glob": {
"version": "7.2.3",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
"deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me",
"dev": true,
"license": "ISC",
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
},
"engines": {
"node": "*"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/glob-parent": {
"version": "6.0.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
@ -3534,6 +3936,37 @@
"node": ">=10.13.0"
}
},
"node_modules/glob/node_modules/balanced-match": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
"dev": true,
"license": "MIT"
},
"node_modules/glob/node_modules/brace-expansion": {
"version": "1.1.14",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.14.tgz",
"integrity": "sha512-MWPGfDxnyzKU7rNOW9SP/c50vi3xrmrua/+6hfPbCS2ABNWfx24vPidzvC7krjU/RTo235sV776ymlsMtGKj8g==",
"dev": true,
"license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
}
},
"node_modules/glob/node_modules/minimatch": {
"version": "3.1.5",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.5.tgz",
"integrity": "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==",
"dev": true,
"license": "ISC",
"dependencies": {
"brace-expansion": "^1.1.7"
},
"engines": {
"node": "*"
}
},
"node_modules/globals": {
"version": "16.5.0",
"resolved": "https://registry.npmjs.org/globals/-/globals-16.5.0.tgz",
@ -3736,6 +4169,25 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/inflight": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
"deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.",
"dev": true,
"license": "ISC",
"dependencies": {
"once": "^1.3.0",
"wrappy": "1"
}
},
"node_modules/inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
"dev": true,
"license": "ISC"
},
"node_modules/ink": {
"version": "6.8.0",
"resolved": "https://registry.npmjs.org/ink/-/ink-6.8.0.tgz",
@ -3919,6 +4371,20 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-binary-path": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
"integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
"dev": true,
"license": "MIT",
"optional": true,
"dependencies": {
"binary-extensions": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/is-boolean-object": {
"version": "1.2.2",
"resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.2.tgz",
@ -4115,6 +4581,17 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/is-number": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
"dev": true,
"license": "MIT",
"optional": true,
"engines": {
"node": ">=0.12.0"
}
},
"node_modules/is-number-object": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.1.1.tgz",
@ -4745,6 +5222,30 @@
"@jridgewell/sourcemap-codec": "^1.5.5"
}
},
"node_modules/make-dir": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz",
"integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==",
"dev": true,
"license": "MIT",
"dependencies": {
"pify": "^4.0.1",
"semver": "^5.6.0"
},
"engines": {
"node": ">=6"
}
},
"node_modules/make-dir/node_modules/semver": {
"version": "5.7.2",
"resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz",
"integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==",
"dev": true,
"license": "ISC",
"bin": {
"semver": "bin/semver"
}
},
"node_modules/math-intrinsics": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
@ -4875,6 +5376,17 @@
"dev": true,
"license": "MIT"
},
"node_modules/normalize-path": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
"integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
"dev": true,
"license": "MIT",
"optional": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/object-assign": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
@ -4994,6 +5506,16 @@
],
"license": "MIT"
},
"node_modules/once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"dev": true,
"license": "ISC",
"dependencies": {
"wrappy": "1"
}
},
"node_modules/onetime": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
@ -5109,6 +5631,16 @@
"node": ">=8"
}
},
"node_modules/path-is-absolute": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/path-key": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
@ -5153,6 +5685,16 @@
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/pify": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz",
"integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/possible-typed-array-names": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz",
@ -5271,6 +5813,34 @@
"react": "^19.2.0"
}
},
"node_modules/readdirp": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
"integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
"dev": true,
"license": "MIT",
"optional": true,
"dependencies": {
"picomatch": "^2.2.1"
},
"engines": {
"node": ">=8.10.0"
}
},
"node_modules/readdirp/node_modules/picomatch": {
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
"integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
"dev": true,
"license": "MIT",
"optional": true,
"engines": {
"node": ">=8.6"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/reflect.getprototypeof": {
"version": "1.0.10",
"resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz",
@ -5652,6 +6222,16 @@
"integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
"license": "ISC"
},
"node_modules/slash": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz",
"integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/slice-ansi": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-8.0.0.tgz",
@ -5990,6 +6570,20 @@
"node": ">=14.0.0"
}
},
"node_modules/to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
"dev": true,
"license": "MIT",
"optional": true,
"dependencies": {
"is-number": "^7.0.0"
},
"engines": {
"node": ">=8.0"
}
},
"node_modules/ts-api-utils": {
"version": "2.5.0",
"resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.5.0.tgz",
@ -6607,6 +7201,13 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
"dev": true,
"license": "ISC"
},
"node_modules/ws": {
"version": "8.20.0",
"resolved": "https://registry.npmjs.org/ws/-/ws-8.20.0.tgz",

View File

@ -6,7 +6,8 @@
"scripts": {
"dev": "npm run build --prefix packages/hermes-ink && tsx --watch src/entry.tsx",
"start": "tsx src/entry.tsx",
"build": "npm run build --prefix packages/hermes-ink && tsc -p tsconfig.build.json && chmod +x dist/entry.js",
"build": "npm run build --prefix packages/hermes-ink && tsc -p tsconfig.build.json && npm run build:compile && chmod +x dist/entry.js",
"build:compile": "babel dist --out-dir dist --config-file ./babel.compiler.config.cjs --extensions .js --keep-file-extension",
"type-check": "tsc --noEmit -p tsconfig.json",
"lint": "eslint src/ packages/",
"lint:fix": "eslint src/ packages/ --fix",
@ -24,14 +25,19 @@
"unicode-animations": "^1.0.3"
},
"devDependencies": {
"@babel/cli": "^7.28.6",
"@babel/core": "^7.29.0",
"@babel/plugin-syntax-jsx": "^7.28.6",
"@eslint/js": "^9",
"@types/node": "^25.5.0",
"@types/react": "^19.2.14",
"@typescript-eslint/eslint-plugin": "^8",
"@typescript-eslint/parser": "^8",
"babel-plugin-react-compiler": "^1.0.0",
"eslint": "^9",
"eslint-plugin-perfectionist": "^5",
"eslint-plugin-react": "^7",
"eslint-plugin-react-compiler": "^19.1.0-rc.2",
"eslint-plugin-react-hooks": "^7",
"eslint-plugin-unused-imports": "^4",
"globals": "^16",

View File

@ -4,6 +4,8 @@ export type { StderrHandle } from './src/hooks/use-stderr.ts'
export { default as useStdout } from './src/hooks/use-stdout.ts'
export type { StdoutHandle } from './src/hooks/use-stdout.ts'
export { Ansi } from './src/ink/Ansi.tsx'
export { evictInkCaches } from './src/ink/cache-eviction.ts'
export type { EvictLevel, InkCacheSizes } from './src/ink/cache-eviction.ts'
export { AlternateScreen } from './src/ink/components/AlternateScreen.tsx'
export { default as Box } from './src/ink/components/Box.tsx'
export type { Props as BoxProps } from './src/ink/components/Box.tsx'

View File

@ -1,6 +1,7 @@
export { default as useStderr } from './hooks/use-stderr.js'
export { default as useStdout } from './hooks/use-stdout.js'
export { Ansi } from './ink/Ansi.js'
export { evictInkCaches, type EvictLevel, type InkCacheSizes } from './ink/cache-eviction.js'
export { AlternateScreen } from './ink/components/AlternateScreen.js'
export { default as Box } from './ink/components/Box.js'
export { default as Link } from './ink/components/Link.js'
@ -21,6 +22,8 @@ export { useTerminalFocus } from './ink/hooks/use-terminal-focus.js'
export { useTerminalTitle } from './ink/hooks/use-terminal-title.js'
export { useTerminalViewport } from './ink/hooks/use-terminal-viewport.js'
export { default as measureElement } from './ink/measure-element.js'
export { scrollFastPathStats, type ScrollFastPathStats } from './ink/render-node-to-output.js'
export { createRoot, default as render, renderSync } from './ink/root.js'
export { stringWidth } from './ink/stringWidth.js'
export { isXtermJs } from './ink/terminal.js'
export { default as TextInput, UncontrolledTextInput } from 'ink-text-input'

View File

@ -0,0 +1,45 @@
// Unified cache eviction for the four hot Ink module-level caches:
// - widthCache (stringWidth.ts)
// - wrapCache (wrap-text.ts)
// - sliceCache (sliceAnsi.ts)
// - lineWidthCache (line-width-cache.ts)
//
// Used by the host (TUI) under memory pressure or on session swap to drop
// content-keyed entries that won't recur. All caches are content-keyed
// (not session-keyed), so cross-session sharing is normally beneficial —
// only evict when memory tightens or when the user explicitly resets.
import { evictSliceCache, sliceCacheSize } from '../utils/sliceAnsi.js'
import { evictLineWidthCache, lineWidthCacheSize } from './line-width-cache.js'
import { evictWidthCache, widthCacheSize } from './stringWidth.js'
import { evictWrapCache, wrapCacheSize } from './wrap-text.js'
/** Entry counts for the four module-level Ink caches (see header comment). */
export interface InkCacheSizes {
  lineWidth: number
  slice: number
  width: number
  wrap: number
}

/** Snapshot the current size of each cache, for diagnostics/logging. */
function inkCacheSizes(): InkCacheSizes {
  return {
    lineWidth: lineWidthCacheSize(),
    slice: sliceCacheSize(),
    width: widthCacheSize(),
    wrap: wrapCacheSize()
  }
}
/** Eviction aggressiveness: 'half' keeps the newest 50% of each cache, 'all' clears them. */
export type EvictLevel = 'all' | 'half'

/**
 * Evict the four module-level Ink caches and report their post-eviction
 * sizes so the caller can observe the effect.
 */
export function evictInkCaches(level: EvictLevel = 'half'): InkCacheSizes {
  const keepRatio = level === 'all' ? 0 : 0.5
  evictWidthCache(keepRatio)
  evictWrapCache(keepRatio)
  evictSliceCache(keepRatio)
  evictLineWidthCache(keepRatio)
  return inkCacheSizes()
}

View File

@ -29,7 +29,7 @@ import {
FOCUS_IN,
FOCUS_OUT
} from '../termio/csi.js'
import { DBP, DFE, DISABLE_MOUSE_TRACKING, EBP, EFE, HIDE_CURSOR, SHOW_CURSOR } from '../termio/dec.js'
import { DBP, DFE, DISABLE_MOUSE_TRACKING, EBP, EFE, SHOW_CURSOR } from '../termio/dec.js'
import AppContext from './AppContext.js'
import { ClockProvider } from './ClockContext.js'
@ -205,12 +205,6 @@ export default class App extends PureComponent<Props, State> {
</TerminalSizeContext.Provider>
)
}
override componentDidMount() {
// In accessibility mode, keep the native cursor visible for screen magnifiers and other tools
if (this.props.stdout.isTTY) {
this.props.stdout.write(HIDE_CURSOR)
}
}
override componentWillUnmount() {
if (this.props.stdout.isTTY) {
this.props.stdout.write(SHOW_CURSOR)
@ -470,7 +464,7 @@ export default class App extends PureComponent<Props, State> {
}
if (this.props.stdout.isTTY) {
this.props.stdout.write(HIDE_CURSOR + EFE)
this.props.stdout.write(EFE)
}
this.inputEmitter.emit('resume')
@ -569,18 +563,17 @@ function processKeysInBatch(app: App, items: ParsedInput[], _unused1: undefined,
/** Exported for testing. Mutates app.props.selection and click/hover state. */
export function handleMouseEvent(app: App, m: ParsedMouse): void {
// Allow disabling click handling while keeping wheel scroll (which goes
// through the keybinding system as 'wheelup'/'wheeldown', not here).
if (isMouseClicksDisabled()) {
return
}
const sel = app.props.selection
// Terminal coords are 1-indexed; screen buffer is 0-indexed
const col = m.col - 1
const row = m.row - 1
const baseButton = m.button & 0x03
// Disable app click handling without blocking wheel/right-click dispatch.
if (isMouseClicksDisabled() && baseButton === 0) {
return
}
if (m.action === 'press') {
if ((m.button & 0x20) !== 0 && baseButton === 3) {
if (app.mouseCaptureTarget) {

View File

@ -38,6 +38,7 @@ export type ScrollBoxHandle = {
* padding). Used for drag-to-scroll edge detection.
*/
getViewportTop: () => number
getLastManualScrollAt: () => number
/**
* True when scroll is pinned to the bottom. Set by scrollToBottom, the
* initial stickyScroll attribute, and by the renderer when positional
@ -94,6 +95,7 @@ function ScrollBox({ children, ref, stickyScroll, ...style }: PropsWithChildren<
// forces a React render: sticky is attribute-observed, no DOM-only path.
const [, forceRender] = useState(0)
const listenersRef = useRef(new Set<() => void>())
const manualScrollAtRef = useRef(0)
const renderQueuedRef = useRef(false)
const notify = () => {
@ -135,6 +137,7 @@ function ScrollBox({ children, ref, stickyScroll, ...style }: PropsWithChildren<
// Explicit false overrides the DOM attribute so manual scroll
// breaks stickiness. Render code checks ?? precedence.
el.stickyScroll = false
manualScrollAtRef.current = Date.now()
el.pendingScrollDelta = undefined
el.scrollAnchor = undefined
el.scrollTop = Math.max(0, Math.floor(y))
@ -148,6 +151,7 @@ function ScrollBox({ children, ref, stickyScroll, ...style }: PropsWithChildren<
}
box.stickyScroll = false
manualScrollAtRef.current = Date.now()
box.pendingScrollDelta = undefined
box.scrollAnchor = {
el,
@ -163,11 +167,8 @@ function ScrollBox({ children, ref, stickyScroll, ...style }: PropsWithChildren<
}
el.stickyScroll = false
// Wheel input cancels any in-flight anchor seek — user override.
manualScrollAtRef.current = Date.now()
el.scrollAnchor = undefined
// Accumulate in pendingScrollDelta; renderer drains it at a capped
// rate so fast flicks show intermediate frames. Pure accumulator:
// scroll-up followed by scroll-down naturally cancels.
el.pendingScrollDelta = (el.pendingScrollDelta ?? 0) + Math.floor(dy)
scrollMutated(el)
},
@ -207,6 +208,9 @@ function ScrollBox({ children, ref, stickyScroll, ...style }: PropsWithChildren<
getViewportTop() {
return domRef.current?.scrollViewportTop ?? 0
},
getLastManualScrollAt() {
return manualScrollAtRef.current
},
isSticky() {
const el = domRef.current

View File

@ -11,7 +11,25 @@ function parseOne(sequence: string) {
return keys[0]!
}
describe('InputEvent macOS command modifiers', () => {
describe('enhanced keyboard modifier parsing', () => {
it('detects modified Enter sequences for multiline composer shortcuts', () => {
const shiftEnter = new InputEvent(parseOne('\u001b[13;2u'))
const ctrlEnter = new InputEvent(parseOne('\u001b[13;5u'))
const modifyOtherShiftEnter = new InputEvent(parseOne('\u001b[27;2;13~'))
expect(shiftEnter.key.return).toBe(true)
expect(shiftEnter.key.shift).toBe(true)
expect(shiftEnter.input).toBe('')
expect(ctrlEnter.key.return).toBe(true)
expect(ctrlEnter.key.ctrl).toBe(true)
expect(ctrlEnter.input).toBe('')
expect(modifyOtherShiftEnter.key.return).toBe(true)
expect(modifyOtherShiftEnter.key.shift).toBe(true)
expect(modifyOtherShiftEnter.input).toBe('')
})
it('preserves Cmd as super for kitty keyboard CSI-u sequences', () => {
const parsed = parseOne('\u001b[99;9u')
const event = new InputEvent(parsed)

View File

@ -2,6 +2,9 @@ import { nonAlphanumericKeys, type ParsedKey } from '../parse-keypress.js'
import { Event } from './event.js'
// Input string contributed by a special key sequence: 'space' yields a
// literal space; 'return' and 'escape' yield nothing (their key flags
// carry the information); any other name passes through unchanged.
const inputForSpecialSequence = (name: string): string => {
  if (name === 'space') {
    return ' '
  }
  if (name === 'return' || name === 'escape') {
    return ''
  }
  return name
}
export type Key = {
upArrow: boolean
downArrow: boolean
@ -116,11 +119,7 @@ function parseKey(keypress: ParsedKey): [Key, string] {
// so the raw "[57358u" doesn't leak into the prompt. See #38781.
input = ''
} else {
// 'space' → ' '; 'escape' → '' (key.escape carries it;
// processedAsSpecialSequence bypasses the nonAlphanumericKeys
// clear below, so we must handle it explicitly here);
// otherwise use key name.
input = keypress.name === 'space' ? ' ' : keypress.name === 'escape' ? '' : keypress.name
input = inputForSpecialSequence(keypress.name)
}
processedAsSpecialSequence = true
@ -138,7 +137,7 @@ function parseKey(keypress: ParsedKey): [Key, string] {
// guards against future terminal behavior.
input = ''
} else {
input = keypress.name === 'space' ? ' ' : keypress.name === 'escape' ? '' : keypress.name
input = inputForSpecialSequence(keypress.name)
}
processedAsSpecialSequence = true

View File

@ -46,6 +46,14 @@ export type FrameEvent = {
write: number
/** Pre-optimize patch count (proxy for how much changed this frame) */
patches: number
/** Post-optimize patch count. */
optimizedPatches: number
/** Bytes written to stdout this frame. */
writeBytes: number
/** Whether stdout.write returned false. */
backpressure: boolean
/** Previous stdout.write callback latency; 0 if drained before next frame. */
prevFrameDrainMs: number
/** yoga calculateLayout() time (runs in resetAfterCommit, before onRender) */
yoga: number
/** React reconcile time: scrollMutated → resetAfterCommit. 0 if no commit. */

View File

@ -19,6 +19,7 @@ import App from './components/App.js'
import type { CursorDeclaration, CursorDeclarationSetter } from './components/CursorDeclarationContext.js'
import { FRAME_INTERVAL_MS } from './constants.js'
import * as dom from './dom.js'
import { markDirty } from './dom.js'
import { KeyboardEvent } from './events/keyboard-event.js'
import { FocusManager } from './focus.js'
import { emptyFrame, type Frame, type FrameEvent } from './frame.js'
@ -61,6 +62,7 @@ import {
getSelectedText,
hasSelection,
moveFocus,
selectionBounds,
type SelectionState,
selectLineAt,
selectWordAt,
@ -163,6 +165,15 @@ export default class Ink {
private backFrame: Frame
private lastPoolResetTime = performance.now()
private drainTimer: ReturnType<typeof setTimeout> | null = null
// Write-drain telemetry: pendingWriteStart is the performance.now() of
// the most recent stdout.write waiting for its drain callback. Set to
// null when the callback fires (drained). Read on the NEXT frame and
// reported as prevFrameDrainMs so the FrameEvent records how long the
// previous write took to actually hit the terminal — distinguishes
// "queued in Node" (write returned true) from "terminal accepted bytes"
// (callback fired).
private pendingWriteStart: number | null = null
private lastDrainMs = 0
private lastYogaCounters: {
ms: number
visited: number
@ -251,6 +262,9 @@ export default class Ink {
// into one follow-up microtask instead of stacking renders.
private isRendering = false
private immediateRerenderRequested = false
private selectionDragCell: { col: number; row: number } | null = null
private selectionAutoScrollTimer: ReturnType<typeof setInterval> | null = null
private selectionAutoScrollDir: -1 | 0 | 1 = 0
constructor(private readonly options: Options) {
autoBind(this)
@ -965,7 +979,42 @@ export default class Ink {
}
const tWrite = performance.now()
writeDiffToTerminal(this.terminal, optimized, this.altScreenActive && !SYNC_OUTPUT_SUPPORTED)
// Capture any stale pending write BEFORE starting this frame's write —
// if the callback already fired, pendingWriteStart is null and lastDrainMs
// already reflects the previous frame's drain. If it hasn't fired, we
// report "still pending" via a non-zero duration based on now-then so
// backpressure shows up even if Node never flushes this session.
const staleDrain = this.pendingWriteStart !== null ? performance.now() - this.pendingWriteStart : this.lastDrainMs
const prevFrameDrainMs = Math.round(staleDrain * 100) / 100
this.lastDrainMs = 0
// Only track drain on TTY. Piped/non-TTY stdout bypasses flow control.
const trackDrain = this.options.stdout.isTTY && hasDiff
const drainStart = trackDrain ? tWrite : 0
if (trackDrain) {
this.pendingWriteStart = drainStart
}
const { bytes: writeBytes, backpressure } = writeDiffToTerminal(
this.terminal,
optimized,
this.altScreenActive && !SYNC_OUTPUT_SUPPORTED,
trackDrain
? () => {
// Callback fires once Node has flushed the chunk to the OS.
// Capture the drain time and clear pending so the NEXT frame's
// staleDrain = the real end-to-end flush time.
if (this.pendingWriteStart === drainStart) {
this.lastDrainMs = performance.now() - drainStart
this.pendingWriteStart = null
}
}
: undefined
)
const writeMs = performance.now() - tWrite
// Update blit safety for the NEXT frame. The frame just rendered
@ -1003,6 +1052,10 @@ export default class Ink {
optimize: optimizeMs,
write: writeMs,
patches: diff.length,
optimizedPatches: optimized.length,
writeBytes,
backpressure,
prevFrameDrainMs,
yoga: yogaMs,
commit: commitMs,
yogaVisited: yc.visited,
@ -1323,7 +1376,9 @@ export default class Ink {
}
if (process.env.HERMES_TUI_DEBUG_CLIPBOARD) {
console.error('[clipboard] no path reached the clipboard (headless + no tmux?) — set HERMES_TUI_FORCE_OSC52=1 to force the escape sequence')
console.error(
'[clipboard] no path reached the clipboard (headless + no tmux?) — set HERMES_TUI_FORCE_OSC52=1 to force the escape sequence'
)
}
} catch (err) {
if (process.env.HERMES_TUI_DEBUG_CLIPBOARD) {
@ -1650,6 +1705,8 @@ export default class Ink {
return undefined
}
this.stopSelectionAutoScroll()
return dispatchMouse(
this.rootNode,
col,
@ -1664,6 +1721,7 @@ export default class Ink {
return
}
this.stopSelectionAutoScroll()
dispatchMouse(this.rootNode, col, row, 'onMouseUp', button, isEmptyCellAt(this.frontFrame.screen, col, row), target)
}
dispatchMouseDrag(target: dom.DOMElement, col: number, row: number, button: number): void {
@ -1789,6 +1847,18 @@ export default class Ink {
return
}
if (this.selectionDragCell?.col === col && this.selectionDragCell.row === row) {
this.updateSelectionAutoScroll(row)
return
}
this.selectionDragCell = { col, row }
this.applySelectionDrag(col, row)
this.updateSelectionAutoScroll(row)
}
private applySelectionDrag(col: number, row: number): void {
const sel = this.selection
if (sel.anchorSpan) {
@ -1800,6 +1870,118 @@ export default class Ink {
this.notifySelectionChange()
}
// Start/stop the drag-selection auto-scroll timer from the current drag row.
// Only active while a drag selection is in progress on the alternate screen;
// scroll direction is derived from the cursor touching the top or bottom
// terminal row.
private updateSelectionAutoScroll(row: number): void {
  if (!this.selection.isDragging || !this.altScreenActive) {
    this.stopSelectionAutoScroll()
    return
  }
  // -1 = scroll up (cursor on top row), 1 = scroll down (bottom row), 0 = idle.
  const dir: -1 | 0 | 1 = row <= 0 ? -1 : row >= this.terminalRows - 1 ? 1 : 0
  if (dir === 0) {
    this.stopSelectionAutoScroll()
    return
  }
  // Already ticking in this direction — keep the existing interval.
  if (this.selectionAutoScrollDir === dir && this.selectionAutoScrollTimer) {
    return
  }
  // Direction changed (or no timer yet): reset, then start a 50ms tick.
  this.stopSelectionAutoScroll()
  this.selectionAutoScrollDir = dir
  this.selectionAutoScrollTimer = setInterval(() => this.stepSelectionAutoScroll(), 50)
}
// One auto-scroll tick: move the primary scroll box by a single row in the
// current direction, keep the in-progress selection consistent with the
// shifted content, and re-apply the drag at the viewport edge.
private stepSelectionAutoScroll(): void {
  // Bail (and tear down the timer) if drag/alt-screen state changed
  // between ticks.
  if (!this.selection.isDragging || !this.altScreenActive || this.selectionAutoScrollDir === 0) {
    this.stopSelectionAutoScroll()
    return
  }
  const box = this.findPrimaryScrollBox()
  if (!box) {
    this.stopSelectionAutoScroll()
    return
  }
  const viewport = Math.max(0, box.scrollViewportHeight ?? 0)
  const max = Math.max(0, (box.scrollHeight ?? 0) - viewport)
  const current = box.scrollTop ?? 0
  // One row per tick, clamped to [0, max].
  const next = Math.max(0, Math.min(max, current + this.selectionAutoScrollDir))
  if (next === current) {
    // Already at the scroll limit — keep the timer running but do nothing.
    return
  }
  const top = box.scrollViewportTop ?? 0
  const bottom = top + viewport - 1
  const before = selectionBounds(this.selection)
  if (before) {
    // Capture the viewport edge row before the content shifts — presumably
    // so the selection retains the row scrolling out of view (see
    // captureScrolledRows for the exact semantics of 'above'/'below').
    if (this.selectionAutoScrollDir > 0) {
      captureScrolledRows(this.selection, this.frontFrame.screen, top, top, 'above')
    } else {
      captureScrolledRows(this.selection, this.frontFrame.screen, bottom, bottom, 'below')
    }
  }
  // Positional scroll: break stickiness and cancel any queued wheel delta
  // or anchor seek before writing scrollTop directly, then mark the box
  // dirty so the renderer picks it up.
  box.stickyScroll = false
  box.pendingScrollDelta = undefined
  box.scrollAnchor = undefined
  box.scrollTop = next
  markDirty(box)
  // Compensate the selection anchor for the one-row content shift.
  shiftAnchor(this.selection, -this.selectionAutoScrollDir, top, bottom)
  if (this.selectionDragCell) {
    // Pin the remembered drag cell to the viewport edge in the scroll direction.
    this.selectionDragCell = {
      col: this.selectionDragCell.col,
      row: this.selectionAutoScrollDir > 0 ? bottom : top
    }
  }
  this.applySelectionDrag(
    this.selectionDragCell?.col ?? 0,
    this.selectionDragCell?.row ?? (this.selectionAutoScrollDir > 0 ? bottom : top)
  )
}
private stopSelectionAutoScroll(): void {
if (this.selectionAutoScrollTimer) {
clearInterval(this.selectionAutoScrollTimer)
this.selectionAutoScrollTimer = null
}
this.selectionAutoScrollDir = 0
this.selectionDragCell = null
}
/**
 * Breadth-first search for the primary scroll container: the shallowest
 * element with overflowY 'scroll' and measured scroll metrics. Returns
 * undefined when no such element exists.
 */
private findPrimaryScrollBox(): dom.DOMElement | undefined {
  // Index-cursor BFS. The previous version used Array.shift() per
  // dequeue, which is O(n) and makes the traversal quadratic on large
  // trees; a read cursor keeps dequeue O(1) with the same visit order.
  const queue = [this.rootNode]
  for (let head = 0; head < queue.length; head++) {
    const node = queue[head]!
    if (
      node.style.overflowY === 'scroll' &&
      node.scrollHeight !== undefined &&
      node.scrollViewportHeight !== undefined
    ) {
      return node
    }
    for (const child of node.childNodes) {
      // Text nodes can never be scroll containers.
      if (child.nodeName !== '#text') {
        queue.push(child)
      }
    }
  }
  return undefined
}
// Methods to properly suspend stdin for external editor usage
// This is needed to prevent Ink from swallowing keystrokes when an external editor is active
private stdinListeners: Array<{

View File

@ -1,3 +1,4 @@
import { lruEvict } from './lru.js'
import { stringWidth } from './stringWidth.js'
// During streaming, text grows but completed lines are immutable.
@ -11,18 +12,27 @@ export function lineWidth(line: string): number {
const cached = cache.get(line)
if (cached !== undefined) {
cache.delete(line)
cache.set(line, cached)
return cached
}
const width = stringWidth(line)
// Evict when cache grows too large (e.g. after many different responses).
// Simple full-clear is fine — the cache repopulates in one frame.
if (cache.size >= MAX_CACHE_SIZE) {
cache.clear()
cache.delete(cache.keys().next().value!)
}
cache.set(line, width)
return width
}
/** Current number of memoized line-width entries (diagnostics). */
export function lineWidthCacheSize(): number {
  return cache.size
}

/** Evict oldest entries, keeping `keepRatio` of the cache; 0 (default) clears it. */
export function evictLineWidthCache(keepRatio = 0): void {
  lruEvict(cache, keepRatio)
}

View File

@ -0,0 +1,14 @@
// Shared eviction for the hot Ink LRU caches (widthCache, wrapCache,
// sliceCache, lineWidthCache). Hot-path touch-on-read stays inlined per
// cache — only the bulk eviction is factored here.
// Bulk-evict the oldest entries of an insertion-ordered Map cache
// (Map iteration order = insertion order, so the first keys are the
// least recently inserted/touched). A keepRatio of 0 or less clears
// the cache outright; otherwise only floor(size * keepRatio) of the
// newest entries survive.
export function lruEvict<K, V>(cache: Map<K, V>, keepRatio: number): void {
  if (keepRatio <= 0) {
    cache.clear()
    return
  }
  const survivors = Math.floor(cache.size * keepRatio)
  let excess = cache.size - survivors
  for (const key of cache.keys()) {
    if (excess <= 0) {
      break
    }
    cache.delete(key)
    excess--
  }
}

View File

@ -467,9 +467,21 @@ export default class Output {
if (clipHorizontally) {
lines = lines.map(line => {
const from = x < clip.x1! ? clip.x1! - x : 0
const width = stringWidth(line)
const to = x + width > clip.x2! ? clip.x2! - x : width
const startsBefore = x < clip.x1!
const endsAfter = x + width > clip.x2!
// Fast path: line fits entirely within the clip box — skip
// tokenize/slice. Common case for transcript text where
// containers are wider than rendered content. CPU profile
// (Apr 2026): sliceAnsi at 18% total during scroll, mostly
// no-op (line, 0, width) slices.
if (!startsBefore && !endsAfter) {
return line
}
const from = startsBefore ? clip.x1! - x : 0
const to = endsAfter ? clip.x2! - x : width
let sliced = sliceAnsi(line, from, to)
// Wide chars (CJK, emoji) occupy 2 cells. When `to` lands

View File

@ -67,6 +67,37 @@ export function resetScrollHint(): void {
absoluteRectsCur = []
}
// Fast-path diagnostics. Bumped from the ScrollBox fast-path branch
// whenever a scroll hint was captured. Reveals why a fast path was
// declined (heightDelta mismatch, no prevScreen, etc.) so we can chase
// the last mile of PageUp/wheel latency. Zero cost when no reader —
// it's all integer bumps. Exposed as a counter object so external
// probes can snapshot + diff.
/** Counter snapshot describing scroll fast-path decisions (see comment above). */
export type ScrollFastPathStats = {
  /** Frames where a scroll hint was captured. */
  captured: number
  /** Frames where the fast path was actually taken. */
  taken: number
  /** Decline counts, bucketed by reason. */
  declined: {
    noPrevScreen: number
    heightDeltaMismatch: number
    other: number
  }
  /** Human-readable reason for the most recent decline. */
  lastDeclineReason?: string
  /** Last observed values behind the decision, for post-hoc debugging. */
  lastHeightDelta?: number
  lastHintDelta?: number
  lastScrollHeight?: number
  lastPrevHeight?: number
}

/** Mutable module-level counters; bumped from renderNodeToOutput when a hint exists. */
export const scrollFastPathStats: ScrollFastPathStats = {
  captured: 0,
  taken: 0,
  declined: {
    noPrevScreen: 0,
    heightDeltaMismatch: 0,
    other: 0
  }
}
export function getScrollHint(): ScrollHint | null {
return scrollHint
}
@ -927,6 +958,27 @@ function renderNodeToOutput(
const safeForFastPath = !hint || heightDelta === 0 || (hint.delta > 0 && heightDelta === hint.delta)
// Diagnostics (opt-in via scrollFastPathStats reader). Only
// counts when a hint was captured — cases where nothing scrolled
// (hint === null) are not declines, just idle frames.
if (hint) {
scrollFastPathStats.captured++
scrollFastPathStats.lastHintDelta = hint.delta
scrollFastPathStats.lastScrollHeight = scrollHeight
scrollFastPathStats.lastPrevHeight = prevHeight
scrollFastPathStats.lastHeightDelta = heightDelta
if (!safeForFastPath) {
scrollFastPathStats.declined.heightDeltaMismatch++
scrollFastPathStats.lastDeclineReason = `heightDelta=${heightDelta} hintDelta=${hint.delta}`
} else if (!prevScreen) {
scrollFastPathStats.declined.noPrevScreen++
scrollFastPathStats.lastDeclineReason = 'noPrevScreen'
} else {
scrollFastPathStats.taken++
}
}
// scrollHint is set above when hint is captured. If safeForFastPath
// is false the full path renders a next.screen that doesn't match
// the DECSTBM shift — emitting DECSTBM leaves stale rows (seen as

View File

@ -4,6 +4,8 @@ import stripAnsi from 'strip-ansi'
import { getGraphemeSegmenter } from '../utils/intl.js'
import { lruEvict } from './lru.js'
const EMOJI_REGEX = emojiRegex()
/**
@ -270,6 +272,70 @@ const bunStringWidth = typeof Bun !== 'undefined' && typeof Bun.stringWidth ===
const BUN_STRING_WIDTH_OPTS = { ambiguousIsNarrow: true } as const
export const stringWidth: (str: string) => number = bunStringWidth
const rawStringWidth: (str: string) => number = bunStringWidth
? str => bunStringWidth(str, BUN_STRING_WIDTH_OPTS)
: stringWidthJavaScript
// Memoize stringWidth — it's pure, hot (~100k calls/frame per the comment
// above), and the underlying impl scans every grapheme + tests EMOJI_REGEX.
// CPU profile (Apr 2026) showed stringWidth dominating at 21% of total
// runtime during scroll. Cache is global (vs per-frame) since the same
// strings recur across frames in a stable transcript.
//
// Pure-ASCII short-strings (the >90% common case) skip the cache: the inline
// loop in stringWidthJavaScript is already faster than a Map.get for them.
// Global memo for stringWidth, keyed by the raw string. Entries are
// touched on read (delete + re-insert) so Map insertion order doubles as
// LRU recency; the oldest entry is dropped once the limit is reached.
const widthCache = new Map<string, number>()
const WIDTH_CACHE_LIMIT = 8192

export const stringWidth: (str: string) => number = str => {
  if (!str) {
    return 0
  }
  // Short strings of pure printable ASCII (no ESC, nothing >= DEL) bypass
  // the cache: computing their width directly beats a Map round-trip.
  if (str.length <= 64) {
    let plainAscii = true
    for (let i = 0; i < str.length; i++) {
      const code = str.charCodeAt(i)
      if (code >= 127 || code === 0x1b) {
        plainAscii = false
        break
      }
    }
    if (plainAscii) {
      return rawStringWidth(str)
    }
  }
  const hit = widthCache.get(str)
  if (hit !== undefined) {
    // LRU touch: re-inserting moves this entry to the newest position.
    widthCache.delete(str)
    widthCache.set(str, hit)
    return hit
  }
  const computed = rawStringWidth(str)
  if (widthCache.size >= WIDTH_CACHE_LIMIT) {
    // Evict the least-recently-used entry (first in iteration order).
    widthCache.delete(widthCache.keys().next().value!)
  }
  widthCache.set(str, computed)
  return computed
}
// Observability hook: current number of entries in the width memo cache
// (used by tests/telemetry; not part of the hot path).
export function widthCacheSize(): number {
return widthCache.size
}
// Shrink the width cache, keeping the most-recently-used keepRatio
// fraction (0 = clear everything); ordering comes from lruEvict.
export function evictWidthCache(keepRatio = 0): void {
lruEvict(widthCache, keepRatio)
}

View File

@ -176,7 +176,7 @@ export function isXtermJs(): boolean {
// in xterm.js-based terminals like VS Code). tmux is allowlisted because it
// accepts modifyOtherKeys and doesn't forward the kitty sequence to the outer
// terminal.
const EXTENDED_KEYS_TERMINALS = ['iTerm.app', 'kitty', 'WezTerm', 'ghostty', 'tmux', 'windows-terminal']
const EXTENDED_KEYS_TERMINALS = ['iTerm.app', 'kitty', 'WezTerm', 'ghostty', 'tmux', 'windows-terminal', 'vscode']
/** True if this terminal correctly handles extended key reporting
* (Kitty keyboard protocol + xterm modifyOtherKeys). */
@ -203,10 +203,15 @@ export type Terminal = {
stderr: Writable
}
export function writeDiffToTerminal(terminal: Terminal, diff: Diff, skipSyncMarkers = false): void {
export function writeDiffToTerminal(
terminal: Terminal,
diff: Diff,
skipSyncMarkers = false,
onDrain?: () => void
): { bytes: number; backpressure: boolean } {
// No output if there are no patches
if (diff.length === 0) {
return
return { bytes: 0, backpressure: false }
}
// BSU/ESU wrapping is opt-out to keep main-screen behavior unchanged.
@ -278,5 +283,13 @@ export function writeDiffToTerminal(terminal: Terminal, diff: Diff, skipSyncMark
buffer += ESU
}
terminal.stdout.write(buffer)
// Node's Writable.write returns false when the internal buffer is full
// (backpressure). On a slow terminal parser that's the tell: we're
// producing bytes faster than the outer terminal can consume them.
// The 2-arg form attaches a drain callback that fires once the chunk
// is actually flushed to the OS socket/pipe — giving us end-to-end
// drain timing, not just "queued in Node".
const wrote = onDrain ? terminal.stdout.write(buffer, () => onDrain()) : terminal.stdout.write(buffer)
return { bytes: Buffer.byteLength(buffer, 'utf8'), backpressure: !wrote }
}

View File

@ -87,7 +87,8 @@ export function shouldEmitClipboardSequence(env: NodeJS.ProcessEnv = process.env
const override = (
env.HERMES_TUI_FORCE_OSC52 ??
env.HERMES_TUI_CLIPBOARD_OSC52 ??
env.HERMES_TUI_COPY_OSC52 ?? ''
env.HERMES_TUI_COPY_OSC52 ??
''
).trim()
if (ENV_ON_RE.test(override)) {
@ -196,16 +197,13 @@ export async function setClipboard(text: string): Promise<ClipboardResult> {
// forever but SSH_CONNECTION is in tmux's default update-environment and
// clears on local attach. Fire-and-forget, but `copyNativeAttempted`
// tells us whether ANY native path will be tried on this platform.
const nativeAttempted =
!process.env['SSH_CONNECTION'] && copyNative(text)
const nativeAttempted = !process.env['SSH_CONNECTION'] && copyNative(text)
const tmuxBufferLoaded = await tmuxLoadBuffer(text)
// Inner OSC uses BEL directly (not osc()) — ST's ESC would need doubling
// too, and BEL works everywhere for OSC 52.
const sequence = tmuxBufferLoaded
? (emitSequence ? tmuxPassthrough(`${ESC}]52;c;${b64}${BEL}`) : '')
: (emitSequence ? raw : '')
const sequence = emitSequence ? (tmuxBufferLoaded ? tmuxPassthrough(`${ESC}]52;c;${b64}${BEL}`) : raw) : ''
// Success if any path was taken. Native and tmux are fire-and-forget,
// so we can't truly confirm the clipboard was written — but if native

View File

@ -1,11 +1,46 @@
import sliceAnsi from '../utils/sliceAnsi.js'
import { lruEvict } from './lru.js'
import { stringWidth } from './stringWidth.js'
import type { Styles } from './styles.js'
import { wrapAnsi } from './wrapAnsi.js'
const ELLIPSIS = '…'
// CPU profile (Apr 2026) showed `wrap-ansi` → `string-width` consuming 30% of
// total runtime during fast scroll: every layout pass re-wraps every visible
// line via wrap-ansi, which calls string-width once per grapheme. The output
// is a pure function of (text, maxWidth, wrapType), so memoize it. LRU-bounded so long
// sessions don't accrete unbounded cache.
const WRAP_CACHE_LIMIT = 4096
const wrapCache = new Map<string, string>()
// LRU-memoized front-end for computeWrap. The cache key folds width and
// wrap mode into a prefix so identical text wrapped at a different width
// (or with a different mode) never collides; both prefixes are small
// (viewport widths, a fixed wrapType enum), so key cardinality is driven
// by the text itself.
function memoizedWrap(text: string, maxWidth: number, wrapType: Styles['textWrap']): string {
const cacheKey = `${maxWidth}|${wrapType}|${text}`
const hit = wrapCache.get(cacheKey)
if (hit !== undefined) {
// LRU touch: delete+set moves the entry to the back of the
// insertion-ordered Map, protecting it from eviction.
wrapCache.delete(cacheKey)
wrapCache.set(cacheKey, hit)
return hit
}
const wrapped = computeWrap(text, maxWidth, wrapType)
if (wrapCache.size >= WRAP_CACHE_LIMIT) {
// Evict the oldest (first-iterated) entry to stay within bounds.
wrapCache.delete(wrapCache.keys().next().value!)
}
wrapCache.set(cacheKey, wrapped)
return wrapped
}
// sliceAnsi may include a boundary-spanning wide char (e.g. CJK at position
// end-1 with width 2 overshoots by 1). Retry with a tighter bound once.
function sliceFit(text: string, start: number, end: number): string {
@ -42,12 +77,9 @@ function truncate(text: string, columns: number, position: 'start' | 'middle' |
return sliceFit(text, 0, columns - 1) + ELLIPSIS
}
export default function wrapText(text: string, maxWidth: number, wrapType: Styles['textWrap']): string {
function computeWrap(text: string, maxWidth: number, wrapType: Styles['textWrap']): string {
if (wrapType === 'wrap') {
return wrapAnsi(text, maxWidth, {
trim: false,
hard: true
})
return wrapAnsi(text, maxWidth, { trim: false, hard: true })
}
if (wrapType === 'wrap-char') {
@ -55,25 +87,32 @@ export default function wrapText(text: string, maxWidth: number, wrapType: Style
}
if (wrapType === 'wrap-trim') {
return wrapAnsi(text, maxWidth, {
trim: true,
hard: true
})
return wrapAnsi(text, maxWidth, { trim: true, hard: true })
}
if (wrapType!.startsWith('truncate')) {
let position: 'end' | 'middle' | 'start' = 'end'
if (wrapType === 'truncate-middle') {
position = 'middle'
}
if (wrapType === 'truncate-start') {
position = 'start'
}
const position: 'end' | 'middle' | 'start' =
wrapType === 'truncate-middle' ? 'middle' : wrapType === 'truncate-start' ? 'start' : 'end'
return truncate(text, maxWidth, position)
}
return text
}
// Public entry point: memoized text wrapping. Trivial inputs (empty text
// or a non-positive width) go straight to computeWrap — for those a Map
// lookup would cost more than the computation itself.
export default function wrapText(text: string, maxWidth: number, wrapType: Styles['textWrap']): string {
const trivial = !text || maxWidth <= 0
return trivial ? computeWrap(text, maxWidth, wrapType) : memoizedWrap(text, maxWidth, wrapType)
}
// Observability hook: current number of entries in the wrap memo cache.
export function wrapCacheSize(): number {
return wrapCache.size
}
// Shrink the wrap cache, keeping the most-recently-used keepRatio
// fraction (0 = clear everything); ordering comes from lruEvict.
export function evictWrapCache(keepRatio = 0): void {
lruEvict(wrapCache, keepRatio)
}

View File

@ -1,5 +1,6 @@
import { type AnsiCode, ansiCodesToString, reduceAnsiCodes, tokenize, undoAnsiCodes } from '@alcalzone/ansi-tokenize'
import { lruEvict } from '../ink/lru.js'
import { stringWidth } from '../ink/stringWidth.js'
function isEndCode(code: AnsiCode): boolean {
@ -10,7 +11,54 @@ function filterStartCodes(codes: AnsiCode[]): AnsiCode[] {
return codes.filter(c => !isEndCode(c))
}
// LRU cache: same (string, start, end) → same output. Output.get() re-emits
// identical writes every frame for stable transcript content; this avoids
// re-tokenizing them. CPU profile (Apr 2026) showed sliceAnsi at 18% total
// time during scroll. Bounded at 4096 entries — entries are short clipped
// lines so memory cost is small.
const sliceCache = new Map<string, string>()
const SLICE_CACHE_LIMIT = 4096
// ANSI-aware slice with an LRU memo for the bounded form (end defined),
// which is what Output.get() re-issues every frame for stable transcript
// rows. Open-ended slices skip the cache.
export default function sliceAnsi(str: string, start: number, end?: number): string {
if (!str) {
return ''
}
if (end === undefined) {
// Uncached path: without an explicit end this isn't the hot
// Output.get() shape, so don't pay the key-building cost.
return computeSlice(str, start, end)
}
const key = `${start}|${end}|${str}`
const hit = sliceCache.get(key)
if (hit !== undefined) {
// LRU touch: re-insert so this key survives the next eviction.
sliceCache.delete(key)
sliceCache.set(key, hit)
return hit
}
const sliced = computeSlice(str, start, end)
if (sliceCache.size >= SLICE_CACHE_LIMIT) {
// Drop the least-recently-used entry (first in iteration order).
sliceCache.delete(sliceCache.keys().next().value!)
}
sliceCache.set(key, sliced)
return sliced
}
// Observability hook: current number of entries in the slice memo cache.
export function sliceCacheSize(): number {
return sliceCache.size
}
// Shrink the slice cache, keeping the most-recently-used keepRatio
// fraction (0 = clear everything); ordering comes from lruEvict.
export function evictSliceCache(keepRatio = 0): void {
lruEvict(sliceCache, keepRatio)
}
function computeSlice(str: string, start: number, end?: number): string {
const tokens = tokenize(str)
let activeCodes: AnsiCode[] = []
let position = 0

View File

@ -0,0 +1,121 @@
#!/usr/bin/env node
/* global Buffer, console, process, setImmediate */
import inspector from 'node:inspector'
import { performance } from 'node:perf_hooks'
import React from 'react'
import { render } from '@hermes/ink'
import { AppLayout } from '../src/components/appLayout.tsx'
import { resetOverlayState } from '../src/app/overlayStore.ts'
import { resetTurnState } from '../src/app/turnStore.ts'
import { resetUiState } from '../src/app/uiStore.ts'
// Attach the V8 inspector to this process so the benchmark can drive the
// CPU/heap profilers programmatically.
const session = new inspector.Session()
session.connect()
// Promise wrapper over the callback-style inspector post() API.
const post = (method, params = {}) => new Promise((resolve, reject) => {
session.post(method, params, (err, result) => err ? reject(err) : resolve(result))
})
// Benchmark knobs (env-overridable): transcript length and mounted rows.
const historySize = Number(process.env.HISTORY || 500)
const mountedRows = Number(process.env.MOUNTED || 120)
// Fake TTY write stream for benchmarking: swallows all output while
// counting bytes and write() calls. Terminal geometry is env-overridable
// via COLS/ROWS.
class Sink {
constructor() {
this.columns = Number(process.env.COLS || 120)
this.rows = Number(process.env.ROWS || 42)
this.isTTY = true
this.bytes = 0
this.writes = 0
this.listeners = new Map()
}
write(chunk) {
// Count UTF-8 bytes; `chunk ?? ''` tolerates null/undefined writes.
this.bytes += Buffer.byteLength(String(chunk ?? ''))
this.writes += 1
return true
}
// Minimal chainable EventEmitter surface — one listener per event name
// is enough for the render pipeline under test.
on(event, fn) {
this.listeners.set(event, fn)
return this
}
off(event) {
this.listeners.delete(event)
return this
}
once(event, fn) {
this.listeners.set(event, fn)
return this
}
removeListener(event) {
this.listeners.delete(event)
return this
}
}
// Minimal theme stub: just enough color keys for AppLayout to render.
const theme = {
brand: { prompt: '' },
color: {
amber: '#d19a66', bronze: '#8b6f47', dim: '#6b7280', error: '#ff5555', gold: '#ffd166', label: '#61afef',
ok: '#98c379', warn: '#e5c07b', cornsilk: '#fff8dc', prompt: '#c678dd', shellDollar: '#98c379',
statusCritical: '#ff5555', statusBad: '#e06c75', statusWarn: '#e5c07b', statusGood: '#98c379',
selectionBg: '#44475a'
}
}
const noop = () => {}
// Synthetic transcript: one intro row plus historySize alternating
// user/assistant messages with long bodies to force wrapping work.
const historyItems = [
{ kind: 'intro', role: 'system', text: '', info: { model: 'test', tools: {}, skills: {}, version: 'test' } },
...Array.from({ length: historySize }, (_, i) => ({
role: i % 5 === 0 ? 'user' : 'assistant',
text: `message ${i}\n${'lorem ipsum '.repeat(80)}`
}))
]
// Inert scroll controller stub — pinned to the bottom, ignores commands.
const scrollRef = { current: {
getScrollTop: () => 0,
getPendingDelta: () => 0,
getScrollHeight: () => historySize * 4,
getViewportHeight: () => 30,
getViewportTop: () => 0,
isSticky: () => true,
subscribe: () => () => {},
scrollBy: noop,
scrollTo: noop,
scrollToBottom: noop,
setClampBounds: noop,
getLastManualScrollAt: () => 0
} }
// Build AppLayout props for a given streaming-text snapshot; only the
// progress section varies between rerenders in the benchmark loop.
const baseProps = streamingText => ({
actions: { answerApproval: noop, answerClarify: noop, answerSecret: noop, answerSudo: noop, onModelSelect: noop, resumeById: noop, setStickyPrompt: noop },
composer: { cols: 120, compIdx: 0, completions: [], empty: false, handleTextPaste: () => null, input: '', inputBuf: [], pagerPageSize: 10, queueEditIdx: null, queuedDisplay: [], submit: noop, updateInput: noop },
mouseTracking: false,
progress: {
activity: [], outcome: '', reasoning: streamingText, reasoningActive: true, reasoningStreaming: true,
reasoningTokens: Math.ceil(streamingText.length / 4), showProgressArea: true, showStreamingArea: true,
streamPendingTools: [], streamSegments: [], streaming: streamingText, subagents: [], toolTokens: 0, tools: [], turnTrail: [], todos: []
},
status: { cwdLabel: '~/repo', goodVibesTick: 0, sessionStartedAt: Date.now(), showStickyPrompt: false, statusColor: theme.color.ok, stickyPrompt: '', turnStartedAt: Date.now(), voiceLabel: 'voice off' },
transcript: {
historyItems,
scrollRef,
virtualHistory: { bottomSpacer: 0, end: historyItems.length, measureRef: () => noop, offsets: historyItems.map((_, i) => i * 4), start: Math.max(0, historyItems.length - mountedRows), topSpacer: 0 },
virtualRows: historyItems.map((msg, index) => ({ index, key: `m${index}`, msg }))
}
})
// Benchmark driver: mount the TUI against the Sink, stream progressively
// longer text through ITERS rerenders under the V8 CPU profiler, and
// print timing/byte/memory stats as JSON.
async function main() {
resetUiState()
resetTurnState()
resetOverlayState()
const stdout = new Sink()
const stdin = { isTTY: true, setRawMode: noop, on: noop, off: noop, resume: noop, pause: noop }
const text = Array.from({ length: Number(process.env.LINES || 1200) }, (_, i) => `stream line ${i} ${'x'.repeat(90)}`).join('\n')
const inst = render(React.createElement(AppLayout, baseProps('')), { stdout, stdin, stderr: stdout, debug: false, exitOnCtrlC: false })
await post('Profiler.enable')
await post('HeapProfiler.enable')
await post('Profiler.start')
const startMem = process.memoryUsage()
const t0 = performance.now()
const iterations = Number(process.env.ITERS || 40)
for (let i = 1; i <= iterations; i++) {
// Grow the streamed prefix linearly, simulating token streaming.
const prefix = text.slice(0, Math.floor(text.length * i / iterations))
inst.rerender(React.createElement(AppLayout, baseProps(prefix)))
// Yield to the event loop so render side effects flush between frames.
await new Promise(r => setImmediate(r))
}
const elapsed = performance.now() - t0
const prof = await post('Profiler.stop')
// Capture memory before and after a forced GC to separate live set
// from collectible garbage.
const endMem = process.memoryUsage()
await post('HeapProfiler.collectGarbage')
const afterGc = process.memoryUsage()
inst.unmount()
session.disconnect()
console.log(JSON.stringify({ elapsedMs: Math.round(elapsed), stdoutBytes: stdout.bytes, stdoutWrites: stdout.writes, startMem, endMem, afterGc, profileNodes: prof.profile.nodes.length }, null, 2))
}
main().catch(err => { console.error(err); process.exit(1) })

View File

@ -59,6 +59,79 @@ describe('createGatewayEventHandler', () => {
patchUiState({ showReasoning: true })
})
it('archives incomplete todos into transcript flow at end of turn so they scroll up', () => {
const appended: Msg[] = []
const todos = [
{ content: 'Gather ingredients', id: 'prep', status: 'completed' },
{ content: 'Boil water', id: 'boil', status: 'in_progress' },
{ content: 'Make sauce', id: 'sauce', status: 'pending' }
]
const onEvent = createGatewayEventHandler(buildCtx(appended))
onEvent({ payload: {}, type: 'message.start' } as any)
onEvent({ payload: { name: 'todo', todos, tool_id: 'todo-1' }, type: 'tool.start' } as any)
expect(getTurnState().todos).toEqual(todos)
onEvent({ payload: { text: 'Started a todo list.' }, type: 'message.complete' } as any)
const trail = appended.find(msg => msg.kind === 'trail' && msg.todos?.length)
const finalText = appended.find(msg => msg.role === 'assistant' && msg.text === 'Started a todo list.')
expect(finalText).toBeDefined()
expect(trail).toMatchObject({ kind: 'trail', role: 'system', todos, todoIncomplete: true })
// Todo archive must sit ABOVE the final assistant text so the panel
// doesn't visibly jump across the final answer at end-of-turn.
expect(appended.indexOf(trail!)).toBeLessThan(appended.indexOf(finalText!))
expect(getTurnState().todos).toEqual([])
})
it('archives completed todos into transcript flow at end of turn', () => {
const appended: Msg[] = []
const todos = [{ content: 'Serve tiny latte', id: 'serve', status: 'completed' }]
const onEvent = createGatewayEventHandler(buildCtx(appended))
onEvent({ payload: { name: 'todo', todos, tool_id: 'todo-1' }, type: 'tool.start' } as any)
onEvent({ payload: { text: 'done' }, type: 'message.complete' } as any)
expect(getTurnState().todos).toEqual([])
expect(appended).toContainEqual({
kind: 'trail',
role: 'system',
text: '',
todoCollapsedByDefault: true,
todos
})
})
it('keeps the current todo list visible when the next message starts', () => {
const appended: Msg[] = []
const todos = [{ content: 'Boil water', id: 'boil', status: 'in_progress' }]
const onEvent = createGatewayEventHandler(buildCtx(appended))
onEvent({ payload: { name: 'todo', todos, tool_id: 'todo-1' }, type: 'tool.start' } as any)
expect(getTurnState().todos).toEqual(todos)
onEvent({ payload: {}, type: 'message.start' } as any)
expect(getTurnState().todos).toEqual(todos)
})
it('clears the visible todo list when the todo tool returns an empty list', () => {
const appended: Msg[] = []
const todos = [{ content: 'Boil water', id: 'boil', status: 'in_progress' }]
const onEvent = createGatewayEventHandler(buildCtx(appended))
onEvent({ payload: { name: 'todo', todos, tool_id: 'todo-1' }, type: 'tool.start' } as any)
expect(getTurnState().todos).toEqual(todos)
onEvent({ payload: { name: 'todo', todos: [], tool_id: 'todo-1' }, type: 'tool.complete' } as any)
expect(getTurnState().todos).toEqual([])
})
it('persists completed tool rows when message.complete lands immediately after tool.complete', () => {
const appended: Msg[] = []
@ -82,15 +155,37 @@ describe('createGatewayEventHandler', () => {
type: 'message.complete'
} as any)
expect(appended).toHaveLength(1)
expect(appended[0]).toMatchObject({
role: 'assistant',
text: 'final answer',
thinking: 'mapped the page'
})
expect(appended).toHaveLength(2)
expect(appended[0]).toMatchObject({ kind: 'trail', role: 'system', text: '', thinking: 'mapped the page' })
expect(appended[0]?.tools).toHaveLength(1)
expect(appended[0]?.tools?.[0]).toContain('hero cards')
expect(appended[0]?.toolTokens).toBeGreaterThan(0)
expect(appended[1]).toMatchObject({ role: 'assistant', text: 'final answer' })
})
it('groups sequential completed tools into one trail when the turn completes', () => {
const appended: Msg[] = []
const onEvent = createGatewayEventHandler(buildCtx(appended))
onEvent({ payload: { context: 'alpha', name: 'search_files', tool_id: 'tool-1' }, type: 'tool.start' } as any)
onEvent({
payload: { name: 'search_files', summary: 'first done', tool_id: 'tool-1' },
type: 'tool.complete'
} as any)
onEvent({ payload: { context: 'beta', name: 'read_file', tool_id: 'tool-2' }, type: 'tool.start' } as any)
onEvent({ payload: { name: 'read_file', summary: 'second done', tool_id: 'tool-2' }, type: 'tool.complete' } as any)
expect(getTurnState().streamSegments.filter(msg => msg.kind === 'trail' && msg.tools?.length)).toHaveLength(1)
expect(getTurnState().streamSegments[0]?.tools).toHaveLength(2)
expect(getTurnState().streamPendingTools).toEqual([])
onEvent({ payload: { text: '' }, type: 'message.complete' } as any)
const toolTrails = appended.filter(msg => msg.kind === 'trail' && msg.tools?.length)
expect(toolTrails).toHaveLength(1)
expect(toolTrails[0]?.tools).toHaveLength(2)
expect(toolTrails[0]?.tools?.[0]).toContain('Search Files')
expect(toolTrails[0]?.tools?.[1]).toContain('Read File')
})
it('keeps tool tokens across handler recreation mid-turn', () => {
@ -118,9 +213,53 @@ describe('createGatewayEventHandler', () => {
type: 'message.complete'
} as any)
expect(appended).toHaveLength(1)
expect(appended).toHaveLength(2)
expect(appended[0]?.tools).toHaveLength(1)
expect(appended[0]?.toolTokens).toBeGreaterThan(0)
expect(appended[1]).toMatchObject({ role: 'assistant', text: 'final answer' })
})
it('streams legacy thinking.delta into visible reasoning state', () => {
vi.useFakeTimers()
const appended: Msg[] = []
const streamed = 'short streamed reasoning'
createGatewayEventHandler(buildCtx(appended))({ payload: { text: streamed }, type: 'thinking.delta' } as any)
vi.runOnlyPendingTimers()
expect(getTurnState().reasoning).toBe(streamed)
expect(getTurnState().reasoningActive).toBe(true)
expect(getTurnState().reasoningTokens).toBe(estimateTokensRough(streamed))
vi.useRealTimers()
})
it('preserves streamed reasoning as one completed thinking panel after segment flushes', () => {
const appended: Msg[] = []
const streamed = 'first reasoning chunk\nsecond reasoning chunk'
const onEvent = createGatewayEventHandler(buildCtx(appended))
onEvent({ payload: { text: streamed }, type: 'reasoning.delta' } as any)
onEvent({ payload: { text: 'Before edit.' }, type: 'message.delta' } as any)
turnController.flushStreamingSegment()
onEvent({ payload: { text: 'final answer' }, type: 'message.complete' } as any)
expect(appended.map(msg => msg.thinking).filter(Boolean)).toEqual([streamed])
expect(appended[appended.length - 1]).toMatchObject({ role: 'assistant', text: 'final answer' })
})
it('filters spinner/status-only reasoning noise from completed thinking', () => {
const appended: Msg[] = []
const streamed = '(¬_¬) synthesizing...\nactual plan\n( ͡° ͜ʖ ͡°) pondering...\nnext step'
const onEvent = createGatewayEventHandler(buildCtx(appended))
onEvent({ payload: { text: streamed }, type: 'reasoning.delta' } as any)
onEvent({ payload: { text: 'final answer' }, type: 'message.complete' } as any)
expect(appended[0]?.thinking).toBe(streamed)
expect(appended[0]?.text).toBe('')
expect(appended[appended.length - 1]).toMatchObject({ role: 'assistant', text: 'final answer' })
})
it('ignores fallback reasoning.available when streamed reasoning already exists', () => {
@ -134,9 +273,10 @@ describe('createGatewayEventHandler', () => {
onEvent({ payload: { text: fallback }, type: 'reasoning.available' } as any)
onEvent({ payload: { text: 'final answer' }, type: 'message.complete' } as any)
expect(appended).toHaveLength(1)
expect(appended).toHaveLength(2)
expect(appended[0]?.thinking).toBe(streamed)
expect(appended[0]?.thinkingTokens).toBe(estimateTokensRough(streamed))
expect(appended[1]).toMatchObject({ role: 'assistant', text: 'final answer' })
})
it('uses message.complete reasoning when no streamed reasoning ref', () => {
@ -147,9 +287,10 @@ describe('createGatewayEventHandler', () => {
onEvent({ payload: { reasoning: fromServer, text: 'final answer' }, type: 'message.complete' } as any)
expect(appended).toHaveLength(1)
expect(appended).toHaveLength(2)
expect(appended[0]?.thinking).toBe(fromServer)
expect(appended[0]?.thinkingTokens).toBe(estimateTokensRough(fromServer))
expect(appended[1]).toMatchObject({ role: 'assistant', text: 'final answer' })
})
it('anchors inline_diff as its own segment where the edit happened', () => {
@ -170,23 +311,40 @@ describe('createGatewayEventHandler', () => {
expect(appended).toHaveLength(0)
expect(turnController.segmentMessages).toEqual([
{ role: 'assistant', text: 'Editing the file' },
{ kind: 'diff', role: 'assistant', text: block }
{
kind: 'diff',
role: 'assistant',
text: block,
tools: [expect.stringMatching(/^Patch\("foo\.ts"\)(?: \([^)]+\))? ✓$/)]
}
])
onEvent({ payload: { text: 'patch applied' }, type: 'message.complete' } as any)
// Four transcript messages: pre-tool narration → tool trail → diff
// (kind='diff', so MessageLine gives it blank-line breathing room) →
// post-tool narration. The final message does NOT contain a diff.
expect(appended).toHaveLength(4)
expect(appended[0]?.text).toBe('Editing the file')
expect(appended[1]).toMatchObject({ kind: 'trail' })
expect(appended[1]).toMatchObject({ kind: 'diff', text: block })
expect(appended[1]?.tools?.[0]).toContain('Patch')
expect(appended[2]).toMatchObject({ kind: 'diff', text: block })
expect(appended[3]?.text).toBe('patch applied')
expect(appended[3]?.text).not.toContain('```diff')
})
it('keeps full final responses from duplicating flushed pre-diff narration', () => {
const appended: Msg[] = []
const onEvent = createGatewayEventHandler(buildCtx(appended))
const diff = '--- a/foo.ts\n+++ b/foo.ts\n@@\n-old\n+new'
const block = `\`\`\`diff\n${diff}\n\`\`\``
onEvent({ payload: { text: 'Before edit. ' }, type: 'message.delta' } as any)
onEvent({ payload: { context: 'foo.ts', name: 'patch', tool_id: 'tool-1' }, type: 'tool.start' } as any)
onEvent({ payload: { inline_diff: diff, summary: 'patched', tool_id: 'tool-1' }, type: 'tool.complete' } as any)
onEvent({ payload: { text: 'After edit.' }, type: 'message.delta' } as any)
onEvent({ payload: { text: 'Before edit. After edit.' }, type: 'message.complete' } as any)
expect(appended.map(msg => msg.text.trim()).filter(Boolean)).toEqual(['Before edit.', block, 'After edit.'])
expect(appended[1]?.tools?.[0]).toContain('Patch')
})
it('drops the diff segment when the final assistant text narrates the same diff', () => {
const appended: Msg[] = []
const onEvent = createGatewayEventHandler(buildCtx(appended))
@ -212,12 +370,12 @@ describe('createGatewayEventHandler', () => {
onEvent({ payload: { text: 'done' }, type: 'message.complete' } as any)
// Tool trail first, then diff segment (kind='diff'), then final narration.
expect(appended).toHaveLength(3)
expect(appended[0]?.kind).toBe('trail')
expect(appended[1]?.kind).toBe('diff')
expect(appended[1]?.text).not.toContain('┊ review diff')
expect(appended[1]?.text).toContain('--- a/foo.ts')
expect(appended[2]?.text).toBe('done')
expect(appended).toHaveLength(2)
expect(appended[0]?.kind).toBe('diff')
expect(appended[0]?.text).not.toContain('┊ review diff')
expect(appended[0]?.text).toContain('--- a/foo.ts')
expect(appended[0]?.tools?.[0]).toContain('Tool')
expect(appended[1]?.text).toBe('done')
})
it('drops the diff segment when assistant writes its own ```diff fence', () => {
@ -250,15 +408,13 @@ describe('createGatewayEventHandler', () => {
// Tool row is now placed before the diff, so telemetry does not render
// below the patch that came from that tool.
expect(appended).toHaveLength(3)
expect(appended[0]?.kind).toBe('trail')
expect(appended).toHaveLength(2)
expect(appended[0]?.kind).toBe('diff')
expect(appended[0]?.text).toContain('```diff')
expect(appended[0]?.tools?.[0]).toContain('Review Diff')
expect(appended[0]?.tools?.[0]).not.toContain('--- a/foo.ts')
expect(appended[1]?.kind).toBe('diff')
expect(appended[1]?.text).toContain('```diff')
expect(appended[1]?.text).toBe('done')
expect(appended[1]?.tools ?? []).toEqual([])
expect(appended[2]?.text).toBe('done')
expect(appended[2]?.tools ?? []).toEqual([])
})
it('shows setup panel for missing provider startup error', () => {

View File

@ -25,6 +25,36 @@ describe('createSlashHandler', () => {
expect(ctx.gateway.gw.request).not.toHaveBeenCalled()
})
it('persists typed /model switches by default', async () => {
patchUiState({ sid: 'sid-abc' })
const ctx = buildCtx({
gateway: {
...buildGateway(),
rpc: vi.fn(() => Promise.resolve({ value: 'x-model' }))
}
})
expect(createSlashHandler(ctx)('/model x-model')).toBe(true)
expect(ctx.gateway.rpc).toHaveBeenCalledWith('config.set', {
key: 'model',
session_id: 'sid-abc',
value: 'x-model --global'
})
})
it('does not duplicate --global for explicit persistent model switches', () => {
patchUiState({ sid: 'sid-abc' })
const ctx = buildCtx()
createSlashHandler(ctx)('/model x-model --global')
expect(ctx.gateway.rpc).toHaveBeenCalledWith('config.set', {
key: 'model',
session_id: 'sid-abc',
value: 'x-model --global'
})
})
it('opens the skills hub locally for bare /skills', () => {
const ctx = buildCtx()
@ -89,6 +119,7 @@ describe('createSlashHandler', () => {
expect(getUiState().detailsMode).toBe('collapsed')
expect(createSlashHandler(ctx)('/details toggle')).toBe(true)
expect(getUiState().detailsMode).toBe('expanded')
expect(getUiState().detailsModeCommandOverride).toBe(true)
expect(ctx.gateway.rpc).toHaveBeenCalledWith('config.set', {
key: 'details_mode',
value: 'expanded'
@ -311,9 +342,7 @@ describe('createSlashHandler', () => {
expect(rpc).toHaveBeenCalledWith('session.save', { session_id: 'sid-abc' })
await vi.waitFor(() => {
expect(ctx.transcript.sys).toHaveBeenCalledWith(
'conversation saved to: /tmp/hermes_conversation_test.json'
)
expect(ctx.transcript.sys).toHaveBeenCalledWith('conversation saved to: /tmp/hermes_conversation_test.json')
})
})

View File

@ -78,19 +78,25 @@ describe('sectionMode', () => {
expect(sectionMode('subagents', 'hidden', {})).toBe('hidden')
})
it('streams thinking + tools expanded by default regardless of global mode', () => {
it('streams thinking + tools expanded by default for persisted config values', () => {
expect(sectionMode('thinking', 'collapsed', {})).toBe('expanded')
expect(sectionMode('thinking', 'hidden', undefined)).toBe('expanded')
expect(sectionMode('tools', 'collapsed', {})).toBe('expanded')
expect(sectionMode('tools', 'hidden', undefined)).toBe('expanded')
})
it('hides the activity panel by default regardless of global mode', () => {
it('hides the activity panel by default for persisted config values', () => {
expect(sectionMode('activity', 'collapsed', {})).toBe('hidden')
expect(sectionMode('activity', 'expanded', undefined)).toBe('hidden')
expect(sectionMode('activity', 'hidden', {})).toBe('hidden')
})
it('applies in-session /details mode globally over built-in defaults', () => {
expect(sectionMode('thinking', 'collapsed', {}, true)).toBe('collapsed')
expect(sectionMode('tools', 'hidden', {}, true)).toBe('hidden')
expect(sectionMode('activity', 'expanded', undefined, true)).toBe('expanded')
})
it('honours per-section overrides over both the section default and global mode', () => {
expect(sectionMode('thinking', 'collapsed', { thinking: 'collapsed' })).toBe('collapsed')
expect(sectionMode('tools', 'collapsed', { tools: 'hidden' })).toBe('hidden')

View File

@ -1,7 +1,26 @@
import { describe, expect, it } from 'vitest'
import { toTranscriptMessages } from '../domain/messages.js'
import { upsert } from '../lib/messages.js'
describe('toTranscriptMessages', () => {
it('preserves assistant tool-call rows so resume does not drop prior turns', () => {
// Raw session rows include a 'tool' row whose raw result text must not
// appear as a transcript message.
const rows = [
{ role: 'user', text: 'first prompt' },
{ role: 'tool', context: 'repo', name: 'search_files', text: 'ignored raw result' },
{ role: 'assistant', text: 'first answer' },
{ role: 'user', text: 'second prompt' }
]
expect(toTranscriptMessages(rows).map(msg => [msg.role, msg.text])).toEqual([
['user', 'first prompt'],
['assistant', 'first answer'],
['user', 'second prompt']
])
// The tool row is folded into the following assistant message's tools.
expect(toTranscriptMessages(rows)[1]?.tools?.[0]).toContain('Search Files')
})
})
describe('upsert', () => {
it('appends when last role differs', () => {
expect(upsert([{ role: 'user', text: 'hi' }], 'assistant', 'hello')).toHaveLength(2)

View File

@ -1,6 +1,7 @@
import { describe, expect, it } from 'vitest'
import { hasReasoningTag, splitReasoning } from '../lib/reasoning.js'
import { cleanThinkingText } from '../lib/text.js'
describe('splitReasoning', () => {
it('extracts <think>…</think> and strips it from text', () => {
@ -48,3 +49,13 @@ describe('splitReasoning', () => {
expect(hasReasoningTag('no tags at all')).toBe(false)
})
})
describe('cleanThinkingText', () => {
it('removes face/status ticker fragments while preserving real reasoning', () => {
// Input interleaves kaomoji spinner/status noise with genuine
// reasoning lines; only the noise should be stripped.
expect(
cleanThinkingText(
'(¬_¬) synthesizing...**Resolving comments on GitHub**\n( ͡° ͜ʖ ͡°) musing...\nActual step\n٩(๑❛ᴗ❛๑)۶ contemplating...next step'
)
).toBe('**Resolving comments on GitHub**\nActual step\nnext step')
})
})

View File

@ -0,0 +1,55 @@
import { describe, expect, it, vi } from 'vitest'
import { scrollWithSelectionBy } from '../app/scroll.js'
// Build a minimal scroll-controller stub for scrollWithSelectionBy tests;
// any field can be replaced per test via `overrides`.
function makeScroll(overrides: Partial<Record<string, unknown>> = {}) {
const defaults = {
getPendingDelta: vi.fn(() => 0),
getScrollHeight: vi.fn(() => 100),
getScrollTop: vi.fn(() => 10),
getViewportHeight: vi.fn(() => 20),
getViewportTop: vi.fn(() => 0),
scrollBy: vi.fn()
}
// Overrides win: assigned last, like the original object-spread order.
return Object.assign(defaults, overrides)
}
describe('scrollWithSelectionBy', () => {
it('clamps to the actual remaining scroll distance before calling scrollBy', () => {
// height 30 − viewport 20 = max top 10; from top 9 only 1 row remains,
// so a requested delta of 10 must be clamped to 1.
const s = makeScroll({
getScrollHeight: vi.fn(() => 30),
getScrollTop: vi.fn(() => 9),
getViewportHeight: vi.fn(() => 20)
})
const selection = {
captureScrolledRows: vi.fn(),
getState: vi.fn(() => null),
shiftAnchor: vi.fn(),
shiftSelection: vi.fn()
}
scrollWithSelectionBy(10, { scrollRef: { current: s as never }, selection })
expect(s.scrollBy).toHaveBeenCalledWith(1)
})
it('does nothing at the edge instead of queueing dead pending deltas', () => {
// Already at max top (10): scrollBy must not be called at all.
const s = makeScroll({
getScrollHeight: vi.fn(() => 30),
getScrollTop: vi.fn(() => 10),
getViewportHeight: vi.fn(() => 20)
})
const selection = {
captureScrolledRows: vi.fn(),
getState: vi.fn(() => null),
shiftAnchor: vi.fn(),
shiftSelection: vi.fn()
}
scrollWithSelectionBy(10, { scrollRef: { current: s as never }, selection })
expect(s.scrollBy).not.toHaveBeenCalled()
})
})

View File

@ -0,0 +1,46 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { patchTurnState, resetTurnState } from '../app/turnStore.js'
import { $uiState, resetUiState } from '../app/uiStore.js'
// Shallow structural equality: same key count and Object.is-equal values for
// every key of `a` (NaN-safe; distinguishes +0 from -0).
const shallowEqual = <T extends Record<string, unknown>>(a: T, b: T) => {
  const keysA = Object.keys(a)
  if (keysA.length !== Object.keys(b).length) {
    return false
  }
  return keysA.every(k => Object.is(a[k], b[k]))
}
// Subscribe to $uiState through a selector, counting only notifications
// whose selected slice actually changed — mirrors how the composer consumes
// the store, so the test below can measure redundant wakeups.
const subscribeSelected = <T extends Record<string, unknown>>(selector: () => T) => {
let current = selector()
let calls = 0
const unsubscribe = $uiState.listen(() => {
const next = selector()
if (shallowEqual(next, current)) {
return
}
current = next
calls++
})
// calls is read lazily so assertions see the final count after unsubscribe.
return { calls: () => calls, unsubscribe }
}
// High-frequency streaming goes through the turn store; ui-store subscribers
// (busy/sid slices the composer reads) must not be woken per token.
describe('TUI state isolation', () => {
beforeEach(() => {
resetUiState()
resetTurnState()
})
it('does not notify ui/composer subscribers for high-frequency turn updates', () => {
const composerRelevant = subscribeSelected(() => ({ busy: $uiState.get().busy, sid: $uiState.get().sid }))
try {
for (let i = 0; i < 50; i++) {
patchTurnState({ streaming: `token ${i}` })
}
} finally {
composerRelevant.unsubscribe()
}
expect(composerRelevant.calls()).toBe(0)
})
})

View File

@ -0,0 +1,79 @@
import { describe, expect, it } from 'vitest'
import { findStableBoundary } from '../components/streamingMarkdown.js'
// findStableBoundary is the pure helper behind StreamingMd's stable-prefix
// split: it returns the index just after the last blank-line separator that
// is safe to commit as rendered markdown (never inside an open ``` fence),
// or -1 when no safe split point exists yet. We import and test it directly
// rather than mounting StreamingMd, which would need a heavy DOM/ink
// renderer for little extra coverage.
import { DEFAULT_THEME } from '../theme.js'
// The stable boundary is the last blank-line separator not inside an open
// fenced code block; -1 means "keep everything in the live (re-rendered)
// region for now".
describe('findStableBoundary', () => {
it('returns -1 when no blank line exists yet', () => {
expect(findStableBoundary('partial line with no newline yet')).toBe(-1)
})
it('returns -1 when only single newlines exist', () => {
expect(findStableBoundary('line one\nline two\nline three')).toBe(-1)
})
it('splits after the last blank line separator', () => {
// 'first\n\nsecond\n\nthird' → last blank = before 'third'
const text = 'first paragraph\n\nsecond paragraph\n\nthird'
const idx = findStableBoundary(text)
expect(text.slice(0, idx)).toBe('first paragraph\n\nsecond paragraph\n\n')
expect(text.slice(idx)).toBe('third')
})
it('refuses to split inside an open fenced block', () => {
// Fence opens, contains a blank line inside the code, no close yet.
const text = '```ts\nfn();\n\nmore code here'
expect(findStableBoundary(text)).toBe(-1)
})
it('splits before an open fenced block but not inside', () => {
const text = 'intro paragraph\n\n```ts\nfn();\n\nmore code'
const idx = findStableBoundary(text)
expect(text.slice(0, idx)).toBe('intro paragraph\n\n')
expect(text.slice(idx).startsWith('```ts')).toBe(true)
})
it('allows splitting after a fenced block closes', () => {
const text = '```ts\nfn();\n```\n\nnarration continues'
const idx = findStableBoundary(text)
expect(text.slice(0, idx)).toBe('```ts\nfn();\n```\n\n')
expect(text.slice(idx)).toBe('narration continues')
})
it('walks backwards through nested fence boundaries safely', () => {
// Two closed fences + narration + one new open fence. The only legal
// split is before the open fence, not between the closed ones.
const text = '```js\na\n```\n\nmid text\n\n```python\nstill open'
const idx = findStableBoundary(text)
expect(text.slice(0, idx)).toBe('```js\na\n```\n\nmid text\n\n')
})
it('handles empty input', () => {
expect(findStableBoundary('')).toBe(-1)
})
})
// Smoke test: the streamingMarkdown import above must not introduce module
// cycles that break the theme export.
describe('streaming theme assumption', () => {
it('theme is exportable (component import sanity check)', () => {
// Sanity that the theme we pass doesn't change shape. Component import
// already happens above — this is a smoke test that the module graph
// for streamingMarkdown wires up without cycles.
expect(DEFAULT_THEME.color.amber).toBeTruthy()
})
})

View File

@ -1,14 +1,20 @@
import { describe, expect, it } from 'vitest'
import {
boundedHistoryRenderText,
boundedLiveRenderText,
buildToolTrailLine,
edgePreview,
estimateRows,
estimateTokensRough,
fmtK,
isToolTrailResultLine,
lastCotTrailIndex,
parseToolTrailResultLine,
pasteTokenLabel,
sameToolTrailGroup
sameToolTrailGroup,
splitToolDuration,
thinkingPreview
} from '../lib/text.js'
describe('isToolTrailResultLine', () => {
@ -19,6 +25,16 @@ describe('isToolTrailResultLine', () => {
})
})
// Trail lines render as `<label>("<args>") (<dur>s) <mark>`; the duration
// (0.94 → "0.9s" here) must survive a parse/split round-trip.
describe('buildToolTrailLine', () => {
it('puts completion duration inline before the result marker', () => {
const line = buildToolTrailLine('read_file', 'x', false, '', 0.94)
expect(line).toBe('Read File("x") (0.9s) ✓')
expect(parseToolTrailResultLine(line)).toEqual({ call: 'Read File("x") (0.9s)', detail: '', mark: '✓' })
expect(splitToolDuration('Read File("x") (0.9s)')).toEqual({ label: 'Read File("x")', duration: ' (0.9s)' })
})
})
describe('lastCotTrailIndex', () => {
it('finds last non-result line', () => {
expect(lastCotTrailIndex(['a ✓', 'thinking…'])).toBe(1)
@ -68,6 +84,48 @@ describe('estimateTokensRough', () => {
})
})
// thinkingPreview re-inserts paragraph breaks before the **bold** headings
// some models run together when streaming reasoning.
describe('thinkingPreview', () => {
it('adds paragraph breaks before markdown thinking headings', () => {
const raw =
'**Considering user instructions**\nI need to answer.**Planning tool execution**\nI can run tools.**Determining weather search parameters**\nUse SF.'
expect(thinkingPreview(raw, 'full')).toBe(
'**Considering user instructions**\nI need to answer.\n\n**Planning tool execution**\nI can run tools.\n\n**Determining weather search parameters**\nUse SF.'
)
})
})
// Live rendering keeps only the tail of long streaming text, within both a
// character and a line budget, and flags the omission.
describe('boundedLiveRenderText', () => {
it('preserves short live text verbatim', () => {
expect(boundedLiveRenderText('one\ntwo', { maxChars: 100, maxLines: 10 })).toBe('one\ntwo')
})
it('keeps the live tail by character budget', () => {
const out = boundedLiveRenderText('abcdefghij', { maxChars: 4, maxLines: 10 })
expect(out).toContain('ghij')
expect(out).toContain('omitted')
expect(out).not.toContain('abcdef')
})
it('keeps the live tail by line budget', () => {
const out = boundedLiveRenderText(['a', 'b', 'c', 'd'].join('\n'), { maxChars: 100, maxLines: 2 })
expect(out).toContain('c\nd')
expect(out).toContain('omitted 2 lines')
expect(out).not.toContain('a\nb')
})
})
// Completed history uses a different omission label than the live stream.
describe('boundedHistoryRenderText', () => {
it('uses a non-live omission label for completed history', () => {
const out = boundedHistoryRenderText('abcdefghij', { maxChars: 4, maxLines: 10 })
expect(out).toContain('[showing tail; omitted')
expect(out).not.toContain('live tail')
})
})
describe('edgePreview', () => {
it('keeps both ends for long text', () => {
expect(edgePreview('Vampire Bondage ropes slipped from her neck, still stained with blood', 8, 18)).toBe(

View File

@ -1,6 +1,7 @@
import { describe, expect, it } from 'vitest'
import { cursorLayout, offsetFromPosition } from '../components/textInput.js'
import { offsetFromPosition } from '../components/textInput.js'
import { cursorLayout, inputVisualHeight, stableComposerColumns } from '../lib/inputMetrics.js'
describe('cursorLayout — char-wrap parity with wrap-ansi', () => {
it('places cursor mid-line at its column', () => {
@ -35,6 +36,19 @@ describe('cursorLayout — char-wrap parity with wrap-ansi', () => {
})
})
// inputVisualHeight counts wrapped composer rows at a given width;
// stableComposerColumns reserves gutter columns (width 100 → 93 with 3
// gutters here) while still yielding a positive width on tiny panes.
describe('input metrics helpers', () => {
it('computes visual height from the wrapped cursor line', () => {
expect(inputVisualHeight('abcdefgh', 8)).toBe(2)
expect(inputVisualHeight('one\ntwo', 40)).toBe(2)
})
it('reserves gutters on wide panes without starving narrow composer width', () => {
expect(stableComposerColumns(100, 3)).toBe(93)
expect(stableComposerColumns(10, 3)).toBe(5)
expect(stableComposerColumns(6, 3)).toBe(1)
})
})
describe('offsetFromPosition — char-wrap inverse of cursorLayout', () => {
it('returns 0 for empty input', () => {
expect(offsetFromPosition('', 0, 0, 10)).toBe(0)

View File

@ -0,0 +1,66 @@
import { beforeEach, describe, expect, it } from 'vitest'
import {
archiveDoneTodos,
archiveTodosAtTurnEnd,
getTurnState,
patchTurnState,
resetTurnState,
toggleTodoCollapsed
} from '../app/turnStore.js'
// At turn end, live todos are archived into a transcript trail message and
// the live anchor (turn state) is cleared; incomplete sets carry a flag so
// the UI can render an "unfinished" hint.
describe('turnStore live progress helpers', () => {
beforeEach(() => resetTurnState())
it('archives completed todos into a transcript trail and clears the live anchor', () => {
patchTurnState({
todos: [
{ content: 'prep', id: 'prep', status: 'completed' },
{ content: 'serve', id: 'serve', status: 'completed' }
]
})
expect(archiveTodosAtTurnEnd()).toEqual([
{
kind: 'trail',
role: 'system',
text: '',
todoCollapsedByDefault: true,
todos: [
{ content: 'prep', id: 'prep', status: 'completed' },
{ content: 'serve', id: 'serve', status: 'completed' }
]
}
])
expect(getTurnState().todos).toEqual([])
})
it('archives incomplete todos with an incomplete flag so the hint renders', () => {
patchTurnState({
todos: [
{ content: 'cook', id: 'cook', status: 'completed' },
{ content: 'serve', id: 'serve', status: 'in_progress' },
{ content: 'eat', id: 'eat', status: 'pending' }
]
})
const archived = archiveTodosAtTurnEnd()
expect(archived).toHaveLength(1)
expect(archived[0]!.todoIncomplete).toBe(true)
// Original order is preserved in the archived trail.
expect(archived[0]!.todos?.map(t => t.id)).toEqual(['cook', 'serve', 'eat'])
expect(getTurnState().todos).toEqual([])
})
it('returns nothing when there are no todos at turn end', () => {
expect(archiveTodosAtTurnEnd()).toEqual([])
expect(archiveDoneTodos()).toEqual([])
})
it('tracks collapsed state independently of todo content', () => {
toggleTodoCollapsed()
expect(getTurnState().todoCollapsed).toBe(true)
toggleTodoCollapsed()
expect(getTurnState().todoCollapsed).toBe(false)
})
})

View File

@ -0,0 +1,38 @@
import { describe, expect, it } from 'vitest'
import { getViewportSnapshot, viewportSnapshotKey } from '../lib/viewportStore.js'
// Viewport snapshots normalize a possibly-null scroll handle and fold the
// pending (not yet applied) scroll delta into the top/bottom math; the key
// is a compact string used for change detection.
describe('viewportStore', () => {
it('normalizes absent scroll handles', () => {
expect(getViewportSnapshot(null)).toEqual({
atBottom: true,
bottom: 0,
pending: 0,
scrollHeight: 0,
top: 0,
viewportHeight: 0
})
})
it('includes pending scroll delta in snapshot math and keying', () => {
const handle = {
getPendingDelta: () => 3,
getScrollHeight: () => 40,
getScrollTop: () => 10,
getViewportHeight: () => 5,
isSticky: () => false
}
const snap = getViewportSnapshot(handle as any)
// top = scrollTop 10 + pending 3; bottom = top + viewport 5.
expect(snap).toMatchObject({
atBottom: false,
bottom: 18,
pending: 3,
scrollHeight: 40,
top: 13,
viewportHeight: 5
})
expect(viewportSnapshotKey(snap)).toBe('0:16:5:40:3')
})
})

View File

@ -0,0 +1,27 @@
import { describe, expect, it } from 'vitest'
import { estimatedMsgHeight, messageHeightKey, wrappedLines } from '../lib/virtualHeights.js'
import type { Msg } from '../types.js'
// Height estimates feed the virtualized history: keys must be stable across
// re-created (resumed) message objects so cached heights are reusable, and
// estimates must grow with wrapping and with visible detail sections.
describe('virtual height estimates', () => {
it('uses stable content keys across resumed message objects', () => {
const msg: Msg = { role: 'assistant', text: 'same text', tools: ['Search Files [long message]'] }
expect(messageHeightKey(msg)).toBe(messageHeightKey({ ...msg }))
})
it('accounts for wrapping and preserved blank-block rhythm', () => {
const msg: Msg = { role: 'assistant', text: `one\n\n${'x'.repeat(90)}` }
expect(wrappedLines(msg.text, 30)).toBe(5)
expect(estimatedMsgHeight(msg, 35, { compact: false, details: false })).toBeGreaterThan(5)
})
it('includes detail sections when visible', () => {
const msg: Msg = { role: 'assistant', text: 'ok', thinking: 'line 1\nline 2', tools: ['Tool A', 'Tool B'] }
expect(estimatedMsgHeight(msg, 80, { compact: false, details: true })).toBeGreaterThan(
estimatedMsgHeight(msg, 80, { compact: false, details: false })
)
})
})

View File

@ -0,0 +1,19 @@
import { describe, expect, it } from 'vitest'
import { shouldSetVirtualClamp } from '../hooks/useVirtualHistory.js'
// The virtual-history clamp only engages once the user has scrolled away
// from the sticky live tail, and never while a live tail is still growing.
describe('virtual history clamp bounds', () => {
it('does not clamp sticky live tail content', () => {
expect(shouldSetVirtualClamp({ itemCount: 20, sticky: true, viewportHeight: 10 })).toBe(false)
})
it('sets clamp bounds after manual scroll breaks sticky mode', () => {
expect(shouldSetVirtualClamp({ itemCount: 20, sticky: false, viewportHeight: 10 })).toBe(true)
})
it('does not clamp while a live tail is growing below virtual history', () => {
expect(shouldSetVirtualClamp({ itemCount: 20, liveTailActive: true, sticky: false, viewportHeight: 10 })).toBe(
false
)
})
})

View File

@ -0,0 +1,138 @@
import { describe, expect, it } from 'vitest'
import { computeWheelStep, initWheelAccel } from '../lib/wheelAccel.js'
// Native-terminal wheel path: step size ramps while same-direction events
// arrive inside a short window, resets after a gap, and defers one event on
// a direction flip to tell scroll-bounce from a real reversal.
describe('wheelAccel — native path', () => {
it('first click after init returns base', () => {
const s = initWheelAccel(false, 1)
expect(computeWheelStep(s, 1, 1000)).toBe(1)
})
it('same-direction fast events ramp mult (window-mode)', () => {
const s = initWheelAccel(false, 1)
computeWheelStep(s, 1, 1000)
computeWheelStep(s, 1, 1020)
computeWheelStep(s, 1, 1040)
// Key property: doesn't shrink below base.
expect(computeWheelStep(s, 1, 1060)).toBeGreaterThanOrEqual(1)
})
it('gap beyond window resets mult to base', () => {
const s = initWheelAccel(false, 1)
for (let t = 1000; t < 1100; t += 20) {
computeWheelStep(s, 1, t)
}
expect(computeWheelStep(s, 1, 2000)).toBe(1)
})
it('direction flip defers one event for bounce detection', () => {
// The flipped event yields 0 rows once, held to see if it bounces back.
const s = initWheelAccel(false, 1)
computeWheelStep(s, 1, 1000)
expect(computeWheelStep(s, -1, 1050)).toBe(0)
})
it('flip-back within bounce window engages wheelMode', () => {
const s = initWheelAccel(false, 1)
computeWheelStep(s, 1, 1000)
computeWheelStep(s, -1, 1050)
computeWheelStep(s, 1, 1100)
expect(s.wheelMode).toBe(true)
})
it('flip-back outside bounce window is a real reversal (no wheelMode)', () => {
const s = initWheelAccel(false, 1)
computeWheelStep(s, 1, 1000)
computeWheelStep(s, -1, 1050)
computeWheelStep(s, 1, 1400)
expect(s.wheelMode).toBe(false)
})
it('5 consecutive sub-5ms events disengage wheelMode (trackpad signature)', () => {
const s = initWheelAccel(false, 1)
s.wheelMode = true
s.dir = 1
s.time = 1000
for (let t = 1002; t <= 1010; t += 2) {
computeWheelStep(s, 1, t)
}
expect(s.wheelMode).toBe(false)
})
it('1.5s idle disengages wheelMode', () => {
const s = initWheelAccel(false, 1)
s.wheelMode = true
s.dir = 1
s.time = 1000
computeWheelStep(s, 1, 3000)
expect(s.wheelMode).toBe(false)
})
})
// xterm.js wheel path: steady scrolling stays in a small precision range,
// same-batch sub-5ms bursts collapse to 1, a reversal resets mult, and the
// fractional carry invariant 0 ≤ frac < 1 holds across events.
describe('wheelAccel — xterm.js path', () => {
it('first click returns 2 after long idle', () => {
const s = initWheelAccel(true, 1)
expect(computeWheelStep(s, 1, 1000)).toBeGreaterThanOrEqual(1)
})
it('sub-5ms burst returns 1 (same-direction, same-batch)', () => {
const s = initWheelAccel(true, 1)
computeWheelStep(s, 1, 1000)
expect(computeWheelStep(s, 1, 1002)).toBe(1)
})
it('slow steady scroll stays in precision range', () => {
const s = initWheelAccel(true, 1)
for (let t = 1000; t < 2000; t += 33) {
const r = computeWheelStep(s, 1, t)
expect(r).toBeGreaterThanOrEqual(1)
expect(r).toBeLessThanOrEqual(6)
}
})
it('direction reversal resets mult', () => {
const s = initWheelAccel(true, 1)
for (let t = 1000; t < 1100; t += 20) {
computeWheelStep(s, 1, t)
}
const beforeFlip = s.mult
computeWheelStep(s, -1, 1200)
expect(s.mult).toBeLessThanOrEqual(beforeFlip)
expect(s.mult).toBe(2)
})
it('frac stays in [0,1) across events', () => {
const s = initWheelAccel(true, 1)
// Correctness invariant of fractional carry: never negative, never reaches 1.
for (let t = 1000; t < 1200; t += 30) {
computeWheelStep(s, 1, t)
expect(s.frac).toBeGreaterThanOrEqual(0)
expect(s.frac).toBeLessThan(1)
}
})
})

View File

@ -220,7 +220,12 @@ export function createGatewayEventHandler(ctx: GatewayEventHandlerContext): (ev:
const text = ev.payload?.text
if (text !== undefined) {
scheduleThinkingStatus(text ? String(text) : statusFromBusy())
const value = String(text)
scheduleThinkingStatus(value || statusFromBusy())
if (value) {
turnController.recordReasoningDelta(value)
}
}
return
@ -367,6 +372,7 @@ export function createGatewayEventHandler(ctx: GatewayEventHandlerContext): (ev:
return
case 'tool.start':
turnController.recordTodos(ev.payload.todos)
turnController.recordToolStart(ev.payload.tool_id, ev.payload.name ?? 'tool', ev.payload.context ?? '')
return
@ -374,24 +380,25 @@ export function createGatewayEventHandler(ctx: GatewayEventHandlerContext): (ev:
const inlineDiffText =
ev.payload.inline_diff && getUiState().inlineDiffs ? stripAnsi(String(ev.payload.inline_diff)).trim() : ''
turnController.recordToolComplete(
ev.payload.tool_id,
ev.payload.name,
ev.payload.error,
inlineDiffText ? '' : ev.payload.summary
)
if (!inlineDiffText) {
return
if (inlineDiffText) {
turnController.recordInlineDiffToolComplete(
inlineDiffText,
ev.payload.tool_id,
ev.payload.name,
ev.payload.error,
ev.payload.duration_s
)
} else {
turnController.recordToolComplete(
ev.payload.tool_id,
ev.payload.name,
ev.payload.error,
ev.payload.summary,
ev.payload.duration_s,
ev.payload.todos
)
}
// Anchor the diff to where the edit happened in the turn — between
// the narration that preceded the tool call and whatever the agent
// streams afterwards. The previous end-merge put the diff at the
// bottom of the final message even when the edit fired mid-turn,
// which read as "the agent wrote this after saying that".
turnController.pushInlineDiffSegment(inlineDiffText)
return
}

View File

@ -7,8 +7,6 @@ import type { ImageAttachResponse } from '../gatewayTypes.js'
import type { RpcResult } from '../lib/rpc.js'
import type { Theme } from '../theme.js'
import type {
ActiveTool,
ActivityItem,
ApprovalReq,
ClarifyReq,
ConfirmReq,
@ -19,7 +17,6 @@ import type {
SectionVisibility,
SessionInfo,
SlashCatalog,
SubagentProgress,
SudoReq,
Usage
} from '../types.js'
@ -31,8 +28,12 @@ export interface StateSetter<T> {
export type StatusBarMode = 'bottom' | 'off' | 'top'
export interface SelectionApi {
captureScrolledRows: (firstRow: number, lastRow: number, side: 'above' | 'below') => void
clearSelection: () => void
copySelection: () => Promise<string>
getState: () => unknown
shiftAnchor: (dRow: number, minRow: number, maxRow: number) => void
shiftSelection: (dRow: number, minRow: number, maxRow: number) => void
}
export interface CompletionItem {
@ -86,6 +87,7 @@ export interface UiState {
busy: boolean
compact: boolean
detailsMode: DetailsMode
detailsModeCommandOverride: boolean
info: null | SessionInfo
inlineDiffs: boolean
mouseTracking: boolean
@ -303,21 +305,7 @@ export interface AppLayoutComposerProps {
}
export interface AppLayoutProgressProps {
activity: ActivityItem[]
outcome: string
reasoning: string
reasoningActive: boolean
reasoningStreaming: boolean
reasoningTokens: number
showProgressArea: boolean
showStreamingArea: boolean
streamPendingTools: string[]
streamSegments: Msg[]
streaming: string
subagents: SubagentProgress[]
toolTokens: number
tools: ActiveTool[]
turnTrail: string[]
}
export interface AppLayoutStatusProps {

55
ui-tui/src/app/scroll.ts Normal file
View File

@ -0,0 +1,55 @@
import type { ScrollBoxHandle } from '@hermes/ink'
import type { SelectionApi } from './interfaces.js'
// Minimal snapshot of the transcript selection used by the scroll math
// below. Rows appear to be absolute content rows (compared against
// getViewportTop-based bounds); all pieces optional.
export interface SelectionSnap {
anchor?: { row: number } | null
focus?: { row: number } | null
isDragging?: boolean
}
// Dependencies for scrollWithSelectionBy: the live scrollbox handle ref and
// the selection API whose highlight must track the scrolled content.
export interface ScrollWithSelectionOptions {
readonly scrollRef: { readonly current: ScrollBoxHandle | null }
readonly selection: SelectionApi
}
/**
 * Scroll the transcript by `delta` rows while keeping any on-screen
 * selection visually anchored to its content.
 *
 * The requested delta is clamped to the distance the content can actually
 * move (accounting for deltas already queued via getPendingDelta), so no
 * dead pending deltas accumulate at the edges. When the selection anchor is
 * inside the viewport (and, unless dragging, the focus too), the rows about
 * to leave the viewport are captured first, then the selection is shifted
 * opposite to the scroll so it stays glued to the same content rows.
 */
export function scrollWithSelectionBy(delta: number, { scrollRef, selection }: ScrollWithSelectionOptions): void {
  const box = scrollRef.current
  if (!box) {
    return
  }
  // Effective position includes deltas queued but not yet applied.
  const position = box.getScrollTop() + box.getPendingDelta()
  const height = Math.max(0, box.getViewportHeight())
  const limit = Math.max(0, box.getScrollHeight() - height)
  const applied = Math.min(limit, Math.max(0, position + delta)) - position
  if (applied === 0) {
    return
  }
  const snap = selection.getState() as null | SelectionSnap
  const firstRow = box.getViewportTop()
  const lastRow = firstRow + height - 1
  const anchorOnScreen = Boolean(snap?.anchor) && snap!.anchor!.row >= firstRow && snap!.anchor!.row <= lastRow
  const focusOnScreen = Boolean(snap?.focus) && snap!.focus!.row >= firstRow && snap!.focus!.row <= lastRow
  if (snap?.anchor && snap.focus && anchorOnScreen && (snap.isDragging || focusOnScreen)) {
    // Capture the rows scrolling out of view so the selection survives…
    if (applied > 0) {
      selection.captureScrolledRows(firstRow, firstRow + applied - 1, 'above')
    } else {
      selection.captureScrolledRows(lastRow + applied + 1, lastRow, 'below')
    }
    // …then counter-shift the on-screen selection (anchor only while
    // dragging, whole selection otherwise).
    const counterShift = snap.isDragging ? selection.shiftAnchor : selection.shiftSelection
    counterShift(-applied, firstRow, lastRow)
  }
  box.scrollBy(applied)
}

View File

@ -184,7 +184,7 @@ export const coreCommands: SlashCommand[] = [
}
const mode = parseDetailsMode(r?.value) ?? ui.detailsMode
patchUiState({ detailsMode: mode })
patchUiState({ detailsMode: mode, detailsModeCommandOverride: false })
const overrides = SECTION_NAMES.filter(s => ui.sections[s])
.map(s => `${s}=${ui.sections[s]}`)
@ -224,7 +224,7 @@ export const coreCommands: SlashCommand[] = [
return transcript.sys(DETAILS_USAGE)
}
patchUiState({ detailsMode: next })
patchUiState({ detailsMode: next, detailsModeCommandOverride: true })
gateway.rpc<ConfigSetResponse>('config.set', { key: 'details_mode', value: next }).catch(() => {})
transcript.sys(`details: ${next}`)
}
@ -260,7 +260,9 @@ export const coreCommands: SlashCommand[] = [
if (text) {
return sys(`copied ${text.length} characters`)
} else {
return sys('clipboard copy failed — try HERMES_TUI_FORCE_OSC52=1 to force the escape sequence; HERMES_TUI_DEBUG_CLIPBOARD=1 for details')
return sys(
'clipboard copy failed — try HERMES_TUI_FORCE_OSC52=1 to force the escape sequence; HERMES_TUI_DEBUG_CLIPBOARD=1 for details'
)
}
}

View File

@ -220,7 +220,7 @@ export const opsCommands: SlashCommand[] = [
const [sub, ...rest] = text.split(/\s+/)
const query = rest.join(' ').trim()
const { rpc } = ctx.gateway
const { page, panel, sys } = ctx.transcript
const { panel, sys } = ctx.transcript
if (sub === 'list') {
rpc<SkillsListResponse>('skills.manage', { action: 'list' })

View File

@ -15,6 +15,14 @@ import { patchOverlayState } from '../../overlayStore.js'
import { patchUiState } from '../../uiStore.js'
import type { SlashCommand } from '../types.js'
// Matches a standalone `--global` token anywhere in the argument string.
const GLOBAL_MODEL_FLAG_RE = /(?:^|\s)--global(?:\s|$)/
// Normalize a /model argument for persistence: empty input passes through
// unchanged, and a concrete model name gets `--global` appended unless the
// caller already supplied the flag.
const persistedModelArg = (arg: string) => {
  const value = arg.trim()
  if (!value || GLOBAL_MODEL_FLAG_RE.test(value)) {
    return value
  }
  return `${value} --global`
}
export const sessionCommands: SlashCommand[] = [
{
aliases: ['bg', 'btw'],
@ -47,25 +55,27 @@ export const sessionCommands: SlashCommand[] = [
return
}
if (!arg) {
if (!arg.trim()) {
return patchOverlayState({ modelPicker: true })
}
ctx.gateway.rpc<ConfigSetResponse>('config.set', { key: 'model', session_id: ctx.sid, value: arg.trim() }).then(
ctx.guarded<ConfigSetResponse>(r => {
if (!r.value) {
return ctx.transcript.sys('error: invalid response: model switch')
}
ctx.gateway
.rpc<ConfigSetResponse>('config.set', { key: 'model', session_id: ctx.sid, value: persistedModelArg(arg) })
.then(
ctx.guarded<ConfigSetResponse>(r => {
if (!r.value) {
return ctx.transcript.sys('error: invalid response: model switch')
}
ctx.transcript.sys(`model → ${r.value}`)
ctx.local.maybeWarn(r)
ctx.transcript.sys(`model → ${r.value}`)
ctx.local.maybeWarn(r)
patchUiState(state => ({
...state,
info: state.info ? { ...state.info, model: r.value! } : { model: r.value!, skills: {}, tools: {} }
}))
})
)
patchUiState(state => ({
...state,
info: state.info ? { ...state.info, model: r.value! } : { model: r.value!, skills: {}, tools: {} }
}))
})
)
}
},

View File

@ -1,18 +1,26 @@
import { REASONING_PULSE_MS, STREAM_BATCH_MS } from '../config/timing.js'
import {
REASONING_PULSE_MS,
STREAM_BATCH_MS,
STREAM_IDLE_BATCH_MS,
STREAM_SCROLL_BATCH_MS,
STREAM_TYPING_BATCH_MS
} from '../config/timing.js'
import type { SessionInterruptResponse, SubagentEventPayload } from '../gatewayTypes.js'
import { appendToolShelfMessage, isToolShelfMessage } from '../lib/liveProgress.js'
import { hasReasoningTag, splitReasoning } from '../lib/reasoning.js'
import {
boundedLiveRenderText,
buildToolTrailLine,
estimateTokensRough,
isTransientTrailLine,
sameToolTrailGroup,
toolTrailLabel
} from '../lib/text.js'
import type { ActiveTool, ActivityItem, Msg, SubagentProgress } from '../types.js'
import type { ActiveTool, ActivityItem, Msg, SubagentProgress, TodoItem } from '../types.js'
import { resetFlowOverlays } from './overlayStore.js'
import { pushSnapshot } from './spawnHistoryStore.js'
import { getTurnState, patchTurnState, resetTurnState } from './turnStore.js'
import { archiveDoneTodos, getTurnState, patchTurnState, resetTurnState } from './turnStore.js'
import { getUiState, patchUiState } from './uiStore.js'
const INTERRUPT_COOLDOWN_MS = 1500
@ -33,10 +41,53 @@ const diffSegmentBody = (msg: Msg): null | string => {
return m ? m[1]! : null
}
const insertBeforeFirstDiff = (segments: Msg[], msg: Msg): Msg[] => {
const index = segments.findIndex(segment => segment.kind === 'diff')
const hasDetails = (msg: Msg): boolean => Boolean(msg.thinking || msg.tools?.length || msg.toolTokens)
return index < 0 ? [...segments, msg] : [...segments.slice(0, index), msg, ...segments.slice(index)]
// Type guard for the four todo lifecycle states accepted from the gateway.
const isTodoStatus = (status: unknown): status is TodoItem['status'] =>
status === 'pending' || status === 'in_progress' || status === 'completed' || status === 'cancelled'
// Defensively parse an untyped gateway payload into TodoItem[]. A non-array
// yields null (callers treat that as "leave current todos untouched" — see
// recordTodos); rows that are not objects, have an unknown status, or end up
// with an empty id/content after trimming are dropped.
const parseTodos = (value: unknown): null | TodoItem[] => {
if (!Array.isArray(value)) {
return null
}
return value
.map(item => {
if (!item || typeof item !== 'object') {
return null
}
const row = item as Record<string, unknown>
const status = row.status
if (!isTodoStatus(status)) {
return null
}
return {
content: String(row.content ?? '').trim(),
id: String(row.id ?? '').trim(),
status
}
})
.filter((item): item is TodoItem => Boolean(item?.id && item.content))
}
// Plain assistant text already emitted as stream segments (diff segments
// excluded — their text is not part of the narration).
const textSegments = (segments: Msg[]) =>
segments.filter(msg => msg.role === 'assistant' && msg.kind !== 'diff').map(msg => msg.text)
// Strip already-streamed segment text from the front of the final message so
// only the not-yet-displayed tail remains. Note this only removes segments
// that match as a prefix in order; any non-matching segment leaves the rest
// of the tail intact.
const finalTail = (finalText: string, segments: Msg[]) => {
let tail = finalText
for (const text of textSegments(segments)) {
const trimmed = text.trim()
if (trimmed && tail.startsWith(trimmed)) {
tail = tail.slice(trimmed.length).trimStart()
}
}
return tail
}
export interface InterruptDeps {
@ -71,14 +122,31 @@ class TurnController {
turnTools: string[] = []
private activeTools: ActiveTool[] = []
private activeReasoningText = ''
private reasoningSegmentIndex: null | number = null
private activityId = 0
private reasoningStreamingTimer: Timer = null
private reasoningTimer: Timer = null
private streamTimer: Timer = null
private streamDelay = STREAM_IDLE_BATCH_MS
private toolProgressTimer: Timer = null
boostStreamingForTyping() {
this.streamDelay = STREAM_TYPING_BATCH_MS
}
boostStreamingForScroll() {
this.streamDelay = Math.max(this.streamDelay, STREAM_SCROLL_BATCH_MS)
}
relaxStreaming() {
this.streamDelay = STREAM_IDLE_BATCH_MS
}
clearReasoning() {
this.reasoningTimer = clear(this.reasoningTimer)
this.activeReasoningText = ''
this.reasoningSegmentIndex = null
this.reasoningText = ''
this.toolTokenAcc = 0
patchTurnState({ reasoning: '', reasoningTokens: 0, toolTokens: 0 })
@ -117,6 +185,8 @@ class TurnController {
this.interrupted = true
gw.request<SessionInterruptResponse>('session.interrupt', { session_id: sid }).catch(() => {})
this.closeReasoningSegment()
const segments = this.segmentMessages
const partial = this.bufRef.trimStart()
const tools = this.pendingSegmentTools
@ -165,31 +235,72 @@ class TurnController {
})
}
flushStreamingSegment() {
const raw = this.bufRef.trimStart()
private syncReasoningSegment() {
const thinking = this.activeReasoningText.trim()
if (!raw) {
if (!thinking) {
return
}
const split = hasReasoningTag(raw) ? splitReasoning(raw) : { reasoning: '', text: raw }
const msg: Msg = {
kind: 'trail',
role: 'system',
text: '',
thinking,
thinkingTokens: estimateTokensRough(thinking),
toolTokens: this.toolTokenAcc || undefined
}
if (this.reasoningSegmentIndex === null) {
this.reasoningSegmentIndex = this.segmentMessages.length
this.segmentMessages = [...this.segmentMessages, msg]
} else {
this.segmentMessages = this.segmentMessages.map((item, i) => (i === this.reasoningSegmentIndex ? msg : item))
}
patchTurnState({ streamSegments: this.segmentMessages })
}
private closeReasoningSegment() {
this.syncReasoningSegment()
this.activeReasoningText = ''
this.reasoningSegmentIndex = null
}
private pushSegment(msg: Msg) {
this.segmentMessages = appendToolShelfMessage(this.segmentMessages, msg)
}
flushStreamingSegment() {
const raw = this.bufRef.trimStart()
const split = raw
? hasReasoningTag(raw)
? splitReasoning(raw)
: { reasoning: '', text: raw }
: { reasoning: '', text: '' }
if (split.reasoning && !this.reasoningText.trim()) {
this.reasoningText = split.reasoning
this.activeReasoningText = split.reasoning
patchTurnState({ reasoning: this.reasoningText, reasoningTokens: estimateTokensRough(this.reasoningText) })
this.syncReasoningSegment()
}
const text = split.text
const msg: Msg = {
role: split.text ? 'assistant' : 'system',
text: split.text,
...(!split.text && { kind: 'trail' as const }),
...(this.pendingSegmentTools.length && { tools: this.pendingSegmentTools })
}
this.streamTimer = clear(this.streamTimer)
if (text) {
const tools = this.pendingSegmentTools
this.segmentMessages = [...this.segmentMessages, { role: 'assistant', text, ...(tools.length && { tools }) }]
this.pendingSegmentTools = []
if (split.text || hasDetails(msg)) {
this.pushSegment(msg)
}
this.pendingSegmentTools = []
this.bufRef = ''
patchTurnState({ streamPendingTools: [], streamSegments: this.segmentMessages, streaming: '' })
}
@ -204,7 +315,38 @@ class TurnController {
}, REASONING_PULSE_MS)
}
pushInlineDiffSegment(diffText: string) {
// Accept a raw gateway todo payload. A payload that fails to parse
// (parseTodos → null) is ignored rather than clobbering the current list;
// a valid-but-empty array does clear it.
recordTodos(value: unknown) {
const todos = parseTodos(value)
if (todos !== null) {
patchTurnState({ todos })
}
}
// Try to merge queued tool labels into the trailing tool-shelf segment.
// Returns true only when a merge happened: if appendToolShelfMessage grew the
// list by one it appended a fresh segment instead of merging (presumably
// because the last segment is not a shelf — confirm against its impl), so we
// discard that result and keep the tools pending for the next flush.
private flushPendingToolsIntoLastSegment() {
if (!this.pendingSegmentTools.length) {
return false
}
const next = appendToolShelfMessage(this.segmentMessages, {
kind: 'trail',
role: 'system',
text: '',
tools: this.pendingSegmentTools
})
if (next.length === this.segmentMessages.length + 1) {
return false
}
this.segmentMessages = next
this.pendingSegmentTools = []
patchTurnState({ streamPendingTools: [], streamSegments: this.segmentMessages })
return true
}
pushInlineDiffSegment(diffText: string, tools: string[] = []) {
// Strip CLI chrome the gateway emits before the unified diff (e.g. a
// leading "┊ review diff" header written by `_emit_inline_diff` for the
// terminal printer). That header only makes sense as stdout dressing,
@ -231,7 +373,10 @@ class TurnController {
return
}
this.segmentMessages = [...this.segmentMessages, { kind: 'diff', role: 'assistant', text: block }]
this.segmentMessages = [
...this.segmentMessages,
{ kind: 'diff', role: 'assistant', text: block, ...(tools.length && { tools }) }
]
patchTurnState({ streamSegments: this.segmentMessages })
}
@ -276,14 +421,25 @@ class TurnController {
}
recordMessageComplete(payload: { rendered?: string; reasoning?: string; text?: string }) {
this.closeReasoningSegment()
const rawText = (payload.rendered ?? payload.text ?? this.bufRef).trimStart()
const split = splitReasoning(rawText)
const finalText = split.text
const finalText = finalTail(split.text, this.segmentMessages)
const existingReasoning = this.reasoningText.trim() || String(payload.reasoning ?? '').trim()
const savedReasoning = [existingReasoning, existingReasoning ? '' : split.reasoning].filter(Boolean).join('\n\n')
const savedReasoningTokens = savedReasoning ? estimateTokensRough(savedReasoning) : 0
const savedToolTokens = this.toolTokenAcc
const tools = this.pendingSegmentTools
let tools = this.pendingSegmentTools
const last = this.segmentMessages[this.segmentMessages.length - 1]
if (tools.length && isToolShelfMessage(last)) {
this.segmentMessages = [
...this.segmentMessages.slice(0, -1),
{ ...last, tools: [...(last.tools ?? []), ...tools] }
]
this.pendingSegmentTools = []
tools = []
}
// Drop diff-only segments the agent is about to narrate in the final
// reply. Without this, a closing "here's the diff …" message would
@ -298,32 +454,31 @@ class TurnController {
return body === null || (!finalHasOwnDiffFence && !finalText.includes(body))
})
const hasDiffSegment = segments.some(msg => msg.kind === 'diff')
const detailsBelongBeforeDiff = hasDiffSegment && (tools.length > 0 || Boolean(savedReasoning))
const hasReasoningSegment =
this.reasoningSegmentIndex !== null || segments.some(msg => Boolean(msg.thinking?.trim()))
const finalMessages = detailsBelongBeforeDiff
? insertBeforeFirstDiff(segments, {
kind: 'trail',
role: 'system',
text: '',
thinking: savedReasoning || undefined,
thinkingTokens: savedReasoning ? savedReasoningTokens : undefined,
toolTokens: savedToolTokens || undefined,
...(tools.length && { tools })
})
: [...segments]
const finalThinking = hasReasoningSegment ? '' : savedReasoning.trim()
const finalDetails: Msg = {
kind: 'trail',
role: 'system',
text: '',
thinking: finalThinking || undefined,
thinkingTokens: finalThinking ? estimateTokensRough(finalThinking) : undefined,
toolTokens: savedToolTokens || undefined,
...(tools.length && { tools })
}
// Archive prepended so the trail msg anchors under the user prompt,
// not between thinking/tools and final assistant text.
const finalMessages: Msg[] = [
...archiveDoneTodos(),
...segments,
...(hasDetails(finalDetails) ? [finalDetails] : [])
]
if (finalText) {
finalMessages.push({
role: 'assistant',
text: finalText,
...(!detailsBelongBeforeDiff && {
thinking: savedReasoning || undefined,
thinkingTokens: savedReasoning ? savedReasoningTokens : undefined,
toolTokens: savedToolTokens || undefined,
...(tools.length && { tools })
})
})
finalMessages.push({ role: 'assistant', text: finalText })
}
const wasInterrupted = this.interrupted
@ -347,6 +502,7 @@ class TurnController {
this.turnTools = []
this.persistedToolLabels.clear()
this.bufRef = ''
this.interrupted = false
patchTurnState({ activity: [], outcome: '' })
return { finalMessages, finalText, wasInterrupted }
@ -379,7 +535,9 @@ class TurnController {
}
this.reasoningText = incoming
this.activeReasoningText = incoming
this.scheduleReasoning()
this.syncReasoningSegment()
this.pulseReasoningStreaming()
}
@ -388,19 +546,65 @@ class TurnController {
return
}
if (!this.activeReasoningText.trim() && this.pendingSegmentTools.length) {
this.flushStreamingSegment()
}
this.reasoningText += text
this.activeReasoningText += text
if (this.reasoningText.length > 80_000) {
this.reasoningText = this.reasoningText.slice(-60_000)
}
this.scheduleReasoning()
this.syncReasoningSegment()
this.pulseReasoningStreaming()
}
recordToolComplete(toolId: string, fallbackName?: string, error?: string, summary?: string) {
// Finalize a tool invocation: record any todo payload the tool reported,
// fold the completed tool into the segment trail, and publish the refreshed
// tool state to the turn store.
recordToolComplete(
  toolId: string,
  fallbackName?: string,
  error?: string,
  summary?: string,
  duration?: number,
  todos?: unknown
) {
  this.recordTodos(todos)
  const line = this.completeTool(toolId, fallbackName, error, summary, duration)
  // NOTE(review): completeTool appears to append `line` to pendingSegmentTools
  // itself — confirm this second append is intentional and not a duplicate.
  this.pendingSegmentTools = [...this.pendingSegmentTools, line]
  this.flushPendingToolsIntoLastSegment()
  this.publishToolState()
}
// Complete a tool whose output is an inline diff: close any in-flight
// streaming segment first so the diff lands in its own segment, then attach
// the finished tool trail line to that diff segment and publish tool state.
recordInlineDiffToolComplete(
  diffText: string,
  toolId: string,
  fallbackName?: string,
  error?: string,
  duration?: number
) {
  this.flushStreamingSegment()
  const completedLine = this.completeTool(toolId, fallbackName, error, '', duration)
  this.pushInlineDiffSegment(diffText, [completedLine])
  this.publishToolState()
}
private completeTool(toolId: string, fallbackName?: string, error?: string, summary?: string, duration?: number) {
const done = this.activeTools.find(tool => tool.id === toolId)
const name = done?.name ?? fallbackName ?? 'tool'
const label = toolTrailLabel(name)
const line = buildToolTrailLine(name, done?.context || '', Boolean(error), error || summary || '')
const fallbackDuration = done?.startedAt ? (Date.now() - done.startedAt) / 1000 : undefined
const line = buildToolTrailLine(
name,
done?.context || '',
Boolean(error),
error || summary || '',
duration ?? fallbackDuration
)
this.activeTools = this.activeTools.filter(tool => tool.id !== toolId)
this.pendingSegmentTools = [...this.pendingSegmentTools, line]
const next = this.turnTools.filter(item => !sameToolTrailGroup(label, item))
@ -409,6 +613,11 @@ class TurnController {
}
this.turnTools = next.slice(-TRAIL_LIMIT)
return line
}
private publishToolState() {
patchTurnState({
streamPendingTools: this.pendingSegmentTools,
tools: this.activeTools,
@ -437,6 +646,7 @@ class TurnController {
recordToolStart(toolId: string, name: string, context: string) {
this.flushStreamingSegment()
this.closeReasoningSegment()
this.pruneTransient()
this.endReasoningPhase()
@ -455,8 +665,10 @@ class TurnController {
this.bufRef = ''
this.interrupted = false
this.lastStatusNote = ''
this.activeReasoningText = ''
this.pendingSegmentTools = []
this.protocolWarned = false
this.reasoningSegmentIndex = null
this.segmentMessages = []
this.turnTools = []
this.toolTokenAcc = 0
@ -492,14 +704,16 @@ class TurnController {
this.streamTimer = null
const raw = this.bufRef.trimStart()
const visible = hasReasoningTag(raw) ? splitReasoning(raw).text : raw
patchTurnState({ streaming: visible })
}, STREAM_BATCH_MS)
patchTurnState({ streaming: boundedLiveRenderText(visible) })
}, this.streamDelay)
}
startMessage() {
this.endReasoningPhase()
this.clearReasoning()
this.activeTools = []
this.activeReasoningText = ''
this.reasoningSegmentIndex = null
this.turnTools = []
this.toolTokenAcc = 0
this.persistedToolLabels.clear()

View File

@ -1,6 +1,8 @@
import { atom } from 'nanostores'
import { useSyncExternalStore } from 'react'
import type { ActiveTool, ActivityItem, Msg, SubagentProgress } from '../types.js'
import { isTodoDone } from '../lib/liveProgress.js'
import type { ActiveTool, ActivityItem, Msg, SubagentProgress, TodoItem } from '../types.js'
const buildTurnState = (): TurnState => ({
activity: [],
@ -13,6 +15,8 @@ const buildTurnState = (): TurnState => ({
streamSegments: [],
streaming: '',
subagents: [],
todoCollapsed: false,
todos: [],
toolTokens: 0,
tools: [],
turnTrail: []
@ -22,9 +26,44 @@ export const $turnState = atom<TurnState>(buildTurnState())
export const getTurnState = () => $turnState.get()
const subscribeTurn = (cb: () => void) => $turnState.listen(() => cb())
// Subscribe a component to a derived slice of turn state. The client and
// server snapshot readers are the same function — the store is the single
// source of truth in both render environments.
export const useTurnSelector = <T>(selector: (state: TurnState) => T): T => {
  const readSnapshot = () => selector($turnState.get())
  return useSyncExternalStore(subscribeTurn, readSnapshot, readSnapshot)
}
// Update turn state either with a partial patch (shallow-merged over the
// current state) or with a full reducer function.
export const patchTurnState = (next: Partial<TurnState> | ((state: TurnState) => TurnState)) => {
  const current = $turnState.get()
  if (typeof next === 'function') {
    $turnState.set(next(current))
  } else {
    $turnState.set({ ...current, ...next })
  }
}
// Flip the live todo panel between collapsed and expanded.
export const toggleTodoCollapsed = () => patchTurnState(state => ({ ...state, todoCollapsed: !state.todoCollapsed }))
// Thin alias — delegates directly to archiveTodosAtTurnEnd.
export const archiveDoneTodos = () => archiveTodosAtTurnEnd()
// Move the live todo list into a transcript trail message at turn end.
// Fully-done lists are marked to collapse by default; unfinished ones are
// flagged as incomplete. Live todo state is cleared either way. Returns the
// archived message(s) so callers can prepend them to the transcript, or an
// empty array when there was nothing to archive.
export const archiveTodosAtTurnEnd = () => {
  const { todos } = $turnState.get()
  if (!todos.length) {
    return []
  }
  const completionFlags = isTodoDone(todos)
    ? { todoCollapsedByDefault: true }
    : { todoIncomplete: true }
  const archived: Msg = {
    kind: 'trail',
    role: 'system',
    text: '',
    todos,
    ...completionFlags
  }
  patchTurnState({ todoCollapsed: false, todos: [] })
  return [archived]
}
// Drop all per-turn state back to its initial empty shape (new turn/session).
export const resetTurnState = () => $turnState.set(buildTurnState())
export interface TurnState {
@ -38,6 +77,8 @@ export interface TurnState {
streamSegments: Msg[]
streaming: string
subagents: SubagentProgress[]
todoCollapsed: boolean
todos: TodoItem[]
toolTokens: number
tools: ActiveTool[]
turnTrail: string[]

View File

@ -11,6 +11,7 @@ const buildUiState = (): UiState => ({
busy: false,
compact: false,
detailsMode: 'collapsed',
detailsModeCommandOverride: false,
info: null,
inlineDiffs: true,
mouseTracking: MOUSE_TRACKING,

View File

@ -45,6 +45,7 @@ export const applyDisplay = (cfg: ConfigFullResponse | null, setBell: (v: boolea
patchUiState({
compact: !!d.tui_compact,
detailsMode: resolveDetailsMode(d),
detailsModeCommandOverride: false,
inlineDiffs: d.inline_diffs !== false,
mouseTracking: d.tui_mouse !== false,
sections: resolveSections(d.sections),

View File

@ -1,6 +1,8 @@
import { useInput } from '@hermes/ink'
import { useStore } from '@nanostores/react'
import { useEffect, useRef } from 'react'
import { TYPING_IDLE_MS } from '../config/timing.js'
import type {
ApprovalRespondResponse,
ConfigSetResponse,
@ -9,6 +11,7 @@ import type {
VoiceRecordResponse
} from '../gatewayTypes.js'
import { isAction, isCopyShortcut, isMac, isVoiceToggleKey } from '../lib/platform.js'
import { computeWheelStep, initWheelAccelForHost } from '../lib/wheelAccel.js'
import { getInputSelection } from './inputSelectionStore.js'
import type { InputHandlerContext, InputHandlerResult } from './interfaces.js'
@ -26,6 +29,27 @@ export function useInputHandlers(ctx: InputHandlerContext): InputHandlerResult {
const overlay = useStore($overlayState)
const isBlocked = useStore($isBlocked)
const pagerPageSize = Math.max(5, (terminal.stdout?.rows ?? 24) - 6)
const scrollIdleTimer = useRef<null | ReturnType<typeof setTimeout>>(null)
// Wheel accel ported from claude-code: inter-event timing drives step size,
// direction flips reset. wheelStep (WHEEL_SCROLL_STEP) is the base; final
// rows = wheelStep × accelMult. State mutates in place across renders.
const wheelAccelRef = useRef(initWheelAccelForHost())
useEffect(() => () => clearTimeout(scrollIdleTimer.current ?? undefined), [])
// Scroll the transcript by `delta` rows. While a turn is busy, tell the
// turn controller a scroll is in progress (boostStreamingForScroll) and
// schedule relaxStreaming once scrolling has been idle for TYPING_IDLE_MS.
const scrollTranscript = (delta: number) => {
  if (getUiState().busy) {
    turnController.boostStreamingForScroll()
    // Restart the idle timer on every scroll event so relaxStreaming fires
    // exactly once, TYPING_IDLE_MS after the last wheel/page input.
    clearTimeout(scrollIdleTimer.current ?? undefined)
    scrollIdleTimer.current = setTimeout(() => {
      scrollIdleTimer.current = null
      turnController.relaxStreaming()
    }, TYPING_IDLE_MS)
  }
  terminal.scrollWithSelection(delta)
}
const copySelection = () => {
// ink's copySelection() already calls setClipboard() which handles
@ -258,27 +282,29 @@ export function useInputHandlers(ctx: InputHandlerContext): InputHandlerResult {
return
}
if (key.wheelUp) {
return terminal.scrollWithSelection(-wheelStep)
}
if (key.wheelUp || key.wheelDown) {
const dir: -1 | 1 = key.wheelUp ? -1 : 1
// 0 = direction-flip bounce deferred; skip the no-op scroll.
const rows = computeWheelStep(wheelAccelRef.current, dir, Date.now())
if (key.wheelDown) {
return terminal.scrollWithSelection(wheelStep)
return rows ? scrollTranscript(dir * rows * wheelStep) : undefined
}
if (key.shift && key.upArrow) {
return terminal.scrollWithSelection(-1)
return scrollTranscript(-1)
}
if (key.shift && key.downArrow) {
return terminal.scrollWithSelection(1)
return scrollTranscript(1)
}
if (key.pageUp || key.pageDown) {
// Half-viewport keeps 50% continuity and stays under Ink's
// `delta < innerHeight` DECSTBM fast-path threshold.
const viewport = terminal.scrollRef.current?.getViewportHeight() ?? Math.max(6, (terminal.stdout?.rows ?? 24) - 8)
const step = Math.max(4, viewport - 2)
const step = Math.max(4, Math.floor(viewport / 2))
return terminal.scrollWithSelection(key.pageUp ? -step : step)
return scrollTranscript(key.pageUp ? -step : step)
}
if (key.escape && terminal.hasSelection) {

View File

@ -2,9 +2,10 @@ import { useEffect, useRef } from 'react'
import { LONG_RUN_CHARMS } from '../content/charms.js'
import { pick, toolTrailLabel } from '../lib/text.js'
import type { ActiveTool } from '../types.js'
import { turnController } from './turnController.js'
import { useTurnSelector } from './turnStore.js'
import { getUiState } from './uiStore.js'
const DELAY_MS = 8_000
const INTERVAL_MS = 10_000
@ -15,21 +16,28 @@ interface Slot {
lastAt: number
}
export function useLongRunToolCharms(busy: boolean, tools: ActiveTool[]) {
export function useLongRunToolCharms() {
const tools = useTurnSelector(state => state.tools)
const slots = useRef(new Map<string, Slot>())
useEffect(() => {
if (!busy || !tools.length) {
if (!getUiState().busy || !tools.length) {
slots.current.clear()
return
}
const tick = () => {
if (!getUiState().busy) {
slots.current.clear()
return
}
const now = Date.now()
const liveIds = new Set(tools.map(t => t.id))
for (const key of [...slots.current.keys()]) {
for (const key of Array.from(slots.current.keys())) {
if (!liveIds.has(key)) {
slots.current.delete(key)
}
@ -57,5 +65,5 @@ export function useLongRunToolCharms(busy: boolean, tools: ActiveTool[]) {
const id = setInterval(tick, 1000)
return () => clearInterval(id)
}, [busy, tools])
}, [tools])
}

View File

@ -3,7 +3,7 @@ import { useStore } from '@nanostores/react'
import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { STARTUP_RESUME_ID } from '../config/env.js'
import { MAX_HISTORY, WHEEL_SCROLL_STEP } from '../config/limits.js'
import { FULL_RENDER_TAIL_ITEMS, MAX_HISTORY, WHEEL_SCROLL_STEP } from '../config/limits.js'
import { SECTION_NAMES, sectionMode } from '../domain/details.js'
import { attachedImageNotice, imageTokenMeta } from '../domain/messages.js'
import { fmtCwdBranch, shortCwd } from '../domain/paths.js'
@ -16,17 +16,20 @@ import type {
} from '../gatewayTypes.js'
import { useGitBranch } from '../hooks/useGitBranch.js'
import { useVirtualHistory } from '../hooks/useVirtualHistory.js'
import { appendTranscriptMessage } from '../lib/messages.js'
import { asRpcResult, rpcErrorMessage } from '../lib/rpc.js'
import { terminalParityHints } from '../lib/terminalParity.js'
import { buildToolTrailLine, sameToolTrailGroup, toolTrailLabel } from '../lib/text.js'
import { estimatedMsgHeight, messageHeightKey } from '../lib/virtualHeights.js'
import type { Msg, PanelSection, SlashCatalog } from '../types.js'
import { createGatewayEventHandler } from './createGatewayEventHandler.js'
import { createSlashHandler } from './createSlashHandler.js'
import { type GatewayRpc, type TranscriptRow } from './interfaces.js'
import { $overlayState, patchOverlayState } from './overlayStore.js'
import { scrollWithSelectionBy } from './scroll.js'
import { turnController } from './turnController.js'
import { $turnState, patchTurnState } from './turnStore.js'
import { patchTurnState, useTurnSelector } from './turnStore.js'
import { $uiState, getUiState, patchUiState } from './uiStore.js'
import { useComposerState } from './useComposerState.js'
import { useConfigSync } from './useConfigSync.js'
@ -38,6 +41,7 @@ import { useSubmission } from './useSubmission.js'
const GOOD_VIBES_RE = /\b(good bot|thanks|thank you|thx|ty|ily|love you)\b/i
const BRACKET_PASTE_ON = '\x1b[?2004h'
const BRACKET_PASTE_OFF = '\x1b[?2004l'
const MAX_HEIGHT_CACHE_BUCKETS = 12
const capHistory = (items: Msg[]): Msg[] => {
if (items.length <= MAX_HISTORY) {
@ -63,12 +67,6 @@ const statusColorOf = (status: string, t: { dim: string; error: string; ok: stri
return t.dim
}
interface SelectionSnap {
anchor?: { row: number }
focus?: { row: number }
isDragging?: boolean
}
export function useMainApp(gw: GatewayClient) {
const { exit } = useApp()
const { stdout } = useStdout()
@ -110,7 +108,19 @@ export function useMainApp(gw: GatewayClient) {
const ui = useStore($uiState)
const overlay = useStore($overlayState)
const turn = useStore($turnState)
const turnLiveTailActive = useTurnSelector(state =>
Boolean(
state.streaming ||
state.streamPendingTools.length ||
state.streamSegments.length ||
state.reasoning.trim() ||
state.reasoningActive ||
state.tools.length ||
state.subagents.length ||
state.todos.length
)
)
const slashFlightRef = useRef(0)
const slashRef = useRef<(cmd: string) => boolean>(() => false)
@ -123,7 +133,8 @@ export function useMainApp(gw: GatewayClient) {
const historyItemsRef = useRef(historyItems)
const lastUserMsgRef = useRef(lastUserMsg)
const msgIdsRef = useRef(new WeakMap<Msg, string>())
const nextMsgIdRef = useRef(0)
const msgIdSeqRef = useRef(0)
const heightCachesRef = useRef(new Map<string, Map<string, number>>())
colsRef.current = cols
historyItemsRef.current = historyItems
@ -170,7 +181,7 @@ export function useMainApp(gw: GatewayClient) {
return hit
}
const next = `m${++nextMsgIdRef.current}`
const next = `${messageHeightKey(msg)}:${++msgIdSeqRef.current}`
msgIdsRef.current.set(msg, next)
@ -182,53 +193,77 @@ export function useMainApp(gw: GatewayClient) {
[historyItems, messageId]
)
const virtualHistory = useVirtualHistory(scrollRef, virtualRows, cols)
const detailsLayoutKey = useMemo(() => {
const thinking = sectionMode('thinking', ui.detailsMode, ui.sections, ui.detailsModeCommandOverride)
const tools = sectionMode('tools', ui.detailsMode, ui.sections, ui.detailsModeCommandOverride)
return `${thinking}:${tools}`
}, [ui.detailsMode, ui.detailsModeCommandOverride, ui.sections])
const detailsVisible = detailsLayoutKey !== 'hidden:hidden'
const heightCacheKey = `${ui.sid ?? 'draft'}:${cols}:${ui.compact ? '1' : '0'}:${detailsLayoutKey}`
const heightCache = useMemo(() => {
let cache = heightCachesRef.current.get(heightCacheKey)
if (!cache) {
cache = new Map()
heightCachesRef.current.set(heightCacheKey, cache)
if (heightCachesRef.current.size > MAX_HEIGHT_CACHE_BUCKETS) {
heightCachesRef.current.delete(heightCachesRef.current.keys().next().value!)
}
}
return cache
}, [heightCacheKey])
const initialHeights = useMemo(() => {
const out = new Map<string, number>()
for (const row of virtualRows) {
out.set(
row.key,
heightCache.get(row.key) ??
estimatedMsgHeight(row.msg, cols, {
compact: ui.compact,
details: detailsVisible,
limitHistory: row.index < virtualRows.length - FULL_RENDER_TAIL_ITEMS
})
)
}
return out
}, [cols, detailsVisible, heightCache, ui.compact, virtualRows])
const syncHeightCache = useCallback(
(heights: ReadonlyMap<string, number>) => {
for (const row of virtualRows) {
const h = heights.get(row.key)
if (h) {
heightCache.set(row.key, h)
}
}
},
[heightCache, virtualRows]
)
const virtualHistory = useVirtualHistory(scrollRef, virtualRows, cols, {
initialHeights,
liveTailActive: turnLiveTailActive,
onHeightsChange: syncHeightCache
})
const scrollWithSelection = useCallback(
(delta: number) => {
const s = scrollRef.current
if (!s) {
return
}
const sel = selection.getState() as null | SelectionSnap
const top = s.getViewportTop()
const bottom = top + s.getViewportHeight() - 1
if (
!sel?.anchor ||
!sel.focus ||
sel.anchor.row < top ||
sel.anchor.row > bottom ||
(!sel.isDragging && (sel.focus.row < top || sel.focus.row > bottom))
) {
return s.scrollBy(delta)
}
const max = Math.max(0, s.getScrollHeight() - s.getViewportHeight())
const cur = s.getScrollTop() + s.getPendingDelta()
const actual = Math.max(0, Math.min(max, cur + delta)) - cur
if (actual === 0) {
return
}
const shift = sel!.isDragging ? selection.shiftAnchor : selection.shiftSelection
if (actual > 0) {
selection.captureScrolledRows(top, top + actual - 1, 'above')
} else {
selection.captureScrolledRows(bottom + actual + 1, bottom, 'below')
}
shift(-actual, top, bottom)
s.scrollBy(delta)
},
(delta: number) => scrollWithSelectionBy(delta, { scrollRef, selection }),
[selection]
)
const appendMessage = useCallback((msg: Msg) => setHistoryItems(prev => capHistory([...prev, msg])), [])
const appendMessage = useCallback(
(msg: Msg) => setHistoryItems(prev => capHistory(appendTranscriptMessage(prev, msg))),
[]
)
const sys = useCallback((text: string) => appendMessage({ role: 'system', text }), [appendMessage])
@ -407,7 +442,7 @@ export function useMainApp(gw: GatewayClient) {
clipboardPasteRef.current = paste
const { dispatchSubmission, send, sendQueued, shellExec, submit } = useSubmission({
const { dispatchSubmission, send, sendQueued, submit } = useSubmission({
appendMessage,
composerActions,
composerRefs,
@ -438,6 +473,7 @@ export function useMainApp(gw: GatewayClient) {
const next = composerActions.dequeue()
if (next) {
patchUiState({ busy: true, status: 'running…' })
sendQueued(next)
}
}, [ui.sid, ui.busy, composerActions, composerRefs, sendQueued])
@ -528,7 +564,7 @@ export function useMainApp(gw: GatewayClient) {
}
}, [gw, sys])
useLongRunToolCharms(ui.busy, turn.tools)
useLongRunToolCharms()
const slash = useMemo(
() =>
@ -626,30 +662,35 @@ export function useMainApp(gw: GatewayClient) {
const onModelSelect = useCallback((value: string) => {
patchOverlayState({ modelPicker: false })
slashRef.current(`/model ${value}`)
slashRef.current(`/model ${value} --global`)
}, [])
const hasReasoning = Boolean(turn.reasoning.trim())
const hasReasoning = useTurnSelector(state => Boolean(state.reasoning.trim()))
// Per-section overrides win over the global mode — when every section is
// resolved to hidden, the only thing ToolTrail will surface is the
// floating-alert backstop (errors/warnings). Mirror that so we don't
// render an empty wrapper Box above the streaming area in quiet mode.
const anyPanelVisible = SECTION_NAMES.some(s => sectionMode(s, ui.detailsMode, ui.sections) !== 'hidden')
const anyPanelVisible = SECTION_NAMES.some(
s => sectionMode(s, ui.detailsMode, ui.sections, ui.detailsModeCommandOverride) !== 'hidden'
)
const showProgressArea = anyPanelVisible
? Boolean(
ui.busy ||
turn.outcome ||
turn.streamPendingTools.length ||
turn.streamSegments.length ||
turn.subagents.length ||
turn.tools.length ||
turn.turnTrail.length ||
hasReasoning ||
turn.activity.length
)
: turn.activity.some(item => item.tone !== 'info')
const showProgressArea = useTurnSelector(state =>
anyPanelVisible
? Boolean(
ui.busy ||
state.outcome ||
state.streamPendingTools.length ||
state.streamSegments.length ||
state.subagents.length ||
state.tools.length ||
state.todos.length ||
state.turnTrail.length ||
hasReasoning ||
state.activity.length
)
: state.activity.some(item => item.tone !== 'info')
)
const appActions = useMemo(
() => ({
@ -682,33 +723,10 @@ export function useMainApp(gw: GatewayClient) {
[cols, composerActions, composerState, empty, pagerPageSize, submit]
)
const liveTailVisible = (() => {
const s = scrollRef.current
if (!s) {
return true
}
const top = Math.max(0, s.getScrollTop() + s.getPendingDelta())
const vp = Math.max(0, s.getViewportHeight())
const total = Math.max(vp, s.getScrollHeight())
return top + vp >= total - 3
})()
const liveProgress = useMemo(
() => ({ ...turn, showProgressArea, showStreamingArea: Boolean(turn.streaming) }),
[turn, showProgressArea]
)
const frozenProgressRef = useRef(liveProgress)
// Freeze the offscreen live tail so scroll doesn't rebuild unseen streaming UI.
if (liveTailVisible || !ui.busy) {
frozenProgressRef.current = liveProgress
}
const appProgress = liveTailVisible || !ui.busy ? liveProgress : frozenProgressRef.current
// Pass current progress through unfrozen — streaming update throttling
// handles interaction load; progress must stay truthful so panels don't
// randomly disappear when the live tail scrolls offscreen.
const appProgress = useMemo(() => ({ showProgressArea }), [showProgressArea])
const cwd = ui.info?.cwd || process.env.HERMES_CWD || process.cwd()
const gitBranch = useGitBranch(cwd)

View File

@ -1,4 +1,5 @@
import type { ScrollBoxHandle } from '@hermes/ink'
import { evictInkCaches } from '@hermes/ink'
import { type RefObject, useCallback } from 'react'
import { buildSetupRequiredSections, SETUP_REQUIRED_TITLE } from '../content/setup.js'
@ -84,6 +85,9 @@ export function useSessionLifecycle(opts: UseSessionLifecycleOptions) {
setLastUserMsg('')
setStickyPrompt('')
composerActions.setPasteSnips([])
// Half-prune: new session has new keys, but keep a warm pool in case
// the user resumes back to the prior session.
evictInkCaches('half')
}, [composerActions, setHistoryItems, setLastUserMsg, setStickyPrompt, setVoiceProcessing, setVoiceRecording])
const resetVisibleHistory = useCallback(

View File

@ -1,5 +1,6 @@
import { type MutableRefObject, useCallback, useRef } from 'react'
import { type MutableRefObject, useCallback, useEffect, useRef } from 'react'
import { TYPING_IDLE_MS } from '../config/timing.js'
import { attachedImageNotice } from '../domain/messages.js'
import { looksLikeSlashCommand } from '../domain/slash.js'
import type { GatewayClient } from '../gatewayClient.js'
@ -14,6 +15,9 @@ import { turnController } from './turnController.js'
import { getUiState, patchUiState } from './uiStore.js'
const DOUBLE_ENTER_MS = 450
const SESSION_BUSY_RE = /session busy|waiting for model response/i
const isSessionBusyError = (e: unknown) => e instanceof Error && SESSION_BUSY_RE.test(e.message)
const expandSnips = (snips: PasteSnippet[]) => {
const byLabel = new Map<string, string[]>()
@ -44,12 +48,42 @@ export function useSubmission(opts: UseSubmissionOptions) {
} = opts
const lastEmptyAt = useRef(0)
const typingIdleTimer = useRef<ReturnType<typeof setTimeout> | null>(null)
useEffect(() => {
if (typingIdleTimer.current) {
clearTimeout(typingIdleTimer.current)
typingIdleTimer.current = null
}
if (!composerState.input && !composerState.inputBuf.length) {
turnController.relaxStreaming()
return
}
if (getUiState().busy) {
turnController.boostStreamingForTyping()
}
typingIdleTimer.current = setTimeout(() => {
typingIdleTimer.current = null
turnController.relaxStreaming()
}, TYPING_IDLE_MS)
return () => {
if (typingIdleTimer.current) {
clearTimeout(typingIdleTimer.current)
typingIdleTimer.current = null
}
}
}, [composerState.input, composerState.inputBuf])
const send = useCallback(
(text: string) => {
(text: string, showUserMessage = true) => {
const expand = expandSnips(composerState.pasteSnips)
const startSubmit = (displayText: string, submitText: string) => {
const startSubmit = (displayText: string, submitText: string, showUserMessage = true) => {
const sid = getUiState().sid
if (!sid) {
@ -59,12 +93,23 @@ export function useSubmission(opts: UseSubmissionOptions) {
turnController.clearStatusTimer()
maybeGoodVibes(submitText)
setLastUserMsg(text)
appendMessage({ role: 'user', text: displayText })
if (showUserMessage) {
appendMessage({ role: 'user', text: displayText })
}
patchUiState({ busy: true, status: 'running…' })
turnController.bufRef = ''
turnController.interrupted = false
gw.request<PromptSubmitResponse>('prompt.submit', { session_id: sid, text: submitText }).catch((e: Error) => {
if (isSessionBusyError(e)) {
composerActions.enqueue(submitText)
patchUiState({ busy: true, status: 'queued for next turn' })
return sys(`queued: "${submitText.slice(0, 50)}${submitText.length > 50 ? '…' : ''}"`)
}
sys(`error: ${e.message}`)
patchUiState({ busy: false, status: 'ready' })
})
@ -79,7 +124,7 @@ export function useSubmission(opts: UseSubmissionOptions) {
gw.request<InputDetectDropResponse>('input.detect_drop', { session_id: sid, text })
.then(r => {
if (!r?.matched) {
return startSubmit(text, expand(text))
return startSubmit(text, expand(text), showUserMessage)
}
if (r.is_image) {
@ -88,11 +133,11 @@ export function useSubmission(opts: UseSubmissionOptions) {
turnController.pushActivity(`detected file: ${r.name}`)
}
startSubmit(r.text || text, expand(r.text || text))
startSubmit(r.text || text, expand(r.text || text), showUserMessage)
})
.catch(() => startSubmit(text, expand(text)))
.catch(() => startSubmit(text, expand(text), showUserMessage))
},
[appendMessage, composerState.pasteSnips, gw, maybeGoodVibes, setLastUserMsg, sys]
[appendMessage, composerActions, composerState.pasteSnips, gw, maybeGoodVibes, setLastUserMsg, sys]
)
const shellExec = useCallback(
@ -260,6 +305,8 @@ export function useSubmission(opts: UseSubmissionOptions) {
if (doubleTap && live.sid && composerRefs.queueRef.current.length) {
const next = composerActions.dequeue()
composerActions.syncQueue()
if (next) {
composerActions.setQueueEdit(null)
dispatchSubmission(next)
@ -284,7 +331,7 @@ export function useSubmission(opts: UseSubmissionOptions) {
submitRef.current = submit
return { dispatchSubmission, send, sendQueued, shellExec, submit }
return { dispatchSubmission, send, sendQueued, submit }
}
export interface UseSubmissionOptions {

View File

@ -10,7 +10,7 @@ import {
} from '../app/delegationStore.js'
import { patchOverlayState } from '../app/overlayStore.js'
import { $spawnDiff, $spawnHistory, clearDiffPair, type SpawnSnapshot } from '../app/spawnHistoryStore.js'
import { $turnState } from '../app/turnStore.js'
import { useTurnSelector } from '../app/turnStore.js'
import type { GatewayClient } from '../gatewayClient.js'
import type { DelegationPauseResponse, DelegationStatusResponse, SubagentInterruptResponse } from '../gatewayTypes.js'
import { asRpcResult } from '../lib/rpc.js'
@ -683,7 +683,7 @@ function DiffView({
// ── Main overlay ─────────────────────────────────────────────────────
export function AgentsOverlay({ gw, initialHistoryIndex = 0, onClose, t }: AgentsOverlayProps) {
const turn = useStore($turnState)
const liveSubagents = useTurnSelector(state => state.subagents)
const delegation = useStore($delegationState)
const history = useStore($spawnHistory)
const diffPair = useStore($spawnDiff)
@ -705,17 +705,17 @@ export function AgentsOverlay({ gw, initialHistoryIndex = 0, onClose, t }: Agent
const [mode, setMode] = useState<'detail' | 'list'>('list')
const detailScrollRef = useRef<null | ScrollBoxHandle>(null)
const prevLiveCountRef = useRef(turn.subagents.length)
const prevLiveCountRef = useRef(liveSubagents.length)
// ── Derived state ──────────────────────────────────────────────────
const activeSnapshot = historyIndex > 0 ? history[historyIndex - 1] : null
// Instant fallback to history[0] the moment the live list clears — avoids
// a one-frame "no subagents" flash while the auto-follow effect fires.
const justFinishedSnapshot = historyIndex === 0 && turn.subagents.length === 0 ? (history[0] ?? null) : null
const justFinishedSnapshot = historyIndex === 0 && liveSubagents.length === 0 ? (history[0] ?? null) : null
const effectiveSnapshot = activeSnapshot ?? justFinishedSnapshot
const replayMode = effectiveSnapshot != null
const subagents = replayMode ? effectiveSnapshot.subagents : turn.subagents
const subagents = replayMode ? effectiveSnapshot.subagents : liveSubagents
const tree = useMemo(() => buildSubagentTree(subagents), [subagents])
const totals = useMemo(() => treeTotals(tree), [tree])
@ -753,14 +753,14 @@ export function AgentsOverlay({ gw, initialHistoryIndex = 0, onClose, t }: Agent
// dropped into an empty live view. Fires only when transitioning from
// "had live subagents" → "live empty" while in live mode.
const prev = prevLiveCountRef.current
prevLiveCountRef.current = turn.subagents.length
prevLiveCountRef.current = liveSubagents.length
if (historyIndex === 0 && prev > 0 && turn.subagents.length === 0 && history.length > 0) {
if (historyIndex === 0 && prev > 0 && liveSubagents.length === 0 && history.length > 0) {
setHistoryIndex(1)
setCursor(0)
setFlash('turn finished · inspect freely · q to close')
}
}, [history.length, historyIndex, turn.subagents.length])
}, [history.length, historyIndex, liveSubagents.length])
useEffect(() => {
// Reset detail scroll on navigation so the top of the new node shows.

View File

@ -1,15 +1,16 @@
import { Box, type ScrollBoxHandle, Text } from '@hermes/ink'
import { useStore } from '@nanostores/react'
import { type ReactNode, type RefObject, useCallback, useEffect, useMemo, useState, useSyncExternalStore } from 'react'
import { type ReactNode, type RefObject, useEffect, useMemo, useState } from 'react'
import { $delegationState } from '../app/delegationStore.js'
import { $turnState } from '../app/turnStore.js'
import { useTurnSelector } from '../app/turnStore.js'
import { FACES } from '../content/faces.js'
import { VERBS } from '../content/verbs.js'
import { fmtDuration } from '../domain/messages.js'
import { stickyPromptFromViewport } from '../domain/viewport.js'
import { buildSubagentTree, treeTotals, widthByDepth } from '../lib/subagentTree.js'
import { fmtK } from '../lib/text.js'
import { useViewportSnapshot } from '../lib/viewportStore.js'
import type { Theme } from '../theme.js'
import type { Msg, Usage } from '../types.js'
@ -68,9 +69,9 @@ function SpawnHud({ t }: { t: Theme }) {
// Tight HUD that only appears when the session is actually fanning out.
// Colour escalates to warn/error as depth or concurrency approaches the cap.
const delegation = useStore($delegationState)
const turn = useStore($turnState)
const subagents = useTurnSelector(state => state.subagents)
const tree = useMemo(() => buildSubagentTree(turn.subagents), [turn.subagents])
const tree = useMemo(() => buildSubagentTree(subagents), [subagents])
const totals = useMemo(() => treeTotals(tree), [tree])
if (!totals.descendantCount && !delegation.paused) {
@ -138,6 +139,27 @@ function SessionDuration({ startedAt }: { startedAt: number }) {
return fmtDuration(now - startedAt)
}
// Render a reasoning-effort tag only when it deviates from the baseline.
// "medium"/"normal"/"default" (any case, surrounding whitespace ignored)
// are treated as the implicit default and suppressed to an empty string.
const effortLabel = (effort?: string) => {
  const normalized = String(effort ?? '').trim().toLowerCase()
  if (!normalized) {
    return ''
  }
  const baseline = ['medium', 'normal', 'default']
  return baseline.includes(normalized) ? '' : normalized
}
const shortModelLabel = (model: string) =>
model
.split('/')
.pop()!
.replace(/^claude[-_]/, '')
.replace(/^anthropic[-_]/, '')
.replace(/[-_]/g, ' ')
.replace(/\b(\d+)\s+(\d+)\b/g, '$1.$2')
.trim()
const modelLabel = (model: string, effort?: string, fast?: boolean) =>
[shortModelLabel(model), effortLabel(effort), fast ? 'fast' : ''].filter(Boolean).join(' ')
export function GoodVibesHeart({ tick, t }: { tick: number; t: Theme }) {
const [active, setActive] = useState(false)
const [color, setColor] = useState(t.color.amber)
@ -170,6 +192,8 @@ export function StatusRule({
status,
statusColor,
model,
modelFast,
modelReasoningEffort,
usage,
bgCount,
sessionStartedAt,
@ -200,7 +224,7 @@ export function StatusRule({
) : (
<Text color={statusColor}>{status}</Text>
)}
<Text color={t.color.dim}> {model}</Text>
<Text color={t.color.dim}> {modelLabel(model, modelReasoningEffort, modelFast)}</Text>
{ctxLabel ? <Text color={t.color.dim}> {ctxLabel}</Text> : null}
{bar ? (
<Text color={t.color.dim}>
@ -255,17 +279,7 @@ export function FloatBox({ children, color }: { children: ReactNode; color: stri
}
export function StickyPromptTracker({ messages, offsets, scrollRef, onChange }: StickyPromptTrackerProps) {
useSyncExternalStore(
useCallback((cb: () => void) => scrollRef.current?.subscribe(cb) ?? (() => {}), [scrollRef]),
() => {
const { atBottom, top } = getStickyViewport(scrollRef.current)
return atBottom ? -1 - top : top
},
() => NaN
)
const { atBottom, bottom, top } = getStickyViewport(scrollRef.current)
const { atBottom, bottom, top } = useViewportSnapshot(scrollRef)
const text = stickyPromptFromViewport(messages, offsets, top, bottom, atBottom)
useEffect(() => onChange(text), [onChange, text])
@ -274,42 +288,18 @@ export function StickyPromptTracker({ messages, offsets, scrollRef, onChange }:
}
export function TranscriptScrollbar({ scrollRef, t }: TranscriptScrollbarProps) {
useSyncExternalStore(
useCallback((cb: () => void) => scrollRef.current?.subscribe(cb) ?? (() => {}), [scrollRef]),
() => {
const s = scrollRef.current
if (!s) {
return NaN
}
const vp = Math.max(0, s.getViewportHeight())
const total = Math.max(vp, s.getScrollHeight())
const top = Math.max(0, s.getScrollTop() + s.getPendingDelta())
const thumb = total > vp ? Math.max(1, Math.round((vp * vp) / total)) : vp
const travel = Math.max(1, vp - thumb)
const thumbTop = total > vp ? Math.round((top / Math.max(1, total - vp)) * travel) : 0
return `${thumbTop}:${thumb}:${vp}`
},
() => ''
)
const [hover, setHover] = useState(false)
const [grab, setGrab] = useState<number | null>(null)
const s = scrollRef.current
const vp = Math.max(0, s?.getViewportHeight() ?? 0)
const { scrollHeight: total, top: pos, viewportHeight: vp } = useViewportSnapshot(scrollRef)
if (!vp) {
return <Box width={1} />
}
const total = Math.max(vp, s?.getScrollHeight() ?? vp)
const s = scrollRef.current
const scrollable = total > vp
const thumb = scrollable ? Math.max(1, Math.round((vp * vp) / total)) : vp
const travel = Math.max(1, vp - thumb)
const pos = Math.max(0, (s?.getScrollTop() ?? 0) + (s?.getPendingDelta() ?? 0))
const thumbTop = scrollable ? Math.round((pos / Math.max(1, total - vp)) * travel) : 0
const thumbColor = grab !== null ? t.color.gold : hover ? t.color.amber : t.color.bronze
const trackColor = hover ? t.color.bronze : t.color.dim
@ -370,6 +360,8 @@ interface StatusRuleProps {
cols: number
cwdLabel: string
model: string
modelFast?: boolean
modelReasoningEffort?: string
sessionStartedAt?: null | number
showCost: boolean
status: string
@ -391,15 +383,3 @@ interface TranscriptScrollbarProps {
scrollRef: RefObject<ScrollBoxHandle | null>
t: Theme
}
function getStickyViewport(s?: ScrollBoxHandle | null) {
  // Fold the pending (not-yet-applied) scroll delta into the visible top so
  // callers see where the viewport is about to be, not where it last was.
  const scrollTop = s?.getScrollTop() ?? 0
  const pending = s?.getPendingDelta() ?? 0
  const top = Math.max(0, scrollTop + pending)
  const viewport = Math.max(0, s?.getViewportHeight() ?? 0)
  const total = Math.max(viewport, s?.getScrollHeight() ?? viewport)
  const bottom = top + viewport
  // The handle's sticky flag wins outright; otherwise treat "within 2 rows
  // of the end" as at-bottom. A missing handle reports at-bottom.
  const atBottom = (s?.isSticky() ?? true) || bottom >= total - 2
  return { atBottom, bottom, top }
}

View File

@ -1,101 +1,26 @@
import { AlternateScreen, Box, NoSelect, ScrollBox, Text } from '@hermes/ink'
import { useStore } from '@nanostores/react'
import { memo } from 'react'
import { Fragment, memo, useMemo } from 'react'
import { useGateway } from '../app/gatewayContext.js'
import type { AppLayoutProgressProps, AppLayoutProps } from '../app/interfaces.js'
import type { AppLayoutProps } from '../app/interfaces.js'
import { $isBlocked, $overlayState, patchOverlayState } from '../app/overlayStore.js'
import { $uiState } from '../app/uiStore.js'
import { INLINE_MODE, SHOW_FPS } from '../config/env.js'
import { FULL_RENDER_TAIL_ITEMS } from '../config/limits.js'
import { PLACEHOLDER } from '../content/placeholders.js'
import type { Theme } from '../theme.js'
import type { DetailsMode, SectionVisibility } from '../types.js'
import { inputVisualHeight, stableComposerColumns } from '../lib/inputMetrics.js'
import { PerfPane } from '../lib/perfPane.js'
import { AgentsOverlay } from './agentsOverlay.js'
import { GoodVibesHeart, StatusRule, StickyPromptTracker, TranscriptScrollbar } from './appChrome.js'
import { FloatingOverlays, PromptZone } from './appOverlays.js'
import { Banner, Panel, SessionPanel } from './branding.js'
import { FpsOverlay } from './fpsOverlay.js'
import { MessageLine } from './messageLine.js'
import { QueuedMessages } from './queuedMessages.js'
import { LiveTodoPanel, StreamingAssistant } from './streamingAssistant.js'
import { TextInput } from './textInput.js'
import { ToolTrail } from './thinking.js'
const StreamingAssistant = memo(function StreamingAssistant({
busy,
cols,
compact,
detailsMode,
progress,
sections,
t
}: StreamingAssistantProps) {
if (!progress.showProgressArea && !progress.showStreamingArea) {
return null
}
return (
<>
{progress.streamSegments.map((msg, i) => (
<MessageLine
cols={cols}
compact={compact}
detailsMode={detailsMode}
key={`seg:${i}`}
msg={msg}
sections={sections}
t={t}
/>
))}
{progress.showProgressArea && (
<Box flexDirection="column" marginBottom={progress.showStreamingArea ? 1 : 0}>
<ToolTrail
activity={progress.activity}
busy={busy}
detailsMode={detailsMode}
outcome={progress.outcome}
reasoning={progress.reasoning}
reasoningActive={progress.reasoningActive}
reasoningStreaming={progress.reasoningStreaming}
reasoningTokens={progress.reasoningTokens}
sections={sections}
subagents={progress.subagents}
t={t}
tools={progress.tools}
toolTokens={progress.toolTokens}
trail={progress.turnTrail}
/>
</Box>
)}
{progress.showStreamingArea && (
<MessageLine
cols={cols}
compact={compact}
detailsMode={detailsMode}
isStreaming
msg={{
role: 'assistant',
text: progress.streaming,
...(progress.streamPendingTools.length && { tools: progress.streamPendingTools })
}}
sections={sections}
t={t}
/>
)}
{!progress.showStreamingArea && !!progress.streamPendingTools.length && (
<MessageLine
cols={cols}
compact={compact}
detailsMode={detailsMode}
msg={{ kind: 'trail', role: 'system', text: '', tools: progress.streamPendingTools }}
sections={sections}
t={t}
/>
)}
</>
)
})
const TranscriptPane = memo(function TranscriptPane({
actions,
@ -105,6 +30,21 @@ const TranscriptPane = memo(function TranscriptPane({
}: Pick<AppLayoutProps, 'actions' | 'composer' | 'progress' | 'transcript'>) {
const ui = useStore($uiState)
// LiveTodoPanel rides as a child of the latest user-message row so it
// visually belongs to the prompt and follows it during scroll. -1 when
// empty → row.index === -1 is always false → no render.
const lastUserIdx = useMemo(() => {
const items = transcript.historyItems
for (let i = items.length - 1; i >= 0; i--) {
if (items[i].role === 'user') {
return i
}
}
return -1
}, [transcript.historyItems])
return (
<>
<ScrollBox flexDirection="column" flexGrow={1} flexShrink={1} ref={transcript.scrollRef} stickyScroll>
@ -126,24 +66,27 @@ const TranscriptPane = memo(function TranscriptPane({
cols={composer.cols}
compact={ui.compact}
detailsMode={ui.detailsMode}
detailsModeCommandOverride={ui.detailsModeCommandOverride}
limitHistoryRender={row.index < transcript.historyItems.length - FULL_RENDER_TAIL_ITEMS}
msg={row.msg}
sections={ui.sections}
t={ui.theme}
/>
)}
{row.index === lastUserIdx && <LiveTodoPanel />}
</Box>
))}
{transcript.virtualHistory.bottomSpacer > 0 ? <Box height={transcript.virtualHistory.bottomSpacer} /> : null}
<StreamingAssistant
busy={ui.busy}
cols={composer.cols}
compact={ui.compact}
detailsMode={ui.detailsMode}
detailsModeCommandOverride={ui.detailsModeCommandOverride}
progress={progress}
sections={ui.sections}
t={ui.theme}
/>
</Box>
</ScrollBox>
@ -171,6 +114,8 @@ const ComposerPane = memo(function ComposerPane({
const isBlocked = useStore($isBlocked)
const sh = (composer.inputBuf[0] ?? composer.input).startsWith('!')
const pw = sh ? 2 : 3
const inputColumns = stableComposerColumns(composer.cols, pw)
const inputHeight = inputVisualHeight(composer.input, inputColumns)
return (
<NoSelect flexDirection="column" flexShrink={0} fromLeftEdge paddingX={1}>
@ -232,10 +177,10 @@ const ComposerPane = memo(function ComposerPane({
)}
</Box>
<Box flexGrow={1} position="relative">
{/* subtract NoSelect paddingX={1} (2 cols) + pw so wrap-ansi and cursorLayout agree */}
<Box flexGrow={0} flexShrink={0} height={inputHeight} position="relative" width={inputColumns}>
{/* Reserve the transcript scrollbar gutter too so typing never rewraps when the scrollbar column repaints. */}
<TextInput
columns={Math.max(20, composer.cols - pw - 2)}
columns={inputColumns}
onChange={composer.updateInput}
onPaste={composer.handleTextPaste}
onSubmit={composer.submit}
@ -292,7 +237,9 @@ const StatusRulePane = memo(function StatusRulePane({
busy={ui.busy}
cols={composer.cols}
cwdLabel={status.cwdLabel}
model={ui.info?.model?.split('/').pop() ?? ''}
model={ui.info?.model ?? ''}
modelFast={ui.info?.fast || ui.info?.service_tier === 'priority'}
modelReasoningEffort={ui.info?.reasoning_effort}
sessionStartedAt={status.sessionStartedAt}
showCost={ui.showCost}
status={ui.status}
@ -316,41 +263,51 @@ export const AppLayout = memo(function AppLayout({
}: AppLayoutProps) {
const overlay = useStore($overlayState)
// Inline mode skips AlternateScreen so the host terminal's native
// scrollback captures rows scrolled off the top; composer + progress
// stay anchored via normal flex-column flow.
const Shell = INLINE_MODE ? Fragment : AlternateScreen
const shellProps = INLINE_MODE ? {} : { mouseTracking }
return (
<AlternateScreen mouseTracking={mouseTracking}>
<Shell {...shellProps}>
<Box flexDirection="column" flexGrow={1}>
<Box flexDirection="row" flexGrow={1}>
{overlay.agents ? (
<AgentsOverlayPane />
<PerfPane id="agents">
<AgentsOverlayPane />
</PerfPane>
) : (
<TranscriptPane actions={actions} composer={composer} progress={progress} transcript={transcript} />
<PerfPane id="transcript">
<TranscriptPane actions={actions} composer={composer} progress={progress} transcript={transcript} />
</PerfPane>
)}
</Box>
{!overlay.agents && (
<>
<PromptZone
cols={composer.cols}
onApprovalChoice={actions.answerApproval}
onClarifyAnswer={actions.answerClarify}
onSecretSubmit={actions.answerSecret}
onSudoSubmit={actions.answerSudo}
/>
<PerfPane id="prompt">
<PromptZone
cols={composer.cols}
onApprovalChoice={actions.answerApproval}
onClarifyAnswer={actions.answerClarify}
onSecretSubmit={actions.answerSecret}
onSudoSubmit={actions.answerSudo}
/>
</PerfPane>
<ComposerPane actions={actions} composer={composer} status={status} />
<PerfPane id="composer">
<ComposerPane actions={actions} composer={composer} status={status} />
</PerfPane>
{SHOW_FPS && (
<Box flexShrink={0} justifyContent="flex-end" paddingRight={1}>
<FpsOverlay />
</Box>
)}
</>
)}
</Box>
</AlternateScreen>
</Shell>
)
})
interface StreamingAssistantProps {
busy: boolean
cols: number
compact?: boolean
detailsMode: DetailsMode
progress: AppLayoutProgressProps
sections?: SectionVisibility
t: Theme
}

View File

@ -0,0 +1,28 @@
// FPS counter overlay (HERMES_TUI_FPS=1). Zero-cost when disabled.
import { Text } from '@hermes/ink'
import { useStore } from '@nanostores/react'
import { SHOW_FPS } from '../config/env.js'
import { $fpsState } from '../lib/fpsStore.js'
// Traffic-light tint for the FPS readout: >=50 green, >=30 yellow, else red.
const fpsColor = (fps: number) => {
  if (fps >= 50) {
    return 'green'
  }
  return fps >= 30 ? 'yellow' : 'red'
}
export function FpsOverlay() {
if (!SHOW_FPS) {
return null
}
return <FpsOverlayInner />
}
function FpsOverlayInner() {
  const stats = useStore($fpsState)
  // Fixed-width numeric fields so digit churn doesn't jitter the corner.
  const fpsText = stats.fps.toFixed(1).padStart(5)
  const frameText = stats.lastDurationMs.toFixed(1).padStart(5)
  return (
    <Text color={fpsColor(stats.fps)}>
      {fpsText}fps · {frameText}ms · #{stats.totalFrames}
    </Text>
  )
}

View File

@ -213,8 +213,54 @@ function MdInline({ t, text }: { t: Theme; text: string }) {
return <Text>{parts.length ? parts : <Text>{text}</Text>}</Text>
}
// Cross-instance cache of parsed markdown children. useMemo's cache lives
// per component instance and dies on remount, so virtualization would
// re-parse every row scrolling back into view. Outer WeakMap is keyed on the
// Theme object so stale palettes drop with their theme; the inner Map acts
// as an LRU bounded to MD_CACHE_LIMIT entries.
const MD_CACHE_LIMIT = 512
const mdCache = new WeakMap<Theme, Map<string, ReactNode[]>>()
const cacheBucket = (t: Theme) => {
  let bucket = mdCache.get(t)
  if (!bucket) {
    bucket = new Map<string, ReactNode[]>()
    mdCache.set(t, bucket)
  }
  return bucket
}
const cacheGet = (b: Map<string, ReactNode[]>, key: string) => {
  const hit = b.get(key)
  if (hit) {
    // Refresh recency: Map preserves insertion order, so delete+set moves
    // the entry to the tail (most-recently-used position).
    b.delete(key)
    b.set(key, hit)
  }
  return hit
}
const cacheSet = (b: Map<string, ReactNode[]>, key: string, v: ReactNode[]) => {
  b.set(key, v)
  if (b.size > MD_CACHE_LIMIT) {
    // Head of iteration order is the least-recently-used entry.
    const oldest = b.keys().next().value!
    b.delete(oldest)
  }
}
function MdImpl({ compact, t, text }: MdProps) {
const nodes = useMemo(() => {
const bucket = cacheBucket(t)
const cacheKey = `${compact ? '1' : '0'}|${text}`
const cached = cacheGet(bucket, cacheKey)
if (cached) {
return cached
}
const lines = ensureEmojiPresentation(text).split('\n')
const nodes: ReactNode[] = []
@ -615,6 +661,8 @@ function MdImpl({ compact, t, text }: MdProps) {
i++
}
cacheSet(bucket, cacheKey, nodes)
return nodes
}, [compact, t, text])

View File

@ -5,21 +5,33 @@ import { LONG_MSG } from '../config/limits.js'
import { sectionMode } from '../domain/details.js'
import { userDisplay } from '../domain/messages.js'
import { ROLE } from '../domain/roles.js'
import { compactPreview, hasAnsi, isPasteBackedText, stripAnsi } from '../lib/text.js'
import {
boundedHistoryRenderText,
boundedLiveRenderText,
compactPreview,
hasAnsi,
isPasteBackedText,
stripAnsi
} from '../lib/text.js'
import type { Theme } from '../theme.js'
import type { DetailsMode, Msg, SectionVisibility } from '../types.js'
import type { ActiveTool, DetailsMode, Msg, SectionVisibility } from '../types.js'
import { Md } from './markdown.js'
import { StreamingMd } from './streamingMarkdown.js'
import { ToolTrail } from './thinking.js'
import { TodoPanel } from './todoPanel.js'
export const MessageLine = memo(function MessageLine({
cols,
compact,
detailsMode = 'collapsed',
detailsModeCommandOverride = false,
isStreaming = false,
limitHistoryRender = false,
msg,
sections,
t
t,
tools = []
}: MessageLineProps) {
// Per-section overrides win over the global mode, so resolve each section
// we might consume here once and gate visibility on the *content-bearing*
@ -28,20 +40,33 @@ export const MessageLine = memo(function MessageLine({
// feeds Thinking + Tool calls. Gating on every section would let
// `thinking` (expanded by default) keep an empty wrapper alive when only
// `tools` is hidden — exactly the empty-Box bug Copilot caught.
const thinkingMode = sectionMode('thinking', detailsMode, sections)
const toolsMode = sectionMode('tools', detailsMode, sections)
const activityMode = sectionMode('activity', detailsMode, sections)
const thinkingMode = sectionMode('thinking', detailsMode, sections, detailsModeCommandOverride)
const toolsMode = sectionMode('tools', detailsMode, sections, detailsModeCommandOverride)
const activityMode = sectionMode('activity', detailsMode, sections, detailsModeCommandOverride)
const thinking = msg.thinking?.trim() ?? ''
if (msg.kind === 'trail' && (msg.tools?.length || thinking)) {
if (msg.kind === 'trail' && msg.todos?.length) {
return (
<TodoPanel
defaultCollapsed={msg.todoCollapsedByDefault}
incomplete={msg.todoIncomplete}
t={t}
todos={msg.todos}
/>
)
}
if (msg.kind === 'trail' && (msg.tools?.length || tools.length || thinking)) {
return thinkingMode !== 'hidden' || toolsMode !== 'hidden' || activityMode !== 'hidden' ? (
<Box flexDirection="column" marginTop={1}>
<Box flexDirection="column">
<ToolTrail
commandOverride={detailsModeCommandOverride}
detailsMode={detailsMode}
reasoning={thinking}
reasoningTokens={msg.thinkingTokens}
sections={sections}
t={t}
tools={tools}
toolTokens={msg.toolTokens}
trail={msg.tools ?? []}
/>
@ -84,7 +109,14 @@ export const MessageLine = memo(function MessageLine({
}
if (msg.role === 'assistant') {
return isStreaming ? <Text color={body}>{msg.text}</Text> : <Md compact={compact} t={t} text={msg.text} />
return isStreaming ? (
// Incremental markdown: split at the last stable block boundary so
// only the in-flight tail re-tokenizes per delta. See
// streamingMarkdown.tsx for the cost model.
<StreamingMd compact={compact} t={t} text={boundedLiveRenderText(msg.text)} />
) : (
<Md compact={compact} t={t} text={limitHistoryRender ? boundedHistoryRenderText(msg.text) : msg.text} />
)
}
if (msg.role === 'user' && msg.text.length > LONG_MSG && isPasteBackedText(msg.text)) {
@ -118,6 +150,7 @@ export const MessageLine = memo(function MessageLine({
{showDetails && (
<Box flexDirection="column" marginBottom={1}>
<ToolTrail
commandOverride={detailsModeCommandOverride}
detailsMode={detailsMode}
reasoning={thinking}
reasoningTokens={msg.thinkingTokens}
@ -146,8 +179,11 @@ interface MessageLineProps {
cols: number
compact?: boolean
detailsMode?: DetailsMode
detailsModeCommandOverride?: boolean
isStreaming?: boolean
limitHistoryRender?: boolean
msg: Msg
sections?: SectionVisibility
t: Theme
tools?: ActiveTool[]
}

View File

@ -0,0 +1,110 @@
import { useStore } from '@nanostores/react'
import { memo } from 'react'
import type { AppLayoutProgressProps } from '../app/interfaces.js'
import { toggleTodoCollapsed, useTurnSelector } from '../app/turnStore.js'
import { $uiState } from '../app/uiStore.js'
import { appendToolShelfMessage } from '../lib/liveProgress.js'
import type { DetailsMode, Msg, SectionVisibility } from '../types.js'
import { MessageLine } from './messageLine.js'
import { TodoPanel } from './todoPanel.js'
// Fold each raw stream segment through appendToolShelfMessage, which decides
// how the segment merges into the accumulated list.
const groupedSegments = (segments: Msg[]): Msg[] => {
  let grouped: Msg[] = []
  for (const msg of segments) {
    grouped = appendToolShelfMessage(grouped, msg)
  }
  return grouped
}
// Live (in-flight) turn area: finished stream segments, the active-tool
// trail, streaming assistant text, and any still-pending tool calls —
// rendered in that order below the committed transcript.
export const StreamingAssistant = memo(function StreamingAssistant({
  cols,
  compact,
  detailsMode,
  detailsModeCommandOverride,
  progress,
  sections
}: StreamingAssistantProps) {
  // All hooks run unconditionally before the early return below, keeping
  // the hook order stable across renders.
  const ui = useStore($uiState)
  const streamSegments = useTurnSelector(state => state.streamSegments)
  const streamPendingTools = useTurnSelector(state => state.streamPendingTools)
  const streaming = useTurnSelector(state => state.streaming)
  const activeTools = useTurnSelector(state => state.tools)
  const showStreamingArea = Boolean(streaming)
  // Nothing live: no progress area, no streaming text, no running tools.
  if (!progress.showProgressArea && !showStreamingArea && !activeTools.length) {
    return null
  }
  return (
    <>
      {/* Completed segments, grouped via the tool-shelf reducer. Index keys
          assume segments only append during a turn — NOTE(review): confirm
          they never reorder mid-turn. */}
      {groupedSegments(streamSegments).map((msg, i) => (
        <MessageLine
          cols={cols}
          compact={compact}
          detailsMode={detailsMode}
          detailsModeCommandOverride={detailsModeCommandOverride}
          key={`seg:${i}`}
          msg={msg}
          sections={sections}
          t={ui.theme}
        />
      ))}
      {/* Running tools render as a synthetic empty-text 'trail' row. */}
      {!!activeTools.length && (
        <MessageLine
          cols={cols}
          compact={compact}
          detailsMode={detailsMode}
          detailsModeCommandOverride={detailsModeCommandOverride}
          msg={{ kind: 'trail', role: 'system', text: '' }}
          sections={sections}
          t={ui.theme}
          tools={activeTools}
        />
      )}
      {showStreamingArea && (
        <MessageLine
          cols={cols}
          compact={compact}
          detailsMode={detailsMode}
          detailsModeCommandOverride={detailsModeCommandOverride}
          isStreaming
          msg={{
            role: 'assistant',
            text: streaming,
            // Conditional spread: `tools` is only present when non-empty.
            ...(streamPendingTools.length && { tools: streamPendingTools })
          }}
          sections={sections}
          t={ui.theme}
        />
      )}
      {/* Pending tool calls while no streaming text is showing: surface them
          as their own trail row. */}
      {!showStreamingArea && !!streamPendingTools.length && (
        <MessageLine
          cols={cols}
          compact={compact}
          detailsMode={detailsMode}
          detailsModeCommandOverride={detailsModeCommandOverride}
          msg={{ kind: 'trail', role: 'system', text: '', tools: streamPendingTools }}
          sections={sections}
          t={ui.theme}
        />
      )}
    </>
  )
})
// Todo panel bound to the live turn store: tracks the current todo list and
// its collapsed flag, and wires the toggle action straight through.
export const LiveTodoPanel = memo(function LiveTodoPanel() {
  const { theme } = useStore($uiState)
  const todos = useTurnSelector(state => state.todos)
  const collapsed = useTurnSelector(state => state.todoCollapsed)
  return <TodoPanel collapsed={collapsed} onToggle={toggleTodoCollapsed} t={theme} todos={todos} />
})
interface StreamingAssistantProps {
cols: number
compact?: boolean
detailsMode: DetailsMode
detailsModeCommandOverride: boolean
progress: AppLayoutProgressProps
sections?: SectionVisibility
}

View File

@ -0,0 +1,132 @@
// StreamingMd — incremental markdown renderer for in-flight assistant text.
//
// Naive approach (render <Md text={full}/>) re-tokenizes the entire message
// on every stream delta. At 20-char batches over a 3 KB response that's 150
// full re-parses.
//
// This splits `text` at the last stable top-level block boundary (blank
// line outside a fenced code span) into:
// stablePrefix — passed to an inner <Md>, memoized on its exact text
// value. During the turn, the prefix only grows monotonically,
// so its memo key matches the previous render and React
// reuses the cached subtree — zero re-tokenization.
// unstableSuffix — the in-flight block(s). A separate <Md> re-parses just
// this tail on every delta (O(unstable length) vs.
// O(total length)).
//
// The boundary is stored in a ref so it only advances — idempotent under
// StrictMode double-render. Component unmounts between turns (isStreaming
// flips off → message moves to history and renders via <Md> directly), so
// the ref resets naturally.
//
// Layout: the two <Md> subtrees MUST render stacked (column). The parent
// container in messageLine.tsx is a default `flexDirection: 'row'` Box
// (Ink's default), so returning a bare Fragment of two <Md> siblings
// laid them out side-by-side — producing the "two jumbled columns while
// streaming" rendering bug. Wrapping in a flexDirection="column" Box
// here localizes the fix to the streaming path; the non-streaming <Md>
// already returns its own column Box, so its single-child case was never
// affected.
import { Box } from '@hermes/ink'
import { memo, useRef } from 'react'
import type { Theme } from '../theme.js'
import { Md } from './markdown.js'
// Fence parity of `s` up to `end`: each line opening with ``` or ~~~ toggles
// the state. Odd parity means the position sits inside a fenced block, so
// splitting there would orphan the opening fence.
const fenceOpenAt = (s: string, end: number) => {
  const FENCE_LINE = /^\s*(?:`{3,}|~{3,})/
  let inside = false
  for (const line of s.slice(0, Math.max(0, end)).split('\n')) {
    if (FENCE_LINE.test(line)) {
      inside = !inside
    }
  }
  return inside
}
// Locate the last "\n\n" gap in `text` that falls OUTSIDE any fenced code
// block. Returns the index just past the gap (i.e. the start of the next
// block), or -1 when no safe split point exists yet.
export const findStableBoundary = (text: string) => {
  let scan = text.length
  while (scan > 0) {
    const gap = text.lastIndexOf('\n\n', scan - 1)
    if (gap < 0) {
      return -1
    }
    const splitAt = gap + 2
    if (!fenceOpenAt(text, splitAt)) {
      return splitAt
    }
    // Gap was inside a fence — keep scanning backwards from it.
    scan = gap
  }
  return -1
}
export const StreamingMd = memo(function StreamingMd({ compact, t, text }: StreamingMdProps) {
  const prefixRef = useRef('')
  // Defensive reset: if the incoming text no longer extends the recorded
  // prefix (normally impossible — the component unmounts between turns),
  // start over rather than render a mismatched split.
  if (!text.startsWith(prefixRef.current)) {
    prefixRef.current = ''
  }
  const splitAt = findStableBoundary(text)
  // Advance-only: the prefix may only grow, so its exact string value — and
  // therefore the inner <Md>'s memo key — stays stable across deltas.
  if (splitAt > prefixRef.current.length) {
    prefixRef.current = text.slice(0, splitAt)
  }
  const head = prefixRef.current
  const tail = text.slice(head.length)
  if (!head) {
    return <Md compact={compact} t={t} text={tail} />
  }
  if (!tail) {
    return <Md compact={compact} t={t} text={head} />
  }
  // Explicit column Box: the parent row defaults to row layout, so two bare
  // <Md> siblings would render side-by-side.
  return (
    <Box flexDirection="column">
      <Md compact={compact} t={t} text={head} />
      <Md compact={compact} t={t} text={tail} />
    </Box>
  )
})
interface StreamingMdProps {
compact?: boolean
t: Theme
text: string
}

View File

@ -4,16 +4,18 @@ import { useEffect, useMemo, useRef, useState } from 'react'
import { setInputSelection } from '../app/inputSelectionStore.js'
import { readClipboardText, writeClipboardText } from '../lib/clipboard.js'
import { cursorLayout } from '../lib/inputMetrics.js'
import { isActionMod, isMac, isMacActionFallback } from '../lib/platform.js'
type InkExt = typeof Ink & {
stringWidth: (s: string) => number
useDeclaredCursor: (a: { line: number; column: number; active: boolean }) => (el: any) => void
useStdout: () => { stdout?: NodeJS.WriteStream }
useTerminalFocus: () => boolean
}
const ink = Ink as unknown as InkExt
const { Box, Text, useStdin, useInput, stringWidth, useDeclaredCursor, useTerminalFocus } = ink
const { Box, Text, useStdin, useInput, useStdout, stringWidth, useDeclaredCursor, useTerminalFocus } = ink
const ESC = '\x1b'
const INV = `${ESC}[7m`
@ -167,50 +169,6 @@ export function lineNav(s: string, p: number, dir: -1 | 1): null | number {
return snapPos(s, Math.min(nextBreak + 1 + col, lineEnd))
}
// mirrors wrap-ansi(..., { wordWrap: false, hard: true }) so the declared
// cursor lines up with what <Text wrap="wrap-char"> actually renders
export function cursorLayout(value: string, cursor: number, cols: number) {
  const target = Math.max(0, Math.min(cursor, value.length))
  const width = Math.max(1, cols)
  let row = 0
  let column = 0
  for (const { segment, index } of seg().segment(value)) {
    if (index >= target) {
      break
    }
    if (segment === '\n') {
      row++
      column = 0
      continue
    }
    const cells = stringWidth(segment)
    if (!cells) {
      // zero-width segment occupies no cell — skip it
      continue
    }
    if (column + cells > width) {
      // hard wrap: the segment doesn't fit on this row
      row++
      column = 0
    }
    column += cells
  }
  // trailing cursor-cell overflows to the next row at the wrap column
  if (column >= width) {
    row++
    column = 0
  }
  return { column, line: row }
}
export function offsetFromPosition(value: string, row: number, col: number, cols: number) {
if (!value.length) {
return 0
@ -336,6 +294,7 @@ export function TextInput({
const [sel, setSel] = useState<null | { end: number; start: number }>(null)
const fwdDel = useFwdDelete(focus)
const termFocus = useTerminalFocus()
const { stdout } = useStdout()
const curRef = useRef(cur)
const selRef = useRef<null | { end: number; start: number }>(null)
@ -346,6 +305,10 @@ export function TextInput({
const pasteTimer = useRef<ReturnType<typeof setTimeout> | null>(null)
const pastePos = useRef(0)
const editVersionRef = useRef(0)
const parentChangeTimer = useRef<ReturnType<typeof setTimeout> | null>(null)
const pendingParentValue = useRef<string | null>(null)
const localRenderTimer = useRef<ReturnType<typeof setTimeout> | null>(null)
const lineWidthRef = useRef(stringWidth(value.includes('\n') ? value.slice(value.lastIndexOf('\n') + 1) : value))
const undo = useRef<{ cursor: number; value: string }[]>([])
const redo = useRef<{ cursor: number; value: string }[]>([])
@ -373,21 +336,23 @@ export function TextInput({
active: focus && termFocus && !selected
})
const nativeCursor = focus && termFocus && !selected && !!stdout?.isTTY
const rendered = useMemo(() => {
if (!focus) {
return display || dim(placeholder)
}
if (!display && placeholder) {
return invert(placeholder[0] ?? ' ') + dim(placeholder.slice(1))
return nativeCursor ? dim(placeholder) : invert(placeholder[0] ?? ' ') + dim(placeholder.slice(1))
}
if (selected) {
return renderWithSelection(display, selected.start, selected.end)
}
return renderWithCursor(display, cur)
}, [cur, display, focus, placeholder, selected])
return nativeCursor ? display || ' ' : renderWithCursor(display, cur)
}, [cur, display, focus, nativeCursor, placeholder, selected])
useEffect(() => {
if (self.current) {
@ -398,6 +363,7 @@ export function TextInput({
curRef.current = value.length
selRef.current = null
vRef.current = value
lineWidthRef.current = stringWidth(value.includes('\n') ? value.slice(value.lastIndexOf('\n') + 1) : value)
undo.current = []
redo.current = []
}
@ -428,11 +394,92 @@ export function TextInput({
if (pasteTimer.current) {
clearTimeout(pasteTimer.current)
}
if (parentChangeTimer.current) {
clearTimeout(parentChangeTimer.current)
}
if (localRenderTimer.current) {
clearTimeout(localRenderTimer.current)
}
},
[]
)
const commit = (next: string, nextCur: number, track = true) => {
const flushParentChange = () => {
if (parentChangeTimer.current) {
clearTimeout(parentChangeTimer.current)
parentChangeTimer.current = null
}
const next = pendingParentValue.current
pendingParentValue.current = null
if (next !== null) {
self.current = true
cbChange.current(next)
}
}
const scheduleParentChange = (next: string) => {
pendingParentValue.current = next
if (parentChangeTimer.current) {
return
}
parentChangeTimer.current = setTimeout(flushParentChange, 16)
}
const cancelLocalRender = () => {
if (localRenderTimer.current) {
clearTimeout(localRenderTimer.current)
localRenderTimer.current = null
}
}
const scheduleLocalRender = () => {
if (localRenderTimer.current) {
return
}
localRenderTimer.current = setTimeout(() => {
localRenderTimer.current = null
setCur(curRef.current)
}, 16)
}
const canFastEchoBase = () => focus && termFocus && !selected && !mask && !!stdout?.isTTY
const canFastAppend = (current: string, cursor: number, text: string) => {
const sw = stringWidth(text)
return (
canFastEchoBase() &&
cursor === current.length &&
current.length > 0 &&
!current.includes('\n') &&
sw === text.length &&
lineWidthRef.current + sw < Math.max(1, columns)
)
}
const canFastBackspace = (current: string, cursor: number) => {
if (!canFastEchoBase() || cursor !== current.length || cursor <= 0 || current.includes('\n')) {
return false
}
return stringWidth(current.slice(prevPos(current, cursor), cursor)) === 1
}
const commit = (
next: string,
nextCur: number,
track = true,
syncParent = true,
syncLocal = true,
nextLineWidth?: number
) => {
const prev = vRef.current
const c = snapPos(next, nextCur)
editVersionRef.current += 1
@ -452,13 +499,27 @@ export function TextInput({
redo.current = []
}
setCur(c)
if (syncLocal) {
cancelLocalRender()
setCur(c)
} else {
scheduleLocalRender()
}
curRef.current = c
vRef.current = next
lineWidthRef.current =
nextLineWidth ?? stringWidth(next.includes('\n') ? next.slice(next.lastIndexOf('\n') + 1) : next)
if (next !== prev) {
self.current = true
cbChange.current(next)
if (syncParent) {
flushParentChange()
self.current = true
cbChange.current(next)
} else {
self.current = true
scheduleParentChange(next)
}
}
}
@ -640,9 +701,13 @@ export function TextInput({
}
if (k.return) {
k.shift || (isMac ? isActionMod(k) : k.meta)
? commit(ins(vRef.current, curRef.current, '\n'), curRef.current + 1)
: cbSubmit.current?.(vRef.current)
if (k.shift || k.ctrl || (isMac ? isActionMod(k) : k.meta)) {
flushParentChange()
commit(ins(vRef.current, curRef.current, '\n'), curRef.current + 1)
} else {
flushParentChange()
cbSubmit.current?.(vRef.current)
}
return
}
@ -707,6 +772,14 @@ export function TextInput({
const t = wordLeft(v, c)
v = v.slice(0, t) + v.slice(c)
c = t
} else if (canFastBackspace(v, c)) {
const t = prevPos(v, c)
v = v.slice(0, t) + v.slice(c)
c = t
stdout!.write('\b \b')
commit(v, c, true, false, false, Math.max(0, lineWidthRef.current - 1))
return
} else {
const t = prevPos(v, c)
v = v.slice(0, t) + v.slice(c)
@ -784,8 +857,17 @@ export function TextInput({
v = v.slice(0, range.start) + text + v.slice(range.end)
c = range.start + text.length
} else {
const simpleAppend = canFastAppend(v, c, text)
v = v.slice(0, c) + text + v.slice(c)
c += text.length
if (simpleAppend) {
stdout!.write(text)
commit(v, c, true, false, false, lineWidthRef.current + stringWidth(text))
return
}
}
} else {
return

View File

@ -16,12 +16,14 @@ import {
widthByDepth
} from '../lib/subagentTree.js'
import {
boundedLiveRenderText,
compactPreview,
estimateTokensRough,
fmtK,
formatToolCall,
parseToolTrailResultLine,
pick,
splitToolDuration,
thinkingPreview,
toolTrailLabel
} from '../lib/text.js'
@ -392,10 +394,6 @@ function SubagentAccordion({
const hasTools = item.tools.length > 0
const noteRows = [...(summary ? [summary] : []), ...item.notes]
const hasNotes = noteRows.length > 0
// `showChildren` only seeds the recursive `expanded` prop for nested
// subagents — it MUST NOT be OR-ed into the local section toggles, or
// expand-all permanently locks the inner chevrons open.
const showChildren = expanded || deep
const noteColor = statusTone === 'error' ? t.color.error : statusTone === 'warn' ? t.color.warn : t.color.dim
const sections: {
@ -633,7 +631,12 @@ export const Thinking = memo(function Thinking({
streaming?: boolean
t: Theme
}) {
const preview = useMemo(() => thinkingPreview(reasoning, mode, THINKING_COT_MAX), [mode, reasoning])
const preview = useMemo(() => {
const raw = thinkingPreview(reasoning, mode, THINKING_COT_MAX)
return mode === 'full' ? boundedLiveRenderText(raw) : raw
}, [mode, reasoning])
const lines = useMemo(() => preview.split('\n').map(line => line.replace(/\t/g, ' ')), [preview])
if (!preview && !active) {
@ -646,22 +649,22 @@ export const Thinking = memo(function Thinking({
{preview ? (
mode === 'full' ? (
lines.map((line, index) => (
<Text color={t.color.dim} dim key={index} wrap="wrap-trim">
<Text color={t.color.dim} key={index} wrap="wrap-trim">
{line || ' '}
{index === lines.length - 1 ? (
<StreamCursor color={t.color.dim} dimColor streaming={streaming} visible={active} />
<StreamCursor color={t.color.dim} streaming={streaming} visible={active} />
) : null}
</Text>
))
) : (
<Text color={t.color.dim} dim wrap="truncate-end">
<Text color={t.color.dim} wrap="truncate-end">
{preview}
<StreamCursor color={t.color.dim} dimColor streaming={streaming} visible={active} />
<StreamCursor color={t.color.dim} streaming={streaming} visible={active} />
</Text>
)
) : (
<Text color={t.color.dim} dim>
<StreamCursor color={t.color.dim} dimColor streaming={streaming} visible={active} />
<Text color={t.color.dim}>
<StreamCursor color={t.color.dim} streaming={streaming} visible={active} />
</Text>
)}
</Box>
@ -681,6 +684,7 @@ interface Group {
export const ToolTrail = memo(function ToolTrail({
busy = false,
commandOverride = false,
detailsMode = 'collapsed',
outcome = '',
reasoningActive = false,
@ -696,6 +700,7 @@ export const ToolTrail = memo(function ToolTrail({
activity = []
}: {
busy?: boolean
commandOverride?: boolean
detailsMode?: DetailsMode
outcome?: string
reasoningActive?: boolean
@ -712,12 +717,12 @@ export const ToolTrail = memo(function ToolTrail({
}) {
const visible = useMemo(
() => ({
thinking: sectionMode('thinking', detailsMode, sections),
tools: sectionMode('tools', detailsMode, sections),
subagents: sectionMode('subagents', detailsMode, sections),
activity: sectionMode('activity', detailsMode, sections)
thinking: sectionMode('thinking', detailsMode, sections, commandOverride),
tools: sectionMode('tools', detailsMode, sections, commandOverride),
subagents: sectionMode('subagents', detailsMode, sections, commandOverride),
activity: sectionMode('activity', detailsMode, sections, commandOverride)
}),
[detailsMode, sections]
[commandOverride, detailsMode, sections]
)
const [now, setNow] = useState(() => Date.now())
@ -788,7 +793,7 @@ export const ToolTrail = memo(function ToolTrail({
if (parsed) {
groups.push({
color: parsed.mark === '✗' ? t.color.error : t.color.cornsilk,
content: parsed.detail ? parsed.call : `${parsed.call} ${parsed.mark}`,
content: parsed.call,
details: [],
key: `tr-${i}`,
label: parsed.call
@ -884,6 +889,21 @@ export const ToolTrail = memo(function ToolTrail({
const delegateGroups = groups.filter(g => g.label.startsWith('Delegate Task'))
const inlineDelegateKey = hasSubagents && delegateGroups.length === 1 ? delegateGroups[0]!.key : null
const toolLabel = (group: Group) => {
const { duration, label } = splitToolDuration(String(group.content))
return duration ? (
<>
{label}
<Text color={t.color.statusFg} dim>
{duration}
</Text>
</>
) : (
group.content
)
}
// ── Backstop: floating alerts when every panel is hidden ─────────
//
// Per-section overrides win over the global details_mode (they're computed
@ -1049,7 +1069,7 @@ export const ToolTrail = memo(function ToolTrail({
content={
<>
<Text color={t.color.amber}> </Text>
{group.content}
{toolLabel(group)}
</>
}
rails={rails}

View File

@ -0,0 +1,93 @@
import { Box, Text } from '@hermes/ink'
import { memo, useState } from 'react'
import { countPendingTodos } from '../lib/liveProgress.js'
import { todoGlyph, todoTone } from '../lib/todo.js'
import type { Theme } from '../theme.js'
import type { TodoItem } from '../types.js'
// Map a todo status onto the theme color for its row: active rows get the
// bright accent, body rows the normal status foreground, the rest go dim.
const rowColor = (t: Theme, status: TodoItem['status']) => {
  switch (todoTone(status)) {
    case 'active':
      return t.color.cornsilk
    case 'body':
      return t.color.statusFg
    default:
      return t.color.dim
  }
}
export const TodoPanel = memo(function TodoPanel({
  collapsed,
  defaultCollapsed = false,
  incomplete = false,
  onToggle,
  t,
  todos
}: {
  collapsed?: boolean
  defaultCollapsed?: boolean
  incomplete?: boolean
  onToggle?: () => void
  t: Theme
  todos: TodoItem[]
}) {
  // Archived todos in the transcript have no external controller, so we keep
  // a local collapse fallback. The live TodoPanel passes collapsed+onToggle
  // from the turn store and stays fully controlled.
  const [fallbackCollapsed, setFallbackCollapsed] = useState(defaultCollapsed)
  const isControlled = typeof collapsed === 'boolean'
  const isCollapsed = isControlled ? collapsed : fallbackCollapsed
  const toggle = () => {
    if (onToggle) {
      onToggle()
    } else if (!isControlled) {
      setFallbackCollapsed(prev => !prev)
    }
  }
  if (!todos.length) {
    return null
  }
  const done = todos.filter(todo => todo.status === 'completed').length
  const pending = countPendingTodos(todos)
  return (
    <Box flexDirection="column" marginBottom={1}>
      <Box onClick={toggle}>
        <Text color={t.color.dim}>
          <Text color={t.color.amber}>{isCollapsed ? '▸ ' : '▾ '}</Text>
          <Text bold color={t.color.cornsilk}>
            Todo
          </Text>{' '}
          <Text color={t.color.statusFg} dim>
            ({done}/{todos.length})
          </Text>
          {incomplete && pending > 0 && (
            <Text color={t.color.dim} dim>
              {' '}
              · incomplete · {pending} still {pending === 1 ? 'pending' : 'pending/in_progress'}
            </Text>
          )}
        </Text>
      </Box>
      {!isCollapsed && (
        <Box flexDirection="column" marginLeft={2}>
          {todos.map(todo => {
            const tone = todoTone(todo.status)
            const fg = rowColor(t, todo.status)
            return (
              <Text color={fg} dim={tone === 'dim'} key={todo.id}>
                <Text color={fg}>{todoGlyph(todo.status)} </Text>
                {todo.content}
              </Text>
            )
          })}
        </Box>
      )}
    </Box>
  )
})

View File

@ -1,3 +1,15 @@
// Loose env-flag parser: trims, lowercases, and accepts the usual "on" spellings.
const truthy = (v?: string) => ['1', 'true', 'yes', 'on'].includes((v ?? '').trim().toLowerCase())
export const STARTUP_RESUME_ID = (process.env.HERMES_TUI_RESUME ?? '').trim()
export const MOUSE_TRACKING = !/^(?:1|true|yes|on)$/i.test((process.env.HERMES_TUI_DISABLE_MOUSE ?? '').trim())
export const NO_CONFIRM_DESTRUCTIVE = /^(?:1|true|yes|on)$/i.test((process.env.HERMES_TUI_NO_CONFIRM ?? '').trim())
export const MOUSE_TRACKING = !truthy(process.env.HERMES_TUI_DISABLE_MOUSE)
export const NO_CONFIRM_DESTRUCTIVE = truthy(process.env.HERMES_TUI_NO_CONFIRM)
// Skip AlternateScreen — TUI renders into the primary buffer so the host
// terminal's native scrollback captures whatever scrolls off the top.
// Experiment gate: lets us measure native scroll vs our virtualization on
// the same pipeline.
export const INLINE_MODE = truthy(process.env.HERMES_TUI_INLINE)
// Live FPS counter overlay, fed by ink's onFrame (real render rate, not a
// synthetic timer).
export const SHOW_FPS = truthy(process.env.HERMES_TUI_FPS)

View File

@ -1,5 +1,22 @@
export const LARGE_PASTE = { chars: 8000, lines: 80 }
export const LIVE_RENDER_MAX_CHARS = 16_000
export const LIVE_RENDER_MAX_LINES = 240
// History-render bounds for messages outside FULL_RENDER_TAIL. Each rendered
// line ≈ 1 Yoga/Text node + inline spans, so this is the dominant lever on
// cold-mount cost during PageUp catch-up. 16 lines × 25 mounted ≈ 400 nodes
// — comfortably inside the 16ms per-frame budget. User pages back to
// recognize, not to read; full re-render once it falls inside the tail.
export const HISTORY_RENDER_MAX_CHARS = 800
export const HISTORY_RENDER_MAX_LINES = 16
export const FULL_RENDER_TAIL_ITEMS = 8
export const LONG_MSG = 300
export const MAX_HISTORY = 800
export const THINKING_COT_MAX = 160
export const WHEEL_SCROLL_STEP = 3
// Rows per wheel event (pre-accel). 1 keeps Ink's DECSTBM fast path live
// (each scroll < viewport-1) and produces smooth motion. wheelAccel.ts
// ramps this on sustained scrolls.
export const WHEEL_SCROLL_STEP = 1

View File

@ -1,2 +1,6 @@
export const STREAM_BATCH_MS = 16
export const STREAM_IDLE_BATCH_MS = 16
export const STREAM_SCROLL_BATCH_MS = 96
export const STREAM_TYPING_BATCH_MS = 80
export const TYPING_IDLE_MS = 250
export const REASONING_PULSE_MS = 700

View File

@ -57,9 +57,20 @@ export const resolveSections = (raw: unknown): SectionVisibility =>
) as SectionVisibility)
: {}
// Effective mode for one section: explicit override → SECTION_DEFAULTS → global.
// Single source of truth for "is this section open by default / rendered at all".
export const sectionMode = (name: SectionName, global: DetailsMode, sections?: SectionVisibility): DetailsMode =>
sections?.[name] ?? SECTION_DEFAULTS[name] ?? global
// Effective mode for one section: explicit override → global command mode →
// built-in live-stream defaults → global config mode.
//
// The `commandOverride` flag is set for in-session `/details <mode>` changes.
// That command should immediately apply to every section, including sections
// with built-in defaults like thinking/tools=expanded and activity=hidden. On
// startup/config sync we keep those defaults layered above the persisted global
// config so the TUI still opens live reasoning/tools by default unless the user
// pins explicit per-section overrides.
export const sectionMode = (
name: SectionName,
global: DetailsMode,
sections?: SectionVisibility,
commandOverride = false
): DetailsMode => sections?.[name] ?? (commandOverride ? global : (SECTION_DEFAULTS[name] ?? global))
export const nextDetailsMode = (m: DetailsMode): DetailsMode => MODES[(MODES.indexOf(m) + 1) % MODES.length]!

View File

@ -1,4 +1,6 @@
#!/usr/bin/env -S node --max-old-space-size=8192 --expose-gc
import type { FrameEvent } from '@hermes/ink'
import { GatewayClient } from './gatewayClient.js'
import { setupGracefulExit } from './lib/gracefulExit.js'
import { formatBytes, type HeapDumpResult, performHeapDump } from './lib/memory.js'
@ -41,6 +43,21 @@ if (process.env.HERMES_HEAPDUMP_ON_START === '1') {
process.on('beforeExit', () => stopMemoryMonitor())
const [{ render }, { App }] = await Promise.all([import('@hermes/ink'), import('./app.js')])
const [ink, { App }, { logFrameEvent }, { trackFrame }] = await Promise.all([
import('@hermes/ink'),
import('./app.js'),
import('./lib/perfPane.js'),
import('./lib/fpsStore.js')
])
render(<App gw={gw} />, { exitOnCtrlC: false })
// Both consumers are undefined when their env flags are off; only attach
// onFrame when at least one is on so ink skips timing in the default case.
const onFrame =
logFrameEvent || trackFrame
? (event: FrameEvent) => {
logFrameEvent?.(event)
trackFrame?.(event.durationMs)
}
: undefined
ink.render(<App gw={gw} />, { exitOnCtrlC: false, onFrame })

View File

@ -364,11 +364,6 @@ export interface SpawnTreeLoadResponse {
subagents?: unknown[]
}
export interface SpawnTreeSaveResponse {
path?: string
session_id?: string
}
export type GatewayEvent =
| { payload?: { skin?: GatewaySkin }; session_id?: string; type: 'gateway.ready' }
| { payload?: GatewaySkin; session_id?: string; type: 'skin.changed' }
@ -384,9 +379,21 @@ export type GatewayEvent =
| { payload?: { text?: string }; session_id?: string; type: 'reasoning.delta' | 'reasoning.available' }
| { payload: { name?: string; preview?: string }; session_id?: string; type: 'tool.progress' }
| { payload: { name?: string }; session_id?: string; type: 'tool.generating' }
| { payload: { context?: string; name?: string; tool_id: string }; session_id?: string; type: 'tool.start' }
| {
payload: { error?: string; inline_diff?: string; name?: string; summary?: string; tool_id: string }
payload: { context?: string; name?: string; tool_id: string; todos?: unknown[] }
session_id?: string
type: 'tool.start'
}
| {
payload: {
duration_s?: number
error?: string
inline_diff?: string
name?: string
summary?: string
tool_id: string
todos?: unknown[]
}
session_id?: string
type: 'tool.complete'
}

View File

@ -2,22 +2,56 @@ import type { ScrollBoxHandle } from '@hermes/ink'
import {
type RefObject,
useCallback,
useDeferredValue,
useEffect,
useLayoutEffect,
useMemo,
useRef,
useState,
useSyncExternalStore
} from 'react'
const ESTIMATE = 4
const OVERSCAN = 40
const MAX_MOUNTED = 260
const COLD_START = 40
// Overscan was 40 (= viewport) which is way more than needed when heights
// are well-estimated. Cutting in half saves ~20 mounted items per scroll
// edge → smaller fiber tree → less buffer-compose work per frame. HN/CC
// dev (https://news.ycombinator.com/item?id=46699072) confirmed GC pressure
// from large JSX trees was their main perf issue post-rewrite.
const OVERSCAN = 20
// Hard cap on mounted items. Was 260; profiling showed ~23k live Yoga
// nodes during sustained PageUp catch-up (renderer p99=106ms). The
// viewport+2*overscan = 80 rows of needed coverage = ~25 items at avg 3
// rows/item, so 120 leaves >4× headroom and never blanks the viewport
// even when items are tiny.
const MAX_MOUNTED = 120
const COLD_START = 30
// Floor on unmeasured row height used when computing coverage — guarantees
// the mounted span physically reaches the viewport bottom regardless of how
// small items actually are (at the cost of over-mounting when items are
// larger; overscan absorbs that).
const PESSIMISTIC = 1
// Tightest safe scrollTop bin for the useSyncExternalStore snapshot. Small
// wheel ticks that don't cross a bin short-circuit React's commit entirely;
// Ink keeps painting via ScrollBox.forceRender + direct scrollTop reads.
// Half of OVERSCAN keeps ≥20 rows of cushion before the mounted range
// would actually need to shift.
const QUANTUM = OVERSCAN >> 1
// Renders to keep the mount range frozen after width change (heights scaled
// but not yet re-measured). Render #1 skips measurement so pre-resize Yoga
// doesn't poison the scaled cache; render #2's useLayoutEffect captures
// post-resize heights; render #3 recomputes range with accurate data.
const FREEZE_RENDERS = 2
// Cap on NEW items mounted per commit when scrolling fast. Without this,
// a single PageUp into unmeasured territory mounts ~190 rows with
// PESSIMISTIC=1 coverage — each row running marked lexer + syntax
// highlighting for ~3ms = ~600ms sync block. Sliding toward the target
// over several commits keeps per-commit mount cost bounded. Tightened
// from 25 → 12: each new item adds ~100 fibers / Yoga nodes, and a
// 25-item commit was the dominant contributor to the 100ms+ p99 frames.
const SLIDE_STEP = 12
const upperBound = (arr: number[], target: number) => {
const NOOP = () => {}
const upperBound = (arr: ArrayLike<number>, target: number) => {
let lo = 0
let hi = arr.length
@ -30,29 +64,74 @@ const upperBound = (arr: number[], target: number) => {
return lo
}
// Clamp bounds are only meaningful when there is content and a measured
// viewport, and the user has actually scrolled away from the live tail.
export const shouldSetVirtualClamp = ({
  itemCount,
  liveTailActive = false,
  sticky,
  viewportHeight
}: {
  itemCount: number
  liveTailActive?: boolean
  sticky: boolean
  viewportHeight: number
}) => {
  if (itemCount <= 0 || viewportHeight <= 0) {
    return false
  }
  return !(sticky || liveTailActive)
}
export function useVirtualHistory(
scrollRef: RefObject<ScrollBoxHandle | null>,
items: readonly { key: string }[],
columns: number,
{ estimate = ESTIMATE, overscan = OVERSCAN, maxMounted = MAX_MOUNTED, coldStartCount = COLD_START } = {}
{
estimate = ESTIMATE,
initialHeights,
liveTailActive = false,
onHeightsChange,
overscan = OVERSCAN,
maxMounted = MAX_MOUNTED,
coldStartCount = COLD_START
}: VirtualHistoryOptions = {}
) {
const nodes = useRef(new Map<string, unknown>())
const heights = useRef(new Map<string, number>())
const heights = useRef(new Map(initialHeights))
const initialHeightsRef = useRef(initialHeights)
const refs = useRef(new Map<string, (el: unknown) => void>())
const [ver, setVer] = useState(0)
const onHeightsChangeRef = useRef(onHeightsChange)
// Bump whenever heightCache mutates so offsets rebuild on next read.
// Ref (not state) — checked during render phase, zero extra commits.
const offsetVersion = useRef(0)
// Cached offsets: reused Float64Array keyed on (itemCount, version) so we
// only rebuild when something actually changed. Previous approach allocated
// a fresh Array(n+1) every render — at n=10k that's ~80KB/render of GC
// pressure during streaming.
const offsetsCache = useRef<{ arr: Float64Array; n: number; version: number }>({
arr: new Float64Array(0),
n: -1,
version: -1
})
const [hasScrollRef, setHasScrollRef] = useState(false)
const metrics = useRef({ sticky: true, top: 0, vp: 0 })
const lastScrollTopRef = useRef(0)
// Width change: scale cached heights (not clear — clearing forces a
// pessimistic back-walk mounting ~190 rows at once, each a fresh
// marked.lexer + syntax highlight ≈ 3ms). Freeze mount range for 2
// renders so warm memos survive; skip one measurement so useLayoutEffect
// doesn't poison the scaled cache with pre-resize Yoga heights.
// Width change: scale cached heights by oldCols/newCols instead of clearing
// (clearing forces a pessimistic back-walk mounting ~190 rows at once, each
// a fresh marked.lexer + syntax highlight ≈ 3ms). Freeze the mount range
// for 2 renders so warm memos survive; skip one measurement pass so
// useLayoutEffect doesn't poison the scaled cache with pre-resize Yoga
// heights.
const prevColumns = useRef(columns)
const skipMeasurement = useRef(false)
const prevRange = useRef<null | readonly [number, number]>(null)
const freezeRenders = useRef(0)
onHeightsChangeRef.current = onHeightsChange
if (initialHeightsRef.current !== initialHeights) {
initialHeightsRef.current = initialHeights
heights.current = new Map(initialHeights)
offsetVersion.current++
}
if (prevColumns.current !== columns && prevColumns.current > 0 && columns > 0) {
const ratio = prevColumns.current / columns
@ -62,6 +141,7 @@ export function useVirtualHistory(
heights.current.set(k, Math.max(1, Math.round(h * ratio)))
}
offsetVersion.current++
skipMeasurement.current = true
freezeRenders.current = FREEZE_RENDERS
}
@ -70,11 +150,19 @@ export function useVirtualHistory(
setHasScrollRef(Boolean(scrollRef.current))
}, [scrollRef])
// Quantized snapshot: same-bin scrolls (most wheel ticks) produce the same
// number → React.Object.is short-circuits the commit entirely. sticky state
// is folded in via the sign bit so sticky→broken transitions also trigger.
// Uses the TARGET (committed + pendingDelta), not committed scrollTop, so
// scrollBy notifications immediately remount for the destination before
// Ink's drain frames need the children.
const subscribe = useCallback(
(cb: () => void) => (hasScrollRef ? scrollRef.current?.subscribe(cb) : null) ?? NOOP,
[hasScrollRef, scrollRef]
)
useSyncExternalStore(
useCallback(
(cb: () => void) => (hasScrollRef ? scrollRef.current?.subscribe(cb) : null) ?? (() => () => {}),
[hasScrollRef, scrollRef]
),
subscribe,
() => {
const s = scrollRef.current
@ -82,9 +170,10 @@ export function useVirtualHistory(
return NaN
}
const b = Math.floor(s.getScrollTop() / QUANTUM)
const target = s.getScrollTop() + s.getPendingDelta()
const bin = Math.floor(target / QUANTUM)
return s.isSticky() ? -b - 1 : b
return s.isSticky() ? ~bin : bin
},
() => NaN
)
@ -103,26 +192,36 @@ export function useVirtualHistory(
}
if (dirty) {
setVer(v => v + 1)
offsetVersion.current++
}
}, [items])
const offsets = useMemo(() => {
void ver
const out = new Array<number>(items.length + 1).fill(0)
// Offsets: Float64Array reused across renders, invalidated by offsetVersion
// bumps from heightCache writers (measureRef, resize-scale, GC). Binary
// search tolerates either monotone source, so no need to rebuild unless
// something changed.
const n = items.length
for (let i = 0; i < items.length; i++) {
out[i + 1] = out[i]! + Math.max(1, Math.floor(heights.current.get(items[i]!.key) ?? estimate))
if (offsetsCache.current.version !== offsetVersion.current || offsetsCache.current.n !== n) {
const arr = offsetsCache.current.arr.length >= n + 1 ? offsetsCache.current.arr : new Float64Array(n + 1)
arr[0] = 0
for (let i = 0; i < n; i++) {
arr[i + 1] = arr[i]! + Math.max(1, Math.floor(heights.current.get(items[i]!.key) ?? estimate))
}
return out
}, [estimate, items, ver])
offsetsCache.current = { arr, n, version: offsetVersion.current }
}
const n = items.length
const offsets = offsetsCache.current.arr
const total = offsets[n] ?? 0
const top = Math.max(0, scrollRef.current?.getScrollTop() ?? 0)
const pendingDelta = scrollRef.current?.getPendingDelta() ?? 0
const target = Math.max(0, top + pendingDelta)
const vp = Math.max(0, scrollRef.current?.getViewportHeight() ?? 0)
const sticky = scrollRef.current?.isSticky() ?? true
const recentManual = Date.now() - (scrollRef.current?.getLastManualScrollAt() ?? 0) < 1200
// During a freeze, drop the frozen range if items shrank past its start
// (/clear, compaction) — clamping would collapse to an empty mount and
@ -139,9 +238,32 @@ export function useVirtualHistory(
} else if (n > 0) {
if (vp <= 0) {
start = Math.max(0, n - coldStartCount)
} else if (sticky && !recentManual) {
const budget = vp + overscan
start = n
while (start > 0 && total - offsets[start - 1]! < budget) {
start--
}
} else {
start = Math.max(0, Math.min(n - 1, upperBound(offsets, Math.max(0, top - overscan)) - 1))
end = Math.max(start + 1, Math.min(n, upperBound(offsets, top + vp + overscan)))
// User scrolled up. Span [committed..target] so every drain frame is
// covered. Claude-code caps the span at 3×viewport so pendingDelta
// growing unbounded (MX Master free-spin) doesn't blow the mount
// budget; the clamp (setClampBounds) shows edge-of-mounted content
// during catch-up.
const MAX_SPAN = vp * 3
const rawLo = Math.min(top, target)
const rawHi = Math.max(top, target)
const span = rawHi - rawLo
const clampedLo = span > MAX_SPAN ? (pendingDelta < 0 ? rawHi - MAX_SPAN : rawLo) : rawLo
const clampedHi = clampedLo + Math.min(span, MAX_SPAN)
const lo = Math.max(0, clampedLo - overscan)
const hi = clampedHi + vp + overscan
// Binary search — offsets is monotone. Linear walk was O(n) at n=10k+,
// ~2ms per render during scroll.
start = Math.max(0, Math.min(n - 1, upperBound(offsets, lo) - 1))
end = Math.max(start + 1, Math.min(n, upperBound(offsets, hi)))
}
}
@ -149,17 +271,140 @@ export function useVirtualHistory(
sticky ? (start = Math.max(0, end - maxMounted)) : (end = Math.min(n, start + maxMounted))
}
// Coverage guarantee: ensure sum(real or pessimistic heights) ≥
// viewportH + 2*overscan so the viewport is physically covered even when
// items are tiny. Pessimistic because uncached items use a floor of 1 —
// over-mounts when items are large, never leaves blank spacer showing.
if (n > 0 && vp > 0 && !frozenRange) {
const needed = vp + 2 * overscan
let coverage = 0
for (let i = start; i < end; i++) {
coverage += heights.current.get(items[i]!.key) ?? PESSIMISTIC
}
if (sticky) {
const minStart = Math.max(0, end - maxMounted)
while (start > minStart && coverage < needed) {
start--
coverage += heights.current.get(items[start]!.key) ?? PESSIMISTIC
}
} else {
const maxEnd = Math.min(n, start + maxMounted)
while (end < maxEnd && coverage < needed) {
coverage += heights.current.get(items[end]!.key) ?? PESSIMISTIC
end++
}
}
}
// Slide cap: limit how many NEW items mount this commit. Gates on scroll
// VELOCITY (|scrollTop delta since last commit| + |pendingDelta| >
// 2×viewport — key-repeat PageUp moves ~viewport/2 per press). Covers
// both scrollBy (pendingDelta) and scrollTo (direct write). Normal single
// PageUp skips this; the clamp holds the viewport at the mounted edge
// during catch-up so there's no blank screen. Only caps range GROWTH;
// shrinking is unbounded.
if (!frozenRange && prevRange.current && vp > 0) {
const velocity = Math.abs(top - lastScrollTopRef.current) + Math.abs(pendingDelta)
if (velocity > vp * 2) {
const [pS, pE] = prevRange.current
start = Math.max(start, pS - SLIDE_STEP)
end = Math.min(end, pE + SLIDE_STEP)
// A large jump past the capped end can invert (start > end); mount
// SLIDE_STEP items from the new start so the viewport isn't blank
// during catch-up.
if (start > end) {
end = Math.min(start + SLIDE_STEP, n)
}
}
}
lastScrollTopRef.current = top
if (freezeRenders.current > 0) {
freezeRenders.current--
} else {
prevRange.current = [start, end]
}
// Time-slice range growth via useDeferredValue. Urgent render keeps Ink
// painting with the OLD range (all memo hits, fast); deferred render
// transitions to the NEW range (fresh mounts: Md, syntax highlight) in a
// non-blocking background commit. The clamp (setClampBounds) pins the
// viewport to the mounted edge so there's no visual artifact from the
// deferred range lagging briefly. Only deferral range GROWTH — shrinking
// is cheap (unmount = remove fiber, no parse).
const dStart = useDeferredValue(start)
const dEnd = useDeferredValue(end)
let effStart = start < dStart ? dStart : start
let effEnd = end > dEnd ? dEnd : end
// Inverted range (large jump with deferred value lagging) or sticky snap
// (scrollToBottom needs the tail mounted NOW so maxScroll lands on content,
// not bottomSpacer) — skip deferral.
if (effStart > effEnd || sticky) {
effStart = start
effEnd = end
}
// Scrolling DOWN — bypass effEnd deferral so the tail mounts immediately.
// Without this, the clamp holds scrollTop short of the real bottom and
// the user feels "stuck before bottom". effStart stays deferred so scroll-
// UP keeps time-slicing (older messages parse on mount).
if (pendingDelta > 0) {
effEnd = end
}
// Final O(viewport) enforcement. Deferred+bypass combinations above can
// leak: during sustained PageUp, concurrent mode interleaves dStart updates
// with effEnd=end bypasses across commits and the effective window drifts
// wider than either bound alone. Trim the far edge by viewport position
// (not pendingDelta direction — that flips mid-settle under concurrent
// scheduling and yanks scrollTop).
if (effEnd - effStart > maxMounted && vp > 0) {
const mid = (offsets[effStart]! + offsets[effEnd]!) / 2
if (top < mid) {
effEnd = effStart + maxMounted
} else {
effStart = effEnd - maxMounted
}
}
const measureRef = useCallback((key: string) => {
let fn = refs.current.get(key)
if (!fn) {
fn = (el: unknown) => (el ? nodes.current.set(key, el) : nodes.current.delete(key))
fn = (el: unknown) => {
if (el) {
nodes.current.set(key, el)
return
}
// Measure-at-unmount: the yogaNode is still valid here (reconciler
// calls ref(null) before removeChild → freeRecursive), so we grab
// the final height before WASM release. Without this, items
// scrolled out during fast pan keep a stale estimate in heightCache
// and offset math drifts until the next mount/remount cycle.
const existing = nodes.current.get(key) as MeasuredNode | undefined
const h = Math.ceil(existing?.yogaNode?.getComputedHeight?.() ?? 0)
if (h > 0 && heights.current.get(key) !== h) {
heights.current.set(key, h)
offsetVersion.current++
onHeightsChangeRef.current?.(heights.current)
}
nodes.current.delete(key)
}
refs.current.set(key, fn)
}
@ -167,12 +412,37 @@ export function useVirtualHistory(
}, [])
useLayoutEffect(() => {
const s = scrollRef.current
let dirty = false
// Give the renderer the mounted-row coverage for passive scroll clamping.
// Clamp MUST use the EFFECTIVE (deferred) range, not the immediate one.
// During fast scroll, immediate [start,end] may already cover the new
// scrollTop position, but children still render at the deferred range.
// If clamp used immediate bounds, render-node-to-output's drain-gate
// would drain past the deferred children's span → viewport lands in
// spacer → white flash.
if (s && shouldSetVirtualClamp({ itemCount: n, liveTailActive, sticky, viewportHeight: vp })) {
const effTopSpacer = offsets[effStart] ?? 0
const effBottom = offsets[effEnd] ?? total
// At effEnd=n there's no bottomSpacer — use Infinity so render-node-
// to-output's own Math.min(cur, maxScroll) governs. Using offsets[n]
// here would bake in heightCache (one render behind Yoga), and during
// streaming the tail item's cached height lags its real height —
// sticky-break would then clamp below the real max and push
// streaming text off-viewport.
const clampMin = effStart === 0 ? 0 : effTopSpacer
const clampMax = effEnd === n ? Infinity : Math.max(effTopSpacer, effBottom - vp)
s.setClampBounds(clampMin, clampMax)
} else {
s?.setClampBounds(undefined, undefined)
}
if (skipMeasurement.current) {
skipMeasurement.current = false
} else {
for (let i = start; i < end; i++) {
for (let i = effStart; i < effEnd; i++) {
const k = items[i]?.key
if (!k) {
@ -188,8 +458,6 @@ export function useVirtualHistory(
}
}
const s = scrollRef.current
if (s) {
const next = {
sticky: s.isSticky(),
@ -208,20 +476,31 @@ export function useVirtualHistory(
}
if (dirty) {
setVer(v => v + 1)
offsetVersion.current++
onHeightsChangeRef.current?.(heights.current)
}
}, [end, hasScrollRef, items, scrollRef, start])
})
return {
bottomSpacer: Math.max(0, total - (offsets[end] ?? total)),
end,
bottomSpacer: Math.max(0, total - (offsets[effEnd] ?? total)),
end: effEnd,
measureRef,
offsets,
start,
topSpacer: offsets[start] ?? 0
start: effStart,
topSpacer: offsets[effStart] ?? 0
}
}
interface MeasuredNode {
yogaNode?: { getComputedHeight?: () => number } | null
}
interface VirtualHistoryOptions {
coldStartCount?: number
estimate?: number
initialHeights?: ReadonlyMap<string, number>
liveTailActive?: boolean
maxMounted?: number
onHeightsChange?: (heights: ReadonlyMap<string, number>) => void
overscan?: number
}

View File

@ -0,0 +1,51 @@
// Tiny FPS tracker fed by ink's onFrame callback. Each entry is an Ink
// frame (React commit + drain-only frames) — the right notion for
// user-perceived motion.
//
// Zero-cost when HERMES_TUI_FPS is unset: trackFrame is undefined so the
// onFrame callback short-circuits at the optional chain.
import { atom } from 'nanostores'
import { SHOW_FPS } from '../config/env.js'
// Sliding window of frame timestamps used to derive the displayed FPS.
const WINDOW_SIZE = 30

export type FpsState = {
  fps: number
  /** Wraps at JS-safe int — diff pairs in a debug overlay safely. */
  totalFrames: number
  /** Ink render-phase total for the last frame. */
  lastDurationMs: number
}

export const $fpsState = atom<FpsState>({ fps: 0, lastDurationMs: 0, totalFrames: 0 })

// Module-level ring of recent frame times plus a monotonically growing count.
const frameTimes: number[] = []
let frameCount = 0

export const trackFrame = SHOW_FPS
  ? (durationMs: number) => {
      frameTimes.push(performance.now())
      if (frameTimes.length > WINDOW_SIZE) {
        frameTimes.shift()
      }
      frameCount++
      // Need at least two samples before a rate is defined.
      if (frameTimes.length < 2) {
        return
      }
      const spanSec = (frameTimes[frameTimes.length - 1]! - frameTimes[0]!) / 1000
      if (spanSec > 0) {
        $fpsState.set({
          // One decimal place of FPS, two of per-frame duration.
          fps: Math.round(((frameTimes.length - 1) / spanSec) * 10) / 10,
          lastDurationMs: Math.round(durationMs * 100) / 100,
          totalFrames: frameCount
        })
      }
    }
  : undefined

View File

@ -0,0 +1,62 @@
import { stringWidth } from '@hermes/ink'
let segmenterCache: Intl.Segmenter | null = null
// Lazily construct one grapheme segmenter; building it is comparatively costly.
const seg = () => (segmenterCache ??= new Intl.Segmenter(undefined, { granularity: 'grapheme' }))

/**
 * Mirrors the char-wrap behavior used by the composer TextInput.
 * Returns the zero-based visual line and column of the cursor cell.
 */
export function cursorLayout(value: string, cursor: number, cols: number) {
  const clamped = Math.max(0, Math.min(cursor, value.length))
  const width = Math.max(1, cols)
  let line = 0
  let col = 0
  for (const { segment, index } of seg().segment(value)) {
    if (index >= clamped) {
      break
    }
    if (segment === '\n') {
      // Hard newline: jump to the start of the next visual row.
      line++
      col = 0
      continue
    }
    const cells = stringWidth(segment)
    if (!cells) {
      // Zero-width grapheme occupies no cell.
      continue
    }
    if (col + cells > width) {
      // Grapheme would overflow this row — char-wrap onto the next one.
      line++
      col = 0
    }
    col += cells
  }
  // trailing cursor-cell overflows to the next row at the wrap column
  if (col >= width) {
    line++
    col = 0
  }
  return { column: col, line }
}

/** Visual height in rows the input occupies when wrapped at `columns`. */
export function inputVisualHeight(value: string, columns: number) {
  return cursorLayout(value, value.length, columns).line + 1
}
export function stableComposerColumns(totalCols: number, promptWidth: number) {
  // Physical render/wrap width. Always reserve outer composer padding and
  // prompt prefix. Only reserve the transcript scrollbar gutter when the
  // terminal is wide enough; on narrow panes, preserving input columns beats
  // keeping gutters visually aligned.
  const afterPrompt = totalCols - promptWidth
  const gutter = afterPrompt >= 24 ? 2 : 0
  return Math.max(1, afterPrompt - 2 - gutter)
}

View File

@ -0,0 +1,116 @@
import { describe, expect, it } from 'vitest'
import type { Msg } from '../types.js'
import { appendToolShelfMessage, canHoldToolShelf, isTodoDone, mergeToolShelfInto } from './liveProgress.js'
// Todo-completion predicate: only a non-empty, fully settled list counts.
describe('isTodoDone', () => {
  it('only treats non-empty all-completed/cancelled lists as done', () => {
    expect(isTodoDone([])).toBe(false)
    expect(isTodoDone([{ content: 'x', id: 'x', status: 'completed' }])).toBe(true)
    expect(isTodoDone([{ content: 'x', id: 'x', status: 'in_progress' }])).toBe(false)
    expect(
      isTodoDone([
        { content: 'x', id: 'x', status: 'completed' },
        { content: 'y', id: 'y', status: 'cancelled' }
      ])
    ).toBe(true)
  })
})
// Shelf predicates and the pure merge helper.
describe('tool shelf helpers', () => {
  it('recognizes contextual thinking shelves as holders', () => {
    expect(canHoldToolShelf({ kind: 'trail', role: 'system', text: '', thinking: 'plan' })).toBe(true)
    expect(canHoldToolShelf({ kind: 'trail', role: 'system', text: '', tools: ['one ✓'] })).toBe(true)
    expect(canHoldToolShelf({ role: 'assistant', text: 'done' })).toBe(false)
  })
  it('merges source rows into an existing shelf', () => {
    expect(
      mergeToolShelfInto(
        { kind: 'trail', role: 'system', text: '', thinking: 'plan', tools: ['one ✓'] },
        { kind: 'trail', role: 'system', text: '', tools: ['two ✓'] }
      )
    ).toEqual({ kind: 'trail', role: 'system', text: '', thinking: 'plan', tools: ['one ✓', 'two ✓'] })
  })
})
// Reducer behavior: where an incoming tool-only shelf lands in the transcript.
describe('appendToolShelfMessage', () => {
  it('merges adjacent tool shelves into one contextual shelf', () => {
    const merged = appendToolShelfMessage([{ kind: 'trail', role: 'system', text: '', tools: ['one ✓'] }], {
      kind: 'trail',
      role: 'system',
      text: '',
      tools: ['two ✓']
    })
    expect(merged).toEqual([{ kind: 'trail', role: 'system', text: '', tools: ['one ✓', 'two ✓'] }])
  })
  it('adds tools to the nearest contextual thinking shelf', () => {
    const merged = appendToolShelfMessage(
      [{ kind: 'trail', role: 'system', text: '', thinking: 'plan', tools: ['one ✓'] }],
      { kind: 'trail', role: 'system', text: '', tools: ['two ✓'] }
    )
    expect(merged).toEqual([{ kind: 'trail', role: 'system', text: '', thinking: 'plan', tools: ['one ✓', 'two ✓'] }])
  })
  it('merges through intervening thinking-only rows back into the nearest holder', () => {
    const prev: Msg[] = [
      { kind: 'trail', role: 'system', text: '', thinking: 'plan', tools: ['one ✓'] },
      { kind: 'trail', role: 'system', text: '', thinking: 'more plan' }
    ]
    const merged = appendToolShelfMessage(prev, {
      kind: 'trail',
      role: 'system',
      text: '',
      tools: ['two ✓']
    })
    expect(merged).toHaveLength(2)
    expect(merged[0]).toEqual({
      kind: 'trail',
      role: 'system',
      text: '',
      thinking: 'plan',
      tools: ['one ✓', 'two ✓']
    })
    expect(merged[1]).toEqual({ kind: 'trail', role: 'system', text: '', thinking: 'more plan' })
  })
  it('collapses a chronological thinking/tool/thinking/tool stream into one shelf', () => {
    const events: Msg[] = [
      { kind: 'trail', role: 'system', text: '', thinking: 'plan' },
      { kind: 'trail', role: 'system', text: '', tools: ['one ✓'] },
      { kind: 'trail', role: 'system', text: '', thinking: 'more plan' },
      { kind: 'trail', role: 'system', text: '', tools: ['two ✓'] },
      { kind: 'trail', role: 'system', text: '', tools: ['three ✓'] }
    ]
    const reduced = events.reduce<Msg[]>((acc, msg) => appendToolShelfMessage(acc, msg), [])
    expect(reduced).toHaveLength(2)
    expect(reduced[0]).toEqual({
      kind: 'trail',
      role: 'system',
      text: '',
      thinking: 'plan',
      tools: ['one ✓', 'two ✓', 'three ✓']
    })
    expect(reduced[1]).toEqual({ kind: 'trail', role: 'system', text: '', thinking: 'more plan' })
  })
  it('starts a new shelf across assistant text boundaries', () => {
    const merged = appendToolShelfMessage(
      [
        { kind: 'trail', role: 'system', text: '', tools: ['one ✓'] },
        { role: 'assistant', text: 'done' }
      ],
      { kind: 'trail', role: 'system', text: '', tools: ['two ✓'] }
    )
    expect(merged).toHaveLength(3)
  })
})

View File

@ -0,0 +1,79 @@
import type { Msg, TodoItem } from '../types.js'
/** Number of todos still open (pending or actively in progress). */
export const countPendingTodos = (todos: readonly TodoItem[]) => {
  let open = 0
  for (const todo of todos) {
    if (todo.status === 'in_progress' || todo.status === 'pending') {
      open++
    }
  }
  return open
}

/** True only for a non-empty list where every item is completed or cancelled. */
export const isTodoDone = (todos: readonly TodoItem[]) => {
  if (!todos.length) {
    return false
  }
  return todos.every(todo => todo.status === 'completed' || todo.status === 'cancelled')
}

/** A pure tool shelf: trail row carrying tools but no text and no thinking. */
export const isToolShelfMessage = (msg: Msg | undefined) => {
  if (!msg || msg.kind !== 'trail' || msg.text) {
    return false
  }
  return !msg.thinking?.trim() && Boolean(msg.tools?.length)
}

/** A row that may absorb tool shelves: a textless trail with thinking or tools. */
export const canHoldToolShelf = (msg: Msg | undefined) => {
  if (!msg || msg.kind !== 'trail' || msg.text) {
    return false
  }
  return Boolean(msg.thinking?.trim() || msg.tools?.length)
}

/** Copy `target`, appending `source`'s tool rows after its own. */
export const mergeToolShelfInto = (target: Msg, source: Msg): Msg => {
  const tools = [...(target.tools ?? []), ...(source.tools ?? [])]
  return { ...target, tools }
}
// A row that terminates the backward shelf-merge scan.
const isBarrierMessage = (msg: Msg | undefined) => {
  if (!msg) {
    return true
  }
  // Assistant text, user input, intro/panel rows all terminate the shelf.
  if (msg.kind === 'intro' || msg.kind === 'panel' || msg.kind === 'diff') {
    return true
  }
  if (msg.role && msg.role !== 'system') {
    return true
  }
  return Boolean(msg.text)
}
// Trail row that already carries tools (and no text) — preferred merge target.
const isToolCarryingTrail = (msg: Msg | undefined) => Boolean(msg?.kind === 'trail' && !msg.text && msg.tools?.length)

/**
 * Append `msg`, folding tool-only shelves back into the nearest earlier
 * shelf/holder instead of creating a new transcript row.
 */
export const appendToolShelfMessage = (prev: readonly Msg[], msg: Msg): Msg[] => {
  if (!isToolShelfMessage(msg)) {
    return [...prev, msg]
  }
  let holderIndex: number | null = null
  for (let i = prev.length - 1; i >= 0; i--) {
    const row = prev[i]
    if (isToolCarryingTrail(row)) {
      // Nearest tool-carrying trail wins outright.
      const merged = [...prev]
      merged[i] = mergeToolShelfInto(row!, msg)
      return merged
    }
    if (holderIndex === null && canHoldToolShelf(row)) {
      // Remember the closest thinking-only holder as a fallback.
      holderIndex = i
    }
    if (isBarrierMessage(row)) {
      break
    }
  }
  if (holderIndex !== null) {
    const merged = [...prev]
    merged[holderIndex] = mergeToolShelfInto(prev[holderIndex]!, msg)
    return merged
  }
  return [...prev, msg]
}

View File

@ -1,3 +1,5 @@
import { evictInkCaches } from '@hermes/ink'
import { type HeapDumpResult, performHeapDump } from './memory.js'
export type MemoryLevel = 'critical' | 'high' | 'normal'
@ -39,6 +41,10 @@ export function startMemoryMonitor({
return
}
// Prune Ink content caches before dump/exit — half on 'high' (recoverable),
// full on 'critical' (post-dump RSS reduction, keeps user running).
evictInkCaches(level === 'critical' ? 'all' : 'half')
dumped.add(level)
const dump = await performHeapDump(level === 'critical' ? 'auto-critical' : 'auto-high').catch(() => null)

View File

@ -0,0 +1,29 @@
import { describe, expect, it } from 'vitest'
import { appendTranscriptMessage } from './messages.js'
// Transcript-level wiring check: appendTranscriptMessage delegates to the
// live-progress shelf merger, so adjacent tool shelves must still collapse.
describe('appendTranscriptMessage', () => {
  it('merges adjacent tool-only shelves into one transcript row', () => {
    const out = appendTranscriptMessage([{ kind: 'trail', role: 'system', text: '', tools: ['Terminal("one") ✓'] }], {
      kind: 'trail',
      role: 'system',
      text: '',
      tools: ['Terminal("two") ✓']
    })
    expect(out).toEqual([
      { kind: 'trail', role: 'system', text: '', tools: ['Terminal("one") ✓', 'Terminal("two") ✓'] }
    ])
  })
  it('merges tool shelves into the nearest thinking shelf', () => {
    const out = appendTranscriptMessage(
      [{ kind: 'trail', role: 'system', text: '', thinking: 'plan', tools: ['Terminal("one") ✓'] }],
      { kind: 'trail', role: 'system', text: '', tools: ['Terminal("two") ✓'] }
    )
    expect(out).toEqual([
      { kind: 'trail', role: 'system', text: '', thinking: 'plan', tools: ['Terminal("one") ✓', 'Terminal("two") ✓'] }
    ])
  })
})

View File

@ -1,4 +1,8 @@
import type { Msg, Role } from '../types.js'
import { appendToolShelfMessage } from './liveProgress.js'
/** Append a message to the transcript, collapsing tool shelves as in live view. */
export const appendTranscriptMessage = (prev: Msg[], msg: Msg): Msg[] => appendToolShelfMessage(prev, msg)

/** Replace the trailing message when the role repeats; otherwise append. */
export const upsert = (prev: Msg[], role: Role, text: string): Msg[] => {
  const last = prev.at(-1)
  if (last?.role === role) {
    return [...prev.slice(0, -1), { role, text }]
  }
  return [...prev, { role, text }]
}

107
ui-tui/src/lib/perfPane.tsx Normal file
View File

@ -0,0 +1,107 @@
// Perf instrumentation for the full render pipeline.
//
// PerfPane (React.Profiler) → per-pane commit times
// logFrameEvent (ink.onFrame) → yoga / renderer / diff / optimize / write
// phases + yoga counters + scroll fast-path
//
// Both gate on HERMES_DEV_PERF=1 and dump JSON-lines (default ~/.hermes/perf.log,
// override HERMES_DEV_PERF_LOG). Tagged { src: 'react' | 'frame' } for jq.
// HERMES_DEV_PERF_MS (default 2) skips sub-ms idle frames; set 0 to capture all.
//
// Zero cost when unset: PerfPane returns children directly, logFrameEvent is
// undefined so ink doesn't pay the timing cost.
import { appendFileSync, mkdirSync } from 'node:fs'
import { homedir } from 'node:os'
import { dirname, join } from 'node:path'
import type { FrameEvent } from '@hermes/ink'
import { scrollFastPathStats } from '@hermes/ink'
import { Profiler, type ProfilerOnRenderCallback, type ReactNode } from 'react'
// Gate flag, sampling threshold and log destination, all resolved once at import.
const ENABLED = /^(?:1|true|yes|on)$/i.test((process.env.HERMES_DEV_PERF ?? '').trim())
// Rows faster than this are skipped; HERMES_DEV_PERF_MS=0 captures everything.
const THRESHOLD_MS = Number(process.env.HERMES_DEV_PERF_MS ?? '2') || 0
const LOG_PATH = process.env.HERMES_DEV_PERF_LOG?.trim() || join(homedir(), '.hermes', 'perf.log')
// First-write latch so the log directory is created at most once per process.
let logReady = false
// Append one JSON-lines row to the perf log; both FS calls are best-effort.
const writeRow = (row: Record<string, unknown>) => {
  if (!logReady) {
    logReady = true
    try {
      mkdirSync(dirname(LOG_PATH), { recursive: true })
    } catch {
      // Best-effort — never crash the TUI to log a sample.
    }
  }
  try {
    appendFileSync(LOG_PATH, `${JSON.stringify(row)}\n`)
  } catch {
    /* best-effort */
  }
}
/** Round to two decimal places for compact JSON rows. */
const round2 = (n: number) => Math.round(n * 100) / 100

// React.Profiler callback: emits one src:'react' row per pane commit that
// exceeds the sampling threshold.
const onRender: ProfilerOnRenderCallback = (id, phase, actualMs, baseMs, startTime, commitTime) => {
  if (actualMs < THRESHOLD_MS) {
    return
  }
  const row = {
    actualMs: round2(actualMs),
    baseMs: round2(baseMs),
    commitTimeMs: round2(commitTime),
    id,
    phase,
    src: 'react',
    startTimeMs: round2(startTime),
    ts: Date.now()
  }
  writeRow(row)
}
/**
 * Wraps `children` in a React.Profiler pane when HERMES_DEV_PERF is enabled.
 * With profiling off this returns the children directly — zero overhead.
 */
export function PerfPane({ children, id }: { children: ReactNode; id: string }) {
  if (!ENABLED) {
    return children
  }
  return (
    <Profiler id={id} onRender={onRender}>
      {children}
    </Profiler>
  )
}
// Ink onFrame hook: one src:'frame' JSON row per frame above the threshold.
// Undefined when disabled so ink's optional-chained callback costs nothing.
export const logFrameEvent = ENABLED
  ? (event: FrameEvent) => {
      if (event.durationMs < THRESHOLD_MS) {
        return
      }
      writeRow({
        durationMs: round2(event.durationMs),
        // Cumulative counters — consumers diff pairs to get per-frame deltas.
        fastPath: { ...scrollFastPathStats, declined: { ...scrollFastPathStats.declined } },
        flickers: event.flickers.length ? event.flickers : undefined,
        phases: event.phases
          ? {
              ...event.phases,
              commit: round2(event.phases.commit),
              diff: round2(event.phases.diff),
              optimize: round2(event.phases.optimize),
              prevFrameDrainMs: round2(event.phases.prevFrameDrainMs),
              renderer: round2(event.phases.renderer),
              write: round2(event.phases.write),
              yoga: round2(event.phases.yoga)
            }
          : undefined,
        src: 'frame',
        ts: Date.now()
      })
    }
  : undefined
// Re-exported so callers can surface perf state in a debug UI.
export const PERF_ENABLED = ENABLED
export const PERF_LOG_PATH = LOG_PATH

View File

@ -1,4 +1,11 @@
import { THINKING_COT_MAX } from '../config/limits.js'
import {
HISTORY_RENDER_MAX_CHARS,
HISTORY_RENDER_MAX_LINES,
LIVE_RENDER_MAX_CHARS,
LIVE_RENDER_MAX_LINES,
THINKING_COT_MAX
} from '../config/limits.js'
import { VERBS } from '../content/verbs.js'
import type { ThinkingMode } from '../types.js'
const ESC = String.fromCharCode(27)
@ -70,12 +77,91 @@ export const pasteTokenLabel = (text: string, lineCount: number) => {
: `[[ ${preview} [${fmtK(lineCount)} lines] ]]`
}
// Alternation of live status verbs (e.g. "Thinking"), optionally dot-suffixed.
const verbAlternation = VERBS.join('|')
const THINKING_STATUS_RE = new RegExp(`^(?:${verbAlternation})\\.{0,3}$`, 'i')
const THINKING_STATUS_CHUNK_RE = new RegExp(`[^A-Za-z\n]+\\s*(?:${verbAlternation})\\.{0,3}\\s*`, 'giu')

/**
 * Strip streaming status chatter ("Thinking…" etc.) from raw reasoning text
 * and normalize blank-line spacing around bold headings.
 */
export const cleanThinkingText = (reasoning: string) => {
  const kept: string[] = []
  for (const rawLine of reasoning.split('\n')) {
    const line = rawLine.replace(THINKING_STATUS_CHUNK_RE, '').trim()
    if (line && !THINKING_STATUS_RE.test(line.replace(/\.\.\.$/, '').trim())) {
      kept.push(line)
    }
  }
  return kept
    .join('\n')
    .replace(/([^\n])(?=\*\*[^*\n][^\n]*?\*\*)/g, '$1\n\n')
    .replace(/\n{3,}/g, '\n\n')
    .trim()
}
/**
 * Render reasoning for the given thinking mode: '' when collapsed, the full
 * cleaned text in 'full' mode, otherwise a single-line compact preview.
 *
 * Fix: the span contained two declarations of `raw` (stale pre-edit line left
 * behind by a merge) — a SyntaxError. Keep the cleanThinkingText-based one.
 */
export const thinkingPreview = (reasoning: string, mode: ThinkingMode, max: number = THINKING_COT_MAX) => {
  // Clean first so status chatter never leaks into the preview.
  const raw = cleanThinkingText(reasoning)
  return !raw || mode === 'collapsed' ? '' : mode === 'full' ? raw : compactPreview(raw.replace(WS_RE, ' '), max)
}
/** Bound live-streaming text by the live tail limits (label "showing live tail"). */
export const boundedLiveRenderText = (
  text: string,
  { maxChars = LIVE_RENDER_MAX_CHARS, maxLines = LIVE_RENDER_MAX_LINES } = {}
) => boundedRenderText(text, 'showing live tail', { maxChars, maxLines })
/** Bound settled transcript text by the (larger) history limits (label "showing tail"). */
export const boundedHistoryRenderText = (
  text: string,
  { maxChars = HISTORY_RENDER_MAX_CHARS, maxLines = HISTORY_RENDER_MAX_LINES } = {}
) => boundedRenderText(text, 'showing tail', { maxChars, maxLines })
// Shared tail-truncation: keep at most `maxLines` lines / `maxChars` chars from
// the END of `text`, prefixed by a "[<labelPrefix>; omitted …]" marker line.
const boundedRenderText = (
  text: string,
  labelPrefix: string,
  { maxChars, maxLines }: { maxChars: number; maxLines: number }
) => {
  // Fast path: within both limits (limited split avoids scanning huge text).
  if (text.length <= maxChars && text.split('\n', maxLines + 1).length <= maxLines) {
    return text
  }
  // Walk backwards over up to maxLines newlines to find the line-based cut.
  let start = 0
  let idx = text.length
  for (let seen = 0; seen < maxLines && idx > 0; seen++) {
    idx = text.lastIndexOf('\n', idx - 1)
    start = idx < 0 ? 0 : idx + 1
    if (idx < 0) {
      break
    }
  }
  const lineStart = start
  // Also honor the char budget; whichever cut keeps less wins.
  start = Math.max(lineStart, text.length - maxChars)
  if (start > lineStart) {
    // Char cut landed mid-line: advance to the next line boundary so the tail
    // starts on a whole line (unless that would drop the entire remainder).
    const nextBreak = text.indexOf('\n', start)
    if (nextBreak >= 0 && nextBreak < text.length - 1) {
      start = nextBreak + 1
    }
  }
  const tail = text.slice(start).trimStart()
  const omittedLines = countNewlines(text, start)
  const omittedChars = Math.max(0, text.length - tail.length)
  const label =
    omittedLines > 0
      ? `[${labelPrefix}; omitted ${fmtK(omittedLines)} lines / ${fmtK(omittedChars)} chars]\n`
      : `[${labelPrefix}; omitted ${fmtK(omittedChars)} chars]\n`
  return `${label}${tail}`
}
// Count '\n' characters in text[0, end) without allocating substrings.
const countNewlines = (text: string, end: number) => {
  let total = 0
  let at = text.indexOf('\n')
  while (at >= 0 && at < end) {
    total++
    at = text.indexOf('\n', at + 1)
  }
  return total
}
export const stripTrailingPasteNewlines = (text: string) => (/[^\n]/.test(text) ? text.replace(/\n+$/, '') : text)
export const toolTrailLabel = (name: string) =>
@ -92,10 +178,17 @@ export const formatToolCall = (name: string, context = '') => {
return preview ? `${label}("${preview}")` : label
}
export const buildToolTrailLine = (name: string, context: string, error?: boolean, note?: string) => {
export const buildToolTrailLine = (
name: string,
context: string,
error?: boolean,
note?: string,
duration?: number
) => {
const detail = compactPreview(note ?? '', 72)
const took = duration !== undefined ? ` (${duration.toFixed(1)}s)` : ''
return `${formatToolCall(name, context)}${detail ? ` :: ${detail}` : ''} ${error ? ' ✗' : ' ✓'}`
return `${formatToolCall(name, context)}${took}${detail ? ` :: ${detail}` : ''} ${error ? '✗' : '✓'}`
}
export const isToolTrailResultLine = (line: string) => line.endsWith(' ✓') || line.endsWith(' ✗')
@ -122,6 +215,12 @@ export const parseToolTrailResultLine = (line: string) => {
return { call: body, detail: '', mark }
}
/** Split a trail call label into its text and trailing " (N.Ns)" duration, if any. */
export const splitToolDuration = (call: string) => {
  const match = /^(.*?)( \(\d+(?:\.\d)?s\))$/.exec(call)
  if (!match) {
    return { label: call, duration: '' }
  }
  return { label: match[1]!, duration: match[2]! }
}
export const isTransientTrailLine = (line: string) => line.startsWith('drafting ') || line === 'analyzing tool output…'
export const sameToolTrailGroup = (label: string, entry: string) =>

View File

@ -0,0 +1,21 @@
import { describe, expect, it } from 'vitest'
import { todoGlyph, todoTone } from './todo.js'
// Pin the pure lookup tables so a stray emoji glyph or color regression in the
// todo list surfaces here first.
describe('todoGlyph', () => {
  it('uses fixed-width ASCII markers so the active row does not render wide or emoji-like', () => {
    expect(todoGlyph('completed')).toBe('[x]')
    expect(todoGlyph('in_progress')).toBe('[>]')
    expect(todoGlyph('pending')).toBe('[ ]')
    expect(todoGlyph('cancelled')).toBe('[-]')
  })
})
describe('todoTone', () => {
  it('keeps todo status rows neutral instead of red/green', () => {
    expect(todoTone('completed')).toBe('dim')
    expect(todoTone('cancelled')).toBe('dim')
    expect(todoTone('pending')).toBe('body')
    expect(todoTone('in_progress')).toBe('active')
  })
})

9
ui-tui/src/lib/todo.ts Normal file
View File

@ -0,0 +1,9 @@
import type { TodoItem } from '../types.js'
export type TodoTone = 'active' | 'body' | 'dim'

/** Fixed-width ASCII status marker — never wide/emoji glyphs. */
export const todoGlyph = (status: TodoItem['status']) => {
  switch (status) {
    case 'completed':
      return '[x]'
    case 'cancelled':
      return '[-]'
    case 'in_progress':
      return '[>]'
    default:
      return '[ ]'
  }
}

/** Neutral tone mapping: only the active row stands out. */
export const todoTone = (status: TodoItem['status']): TodoTone => {
  if (status === 'in_progress') {
    return 'active'
  }
  return status === 'pending' ? 'body' : 'dim'
}

View File

@ -0,0 +1,67 @@
import type { ScrollBoxHandle } from '@hermes/ink'
import type { RefObject } from 'react'
import { useCallback, useMemo, useSyncExternalStore } from 'react'
export interface ViewportSnapshot {
  atBottom: boolean
  bottom: number
  pending: number
  scrollHeight: number
  top: number
  viewportHeight: number
}

// Snapshot used before the scrollbox mounts (and as the initial server value).
const EMPTY: ViewportSnapshot = {
  atBottom: true,
  bottom: 0,
  pending: 0,
  scrollHeight: 0,
  top: 0,
  viewportHeight: 0
}

/** Read a consistent viewport snapshot from a scrollbox handle (EMPTY when absent). */
export function getViewportSnapshot(s?: ScrollBoxHandle | null): ViewportSnapshot {
  if (!s) {
    return EMPTY
  }
  // Fold the not-yet-applied scroll delta into the visible top edge.
  const pending = s.getPendingDelta()
  const top = Math.max(0, s.getScrollTop() + pending)
  const viewportHeight = Math.max(0, s.getViewportHeight())
  const scrollHeight = Math.max(viewportHeight, s.getScrollHeight())
  const bottom = top + viewportHeight
  // Sticky mode, or being within two rows of the end, counts as "at bottom".
  const atBottom = s.isSticky() || bottom >= scrollHeight - 2
  return { atBottom, bottom, pending, scrollHeight, top, viewportHeight }
}
/** Quantized cache key: top/scrollHeight round up to 8-row buckets to damp churn. */
export function viewportSnapshotKey(v: ViewportSnapshot) {
  const bucket = (n: number) => Math.ceil(n / 8) * 8
  const parts = [v.atBottom ? 1 : 0, bucket(v.top), v.viewportHeight, bucket(v.scrollHeight), v.pending]
  return parts.join(':')
}
/**
 * React hook: live viewport snapshot for a scrollbox ref.
 * Subscribes via useSyncExternalStore on the quantized string key from
 * viewportSnapshotKey, so re-renders only fire when a bucketed value moves.
 * NOTE(review): the decoded `top`/`scrollHeight` are the 8-row-bucketed values,
 * not exact offsets — consumers appear to rely only on coarse position.
 */
export function useViewportSnapshot(scrollRef: RefObject<ScrollBoxHandle | null>): ViewportSnapshot {
  const key = useSyncExternalStore(
    // No-op unsubscribe when the handle has not mounted yet.
    useCallback((cb: () => void) => scrollRef.current?.subscribe(cb) ?? (() => {}), [scrollRef]),
    () => viewportSnapshotKey(getViewportSnapshot(scrollRef.current)),
    () => viewportSnapshotKey(EMPTY)
  )
  return useMemo(() => {
    // Decode the key back into a snapshot; defaults mirror EMPTY.
    const [atBottom = '1', top = '0', viewportHeight = '0', scrollHeight = '0', pending = '0'] = key.split(':')
    return {
      atBottom: atBottom === '1',
      bottom: Number(top) + Number(viewportHeight),
      pending: Number(pending),
      scrollHeight: Number(scrollHeight),
      top: Number(top),
      viewportHeight: Number(viewportHeight)
    }
  }, [key])
}

View File

@ -0,0 +1,78 @@
import type { Msg } from '../types.js'
import { boundedHistoryRenderText } from './text.js'
// djb2-xor string hash over UTF-16 code units, folded to an unsigned base-36 token.
const hashText = (text: string) => {
  let acc = 5381
  for (let i = 0; i < text.length; i++) {
    acc = ((acc << 5) + acc) ^ text.charCodeAt(i)
  }
  return (acc >>> 0).toString(36)
}
/** Cheap content signature used to invalidate cached row heights. */
export const messageHeightKey = (msg: Msg) => {
  const todoSig = msg.todos?.map(t => `${t.status}:${t.content}`).join('\u0001') ?? ''
  const panelSig =
    msg.panelData?.sections
      .map(s => `${s.title ?? ''}:${s.text?.length ?? 0}:${s.items?.length ?? 0}:${s.rows?.length ?? 0}`)
      .join('\u0001') ?? ''
  const introSig = msg.kind === 'intro' ? (msg.info?.version ?? '') : ''
  // Hash the variable-size parts; keep role/kind readable for debugging.
  const bodySig = [msg.text, msg.thinking ?? '', msg.tools?.join('\n') ?? '', todoSig, panelSig, introSig].join('\0')
  return `${msg.role}:${msg.kind ?? ''}:${hashText(bodySig)}`
}
/** Rows `text` occupies when char-wrapped at `width` (empty lines still take one). */
export const wrappedLines = (text: string, width: number) => {
  const cols = Math.max(1, width)
  let rows = 0
  for (const line of text.split('\n')) {
    rows += Math.max(1, Math.ceil(line.length / cols))
  }
  return rows
}
/**
 * Cheap height estimate (rows) for a transcript message before real
 * measurement; seeds the virtualizer until exact heights arrive.
 * `compact` collapses assistant paragraph spacing, `details` adds
 * tools/thinking rows, `limitHistory` applies history tail-truncation first.
 * NOTE(review): row constants (9/5 intro, 2-row frames, cols-5 gutter) mirror
 * the renderers — keep in sync with them.
 */
export const estimatedMsgHeight = (
  msg: Msg,
  cols: number,
  { compact, details, limitHistory = false }: { compact: boolean; details: boolean; limitHistory?: boolean }
) => {
  // Fixed-shape rows first: intro banner, panels and todo lists.
  if (msg.kind === 'intro') {
    return msg.info?.version ? 9 : 5
  }
  if (msg.kind === 'panel') {
    return Math.max(3, (msg.panelData?.sections.length ?? 1) * 2 + 1)
  }
  if (msg.kind === 'trail' && msg.todos?.length) {
    if (msg.todoCollapsedByDefault) {
      return 2
    }
    return Math.max(2, msg.todos.length + 2)
  }
  // Text rows: wrap at the body width (≈5 cols of gutter/prefix reserved).
  const bodyWidth = Math.max(20, cols - 5)
  const text = msg.role === 'assistant' && limitHistory ? boundedHistoryRenderText(msg.text) : msg.text
  let h = wrappedLines(text || ' ', bodyWidth)
  if (!compact && msg.role === 'assistant') {
    // Paragraph gaps add spacing rows in non-compact mode (capped at 6).
    h += Math.min(6, (text.match(/\n\s*\n/g) ?? []).length)
  }
  if (details) {
    h += (msg.tools?.length ?? 0) + wrappedLines(msg.thinking ?? '', bodyWidth)
  }
  // Role/kind chrome: user rows and diffs carry a 2-row frame, slash rows 1.
  if (msg.role === 'user' || msg.kind === 'diff') {
    h += 2
  } else if (msg.kind === 'slash') {
    h++
  }
  return Math.max(1, h)
}

View File

@ -0,0 +1,190 @@
// Wheel-scroll acceleration state machine.
//
// One event = 1 row feels sluggish on trackpads (200+ ev/s) and sustained
// mouse-wheel; one event = 6 rows teleports and ruins precision.
// Heuristic on inter-event gap + direction flips:
//
// gap < 5ms → same-batch burst → 1 row/event
// gap < 40ms (native) → ramp +0.3, cap 6
// gap 80-500ms (xterm.js) → mult = 1 + (mult-1)·0.5^(gap/150) + 5·decay
// cap 3 slow / 6 fast
// gap > 500ms → reset (deliberate click stays responsive)
// flip + flip-back ≤200ms → encoder bounce → engage wheel-mode (sticky cap)
// 5 consecutive <5ms events → trackpad flick → disengage wheel-mode
//
// Native terminals (Ghostty, iTerm2) and xterm.js embedders (VS Code,
// Cursor) emit wheel events with different cadences, hence two paths.
import { isXtermJs } from '@hermes/ink'
// ── Native (ghostty, iTerm2, WezTerm, …) ───────────────────────────────
// Ramp window, per-event increment, and rows/event ceiling for hi-res wheels.
const WHEEL_ACCEL_WINDOW_MS = 40
const WHEEL_ACCEL_STEP = 0.3
const WHEEL_ACCEL_MAX = 6
// ── Encoder bounce / wheel-mode (mechanical wheels) ────────────────────
// A flip-back must land within this gap to count as encoder bounce.
const WHEEL_BOUNCE_GAP_MAX_MS = 200
// Wheel-mode decay kick, hard cap, and per-event ramp limit.
const WHEEL_MODE_STEP = 15
const WHEEL_MODE_CAP = 15
const WHEEL_MODE_RAMP = 3
// Idle longer than this drops wheel-mode back to baseline.
const WHEEL_MODE_IDLE_DISENGAGE_MS = 1500
// ── xterm.js (VS Code / Cursor / browser terminals) ────────────────────
// Exponential-decay half-life and per-event kick for the xterm.js multiplier.
const WHEEL_DECAY_HALFLIFE_MS = 150
const WHEEL_DECAY_STEP = 5
// Events closer than this are one physical batch; gap decides slow/fast cap.
const WHEEL_BURST_MS = 5
const WHEEL_DECAY_GAP_MS = 80
const WHEEL_DECAY_CAP_SLOW = 3
const WHEEL_DECAY_CAP_FAST = 6
// Gaps beyond this count as a deliberate new scroll, resetting the ramp.
const WHEEL_DECAY_IDLE_MS = 500
export type WheelAccelState = {
  /** Timestamp (ms) of the last processed wheel event. */
  time: number
  /** Current rows-per-event multiplier. */
  mult: number
  /** Last scroll direction; 0 until the first event. */
  dir: 0 | 1 | -1
  /** True when running under an xterm.js embedder (separate cadence path). */
  xtermJs: boolean
  /** Carried fractional scroll (xterm.js). scrollBy floors, so without
   * this a mult of 1.5 always gives 1 row; carrying the remainder gives
   * 1,2,1,2 correct throughput over time. */
  frac: number
  /** Native baseline rows/event. Reset on idle/reversal; ramp builds on
   * top. xterm.js path ignores. */
  base: number
  /** Deferred direction flip (native): bounce vs reversal next event
   * decides. */
  pendingFlip: boolean
  /** Sticky once a flip-then-flip-back fires within the bounce window.
   * Cleared by idle disengage or trackpad burst. */
  wheelMode: boolean
  /** Consecutive <5ms events. ≥5 → trackpad flick → disengage. */
  burstCount: number
}
/** Fresh acceleration state (native path by default, baseline 1 row/event). */
export function initWheelAccel(xtermJs = false, base = 1): WheelAccelState {
  return {
    base,
    burstCount: 0,
    dir: 0,
    frac: 0,
    mult: base,
    pendingFlip: false,
    time: 0,
    wheelMode: false,
    xtermJs
  }
}
/** HERMES_TUI_SCROLL_SPEED (or CLAUDE_CODE_SCROLL_SPEED for portability).
 * Default 1, clamped (0, 20]. */
export function readScrollSpeedBase(): number {
  const raw = process.env.HERMES_TUI_SCROLL_SPEED ?? process.env.CLAUDE_CODE_SCROLL_SPEED ?? ''
  const parsed = parseFloat(raw)
  if (!Number.isFinite(parsed) || parsed <= 0) {
    return 1
  }
  return Math.min(parsed, 20)
}
/** Acceleration state tuned for the detected host terminal and user speed. */
export function initWheelAccelForHost(): WheelAccelState {
  const base = readScrollSpeedBase()
  return initWheelAccel(isXtermJs(), base)
}
/** Compute rows for one wheel event, mutating `state`. Returns 0 when a
 * direction flip is deferred for bounce detection call sites should
 * no-op on 0. */
export function computeWheelStep(state: WheelAccelState, dir: -1 | 1, now: number): number {
  if (state.xtermJs) {
    return xtermJsStep(state, dir, now)
  }
  return nativeStep(state, dir, now)
}
// Native-terminal step: bounce-aware wheel-mode plus a tight trackpad ramp.
// Mutates `state`; returns rows for this event (0 when a flip is deferred).
function nativeStep(state: WheelAccelState, dir: -1 | 1, now: number): number {
  // Idle disengage runs first so a pending bounce can't mask "user paused
  // 1.5s then mouse-clicked" as a real reversal.
  if (state.wheelMode && now - state.time > WHEEL_MODE_IDLE_DISENGAGE_MS) {
    state.wheelMode = false
    state.burstCount = 0
    state.mult = state.base
  }
  // Resolve a previously deferred flip: bounce (engage wheel-mode) or reversal.
  if (state.pendingFlip) {
    state.pendingFlip = false
    if (dir !== state.dir || now - state.time > WHEEL_BOUNCE_GAP_MAX_MS) {
      // Real reversal (flip persisted OR flip-back too late). Commit.
      // The deferred event's 1 row is lost — acceptable latency.
      state.dir = dir
      state.time = now
      state.mult = state.base
      return Math.floor(state.mult)
    }
    state.wheelMode = true
  }
  const gap = now - state.time
  // New flip: defer one event so the next one can disambiguate bounce/reversal.
  if (dir !== state.dir && state.dir !== 0) {
    state.pendingFlip = true
    state.time = now
    return 0
  }
  state.dir = dir
  state.time = now
  if (state.wheelMode) {
    if (gap < WHEEL_BURST_MS) {
      // Same-batch burst (SGR proportional) OR trackpad flick. 1 row/event;
      // trackpad flick trips the burst-count disengage.
      if (++state.burstCount >= 5) {
        state.wheelMode = false
        state.burstCount = 0
        state.mult = state.base
      } else {
        return 1
      }
    } else {
      state.burstCount = 0
    }
  }
  // Wheel-mode ramp: decayed carry-over plus a decayed kick, triple-capped.
  if (state.wheelMode) {
    const m = Math.pow(0.5, gap / WHEEL_DECAY_HALFLIFE_MS)
    const cap = Math.max(WHEEL_MODE_CAP, state.base * 2)
    const next = 1 + (state.mult - 1) * m + WHEEL_MODE_STEP * m
    state.mult = Math.min(cap, next, state.mult + WHEEL_MODE_RAMP)
    return Math.floor(state.mult)
  }
  // Trackpad / hi-res native: tight 40ms window — sub-window ramps,
  // anything slower resets to baseline.
  if (gap > WHEEL_ACCEL_WINDOW_MS) {
    state.mult = state.base
  } else {
    const cap = Math.max(WHEEL_ACCEL_MAX, state.base * 2)
    state.mult = Math.min(cap, state.mult + WHEEL_ACCEL_STEP)
  }
  return Math.floor(state.mult)
}
// xterm.js cadence: exponential decay toward 1 with a fixed per-event kick,
// capped differently for slow vs fast trains; fractional rows carry over.
function xtermJsStep(state: WheelAccelState, dir: -1 | 1, now: number): number {
  const gap = now - state.time
  const sameDirection = dir === state.dir
  state.time = now
  state.dir = dir
  if (sameDirection && gap < WHEEL_BURST_MS) {
    // Same-batch burst: one row per event keeps flicks precise.
    return 1
  }
  if (!sameDirection || gap > WHEEL_DECAY_IDLE_MS) {
    // Reversal or long idle — start at 2 so first click after a pause moves visibly.
    state.mult = 2
    state.frac = 0
  } else {
    const decay = Math.pow(0.5, gap / WHEEL_DECAY_HALFLIFE_MS)
    const cap = gap >= WHEEL_DECAY_GAP_MS ? WHEEL_DECAY_CAP_SLOW : WHEEL_DECAY_CAP_FAST
    state.mult = Math.min(cap, 1 + (state.mult - 1) * decay + WHEEL_DECAY_STEP * decay)
  }
  const withCarry = state.mult + state.frac
  const rows = Math.floor(withCarry)
  state.frac = withCarry - rows
  return rows
}

View File

@ -5,6 +5,12 @@ export interface ActiveTool {
startedAt?: number
}
export interface TodoItem {
content: string
id: string
status: 'cancelled' | 'completed' | 'in_progress' | 'pending'
}
export interface ActivityItem {
id: number
text: string
@ -110,6 +116,9 @@ export interface Msg {
thinkingTokens?: number
toolTokens?: number
tools?: string[]
todos?: TodoItem[]
todoIncomplete?: boolean
todoCollapsedByDefault?: boolean
}
export type Role = 'assistant' | 'system' | 'tool' | 'user'
@ -133,8 +142,11 @@ export interface McpServerStatus {
export interface SessionInfo {
cwd?: string
fast?: boolean
mcp_servers?: McpServerStatus[]
model: string
reasoning_effort?: string
service_tier?: string
release_date?: string
skills: Record<string, string[]>
tools: Record<string, string[]>

View File

@ -33,11 +33,39 @@ declare module '@hermes/ink' {
export type InputHandler = (input: string, key: Key, event: InputEvent) => void
export type FrameEvent = {
readonly durationMs: number
readonly phases?: {
readonly renderer: number
readonly diff: number
readonly optimize: number
readonly write: number
readonly patches: number
readonly optimizedPatches: number
readonly writeBytes: number
readonly backpressure: boolean
readonly prevFrameDrainMs: number
readonly yoga: number
readonly commit: number
readonly yogaVisited: number
readonly yogaMeasured: number
readonly yogaCacheHits: number
readonly yogaLive: number
}
readonly flickers: ReadonlyArray<{
readonly desiredHeight: number
readonly availableHeight: number
readonly reason: 'resize' | 'offscreen' | 'clear'
}>
}
export type RenderOptions = {
readonly stdin?: NodeJS.ReadStream
readonly stdout?: NodeJS.WriteStream
readonly stderr?: NodeJS.WriteStream
readonly exitOnCtrlC?: boolean
readonly patchConsole?: boolean
readonly onFrame?: (event: FrameEvent) => void
}
export type Instance = {
@ -57,8 +85,10 @@ declare module '@hermes/ink' {
readonly getScrollHeight: () => number
readonly getViewportHeight: () => number
readonly getViewportTop: () => number
readonly getLastManualScrollAt: () => number
readonly isSticky: () => boolean
readonly subscribe: (listener: () => void) => () => void
readonly setClampBounds: (min: number | undefined, max: number | undefined) => void
}
export const Box: React.ComponentType<any>
@ -74,6 +104,32 @@ declare module '@hermes/ink' {
export const Text: React.ComponentType<any>
export const TextInput: React.ComponentType<any>
export const stringWidth: (s: string) => number
export function isXtermJs(): boolean
export type ScrollFastPathStats = {
captured: number
taken: number
declined: {
noPrevScreen: number
heightDeltaMismatch: number
other: number
}
lastDeclineReason?: string
lastHeightDelta?: number
lastHintDelta?: number
lastScrollHeight?: number
lastPrevHeight?: number
}
export const scrollFastPathStats: ScrollFastPathStats
export type EvictLevel = 'all' | 'half'
export type InkCacheSizes = {
readonly lineWidth: number
readonly slice: number
readonly width: number
readonly wrap: number
}
export function evictInkCaches(level?: EvictLevel): InkCacheSizes
export function render(node: React.ReactNode, options?: NodeJS.WriteStream | RenderOptions): Instance