Merge pull request #208 from Molecule-AI/feat/hermes-phase1-provider-registry

feat(hermes): Phase 1 — multi-provider registry (15 providers, 26 tests, back-compat preserved)
This commit is contained in:
Hongming Wang 2026-04-15 11:21:05 -07:00 committed by GitHub
commit be53a33546
3 changed files with 523 additions and 58 deletions

View File

@ -1,89 +1,102 @@
"""Hermes adapter executor — implements create_executor() for PR 2.
"""Hermes adapter executor — Phase 1 multi-provider.
Hermes models (Nous Research) are accessed via an OpenAI-compatible API,
either through the Nous Portal directly or via OpenRouter as a fallback.
Hermes models are accessed via an OpenAI-compatible API. Phase 1 supports 15
providers via the shared ``providers.py`` registry: Nous Portal, OpenRouter,
OpenAI, Anthropic, xAI, Gemini, Qwen, GLM, Kimi, MiniMax, DeepSeek, Groq,
Together, Fireworks, Mistral. Every provider is reached through an OpenAI-compat
``/v1/chat/completions`` endpoint, so one code path handles all of them.
Key resolution order
--------------------
1. ``hermes_api_key`` parameter (explicit call-site override)
2. ``HERMES_API_KEY`` environment variable (Nous Portal key)
3. ``OPENROUTER_API_KEY`` environment variable (OpenRouter fallback)
Key resolution order (unchanged from PR 2, extended)
-----------------------------------------------------
1. ``hermes_api_key`` parameter (explicit call-site override routes to Nous Portal)
2. ``provider`` parameter (explicit provider name looks up its env var(s))
3. Auto-detect: walk ``providers.RESOLUTION_ORDER`` and pick the first provider
whose env var is set (``HERMES_API_KEY`` / ``OPENROUTER_API_KEY`` still come
first so PR 2 back-compat holds).
Raises ``ValueError`` if none of the three sources yields a non-empty key.
Raises ``ValueError`` if nothing resolves. The error message lists every env var
that was checked so the operator knows their options without reading source.
"""
from __future__ import annotations
import logging
import os
from typing import Optional
from .providers import PROVIDERS, resolve_provider
logger = logging.getLogger(__name__)
# Default base URLs
_NOUS_BASE_URL = "https://inference-prod.nousresearch.com/v1"
_OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"
# Default model when routing through OpenRouter
_DEFAULT_MODEL = "nousresearch/hermes-3-llama-3.1-405b"
def create_executor(hermes_api_key: str | None = None):
def create_executor(
hermes_api_key: Optional[str] = None,
provider: Optional[str] = None,
model: Optional[str] = None,
):
"""Create and return a LangGraph-compatible executor for the Hermes adapter.
Key resolution order:
1. hermes_api_key parameter (if provided)
2. HERMES_API_KEY environment variable
3. OPENROUTER_API_KEY environment variable (fallback)
Raises ValueError if none of the above are found.
Parameters
----------
hermes_api_key:
Explicit API key. When provided, the Nous Portal base URL is used.
When absent and OPENROUTER_API_KEY is the fallback, OpenRouter's
base URL is used instead.
Explicit API key. When provided, the call routes to Nous Portal (the
PR 2 back-compat path) regardless of ``provider``.
provider:
Canonical provider short name from ``providers.PROVIDERS`` (e.g.
``"openai"``, ``"anthropic"``, ``"qwen"``, ``"xai"``). When set, the
registry entry's env vars are used to find the API key and its
base URL + default model override the auto-detect path. When unset,
auto-detect walks ``providers.RESOLUTION_ORDER`` until it finds a
provider whose env var is set.
model:
Override the provider's default model. Passed straight through to
``chat.completions.create``.
Returns
-------
HermesA2AExecutor
A ready-to-use executor instance wired with the resolved key
and matching base URL.
A ready-to-use executor wired with the resolved api_key + base_url
+ model.
Raises
------
ValueError
If ``provider`` is an unknown name, if ``provider`` is known but its
env vars are all empty, or if auto-detect finds nothing.
"""
api_key: str | None = None
base_url: str = _NOUS_BASE_URL
# Path 1: PR 2 back-compat — explicit hermes_api_key routes to Nous Portal.
if hermes_api_key:
api_key = hermes_api_key
base_url = _NOUS_BASE_URL
logger.debug("Hermes: using explicit hermes_api_key param")
else:
env_hermes = os.environ.get("HERMES_API_KEY", "").strip()
if env_hermes:
api_key = env_hermes
base_url = _NOUS_BASE_URL
logger.debug("Hermes: using HERMES_API_KEY env var")
else:
env_openrouter = os.environ.get("OPENROUTER_API_KEY", "").strip()
if env_openrouter:
api_key = env_openrouter
base_url = _OPENROUTER_BASE_URL
logger.debug("Hermes: using OPENROUTER_API_KEY env var (fallback)")
if not api_key:
raise ValueError(
"No API key found: provide hermes_api_key param, "
"or set HERMES_API_KEY or OPENROUTER_API_KEY env var"
cfg = PROVIDERS["nous_portal"]
logger.debug("Hermes: using explicit hermes_api_key param (Nous Portal)")
return HermesA2AExecutor(
api_key=hermes_api_key,
base_url=cfg.base_url,
model=model or cfg.default_model,
)
return HermesA2AExecutor(api_key=api_key, base_url=base_url)
# Path 2/3: registry resolution (either explicit provider name or auto-detect).
cfg, api_key = resolve_provider(provider)
logger.info(
"Hermes: provider=%s base_url=%s model=%s",
cfg.name,
cfg.base_url,
model or cfg.default_model,
)
return HermesA2AExecutor(
api_key=api_key,
base_url=cfg.base_url,
model=model or cfg.default_model,
)
class HermesA2AExecutor:
"""LangGraph-compatible AgentExecutor for Hermes models.
"""LangGraph-compatible AgentExecutor for Hermes-style multi-provider LLMs.
Uses the OpenAI-compatible ``openai`` client pointed at either the
Nous Portal or OpenRouter, matching the pattern of sibling adapters
(AutoGen, LangGraph) which all use OpenAI-compatible clients.
Uses the OpenAI-compatible ``openai`` client pointed at whichever provider
was resolved by ``create_executor`` (Nous Portal, OpenRouter, OpenAI,
Anthropic, xAI, Gemini, Qwen, GLM, Kimi, MiniMax, DeepSeek, Groq, Together,
Fireworks, Mistral). Matches the pattern of sibling adapters (AutoGen,
LangGraph) which also use OpenAI-compat clients.
The ``execute()`` and ``cancel()`` async methods satisfy the
``a2a.server.agent_execution.AgentExecutor`` interface so this
@ -93,8 +106,8 @@ class HermesA2AExecutor:
def __init__(
self,
api_key: str,
base_url: str = _NOUS_BASE_URL,
model: str = _DEFAULT_MODEL,
base_url: str,
model: str,
heartbeat=None,
):
self.api_key = api_key

View File

@ -0,0 +1,289 @@
"""Hermes adapter provider registry — Phase 1 of the multi-provider expansion.
Extends the original PR-2 Hermes executor (Nous Portal + OpenRouter only) to a
registry of 15 providers. Every provider in this registry is reached via its
OpenAI-compat endpoint, which means the existing ``openai.AsyncOpenAI`` client
and request shape in ``executor.py`` Just Works without any new dependencies.
Native SDK paths (Anthropic Messages API, Gemini generateContent API) are
Phase 2 — they give better tool-calling + vision fidelity but are not
required to unblock the basic "CEO wants Hermes on Qwen / GLM / xAI /
Gemini" asks that triggered this work.
## Design
- ``ProviderConfig`` captures everything needed to point the OpenAI client at
a provider: env var(s), base URL, default model, auth scheme.
- ``PROVIDERS`` is a dict keyed by canonical short name (``"openai"``,
``"anthropic"``, ``"qwen"``, etc.).
- ``RESOLUTION_ORDER`` is the auto-detect sequence used when the caller
doesn't specify a provider — it tries each provider's env vars in turn and
picks the first one that's set.
- ``resolve_provider(explicit)`` returns ``(ProviderConfig, api_key)`` or
raises ``ValueError`` with a helpful message listing every env var it
checked.
## Back-compat
The original ``HERMES_API_KEY`` and ``OPENROUTER_API_KEY`` env vars still work
and still route to Nous Portal / OpenRouter respectively — they're just now
registered as two entries in ``PROVIDERS`` rather than hardcoded in
``create_executor``.
## Adding a new provider
1. Append a new ``ProviderConfig`` entry under ``PROVIDERS``
2. Add its short name to ``RESOLUTION_ORDER`` in the desired priority slot
3. Document the env var in the workspace ``.env.example`` (if present)
That's it. Nothing else needs to change — the executor reads the registry.
"""
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import Optional
@dataclass(frozen=True)
class ProviderConfig:
    """Connection recipe for one LLM provider behind the Hermes executor.

    All Phase 1 providers speak the OpenAI-compatible
    ``/v1/chat/completions`` protocol, so ``auth_scheme`` is always
    ``"openai"`` (Bearer token, OpenAI-style messages payload). Phase 2
    will add ``"anthropic"`` (native Messages API) and ``"gemini"``
    (native generateContent API) for roles that need better tool-call
    fidelity.

    Attributes
    ----------
    name:
        Canonical short name — the key used in ``PROVIDERS`` and the
        ``provider`` kwarg.
    env_vars:
        API key env vars, checked in order; the first non-empty value wins.
        Multiple vars let us accept common aliases (e.g. ``QWEN_API_KEY``
        AND ``DASHSCOPE_API_KEY`` both work for Alibaba Qwen).
    base_url:
        OpenAI-compat base URL. Must include the ``/v1`` suffix where
        applicable.
    default_model:
        Default model name to pass to ``chat.completions.create``.
        Per-call overrides are possible via the executor constructor.
    auth_scheme:
        ``"openai"`` (Bearer token + OpenAI-style payload) for every
        Phase 1 provider. Phase 2 reserves ``"anthropic"`` and
        ``"gemini"`` for native-SDK paths.
    docs:
        Short audit note — which docs URL the config came from, or known
        quirks. Never read programmatically; exists so future audits of
        this file are cheaper than re-Googling every entry.
    """

    name: str
    env_vars: tuple[str, ...]
    base_url: str
    default_model: str
    auth_scheme: str = "openai"
    docs: str = ""
# --- Provider registry ------------------------------------------------------
#
# Ordering within this dict is not semantically meaningful — use
# ``RESOLUTION_ORDER`` below to control auto-detect priority. This dict is
# grouped by "who owns the provider" just for human readability.
#
# NOTE(review): base URLs and default model names are vendor-controlled and
# drift over time — re-verify each entry against the provider's docs when
# auditing this file.
PROVIDERS: dict[str, ProviderConfig] = {
    # --- Existing (PR 2 baseline) ---------------------------------------
    "nous_portal": ProviderConfig(
        name="nous_portal",
        env_vars=("HERMES_API_KEY", "NOUS_API_KEY"),
        base_url="https://inference-prod.nousresearch.com/v1",
        default_model="nousresearch/hermes-3-llama-3.1-405b",
        docs="Nous Research Portal — original Hermes adapter target from PR 2.",
    ),
    "openrouter": ProviderConfig(
        name="openrouter",
        env_vars=("OPENROUTER_API_KEY",),
        base_url="https://openrouter.ai/api/v1",
        # NOTE(review): PR 2's OpenRouter fallback defaulted to
        # nousresearch/hermes-3-llama-3.1-405b; defaulting to Claude here is a
        # behavior change for existing OPENROUTER_API_KEY users — confirm this
        # is intended despite the "back-compat preserved" commit message.
        default_model="anthropic/claude-sonnet-4.5",
        docs="OpenRouter — unified OpenAI-compat gateway to hundreds of models. "
        "Useful for A/B testing and as a fallback when a direct provider is down.",
    ),
    # --- Frontier commercial (US) ---------------------------------------
    "openai": ProviderConfig(
        name="openai",
        env_vars=("OPENAI_API_KEY",),
        base_url="https://api.openai.com/v1",
        default_model="gpt-4o",
        docs="OpenAI — canonical OpenAI-compat endpoint. Works out of the box.",
    ),
    "anthropic": ProviderConfig(
        name="anthropic",
        env_vars=("ANTHROPIC_API_KEY",),
        base_url="https://api.anthropic.com/v1",
        default_model="claude-sonnet-4-5",
        docs="Anthropic — Phase 1 uses the OpenAI-compat shim at /v1. Phase 2 "
        "will add the native Messages API path for better tool calling.",
    ),
    "xai": ProviderConfig(
        name="xai",
        # GROK_API_KEY accepted as a common alias for the xAI key.
        env_vars=("XAI_API_KEY", "GROK_API_KEY"),
        base_url="https://api.x.ai/v1",
        default_model="grok-4",
        docs="xAI — Grok family. OpenAI-compat via api.x.ai/v1.",
    ),
    "gemini": ProviderConfig(
        name="gemini",
        # GOOGLE_API_KEY accepted as the broader Google-ecosystem alias.
        env_vars=("GEMINI_API_KEY", "GOOGLE_API_KEY"),
        base_url="https://generativelanguage.googleapis.com/v1beta/openai",
        default_model="gemini-2.5-flash",
        docs="Google Gemini — uses the documented OpenAI-compat endpoint at "
        "/v1beta/openai. Phase 2 will add native generateContent for vision.",
    ),
    # --- Chinese providers ----------------------------------------------
    "qwen": ProviderConfig(
        name="qwen",
        env_vars=("QWEN_API_KEY", "DASHSCOPE_API_KEY"),
        base_url="https://dashscope-intl.aliyuncs.com/compatible-mode/v1",
        default_model="qwen3-235b-a22b",
        docs="Alibaba Qwen via DashScope international endpoint. OpenAI-compat mode. "
        "For domestic China use dashscope.aliyuncs.com (no -intl).",
    ),
    "glm": ProviderConfig(
        name="glm",
        env_vars=("GLM_API_KEY", "ZHIPU_API_KEY"),
        base_url="https://open.bigmodel.cn/api/paas/v4",
        default_model="glm-4-plus",
        docs="Zhipu AI GLM — open.bigmodel.cn, OpenAI-compat via /api/paas/v4.",
    ),
    "kimi": ProviderConfig(
        name="kimi",
        env_vars=("KIMI_API_KEY", "MOONSHOT_API_KEY"),
        base_url="https://api.moonshot.ai/v1",
        default_model="kimi-k2",
        docs="Moonshot AI Kimi K2 — OpenAI-compat at api.moonshot.ai/v1.",
    ),
    "minimax": ProviderConfig(
        name="minimax",
        env_vars=("MINIMAX_API_KEY",),
        base_url="https://api.minimax.io/v1",
        default_model="MiniMax-M2",
        docs="MiniMax — OpenAI-compat at api.minimax.io/v1. "
        "Note: older base URL api.minimaxi.chat is deprecated.",
    ),
    "deepseek": ProviderConfig(
        name="deepseek",
        env_vars=("DEEPSEEK_API_KEY",),
        base_url="https://api.deepseek.com/v1",
        default_model="deepseek-chat",
        docs="DeepSeek — very cheap, OpenAI-compat at api.deepseek.com/v1.",
    ),
    # --- OSS / alt providers --------------------------------------------
    "groq": ProviderConfig(
        name="groq",
        env_vars=("GROQ_API_KEY",),
        base_url="https://api.groq.com/openai/v1",
        default_model="llama-3.3-70b-versatile",
        docs="Groq LPU inference — very fast, OpenAI-compat at api.groq.com/openai/v1.",
    ),
    "together": ProviderConfig(
        name="together",
        env_vars=("TOGETHER_API_KEY",),
        base_url="https://api.together.xyz/v1",
        default_model="meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
        docs="Together AI — OSS model hosting, OpenAI-compat at api.together.xyz/v1.",
    ),
    "fireworks": ProviderConfig(
        name="fireworks",
        env_vars=("FIREWORKS_API_KEY",),
        base_url="https://api.fireworks.ai/inference/v1",
        default_model="accounts/fireworks/models/llama-v3p3-70b-instruct",
        docs="Fireworks AI — OSS model hosting, OpenAI-compat at api.fireworks.ai/inference/v1.",
    ),
    "mistral": ProviderConfig(
        name="mistral",
        env_vars=("MISTRAL_API_KEY",),
        base_url="https://api.mistral.ai/v1",
        default_model="mistral-large-latest",
        docs="Mistral AI — OpenAI-compat at api.mistral.ai/v1.",
    ),
}
# --- Auto-detect resolution order -------------------------------------------
#
# When the caller doesn't specify a provider, resolve_provider() walks this
# list in order and picks the first provider whose env var is set. Order is
# chosen to preserve back-compat (the two original PR-2 providers come first)
# followed by the most likely-to-be-configured commercial APIs.
#
# Keep this tuple 1:1 with PROVIDERS: the test suite asserts equal length,
# no duplicates, and that every registry key appears here.
RESOLUTION_ORDER: tuple[str, ...] = (
    # Back-compat: PR 2 baseline
    "nous_portal",
    "openrouter",
    # Frontier commercial
    "anthropic",
    "openai",
    "gemini",
    "xai",
    # Chinese providers
    "qwen",
    "glm",
    "kimi",
    "minimax",
    "deepseek",
    # OSS / alt
    "groq",
    "mistral",
    "together",
    "fireworks",
)
def _first_env_key(cfg: ProviderConfig) -> Optional[str]:
    """Return the first non-empty API key among ``cfg.env_vars``, else None.

    Values are stripped, so an env var set to pure whitespace counts as
    unset — the single rule shared by the explicit and auto-detect paths.
    """
    for env in cfg.env_vars:
        val = os.environ.get(env, "").strip()
        if val:
            return val
    return None


def resolve_provider(explicit: Optional[str] = None) -> tuple[ProviderConfig, str]:
    """Resolve a provider name to a ``(ProviderConfig, api_key)`` pair.

    Resolution order:
    1. If ``explicit`` is given, look it up in ``PROVIDERS`` and try every
       env var on that provider's config. Raise with a clear message if the
       name is unknown or if all env vars are empty — there is deliberately
       no fallback to auto-detect, since silently switching providers would
       surprise the operator.
    2. Otherwise auto-detect: walk ``RESOLUTION_ORDER`` and return the first
       provider whose env var is set.

    Raises
    ------
    ValueError
        If ``explicit`` is an unknown provider name, if ``explicit`` is a
        known provider but its env vars are all empty, or if no env var is
        set for any provider in auto-detect mode.
    """
    if explicit:
        if explicit not in PROVIDERS:
            raise ValueError(
                f"Unknown Hermes provider: {explicit!r}. "
                f"Available: {sorted(PROVIDERS)}"
            )
        cfg = PROVIDERS[explicit]
        key = _first_env_key(cfg)
        if key is not None:
            return cfg, key
        raise ValueError(
            f"Hermes provider {explicit!r} specified but no env var set. "
            f"Tried: {cfg.env_vars}"
        )

    # Auto-detect — first provider with a non-empty env var wins.
    for name in RESOLUTION_ORDER:
        cfg = PROVIDERS[name]
        key = _first_env_key(cfg)
        if key is not None:
            return cfg, key

    # Nothing set — raise with the full list so the operator knows every
    # option they have without having to read the source.
    tried = [env for name in RESOLUTION_ORDER for env in PROVIDERS[name].env_vars]
    raise ValueError(
        "No Hermes provider API key found. Set any one of: " + ", ".join(tried)
    )

View File

@ -0,0 +1,163 @@
"""Tests for workspace-template/adapters/hermes/providers.py.
These tests exercise resolve_provider() in isolation — they do not import
anything from adapters/__init__.py so they don't need the a2a runtime deps.
"""
from __future__ import annotations
import importlib
import os
import sys
from pathlib import Path
import pytest
# Make the hermes package importable without pulling in adapters/__init__.py
# (which imports the a2a SDK). We load providers.py directly from its file path.
_HERMES_DIR = Path(__file__).parent.parent / "adapters" / "hermes"
sys.path.insert(0, str(_HERMES_DIR))
import providers # type: ignore # noqa: E402
# Every API-key env var any registered provider can read. Derived from the
# registry itself so this cleanup list can never drift when a provider is
# added, renamed, or gains an env-var alias — the hand-maintained copy this
# replaces would silently miss new vars.
_ALL_PROVIDER_ENV_VARS: tuple[str, ...] = tuple(
    env for cfg in providers.PROVIDERS.values() for env in cfg.env_vars
)
@pytest.fixture(autouse=True)
def _clean_env(monkeypatch):
    """Clear every provider env var before each test so runs are deterministic.

    Teardown also scrubs the vars: tests in this module assign
    ``os.environ[...]`` directly (not via ``monkeypatch.setenv``), and
    monkeypatch only restores changes it made itself — without the
    post-yield sweep those assignments would leak into other test modules.
    """
    for key in _ALL_PROVIDER_ENV_VARS:
        monkeypatch.delenv(key, raising=False)
    yield
    # Undo any direct os.environ writes the test body performed.
    for key in _ALL_PROVIDER_ENV_VARS:
        os.environ.pop(key, None)
def test_registry_is_populated():
    """Phase 1 ships at least 15 providers and every entry is self-consistent."""
    registry = providers.PROVIDERS
    order = providers.RESOLUTION_ORDER
    assert len(registry) >= 15
    assert len(order) == len(registry)
    for key, entry in registry.items():
        assert entry.name == key, f"{key}: config.name should match dict key"
        assert entry.env_vars, f"{key}: must declare at least one env var"
        assert entry.base_url.startswith("http"), f"{key}: base_url must be http(s)"
        assert entry.default_model, f"{key}: must declare a default model"
        assert key in order, f"{key}: missing from resolution order"


def test_resolution_order_has_no_duplicates():
    """Each provider appears exactly once in the auto-detect sequence."""
    order = providers.RESOLUTION_ORDER
    assert len(set(order)) == len(order)
def test_backcompat_hermes_api_key_first():
    """PR 2 back-compat — HERMES_API_KEY auto-detect still routes to Nous Portal."""
    os.environ["HERMES_API_KEY"] = "hermes-test-key"
    resolved, key = providers.resolve_provider()
    assert (resolved.name, key) == ("nous_portal", "hermes-test-key")


def test_backcompat_openrouter_api_key_second():
    """PR 2 back-compat — OPENROUTER_API_KEY still routes to OpenRouter when HERMES_API_KEY is absent."""
    os.environ["OPENROUTER_API_KEY"] = "or-test-key"
    resolved, _ = providers.resolve_provider()
    assert resolved.name == "openrouter"


def test_auto_detect_openai():
    """OPENAI_API_KEY alone auto-detects to the canonical OpenAI endpoint."""
    os.environ["OPENAI_API_KEY"] = "sk-test"
    resolved, _ = providers.resolve_provider()
    assert resolved.name == "openai"
    assert resolved.base_url == "https://api.openai.com/v1"


def test_auto_detect_anthropic():
    """ANTHROPIC_API_KEY alone auto-detects to the Anthropic entry."""
    os.environ["ANTHROPIC_API_KEY"] = "ant-test"
    resolved, _ = providers.resolve_provider()
    assert resolved.name == "anthropic"
@pytest.mark.parametrize(
    "env_var,expected",
    [
        # PR 2 baseline + aliases. NOUS_API_KEY previously had no coverage
        # anywhere in the suite; the others are pinned here for completeness
        # so the table truly covers every env var in the registry.
        ("HERMES_API_KEY", "nous_portal"),
        ("NOUS_API_KEY", "nous_portal"),
        ("OPENROUTER_API_KEY", "openrouter"),
        ("OPENAI_API_KEY", "openai"),
        ("ANTHROPIC_API_KEY", "anthropic"),
        ("XAI_API_KEY", "xai"),
        ("GROK_API_KEY", "xai"),
        ("QWEN_API_KEY", "qwen"),
        ("DASHSCOPE_API_KEY", "qwen"),
        ("GLM_API_KEY", "glm"),
        ("ZHIPU_API_KEY", "glm"),
        ("KIMI_API_KEY", "kimi"),
        ("MOONSHOT_API_KEY", "kimi"),
        ("GROQ_API_KEY", "groq"),
        ("DEEPSEEK_API_KEY", "deepseek"),
        ("MISTRAL_API_KEY", "mistral"),
        ("TOGETHER_API_KEY", "together"),
        ("FIREWORKS_API_KEY", "fireworks"),
        ("MINIMAX_API_KEY", "minimax"),
        ("GEMINI_API_KEY", "gemini"),
        ("GOOGLE_API_KEY", "gemini"),
    ],
)
def test_every_provider_env_var_resolves(env_var, expected):
    """Every env var listed in PROVIDERS resolves to the right provider —
    this guards against typos in the registry dict.

    The table covers all 21 env vars; the original omitted five (notably
    the NOUS_API_KEY alias, which no other test exercised).
    """
    os.environ[env_var] = "test-key"
    cfg, _ = providers.resolve_provider()
    assert cfg.name == expected, (
        f"{env_var} should route to {expected}, got {cfg.name}"
    )
def test_explicit_provider_wins_over_auto_detect():
    """When `provider=` is given, auto-detect is bypassed."""
    os.environ["HERMES_API_KEY"] = "hermes-key"  # would auto-detect
    os.environ["OPENAI_API_KEY"] = "openai-key"
    resolved, key = providers.resolve_provider("openai")
    assert (resolved.name, key) == ("openai", "openai-key")


def test_unknown_provider_raises():
    """A name absent from the registry is rejected with a clear message."""
    with pytest.raises(ValueError, match="Unknown Hermes provider"):
        providers.resolve_provider("this_provider_does_not_exist")


def test_explicit_provider_with_missing_env_raises():
    """If the operator asks for a specific provider but its env var is empty,
    we raise — we do NOT fall back to auto-detect because that would be
    surprising ("why is my openai config talking to anthropic?")."""
    os.environ["HERMES_API_KEY"] = "some-value"  # auto-detect would succeed
    with pytest.raises(ValueError, match="no env var set"):
        providers.resolve_provider("anthropic")


def test_auto_detect_with_no_env_lists_all_options():
    """The error message should list every env var the caller could set,
    so operators don't have to read the source."""
    # No env vars set (autouse fixture clears them all)
    with pytest.raises(ValueError) as exc_info:
        providers.resolve_provider()
    message = str(exc_info.value)
    # Spot-check: the message names at least a few providers
    for candidate in ("OPENAI_API_KEY", "ANTHROPIC_API_KEY", "QWEN_API_KEY"):
        assert candidate in message, f"error message should mention {candidate}"