fix(runtime): MODEL_PROVIDER env is misnamed — accept MODEL/MOLECULE_MODEL, deprecate the legacy name

`molecule_runtime.config.load_config` read the `MODEL_PROVIDER` env var as
the *picked model id* — despite the name, it never carried the provider
(that's `LLM_PROVIDER` / the YAML `provider:` field). So `claude-code`,
`minimax`, and `opus` were all "valid" values for a var named
MODEL_PROVIDER. That footgun bit the dev-team rollout (2026-05-10): the
lead persona env files set `MODEL=claude-opus-4-7` (the intended model)
*and* `MODEL_PROVIDER=claude-code` (mistaking it for "the runtime"); the
loader picked up MODEL_PROVIDER → the claude CLI got `--model claude-code`
→ 404 on every turn, surfaced only as "Command failed with exit code 1"
with empty stderr (the real error is in the stream-json stdout, swallowed
by the SDK's placeholder). The 22 IC workspaces "worked" only because
their `MODEL_PROVIDER=minimax` happened to fuzzy-match on MiniMax's side —
they were actually running `--model minimax`, not `MiniMax-M2.7-highspeed`.

New precedence in `_picked_model_from_env`: `MOLECULE_MODEL` (canonical,
unambiguous) > `MODEL` (the obviously-correct name, already plumbed by
workspace-server's applyRuntimeModelEnv) > `MODEL_PROVIDER` (legacy —
still honored so canvas Save+Restart, the secret-mint path, and existing
persona env files keep working, but if it's the only one set we log a
one-time deprecation pointing at the misnomer) > the YAML `model:` field.
Applied at both the top-level `model` and `runtime_config.model`
resolution sites; semantics are otherwise unchanged. Bonus: workspaces
that already set `MODEL` correctly now get exactly that model instead of
whatever fuzzy-match the upstream did with the provider slug.

Tests: 5 new cases in test_config.py (MODEL beats MODEL_PROVIDER;
MOLECULE_MODEL beats MODEL; MODEL overrides YAML; legacy MODEL_PROVIDER
still resolves + warns; no warning when MODEL is set) + an autouse
fixture that clears MODEL*/resets the warn-latch so resolution is
deterministic regardless of the CI env or test order. `pytest
tests/test_config.py` — 66 passed; the config-importing suites
(test_preflight, test_skills_loader) — 129 passed.

Companion: molecule-dev-department PR #10 fixes the six dev-team lead
`workspace.yaml`s from `model: MiniMax-M2.7` to `model: opus`. Follow-ups
(not in scope here): plumb `MOLECULE_MODEL` from applyRuntimeModelEnv and
the canvas; strip `MODEL`/`MODEL_PROVIDER` from the operator-host persona
env files once the org-template `model:` field is authoritative end-to-end.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
hongming-pc2 2026-05-10 02:38:14 -07:00
parent 3c0d00b43f
commit 2ba3af5330
2 changed files with 141 additions and 8 deletions

View File

@ -1,5 +1,6 @@
"""Load workspace configuration from config.yaml."""
import logging
import os
from dataclasses import dataclass, field
from pathlib import Path
@ -7,6 +8,8 @@ from typing import Optional
import yaml
logger = logging.getLogger(__name__)
@dataclass
class RBACConfig:
@ -381,6 +384,47 @@ def _derive_provider_from_model(model: str) -> str:
return ""
_legacy_model_provider_warned = False
def _picked_model_from_env(default: str) -> str:
"""Resolve the operator-picked model id from env; newest name wins.
Precedence: ``MOLECULE_MODEL`` (canonical, unambiguous) ``MODEL``
``MODEL_PROVIDER`` (legacy) ``default`` (the YAML ``model:`` field).
``MODEL_PROVIDER`` is **misleadingly named**: it carries the picked
*model id*, never the LLM provider the provider lives in
``LLM_PROVIDER`` / the YAML ``provider:`` field. The legacy path stays
so canvas Save+Restart, the workspace-server secret-mint path, and
persona env files that set it keep working, but if it's the *only* one
set we log a deprecation once the misnomer keeps biting (e.g. setting
``MODEL_PROVIDER=claude-code`` expecting it to select the claude-code
*runtime* it doesn't, ``runtime:`` does — after which the claude CLI
404s on ``--model claude-code``). Set ``MODEL``/``MOLECULE_MODEL`` to
an id from ``runtime_config.models[].id`` (e.g. ``opus``, ``sonnet``,
``claude-opus-4-7``, ``MiniMax-M2.7-highspeed``) instead.
"""
global _legacy_model_provider_warned
for name in ("MOLECULE_MODEL", "MODEL"):
v = (os.environ.get(name) or "").strip()
if v:
return v
legacy = (os.environ.get("MODEL_PROVIDER") or "").strip()
if legacy:
if not _legacy_model_provider_warned:
logger.warning(
"MODEL_PROVIDER=%r is deprecated and misleadingly named — it "
"sets the picked *model id*, not the LLM provider (that's "
"LLM_PROVIDER / the YAML `provider:` field). Set MODEL (or "
"MOLECULE_MODEL) to an id from runtime_config.models instead.",
legacy,
)
_legacy_model_provider_warned = True
return legacy
return default
_EVENT_LOG_VALID_BACKENDS = {"memory", "disabled"}
@ -445,8 +489,10 @@ def load_config(config_path: Optional[str] = None) -> WorkspaceConfig:
with open(config_file) as f:
raw = yaml.safe_load(f) or {}
# Override model from env if provided
model = os.environ.get("MODEL_PROVIDER", raw.get("model", "anthropic:claude-opus-4-7"))
# Operator-picked model from env (canvas / secret-mint / persona env),
# falling back to the YAML `model:` field. See _picked_model_from_env for
# the precedence (MOLECULE_MODEL > MODEL > legacy MODEL_PROVIDER).
model = _picked_model_from_env(raw.get("model", "anthropic:claude-opus-4-7"))
# Resolve top-level provider with this priority chain:
# 1. ``LLM_PROVIDER`` env var (canvas Save+Restart sets this so the
@ -517,8 +563,9 @@ def load_config(config_path: Optional[str] = None) -> WorkspaceConfig:
required_env=runtime_raw.get("required_env", []),
timeout=runtime_raw.get("timeout", 0),
# Picked-model precedence (priority order):
# 1. MODEL_PROVIDER env var — canvas-picked model, plumbed via
# workspace-server's secret-mint path or the universal
# 1. operator-picked model from env — MOLECULE_MODEL > MODEL >
# (legacy) MODEL_PROVIDER, plumbed via canvas Save+Restart,
# workspace-server's secret-mint path, or the universal
# MODEL/MODEL_PROVIDER env from applyRuntimeModelEnv. The
# operator's canvas selection MUST win over the template's
# baked-in default; previously the template's
@ -527,13 +574,12 @@ def load_config(config_path: Optional[str] = None) -> WorkspaceConfig:
# surfaced 2026-05-02 during E2E).
# 2. runtime_raw.model — explicit YAML override in the
# template's runtime_config.
# 3. top-level `model` — already honors MODEL_PROVIDER (line
# 359) but only when YAML lacks a top-level `model:`. This
# is the SaaS restart case (CP regenerates a minimal
# 3. top-level `model` (already env-resolved above). This is
# the SaaS restart case (CP regenerates a minimal
# config.yaml on every boot, dropping runtime_config.model).
# Centralising here means EVERY adapter gets the override for
# free — no per-adapter env-reading code required.
model=os.environ.get("MODEL_PROVIDER") or runtime_raw.get("model") or model,
model=_picked_model_from_env(runtime_raw.get("model") or model),
# Same fallback shape as ``model`` above: an explicit
# ``runtime_config.provider`` wins; otherwise inherit the
# top-level resolved provider so adapters see a single

View File

@ -1,10 +1,12 @@
"""Tests for config.py — workspace configuration loading."""
import logging
import os
import pytest
import yaml
import config
from config import (
A2AConfig,
ComplianceConfig,
@ -17,6 +19,17 @@ from config import (
)
@pytest.fixture(autouse=True)
def _clean_model_env(monkeypatch):
    """Reset picked-model state before each test.

    Removes every MODEL* env var and re-arms the legacy-name deprecation
    latch, so model resolution is deterministic no matter what the CI shell
    exports or in which order the tests happen to run."""
    stale_vars = ("MOLECULE_MODEL", "MODEL", "MODEL_PROVIDER")
    for var in stale_vars:
        monkeypatch.delenv(var, raising=False)
    monkeypatch.setattr(config, "_legacy_model_provider_warned", False, raising=False)
    yield
def test_load_config_basic(tmp_path):
"""load_config reads a YAML file and returns a WorkspaceConfig."""
config_yaml = tmp_path / "config.yaml"
@ -164,6 +177,80 @@ def test_runtime_config_model_env_wins_over_explicit_yaml(tmp_path, monkeypatch)
assert cfg.runtime_config.model == "minimax/MiniMax-M2.7"
def test_picked_model_MODEL_env_wins_over_legacy_MODEL_PROVIDER(tmp_path, monkeypatch):
    """MODEL (the correctly-named env var) beats the legacy MODEL_PROVIDER.

    Regression for the 2026-05-10 dev-team incident: lead persona env files
    set MODEL=claude-opus-4-7 (the intended model) AND MODEL_PROVIDER=claude-code
    (mistaking MODEL_PROVIDER for "the runtime"). The old code read
    MODEL_PROVIDER → the claude CLI got `--model claude-code` → 404. MODEL must
    win so the operator's intended value lands at both levels.
    """
    monkeypatch.setenv("MODEL", "opus")
    monkeypatch.setenv("MODEL_PROVIDER", "claude-code")
    config_yaml = tmp_path / "config.yaml"
    config_yaml.write_text(
        yaml.dump({"model": "anthropic:claude-opus-4-7",
                   "runtime_config": {"model": "sonnet"}})
    )
    cfg = load_config(str(tmp_path))
    # Both resolution sites — top-level model AND runtime_config.model —
    # must see the env override, not the YAML values.
    assert cfg.model == "opus"
    assert cfg.runtime_config.model == "opus"
def test_picked_model_MOLECULE_MODEL_wins_over_MODEL(tmp_path, monkeypatch):
    """MOLECULE_MODEL (the unambiguous canonical name) wins over MODEL, which
    in turn wins over the legacy MODEL_PROVIDER."""
    # All three env sources set at once — only the canonical one may win.
    for env_name, env_value in (
        ("MOLECULE_MODEL", "claude-opus-4-7"),
        ("MODEL", "sonnet"),
        ("MODEL_PROVIDER", "claude-code"),
    ):
        monkeypatch.setenv(env_name, env_value)
    (tmp_path / "config.yaml").write_text(yaml.dump({"model": "openai:gpt-4o"}))
    cfg = load_config(str(tmp_path))
    assert cfg.model == "claude-opus-4-7"
    assert cfg.runtime_config.model == "claude-opus-4-7"
def test_picked_model_MODEL_env_overrides_yaml(tmp_path, monkeypatch):
    """MODEL env overrides the YAML `model:` field — same role MODEL_PROVIDER
    had, now under the correctly-named var."""
    monkeypatch.setenv("MODEL", "google:gemini-2.0-flash")
    cfg_file = tmp_path / "config.yaml"
    cfg_file.write_text(yaml.dump({"model": "openai:gpt-4o"}))
    assert load_config(str(tmp_path)).model == "google:gemini-2.0-flash"
def test_legacy_MODEL_PROVIDER_still_honored_but_warns(tmp_path, monkeypatch, caplog):
    """MODEL_PROVIDER alone still resolves the model (back-compat: canvas
    Save+Restart, secret-mint, existing persona env files keep working) but
    logs a one-time deprecation pointing at the misnomer."""
    monkeypatch.setenv("MODEL_PROVIDER", "MiniMax-M2.7-highspeed")
    (tmp_path / "config.yaml").write_text(yaml.dump({"model": "openai:gpt-4o"}))
    with caplog.at_level(logging.WARNING):
        cfg = load_config(str(tmp_path))
    # Legacy value still lands at both resolution sites.
    assert cfg.model == "MiniMax-M2.7-highspeed"
    assert cfg.runtime_config.model == "MiniMax-M2.7-highspeed"
    # …and the misnomer warning was emitted.
    deprecations = [
        rec for rec in caplog.records
        if "MODEL_PROVIDER" in rec.getMessage() and "deprecated" in rec.getMessage()
    ]
    assert deprecations
def test_no_deprecation_when_MODEL_is_set(tmp_path, monkeypatch, caplog):
    """When MODEL is set, MODEL_PROVIDER is ignored entirely and NOT warned
    about — a workspace that already does it right shouldn't get nagged."""
    config_yaml = tmp_path / "config.yaml"
    config_yaml.write_text(yaml.dump({"model": "openai:gpt-4o"}))
    monkeypatch.setenv("MODEL", "opus")
    monkeypatch.setenv("MODEL_PROVIDER", "claude-code")
    with caplog.at_level(logging.WARNING):
        cfg = load_config(str(tmp_path))
    assert cfg.model == "opus"
    # The deprecation fires only when MODEL_PROVIDER is the sole source.
    assert not any("MODEL_PROVIDER" in r.getMessage() for r in caplog.records)
def test_runtime_config_model_picks_up_env_via_top_level(tmp_path, monkeypatch):
"""End-to-end path the canvas Save+Restart relies on: user picks
a model workspace_secrets.MODEL_PROVIDER updated CP user-data