From b8b3d5ce1f89cb968bafbf896a2c577a6e6a46a8 Mon Sep 17 00:00:00 2001 From: Hongming Wang Date: Tue, 21 Apr 2026 10:33:27 -0700 Subject: [PATCH] fix(e2e): MODEL_PROVIDER is provider:model slug, not just provider MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit workspace/config.py:258 reads MODEL_PROVIDER as the full model string (format 'provider:model', e.g. 'anthropic:claude-opus-4-7'). My prior 'openai' alone got parsed as the model name → 404 model_not_found. Use 'openai:gpt-4o' and also set OPENAI_BASE_URL to api.openai.com (the default was openrouter.ai, which takes a different key format). Co-Authored-By: Claude Opus 4.7 (1M context) --- tests/e2e/test_staging_full_saas.sh | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/e2e/test_staging_full_saas.sh b/tests/e2e/test_staging_full_saas.sh index b1d78345..87a44ce3 100755 --- a/tests/e2e/test_staging_full_saas.sh +++ b/tests/e2e/test_staging_full_saas.sh @@ -238,11 +238,12 @@ tenant_call() { # expected and actionable. SECRETS_JSON='{}' if [ -n "${E2E_OPENAI_API_KEY:-}" ]; then - # MODEL_PROVIDER=openai forces Hermes's resolver to pick the OpenAI - # path. Without it Hermes defaults to Claude (resolution order puts - # anthropic before openai) and you get 404 model_not_found because - # the OpenAI endpoint doesn't serve claude-sonnet-* models. - SECRETS_JSON="{\"OPENAI_API_KEY\":\"$E2E_OPENAI_API_KEY\",\"MODEL_PROVIDER\":\"openai\"}" + # MODEL_PROVIDER is a full model slug in 'provider:model' format per + # workspace/config.py:258. Using just "openai" gets parsed as the + # model name → 404 model_not_found. Also set OPENAI_BASE_URL to + # OpenAI's own endpoint — default is openrouter.ai which would need + # a different key format. + SECRETS_JSON="{\"OPENAI_API_KEY\":\"$E2E_OPENAI_API_KEY\",\"OPENAI_BASE_URL\":\"https://api.openai.com/v1\",\"MODEL_PROVIDER\":\"openai:gpt-4o\"}" fi log "5/11 Provisioning parent workspace (runtime=$RUNTIME)..."