molecule-core/infra/litellm_config.yml
Hongming Wang 24fec62d7f initial commit — Molecule AI platform
Forked clean from public hackathon repo (Starfire-AgentTeam, BSL 1.1)
with full rebrand to Molecule AI under github.com/Molecule-AI/molecule-monorepo.

Brand: Starfire → Molecule AI.
Slug: starfire / agent-molecule → molecule.
Env vars: STARFIRE_* → MOLECULE_*.
Go module: github.com/agent-molecule/platform → github.com/Molecule-AI/molecule-monorepo/platform.
Python packages: starfire_plugin → molecule_plugin, starfire_agent → molecule_agent.
DB: agentmolecule → molecule.

History truncated; see public repo for prior commits and contributor
attribution. Verified green: go test -race ./... (platform), pytest
(workspace-template 1129 + sdk 132), vitest (canvas 352), build (mcp).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-13 11:55:37 -07:00

71 lines
3.1 KiB
YAML

# LiteLLM Proxy Configuration — Molecule AI
# ─────────────────────────────────────────────────────────────────────────────
# This file is mounted into the litellm container at /app/config.yaml.
# Workspace agents reach the proxy at http://litellm:4000 (Docker-internal).
#
# Usage:
# docker compose --profile multi-provider up
#
# Then in a workspace config.yaml, set:
# model: openai/claude-opus-4-5 # routed via LiteLLM → Anthropic
# model: openai/gpt-4o # routed via LiteLLM → OpenAI
# model: openai/ollama/llama3.2 # routed via LiteLLM → Ollama
#
# The workspace's OPENAI_BASE_URL env var must be set to http://litellm:4000
# and OPENAI_API_KEY to the value of LITELLM_MASTER_KEY below (default: sk-molecule).
# ─────────────────────────────────────────────────────────────────────────────
general_settings:
  # Auth token the proxy requires; workspace agents present it as OPENAI_API_KEY.
  # NOTE(review): `${VAR:-default}` is shell-substitution syntax — LiteLLM's own
  # config loader reads secrets via `os.environ/LITELLM_MASTER_KEY` instead.
  # Confirm an envsubst/entrypoint step renders this file before mount; otherwise
  # the literal string `${LITELLM_MASTER_KEY:-sk-molecule}` becomes the key.
  master_key: ${LITELLM_MASTER_KEY:-sk-molecule} # used as OPENAI_API_KEY by agents
# Each entry maps a client-facing `model_name` to provider routing params.
# Agents request `openai/<model_name>`; LiteLLM strips the prefix and routes
# using `litellm_params.model` (provider/model) plus the credential or api_base.
model_list:
  # ── Anthropic ──────────────────────────────────────────────────────────────
  - model_name: claude-opus-4-5
    litellm_params:
      model: anthropic/claude-opus-4-5
      api_key: ${ANTHROPIC_API_KEY}
  - model_name: claude-sonnet-4-6
    litellm_params:
      model: anthropic/claude-sonnet-4-6
      api_key: ${ANTHROPIC_API_KEY}
  # ── OpenAI ─────────────────────────────────────────────────────────────────
  - model_name: gpt-4o
    litellm_params:
      model: openai/gpt-4o
      api_key: ${OPENAI_API_KEY}
  - model_name: gpt-4o-mini
    litellm_params:
      model: openai/gpt-4o-mini
      api_key: ${OPENAI_API_KEY}
  # ── OpenRouter ─────────────────────────────────────────────────────────────
  - model_name: openrouter/deepseek-r1
    litellm_params:
      model: openrouter/deepseek/deepseek-r1
      api_key: ${OPENROUTER_API_KEY}
  - model_name: openrouter/qwen3-235b
    litellm_params:
      model: openrouter/qwen/qwen3-235b-a22b
      api_key: ${OPENROUTER_API_KEY}
  # ── Ollama (local — requires --profile local-models) ───────────────────────
  # Local models authenticate nothing; they only need the in-network api_base.
  - model_name: ollama/llama3.2
    litellm_params:
      model: ollama/llama3.2
      api_base: http://ollama:11434
  - model_name: ollama/qwen2.5-coder
    litellm_params:
      model: ollama/qwen2.5-coder:7b
      api_base: http://ollama:11434
litellm_settings:
  # Drop unsupported params silently (e.g. some models don't support temperature)
  drop_params: true
  # Per-request timeout in seconds before the proxy aborts the upstream call.
  request_timeout: 120