fix: runtime deployment fixes for Docker Compose stack
- Add .gitignore for __pycache__, node_modules, .playwright-mcp
- Add CLAUDE.md project instructions
- docker-compose: remove host port exposure for internal services, remove Ollama container (use host), add CORS origin, bake NEXT_PUBLIC_API_URL at build time, run alembic migrations on gateway startup, add CPU-only torch pre-install
- gateway: add CORS middleware, graceful Slack degradation without bot token, fix None guard on slack_handler (see the sketch after this list)
- gateway pyproject: add aiohttp dependency for slack-bolt async
- llm-pool pyproject: install litellm from GitHub (removed from PyPI), enable hatch direct references
- portal: enable standalone output in next.config.ts
- Remove orphaned migration 003_phase2_audit_kb.py (renamed to 004)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
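A minimal sketch of what the gateway items above could look like, assuming FastAPI's CORSMiddleware and slack-bolt's async FastAPI adapter. The module layout, environment variable names (CORS_ORIGIN, SLACK_BOT_TOKEN), and the /slack/events route are illustrative assumptions, not code taken from this repository:

```python
# Hedged sketch of the gateway changes: CORS middleware plus graceful Slack
# degradation when no bot token is configured. Names marked below are assumed.
import os

from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse

app = FastAPI()

# CORS: allow the portal origin handed in from docker-compose (env var name assumed).
app.add_middleware(
    CORSMiddleware,
    allow_origins=[os.environ.get("CORS_ORIGIN", "http://localhost:3000")],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Graceful degradation: only build the Bolt app and request handler when a bot
# token is present; otherwise leave slack_handler as None.
slack_handler = None
if os.environ.get("SLACK_BOT_TOKEN"):
    # slack-bolt's async mode needs aiohttp at runtime (hence the aiohttp
    # dependency mentioned in the commit message).
    from slack_bolt.adapter.fastapi.async_handler import AsyncSlackRequestHandler
    from slack_bolt.async_app import AsyncApp

    # AsyncApp also reads SLACK_SIGNING_SECRET from the environment.
    bolt_app = AsyncApp(token=os.environ["SLACK_BOT_TOKEN"])
    slack_handler = AsyncSlackRequestHandler(bolt_app)


@app.post("/slack/events")
async def slack_events(request: Request):
    # None guard: answer 503 instead of crashing when Slack is not configured.
    if slack_handler is None:
        return JSONResponse(
            status_code=503,
            content={"ok": False, "error": "slack_not_configured"},
        )
    return await slack_handler.handle(request)
```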
llm-pool pyproject.toml
@@ -9,13 +9,15 @@ description = "LLM Backend Pool — LiteLLM router for Ollama, vLLM, OpenAI, Ant
 requires-python = ">=3.12"
 
 dependencies = [
     "konstruct-shared",
-    # Pinned: do NOT upgrade past 1.82.5 — a September 2025 OOM regression exists
-    # in later releases. Verify fix before bumping.
-    "litellm==1.82.5",
+    # litellm removed from PyPI — installing from GitHub
+    "litellm @ git+https://github.com/BerriAI/litellm.git",
     "fastapi[standard]>=0.115.0",
     "httpx>=0.28.0",
 ]
 
+[tool.hatch.metadata]
+allow-direct-references = true
+
 [tool.uv.sources]
 konstruct-shared = { workspace = true }
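The two additions go together: hatchling's metadata validation rejects direct references such as the git+ URL in project.dependencies unless allow-direct-references is set to true under [tool.hatch.metadata], so the new litellm requirement would fail to build without that setting.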