Files
konstruct/docker-compose.yml
Adolfo Delorenzo ebf6e76174 feat: make Ollama model configurable via OLLAMA_MODEL env var
- Add OLLAMA_MODEL setting to shared config (default: qwen3:32b)
- LLM router reads from settings instead of hardcoded model name
- Create .env file with all configurable settings documented
- docker-compose passes OLLAMA_MODEL to llm-pool container

To change the model: edit OLLAMA_MODEL in .env and restart llm-pool.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 13:22:18 -06:00

203 lines
6.7 KiB
YAML

# Single bridge network shared by all services; containers resolve each
# other by service name (e.g. redis, postgres, llm-pool, gateway).
networks:
  konstruct-net:
    driver: bridge
# Named volumes for durable service state (bare key = default local driver).
volumes:
  postgres_data:
  redis_data:
services:
  # PostgreSQL 16 with the pgvector extension; schema bootstrap runs via
  # the init script mounted into docker-entrypoint-initdb.d.
  postgres:
    image: pgvector/pgvector:pg16
    container_name: konstruct-postgres
    environment:
      POSTGRES_DB: konstruct
      POSTGRES_USER: postgres
      # Dev-only credential; production deployments should inject a secret.
      POSTGRES_PASSWORD: postgres_dev
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./scripts/init-db.sh:/docker-entrypoint-initdb.d/init-db.sh:ro
    networks:
      - konstruct-net
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres -d konstruct"]
      interval: 5s
      timeout: 5s
      retries: 10
redis:
image: redis:7-alpine
container_name: konstruct-redis
command: redis-server --save 60 1 --loglevel warning
volumes:
- redis_data:/data
networks:
- konstruct-net
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 5s
timeout: 5s
retries: 10
# Ollama runs on the host (port 11434) — containers access it via host.docker.internal
llm-pool:
build:
context: .
dockerfile_inline: |
FROM python:3.12-slim
WORKDIR /app
RUN apt-get update -qq && apt-get install -y -qq git curl > /dev/null 2>&1
RUN pip install uv
COPY pyproject.toml ./
COPY packages/shared ./packages/shared
COPY packages/llm-pool ./packages/llm-pool
RUN uv pip install --system torch --index-url https://download.pytorch.org/whl/cpu
RUN uv pip install --system -e packages/shared -e "packages/llm-pool"
CMD ["uvicorn", "llm_pool.main:app", "--host", "0.0.0.0", "--port", "8004"]
container_name: konstruct-llm-pool
extra_hosts:
- "host.docker.internal:host-gateway"
networks:
- konstruct-net
depends_on:
redis:
condition: service_healthy
environment:
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
- OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-http://host.docker.internal:11434}
- OLLAMA_MODEL=${OLLAMA_MODEL:-qwen3:32b}
- REDIS_URL=redis://redis:6379/0
- LOG_LEVEL=${LOG_LEVEL:-INFO}
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "curl -sf http://localhost:8004/health || exit 1"]
interval: 10s
timeout: 5s
retries: 5
portal:
build:
context: packages/portal
dockerfile_inline: |
FROM node:22-alpine AS base
WORKDIR /app
COPY package.json package-lock.json* ./
RUN npm ci --production=false
COPY . .
ENV NEXT_PUBLIC_API_URL=http://100.64.0.10:8001
RUN npm run build
FROM node:22-alpine AS runner
WORKDIR /app
ENV NODE_ENV=production
COPY --from=base /app/.next/standalone ./
COPY --from=base /app/.next/static ./.next/static
COPY --from=base /app/public ./public
EXPOSE 3000
CMD ["node", "server.js"]
container_name: konstruct-portal
ports:
- "3000:3000"
networks:
- konstruct-net
environment:
- NODE_ENV=production
- API_URL=http://gateway:8001
- NEXT_PUBLIC_API_URL=http://100.64.0.10:8001
- AUTH_SECRET=${AUTH_SECRET:-insecure-dev-secret-change-in-production}
- AUTH_URL=http://100.64.0.10:3000
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "wget -q --spider http://localhost:3000 || exit 1"]
interval: 10s
timeout: 5s
retries: 5
gateway:
build:
context: .
dockerfile_inline: |
FROM python:3.12-slim
WORKDIR /app
RUN apt-get update -qq && apt-get install -y -qq git curl > /dev/null 2>&1
RUN pip install uv
COPY pyproject.toml ./
COPY packages/shared ./packages/shared
COPY packages/router ./packages/router
COPY packages/gateway ./packages/gateway
COPY packages/orchestrator ./packages/orchestrator
COPY migrations ./migrations
COPY alembic.ini ./
RUN uv pip install --system torch --index-url https://download.pytorch.org/whl/cpu
RUN uv pip install --system -e packages/shared -e packages/router -e packages/gateway -e packages/orchestrator
CMD ["sh", "-c", "alembic upgrade head && uvicorn gateway.main:app --host 0.0.0.0 --port 8001"]
container_name: konstruct-gateway
ports:
- "8001:8001"
networks:
- konstruct-net
depends_on:
redis:
condition: service_healthy
postgres:
condition: service_healthy
celery-worker:
condition: service_started
environment:
- DATABASE_URL=postgresql+asyncpg://konstruct_app:konstruct_dev@postgres:5432/konstruct
- DATABASE_ADMIN_URL=postgresql+asyncpg://postgres:postgres_dev@postgres:5432/konstruct
- REDIS_URL=redis://redis:6379/0
- CELERY_BROKER_URL=redis://redis:6379/1
- CELERY_RESULT_BACKEND=redis://redis:6379/2
- SLACK_BOT_TOKEN=${SLACK_BOT_TOKEN:-}
- SLACK_SIGNING_SECRET=${SLACK_SIGNING_SECRET:-}
- SLACK_APP_TOKEN=${SLACK_APP_TOKEN:-}
- LOG_LEVEL=INFO
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "curl -sf http://localhost:8001/health || exit 1"]
interval: 10s
timeout: 5s
retries: 5
celery-worker:
build:
context: .
dockerfile_inline: |
FROM python:3.12-slim
WORKDIR /app
RUN apt-get update -qq && apt-get install -y -qq git curl > /dev/null 2>&1
RUN pip install uv
COPY pyproject.toml ./
COPY packages/shared ./packages/shared
COPY packages/router ./packages/router
COPY packages/gateway ./packages/gateway
COPY packages/orchestrator ./packages/orchestrator
RUN uv pip install --system torch --index-url https://download.pytorch.org/whl/cpu
RUN uv pip install --system -e packages/shared -e packages/router -e packages/gateway -e packages/orchestrator
CMD ["celery", "-A", "orchestrator.main", "worker", "--loglevel=info"]
container_name: konstruct-celery-worker
extra_hosts:
- "host.docker.internal:host-gateway"
networks:
- konstruct-net
depends_on:
redis:
condition: service_healthy
postgres:
condition: service_healthy
llm-pool:
condition: service_healthy
environment:
- DATABASE_URL=postgresql+asyncpg://konstruct_app:konstruct_dev@postgres:5432/konstruct
- REDIS_URL=redis://redis:6379/0
- CELERY_BROKER_URL=redis://redis:6379/1
- CELERY_RESULT_BACKEND=redis://redis:6379/2
- LLM_POOL_URL=http://llm-pool:8004
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
- OPENAI_API_KEY=${OPENAI_API_KEY:-}
- OLLAMA_BASE_URL=http://host.docker.internal:11434
- LOG_LEVEL=INFO
restart: unless-stopped