feat: make Ollama model configurable via OLLAMA_MODEL env var
- Add OLLAMA_MODEL setting to shared config (default: qwen3:32b)
- LLM router reads from settings instead of hardcoded model name
- Create .env file with all configurable settings documented
- docker-compose passes OLLAMA_MODEL to llm-pool container

To change the model: edit OLLAMA_MODEL in .env and restart llm-pool.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -32,7 +32,7 @@ _model_list: list[dict] = [
|
||||
{
|
||||
"model_name": "local",
|
||||
"litellm_params": {
|
||||
-"model": "ollama/qwen3:32b",
+"model": f"ollama/{settings.ollama_model}",
|
||||
"api_base": settings.ollama_base_url,
|
||||
},
|
||||
},
|
||||
@@ -40,7 +40,7 @@ _model_list: list[dict] = [
|
||||
{
|
||||
"model_name": "fast",
|
||||
"litellm_params": {
|
||||
-"model": "ollama/qwen3:32b",
+"model": f"ollama/{settings.ollama_model}",
|
||||
"api_base": settings.ollama_base_url,
|
||||
},
|
||||
},
|
||||
@@ -48,7 +48,7 @@ _model_list: list[dict] = [
|
||||
{
|
||||
"model_name": "economy",
|
||||
"litellm_params": {
|
||||
-"model": "ollama/qwen3:32b",
+"model": f"ollama/{settings.ollama_model}",
|
||||
"api_base": settings.ollama_base_url,
|
||||
},
|
||||
},
|
||||
@@ -56,7 +56,7 @@ _model_list: list[dict] = [
|
||||
{
|
||||
"model_name": "balanced",
|
||||
"litellm_params": {
|
||||
-"model": "ollama/qwen3:32b",
+"model": f"ollama/{settings.ollama_model}",
|
||||
"api_base": settings.ollama_base_url,
|
||||
},
|
||||
},
|
||||
|
||||
Reference in New Issue
Block a user