Docker support and Ollama support (#47)

- Added support for running the CLI and an Ollama server via Docker
- Introduced tests for the local embeddings model and the standalone Docker setup
- Enabled a conditional Ollama server launch via the LLM_PROVIDER environment variable (sketched below)
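
The conditional launch can be illustrated with a minimal sketch, assuming the Docker entrypoint reads the LLM_PROVIDER environment variable and starts `ollama serve` only when it is set to "ollama". The helper name and the fixed readiness wait are illustrative, not taken from this commit:

import os
import subprocess
import time

def maybe_start_ollama():
    """Start a local Ollama server only when LLM_PROVIDER selects it."""
    if os.environ.get("LLM_PROVIDER", "").lower() != "ollama":
        return None  # e.g. "openai" or "openrouter": nothing to launch locally
    proc = subprocess.Popen(["ollama", "serve"])  # serves on 127.0.0.1:11434 by default
    time.sleep(2)  # crude readiness wait; a real entrypoint would poll the port
    return proc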
Author: Geeta Chauhan
Date: 2025-06-25 20:57:05 -07:00
Committed by: GitHub
Parent: 7abff0f354
Commit: 78ea029a0b
23 changed files with 2141 additions and 19 deletions


@@ -151,6 +151,8 @@ def select_shallow_thinking_agent(provider) -> str:
         ],
         "ollama": [
             ("llama3.2 local", "llama3.2"),
+            ("qwen3 small local", "qwen3:0.6b"),
+            ("deepseek-r1 local", "deepseek-r1:1.5b"),
         ]
     }
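
Each entry pairs a human-readable menu label with the Ollama model tag sent to the server. A minimal consumer of this structure might look like the following sketch; the prompt loop is hypothetical, and the actual CLI may use a richer selection library:

SHALLOW_OLLAMA_OPTIONS = [
    ("llama3.2 local", "llama3.2"),
    ("qwen3 small local", "qwen3:0.6b"),
    ("deepseek-r1 local", "deepseek-r1:1.5b"),
]

def pick_model(options):
    # Print numbered labels, then return the tag for the chosen entry.
    for i, (label, _tag) in enumerate(options, 1):
        print(f"{i}. {label}")
    choice = int(input("Select a model: ")) - 1
    return options[choice][1]  # the model tag, e.g. "qwen3:0.6b"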
@@ -211,7 +213,9 @@ def select_deep_thinking_agent(provider) -> str:
             ("Deepseek - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek/deepseek-chat-v3-0324:free"),
         ],
         "ollama": [
-            ("qwen3", "qwen3"),
+            ("qwen3 local", "qwen3"),
+            ("qwen3 small local", "qwen3:0.6b"),
+            ("deepseek-r1 local", "deepseek-r1:1.5b"),
         ]
     }
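
These tags only resolve if the models are present in the local Ollama store. A hedged sketch using the official ollama Python client to pre-pull everything the menus reference; the model list simply mirrors the tags in the diff:

import ollama

LOCAL_MODELS = ["llama3.2", "qwen3", "qwen3:0.6b", "deepseek-r1:1.5b"]

for tag in LOCAL_MODELS:
    print(f"pulling {tag} ...")
    ollama.pull(tag)  # downloads missing layers; near no-op if already pulled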