Files
localgenai/pyinfra/framework/compose/openwebui.yml
noisedestroyers a29793032d Document current coding-workflow stack state
Snapshot of where opencode + Qwen3-Coder + MCPs + Kimi-Linear + voice
  + Phoenix tracing land today, plus in-flight (oc-tree, kimi-linear
  context ramp) and next (ComfyUI) items with pointers to per-project
  NEXT_STEPS.md guides.
2026-05-10 21:14:43 -04:00

31 lines
1.3 KiB
YAML

# OpenWebUI — ChatGPT-like web UI in front of Ollama. Pre-configured to
# use the host's Ollama instance and the project's SearXNG for web
# search. Default port 3000.
#
# Persistent state (users, conversations, uploaded docs, RAG vector
# index) lives at /srv/docker/openwebui/data so backups touch one path.
services:
  openwebui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: openwebui
    restart: unless-stopped
    ports:
      # Host 3000 → container 8080 (quoted to dodge YAML sexagesimal ints).
      - "3000:8080"
    extra_hosts:
      # Lets the container reach Ollama on the host's :11434 without
      # needing to share Docker networks.
      - "host.docker.internal:host-gateway"
    environment:
      - OLLAMA_BASE_URL=http://host.docker.internal:11434
      # vLLM (Kimi-Linear) exposed as an OpenAI-compatible backend. The
      # model isn't strongly tool-trained — opencode's agentic system
      # prompt confuses it. OpenWebUI's plain chat UI is the right home.
      - OPENAI_API_BASE_URLS=http://host.docker.internal:8000/v1
      - OPENAI_API_KEYS=dummy
      # Built-in web search via the project's SearXNG instance.
      # NOTE(review): presumably the SearXNG instance has the `json`
      # output format enabled — OpenWebUI's search integration needs it;
      # verify in SearXNG's settings.yml (search.formats).
      - ENABLE_RAG_WEB_SEARCH=true
      - RAG_WEB_SEARCH_ENGINE=searxng
      # <query> is a literal placeholder substituted by OpenWebUI.
      - SEARXNG_QUERY_URL=https://searxng.n0n.io/search?q=<query>&format=json
    volumes:
      # Single bind mount for all persistent state (see header comment).
      - /srv/docker/openwebui/data:/app/backend/data