# Docker Compose stack: an Ollama LLM server (GPU-reserved) and a
# pgvector-enabled Postgres 16 database.
services:
  ollama:
    image: 'ollama/ollama:latest'
    volumes:
      # persist downloaded models across container restarts
      - ollama_data:/root/.ollama
    ports:
      # quoted so "host:container" is never misread as a YAML number
      - '11434:11434'
    # mem_limit caps RAM at 4g; memswap_limit is RAM + swap combined,
    # so the container may additionally use up to 12g of swap
    mem_limit: 4g
    memswap_limit: 16g
    deploy:
      resources:
        reservations:
          devices:
            # reserve all NVIDIA GPUs; requires the NVIDIA Container
            # Toolkit on the host
            - driver: nvidia
              count: all
              capabilities: [gpu]
    # healthcheck:
    #   test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
    #   interval: 10s
    #   timeout: 5s
    #   retries: 3
    #   start_period: 20s

  # One-shot model-pull helper, currently disabled; re-enable together
  # with the healthcheck above (it gates on service_healthy).
  # ollama-pull:
  #   image: 'ollama/ollama:latest'
  #   depends_on:
  #     ollama:
  #       condition: service_healthy
  #   volumes:
  #     - ollama_data:/root/.ollama
  #   environment:
  #     - OLLAMA_HOST=http://ollama:11434
  #   command: ollama pull llama3.2:latest nomic-embed-text
  #   restart: "no"

  pgvector:
    image: 'pgvector/pgvector:pg16'
    # NOTE(review): plaintext credentials committed in compose — fine for
    # local dev only; use env files or a secret store elsewhere
    environment:
      - 'POSTGRES_DB=gcp_docs'
      - 'POSTGRES_PASSWORD=password'
      - 'POSTGRES_USER=admin'
    ports:
      # host 15432 -> container 5432; quoted for the same reason as above
      - '15432:5432'
    volumes:
      # schema.sql runs once on first initialization of an empty data dir
      - ./schema.sql:/docker-entrypoint-initdb.d/schema.sql
      # bind mount for data — note this shadows the named volume `pgdata`
      # declared below
      - ./pgdata:/var/lib/postgresql/data

volumes:
  # NOTE(review): `pgdata` is declared but unused — the pgvector service
  # bind-mounts ./pgdata instead. Confirm which persistence strategy is
  # intended; kept here to preserve existing behavior.
  pgdata:
  ollama_data: