# docker-compose.yml
version: "3"
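# Note: recent Docker Compose releases follow the Compose Specification and
# treat the top-level "version" key as informational only; it is kept here
# for compatibility with older Compose versions.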
services:
  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile
    volumes:
      - ./backend:/code
    environment:
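      # Each entry uses shell-style parameter expansion: ${VAR-default}
      # substitutes the value from the host environment (or an .env file next
      # to this compose file) and falls back to the default after the "-" when
      # the variable is unset. For example, NEO4J_URI below resolves to
      # neo4j://database:7687 unless NEO4J_URI is exported before "docker compose up".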
      - NEO4J_URI=${NEO4J_URI-neo4j://database:7687}
      - NEO4J_PASSWORD=${NEO4J_PASSWORD-password}
      - NEO4J_USERNAME=${NEO4J_USERNAME-neo4j}
      - OPENAI_API_KEY=${OPENAI_API_KEY-}
      - DIFFBOT_API_KEY=${DIFFBOT_API_KEY-}
      - EMBEDDING_MODEL=${EMBEDDING_MODEL-all-MiniLM-L6-v2}
      - LANGCHAIN_ENDPOINT=${LANGCHAIN_ENDPOINT-}
      - LANGCHAIN_TRACING_V2=${LANGCHAIN_TRACING_V2-}
      - LANGCHAIN_PROJECT=${LANGCHAIN_PROJECT-}
      - LANGCHAIN_API_KEY=${LANGCHAIN_API_KEY-}
      - KNN_MIN_SCORE=${KNN_MIN_SCORE-0.94}
      - IS_EMBEDDING=${IS_EMBEDDING-true}
      - GEMINI_ENABLED=${GEMINI_ENABLED-False}
      - GCP_LOG_METRICS_ENABLED=${GCP_LOG_METRICS_ENABLED-False}
      - UPDATE_GRAPH_CHUNKS_PROCESSED=${UPDATE_GRAPH_CHUNKS_PROCESSED-20}
      - NUMBER_OF_CHUNKS_TO_COMBINE=${NUMBER_OF_CHUNKS_TO_COMBINE-6}
      - ENTITY_EMBEDDING=${ENTITY_EMBEDDING-False}
      - GCS_FILE_CACHE=${GCS_FILE_CACHE-False}
      # - LLM_MODEL_CONFIG_anthropic_claude_35_sonnet=${LLM_MODEL_CONFIG_anthropic_claude_35_sonnet-}
      # - LLM_MODEL_CONFIG_fireworks_llama_v3_70b=${LLM_MODEL_CONFIG_fireworks_llama_v3_70b-}
      # - LLM_MODEL_CONFIG_azure_ai_gpt_4o=${LLM_MODEL_CONFIG_azure_ai_gpt_4o-}
      # - LLM_MODEL_CONFIG_azure_ai_gpt_35=${LLM_MODEL_CONFIG_azure_ai_gpt_35-}
      # - LLM_MODEL_CONFIG_groq_llama3_70b=${LLM_MODEL_CONFIG_groq_llama3_70b-}
      # - LLM_MODEL_CONFIG_bedrock_claude_3_5_sonnet=${LLM_MODEL_CONFIG_bedrock_claude_3_5_sonnet-}
      # - LLM_MODEL_CONFIG_fireworks_qwen_72b=${LLM_MODEL_CONFIG_fireworks_qwen_72b-}
      - LLM_MODEL_CONFIG_ollama_llama3=${LLM_MODEL_CONFIG_ollama_llama3-}
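      # The LLM_MODEL_CONFIG_* values are parsed by the backend. As an
      # illustrative assumption (the backend's example.env is the
      # authoritative reference for the exact format), an Ollama entry is
      # typically a comma-separated model name and base URL, e.g.:
      #   LLM_MODEL_CONFIG_ollama_llama3="llama3,http://host.docker.internal:11434"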
    # env_file:
    #   - ./backend/.env
    container_name: backend
    extra_hosts:
      - host.docker.internal:host-gateway
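    # host-gateway maps host.docker.internal to the Docker host's IP, so the
    # backend can reach services running on the host (such as a local Ollama
    # server) even on Linux, where this alias is not defined by default.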
    ports:
      - "8000:8000"
    networks:
      - net
  frontend:
    depends_on:
      - backend
    build:
      context: ./frontend
      dockerfile: Dockerfile
      args:
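      # These VITE_* values are passed as build arguments, not runtime
      # environment variables: Vite inlines them into the static bundle when
      # the image is built, so changing them requires rebuilding this image.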
        - VITE_BACKEND_API_URL=${VITE_BACKEND_API_URL-http://localhost:8000}
        - VITE_REACT_APP_SOURCES=${VITE_REACT_APP_SOURCES-local,wiki,s3}
        - VITE_GOOGLE_CLIENT_ID=${VITE_GOOGLE_CLIENT_ID-}
        - VITE_BLOOM_URL=${VITE_BLOOM_URL-https://workspace-preview.neo4j.io/workspace/explore?connectURL={CONNECT_URL}&search=Show+me+a+graph&featureGenAISuggestions=true&featureGenAISuggestionsInternal=true}
        - VITE_TIME_PER_PAGE=${VITE_TIME_PER_PAGE-50}
        - VITE_CHUNK_SIZE=${VITE_CHUNK_SIZE-5242880}
        - VITE_LARGE_FILE_SIZE=${VITE_LARGE_FILE_SIZE-5242880}
        - VITE_ENV=${VITE_ENV-DEV}
        - VITE_CHAT_MODES=${VITE_CHAT_MODES-}
        - VITE_BATCH_SIZE=${VITE_BATCH_SIZE-2}
        - VITE_LLM_MODELS=${VITE_LLM_MODELS-}
        - VITE_LLM_MODELS_PROD=${VITE_LLM_MODELS_PROD-openai_gpt_4o,openai_gpt_4o_mini,diffbot,gemini_1.5_flash}
        - DEPLOYMENT_ENV=local
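        # In VITE_BLOOM_URL above, {CONNECT_URL} is a literal placeholder in
        # the baked-in URL; presumably the frontend substitutes the active
        # Neo4j connection string at runtime before opening Bloom/Explore.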
    volumes:
      - ./frontend:/app
      - /app/node_modules
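    # The bare /app/node_modules entry is an anonymous volume: it shadows that
    # path inside the bind mount so dependencies installed in the image are
    # not hidden by the host's ./frontend directory.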
    env_file:
      - ./frontend/.env
    container_name: frontend
    ports:
      - "8080:8080"
    networks:
      - net
networks:
  net:
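# "net" uses the default bridge driver; containers attached to it can reach
# each other by service name (e.g. the frontend can resolve http://backend:8000).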