# Docker Compose stack: nginx reverse proxy, Elasticsearch, Ollama + web UI,
# and a Python backend, all on a shared bridge network.
# NOTE(review): the top-level `version` key is obsolete in Compose V2 (ignored
# with a warning); kept for backward compatibility with older docker-compose.
version: "3.9"

networks:
  llm_network:
    driver: bridge

services:
  # Reverse proxy fronting the stack; unprivileged image listens on 8080.
  nginx:
    container_name: ${APP_PREFIX}_reverseproxy
    image: nginxinc/nginx-unprivileged  #:1.25-alpine
    restart: always
    ports:
      - "81:8080"
    volumes:
      - type: bind
        source: ./nginx.conf
        target: /etc/nginx/nginx.conf
    networks:
      - llm_network

  #minio:
  #  container_name: ${APP_PREFIX}_minio
  #  #image: docker.io/bitnami/minio #:2022
  #  image: minio/minio
  #  ports:
  #    - '9000:9000'
  #    - '9001:9001'
  #  networks:
  #    - llm_network
  #  volumes:
  #    - 'minio_data:/data'
  #  environment:
  #    - MINIO_ROOT_USER=root
  #    - MINIO_ROOT_PASSWORD=rootrootroot
  #    - MINIO_DEFAULT_BUCKETS=defaultbucket
  #  command: server --console-address ":9001" /data

  # Single-node Elasticsearch with security disabled (dev setup).
  elasticsearch:
    container_name: ${APP_PREFIX}_elasticsearch
    image: docker.elastic.co/elasticsearch/elasticsearch:8.11.0
    #image: opensearchproject/opensearch
    restart: always
    # NOTE(review): 4024m is an unusual limit — was 4096m intended? confirm
    mem_limit: 4024m
    ports:
      - "9200:9200"
    environment:
      - discovery.type=single-node
      - logger.level=ERROR
      - bootstrap.memory_lock=true  # Disable JVM heap memory swapping
      - xpack.security.enabled=false
      #- OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_INITIAL_ADMIN_PASSWORD} # Sets the demo admin user password when using demo configuration, required for OpenSearch 2.12 and later
      #- "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" # Set min and max JVM heap sizes to at least 50% of system RAM
      #- ES_JAVA_OPTS="-Xms2g -Xmx2g"
    volumes:
      - esdata:/usr/share/elasticsearch/data
    ulimits:
      memlock:
        soft: -1  # Set memlock to unlimited (no soft or hard limit)
        hard: -1
      nofile:
        soft: 65536  # Maximum number of open files for the opensearch user - set to at least 65536
        hard: 65536
    networks:
      - llm_network

  # Ollama LLM runtime; model store persisted on the host.
  ollama:
    container_name: ${APP_PREFIX}_ollama
    image: ollama/ollama:latest
    restart: always
    ports:
      - "11434:11434"
    volumes:
      - ..:/code
      - ../ollama/ollama:/root/.ollama
    networks:
      - llm_network
    #command: "ollama pull llama2"

  # Web UI for Ollama, proxied to the ollama service's API.
  ollama-webui:
    image: ghcr.io/ollama-webui/ollama-webui:main
    # NOTE(review): name breaks the ${APP_PREFIX}_ convention used by the other
    # services — rename to ${APP_PREFIX}_ollama-webui if nothing depends on it.
    container_name: ollama-webui
    volumes:
      - ../ollama/ollama-webui:/app/backend/data
    depends_on:
      - ollama
    ports:
      - "8888:8080"
    environment:
      # FIX: original read `/ollama/api=http://ollama:11434/api`, which is not
      # a valid environment-variable name and was ignored by the application;
      # ollama-webui documents OLLAMA_API_BASE_URL for this setting.
      - OLLAMA_API_BASE_URL=http://ollama:11434/api
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
    networks:
      - llm_network

  #frontend:
  #  container_name: ${APP_PREFIX}_frontend
  #  image: ${APP_PREFIX}-vue-frontend
  #  restart: always
  #  ports:
  #    - "3000:3000"
  #  build: ./frontend

  # Python API backend; waits for elasticsearch and ollama to start.
  backend:
    container_name: ${APP_PREFIX}_backend
    image: registry.tobiasweise.dev/${APP_PREFIX}-python-backend
    restart: always
    ports:
      - "5000:5000"
    env_file:
      - ../backend/backend.env
    build: ../backend
    networks:
      - llm_network
    depends_on:
      - elasticsearch
      - ollama
      #- minio

volumes:
  esdata:
    driver: local
  # NOTE(review): `filedata` is declared but unused by any service above, and
  # `minio_data` is only referenced by the commented-out minio service.
  filedata:
  minio_data:
    driver: local