---

# Name of this instance of immich
instance: immich

immich:

  # Immich version
  version: 1.105.1

  postgres:
    database: '[[ .instance ]]'
    user: '{{ with secret "[[ .vault.root ]]database/creds/[[ .instance ]]" }}{{ .Data.username }}{{ end }}'
    password: '{{ with secret "[[ .vault.root ]]database/creds/[[ .instance ]]" }}{{ .Data.password }}{{ end }}'
    pooler:
      mode: session

  # API server settings
  server:

    # Docker image to use
    image: ghcr.io/immich-app/immich-server:v[[ .immich.version ]]

    # Additional env vars to set in the container
    env:
      NODE_OPTIONS: --max-old-space-size={{ env "NOMAD_MEMORY_LIMIT" }}

    vault:
      policies:
        - '[[ .instance ]][[ .consul.suffix ]]'

    # Resource allocation
    resources:
      cpu: 300
      memory: 320
      memory_max: 512

    # Wait for services to be ready before starting
    wait_for:
      - service: master.postgres[[ .consul.suffix ]]
      - service: '[[ .instance ]]-ml[[ .consul.suffix ]]'

    consul:
      connect:
        # Connect to some services through the mesh
        upstreams:
          - destination_name: '[[ .instance ]]-ml[[ .consul.suffix ]]'
            local_bind_port: 3003
            config:
              protocol: http
          - destination_name: postgres[[ .consul.suffix ]]
            local_bind_port: 5432
          - destination_name: '[[ .mail.smtp_service_name ]]'
            local_bind_port: 25

    # The URL where Immich will be exposed to users
    public_url: https://immich.example.org

    # Controls how Traefik will expose the service
    traefik:
      # Immich needs a specific CSP
      csp:
        connect-src: "'self' https://maputnik.github.io https://*.cofractal.com https://fonts.openmaptiles.org"
        img-src: "'self' data: blob:"
        worker-src: "'self' blob:"
      middlewares:
        rate-limit: false

    # Use distinct Traefik settings for /share. This can be used, for example, to
    # restrict the main app to trusted IPs while allowing /share from anywhere
    # (see the rendered example at the end of this file)
    share:
      traefik:
        rule: 'Host(`[[ (urlParse .immich.server.public_url).Hostname ]]`) && PathRegexp(`^[[ (urlParse .immich.server.public_url).Path ]]/(share/|_app/immutable/|custom\\.css|api/(asset|server-info)/.*)`)'
        router: share

    # Volumes used for data storage
    volumes:
      data:
        type: csi
        source: '[[ .instance ]]-data'
        access_mode: multi-node-multi-writer

  # The microservices do the bulk of media handling (thumbnails etc.)
  microservices:

    # Docker image to use
    image: ghcr.io/immich-app/immich-server:v[[ .immich.version ]]

    # Env vars to set in the container
    env:
      NODE_OPTIONS: --max-old-space-size={{ env "NOMAD_MEMORY_LIMIT" }}

    vault:
      policies:
        - '[[ .instance ]][[ .consul.suffix ]]'

    # Resource allocation
    resources:
      cpu: 500
      memory: 768
      memory_max: 1200

  # The machine learning component
  machine_learning:

    # Machine learning is optional, and can be disabled
    enabled: true

    # The Docker image to use
    image: ghcr.io/immich-app/immich-machine-learning:v[[ .immich.version ]]

    # Environment vars to set in the container
    env: {}

    # Resource allocation
    resources:
      cpu: 1024
      memory: 512
      memory_max: 1536

    volumes:
      # Volume used for the models cache
      ml:
        type: csi
        source: '[[ .instance ]]-ml'
        access_mode: multi-node-multi-writer

  # The Redis task uses a common template,
  # we just set custom resource allocations
  redis:
    resources:
      cpu: 20
      memory: 64
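
# Illustration only (an assumption about how the template renders, not a value used
# by the job): with the example public_url above (https://immich.example.org, which
# has an empty path), the share rule is expected to render to roughly:
#
#   Host(`immich.example.org`) && PathRegexp(`^/(share/|_app/immutable/|custom\.css|api/(asset|server-info)/.*)`)
#
# so only public share links, their static assets and the asset/server-info API
# endpoints match the `share` router, while everything else stays on the main router.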