elasticsearch/example/elasticsearch.nomad.hcl

job "elasticsearch" {
datacenters = ["dc1"]
region = "global"
group "server" {
count = 3
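
    # Make sure each of the 3 allocations runs on a distinct client node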
    constraint {
      operator = "distinct_hosts"
      value    = "true"
    }

    # Wait a few seconds between service deregistration from the Consul catalog and the task being killed
    shutdown_delay = "6s"

    network {
      mode = "bridge"
      port "transport" {}
    }

    volume "data" {
      source          = "elasticsearch-data"
      type            = "csi"
      access_mode     = "single-node-writer"
      attachment_mode = "file-system"
      per_alloc       = true
    }
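
    # Note: with per_alloc = true, Nomad appends the allocation index to the volume
    # source, so 3 CSI volumes named elasticsearch-data[0] to elasticsearch-data[2]
    # must be registered before the job runs. A hypothetical creation sketch (the
    # plugin_id and any parameters depend on your CSI plugin):
    #
    #   nomad volume create - <<EOF
    #   id        = "elasticsearch-data[0]"
    #   name      = "elasticsearch-data[0]"
    #   type      = "csi"
    #   plugin_id = "my-csi-plugin"
    #   capability {
    #     access_mode     = "single-node-writer"
    #     attachment_mode = "file-system"
    #   }
    #   EOF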

    # The main Elasticsearch service. It'll only be available through the service mesh
    service {
      name = "elasticsearch"
      port = 9200

      meta {
        addr       = "es-${NOMAD_ALLOC_INDEX}.elasticsearch.service.consul"
        alloc      = "${NOMAD_ALLOC_INDEX}"
        datacenter = "${NOMAD_DC}"
        group      = "${NOMAD_GROUP_NAME}"
        job        = "${NOMAD_JOB_NAME}"
        namespace  = "${NOMAD_NAMESPACE}"
        node       = "${node.unique.name}"
        region     = "${NOMAD_REGION}"
      }

      connect {
        sidecar_service {}

        sidecar_task {
          config {
            args = [
              "-c",
              "${NOMAD_SECRETS_DIR}/envoy_bootstrap.json",
              "-l",
              "${meta.connect.log_level}",
              "--concurrency",
              "${meta.connect.proxy_concurrency}",
              "--disable-hot-restart"
            ]
          }

          resources {
            cpu    = 50
            memory = 64
          }
        }
      }
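
      # Note: ${meta.connect.log_level} and ${meta.connect.proxy_concurrency} are not
      # Nomad built-ins but node meta values, which must be defined in the client
      # config of every node, e.g. (a minimal sketch):
      #
      #   client {
      #     meta {
      #       "connect.log_level"         = "info"
      #       "connect.proxy_concurrency" = "1"
      #     }
      #   }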

      # Use a script check instead of http so we can report ok for green, warning
      # for yellow and critical for any other state
      check {
        name    = "health"
        type    = "script"
        command = "sh"
        task    = "server"
        args = [
          "-c",
          "set -e; STATUS=$(curl -s localhost:9200/_cluster/health?local=true | jq -r .status); if [ \"$STATUS\" = \"green\" ]; then exit 0; elif [ \"$STATUS\" = \"yellow\" ]; then exit 1; else exit 2; fi"
        ]
        interval = "30s"
        timeout  = "8s"
      }
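
      # Consul maps script check exit codes to health states: 0 is passing, 1 is
      # warning, anything else is critical. So green reports ok, yellow a warning,
      # and red (or an unreachable API) critical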

      tags = [
        "es-${NOMAD_ALLOC_INDEX}",
      ]
    }

    # The transport service is used by the different instances to find each other and form the cluster
    service {
      name = "elasticsearch-transport"
      port = "transport"

      meta {
        alloc = "${NOMAD_ALLOC_INDEX}"
      }
    }

    # The main Elasticsearch task
    task "server" {
      driver = "docker"
      leader = true

      # Give ES some time to shut down
      kill_timeout = "120s"

      config {
        image           = "danielberteaud/elasticsearch:8.13.2-1"
        pids_limit      = 1024
        readonly_rootfs = true

        volumes = [
          "secrets/entrypoint.env:/entrypoint.d/94-elasticsearch-users.env"
        ]

        mount {
          type   = "tmpfs"
          target = "/tmp"
          tmpfs_options {
            size = 2000000
          }
        }
      }

      vault {
        policies     = ["elasticsearch"]
        env          = false
        disable_file = true
        change_mode  = "noop"
      }
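
      # A sketch of what the elasticsearch Vault policy must at least grant for the
      # templates below (assuming a KV v2 mount at kv/):
      #
      #   path "kv/data/service/elasticsearch" {
      #     capabilities = ["read"]
      #   }
      #   path "pki/elasticsearch/issue/server" {
      #     capabilities = ["update"]
      #   }
      #   path "pki/elasticsearch/cert/ca_chain" {
      #     capabilities = ["read"]
      #   }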

      env {
        # Use /local/tmp as the rootfs is read-only
        ES_TMPDIR    = "/local/tmp"
        TMPDIR       = "/local/tmp"
        ES_PATH_CONF = "/secrets"
      }

      # Use a template block instead of env {} so we can fetch values from Vault
      template {
        data        = <<_EOT
JVM_XMX_RATIO=0.4
LANG=fr_FR.utf8
TZ=Europe/Paris
_EOT
        destination = "secrets/.env"
        perms       = "400"
        env         = true
      }
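
      # JVM_XMX_RATIO is not an Elasticsearch setting: presumably the image entrypoint
      # uses it to derive the JVM heap size from the allocation memory limit
      # (0.4 * 2048MB ≈ 819MB here)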

      # The main configuration file
      template {
        data        = <<_EOT
path:
  data: /data
  logs: /alloc/logs
cluster:
  name: elasticsearch
  initial_master_nodes:
    - elasticsearch-0
    - elasticsearch-1
    - elasticsearch-2
node:
  name: elasticsearch-{{ env "NOMAD_ALLOC_INDEX" }}
  roles:
    - master
    - data
    - ingest
    - transform
http:
  port: 9200
  host: 127.0.0.1
transport:
  port: {{ env "NOMAD_ALLOC_PORT_transport" }}
  host: 0.0.0.0
  publish_port: {{ env "NOMAD_HOST_PORT_transport" }}
  publish_host: {{ env "NOMAD_HOST_IP_transport" }}
discovery:
  seed_providers: file
xpack:
  watcher:
    enabled: false
  security:
    enabled: true
    authc:
      anonymous:
        username: anonymous
        roles: health
    transport:
      ssl:
        enabled: true
        verification_mode: full
        client_authentication: required
        key: /secrets/es.key
        certificate: /secrets/es.crt
        certificate_authorities: /secrets/ca.crt
_EOT
        destination = "secrets/elasticsearch.yml"
      }
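
      # As http.host binds 127.0.0.1 inside the bridge network namespace, the REST API
      # is only reachable through the Connect sidecar. The transport protocol binds
      # 0.0.0.0 and advertises the dynamic host port mapped by Nomad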

      # This is the list of cluster nodes. It's updated when an instance is restarted,
      # but Elasticsearch already monitors and reloads it automatically, so make it a
      # noop for Nomad
      template {
        data        = <<_EOT
{{ range $index, $instance := service "elasticsearch-transport" }}
{{- if not (eq (env "NOMAD_ALLOC_INDEX") (index $instance.ServiceMeta "alloc")) }}
{{ .Address }}:{{ .Port }}
{{- end }}
{{- end }}
_EOT
        destination = "secrets/unicast_hosts.txt"
        change_mode = "noop"
      }
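
      # With discovery.seed_providers set to file, ES watches unicast_hosts.txt under
      # ES_PATH_CONF (/secrets here) and reloads it on change. The template lists every
      # elasticsearch-transport instance except the local one, which is filtered out by
      # comparing the alloc service meta with NOMAD_ALLOC_INDEX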

      # Custom roles
      template {
        data        = <<_EOT
monitor:
  cluster:
    - monitor
  indices:
    - names:
        - '*'
      privileges:
        - monitor
health:
  cluster:
    - 'cluster:monitor/health'
healthdata-services:
  indices:
    - names:
        - eht-scandmweb-surgery
      privileges:
        - all
stock-services:
  indices:
    - names:
        - eht-scandmweb-purchase-requests
        - eht-scandmweb-inventory
        - eht-scandmweb-stock
      privileges:
        - all
_EOT
        destination = "secrets/roles.yml"
      }
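
      # These are file-based roles, read from roles.yml under ES_PATH_CONF. The
      # entrypoint snippet below assigns users to them with the -r flag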

      # An entrypoint snippet to create users
      # Note: created with a .env extension so it's sourced in the current shell,
      # as it cannot be executed from /secrets, which is mounted with noexec
      template {
        data        = <<_EOT
#!/bin/sh
# vim: syntax=sh
set -euo pipefail
# ES uses ES_JAVA_HOME, and having JAVA_HOME set as well spits warnings in the logs
unset JAVA_HOME
echo "Adding elastic bootstrap password in the keystore"
[ -f config/elasticsearch.keystore ] || elasticsearch-keystore create
echo '{{ with secret "kv/service/elasticsearch" }}{{ .Data.data.es_bootstrap_pwd }}{{ end }}' | elasticsearch-keystore add -x 'bootstrap.password'
{{ with secret "kv/service/elasticsearch" -}}
echo "Creating exporter user"
(elasticsearch-users list | grep -qE '^exporter$') || elasticsearch-users useradd exporter -r monitor -p '{{ .Data.data.exporter_pwd }}'
{{- end }}
echo "Creating user healthdata-services"
(elasticsearch-users list | grep -qE '^healthdata-services$') || elasticsearch-users useradd healthdata-services -p '{{ with secret "kv/service/elasticsearch" }}{{ .Data.data.healthdata_services_pwd }}{{ end }}' -r healthdata-services
echo "Creating user stock-services"
(elasticsearch-users list | grep -qE '^stock-services$') || elasticsearch-users useradd stock-services -p '{{ with secret "kv/service/elasticsearch" }}{{ .Data.data.stock_services_pwd }}{{ end }}' -r stock-services
_EOT
        destination = "secrets/entrypoint.env"
        uid         = 100000
        gid         = 109200
        perms       = "0440"
      }

      # The certificate and private key for ES.
      # ES doesn't support PEM bundles with both the cert and the key in the same
      # file, so we create 2 separate files
      template {
        data        = <<_EOT
{{ with secret "pki/elasticsearch/issue/server"
  "common_name=elasticsearch.service.consul"
  (printf "alt_names=es-%s.elasticsearch.service.consul" (env "NOMAD_ALLOC_INDEX"))
  (printf "ip_sans=%s,127.0.0.1" (env "NOMAD_HOST_IP_transport"))
  (printf "ttl=%dh" (env "NOMAD_ALLOC_INDEX" | parseInt | multiply 24 | add 72)) }}
{{ .Data.certificate }}
{{ end }}
_EOT
        destination = "secrets/es.crt"
        # ES monitors and reloads the cert every 5 sec, make it a noop on Nomad
        change_mode = "noop"
      }
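
      # The TTL (24h x alloc index + 72h, so 72h, 96h and 120h) staggers certificate
      # lifetimes across the allocations, avoiding all 3 certs being renewed at once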

      # The private key
      template {
        data        = <<_EOT
{{ with secret "pki/elasticsearch/issue/server"
  "common_name=elasticsearch.service.consul"
  (printf "alt_names=es-%s.elasticsearch.service.consul" (env "NOMAD_ALLOC_INDEX"))
  (printf "ip_sans=%s,127.0.0.1" (env "NOMAD_HOST_IP_transport"))
  (printf "ttl=%dh" (env "NOMAD_ALLOC_INDEX" | parseInt | multiply 24 | add 72)) }}
{{ .Data.private_key }}
{{ end }}
_EOT
        destination = "secrets/es.key"
        # ES monitors and reloads the key every 5 sec, make it a noop on Nomad
        change_mode = "noop"
        uid         = 100000
        gid         = 109200
        perms       = "0440"
      }

      # The trusted CA to verify the other nodes
      template {
        data        = <<_EOT
{{ with secret "pki/elasticsearch/cert/ca_chain" }}{{ .Data.ca_chain }}{{ end }}
_EOT
        destination = "secrets/ca.crt"
        change_mode = "noop"
      }

      # Data is stored in /data
      volume_mount {
        volume      = "data"
        destination = "/data"
      }

      resources {
        cpu    = 300
        memory = 2048
      }
    }
  }
}