elasticsearch/elasticsearch.nomad.hcl

job "[[ .instance ]]" {
[[ template "common/job_start" . ]]
group "server" {
[[ $c := merge .elasticsearch.server .elasticsearch . ]]
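    # Settings are merged left to right, with the leftmost map taking
    # precedence: server-specific values override the shared elasticsearch
    # settings, which in turn override the global defaults.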
[[ template "common/group_start" $c ]]
network {
mode = "bridge"
port "transport" {}
[[- if conv.ToBool $c.prometheus.enabled ]]
port "metrics" {}
[[- end ]]
}
[[ template "common/volumes" $c ]]
    # The main Elasticsearch service. It is only reachable through the service mesh.
    service {
      name = "[[ .instance ]][[ .consul.suffix ]]"
      port = 9200
      [[ template "common/service_meta" $c ]]
      [[ template "common/connect" $c ]]
      # Use a script check instead of an HTTP one so we can report ok for green,
      # warning for yellow and critical for any other state.
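      # Nomad passes the script's exit code to Consul: 0 is reported as passing,
      # 1 as warning and anything else as critical.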
      check {
        name = "health"
        type = "script"
        command = "sh"
        task = "server"
        args = [
          "-c",
          "set -e; STATUS=$(curl -s \"localhost:9200/_cluster/health?local=true\" | jq -r .status); if [ \"$STATUS\" = \"green\" ]; then exit 0; elif [ \"$STATUS\" = \"yellow\" ]; then exit 1; else exit 2; fi"
        ]
        interval = "[[ $c.consul.check.interval ]]"
        timeout = "[[ $c.consul.check.timeout ]]"
      }
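      # The es-<alloc index> tag gives every instance a stable per-node DNS name
      # in Consul (e.g. es-0.<service>.service.<domain>), which the transport
      # certificates below include in their SANs.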
      tags = [
        "es-${NOMAD_ALLOC_INDEX}",
        [[ template "common/traefik_tags" $c ]]
      ]
    }
    # The transport service is used by the different instances to find each other
    # and form the cluster.
    service {
      name = "[[ .instance ]]-transport[[ .consul.suffix ]]"
      port = "transport"
      meta {
        alloc = "${NOMAD_ALLOC_INDEX}"
      }
    }
    # The main Elasticsearch task
    task "server" {
      driver = "[[ $c.nomad.driver ]]"
      leader = true
      # Give ES some time to shut down cleanly
      kill_timeout = "120s"
      config {
        [[ template "common/image" $c ]]
        pids_limit = 1024
        volumes = [
          "secrets/entrypoint.env:/entrypoint.d/94-elasticsearch-users.env"
        ]
        [[ template "common/tmpfs" dict "size" "2000000" "target" "/tmp" ]]
      }
[[ template "common/vault.policies" $c ]]
env {
# Use /local/tmp as rootfs is read only
ES_TMPDIR = "/local/tmp"
TMPDIR = "/local/tmp"
ES_PATH_CONF = "/secrets"
}
[[ template "common/file_env" $c ]]
      # The main configuration file
      template {
        data = <<_EOT
[[ template "elasticsearch/elasticsearch.yml" $c ]]
_EOT
        destination = "secrets/elasticsearch.yml"
      }
      # This is the list of nodes. It's updated when an instance is restarted, but
      # Elasticsearch already monitors and reloads it automatically, so make it a
      # noop for Nomad.
      template {
        data = <<_EOT
[[ template "elasticsearch/unicast_hosts.txt" $c ]]
_EOT
        destination = "secrets/unicast_hosts.txt"
        change_mode = "noop"
      }
      # Custom roles
      template {
        data = <<_EOT
[[ template "elasticsearch/roles.yml" $c ]]
_EOT
        destination = "secrets/roles.yml"
      }
      # An entrypoint snippet to create users.
      # Note: created with a .env extension so it's sourced in the current shell,
      # as it cannot be executed from /secrets, which is mounted noexec.
      template {
        data = <<_EOT
[[ template "elasticsearch/entrypoint.env" $c ]]
_EOT
        destination = "secrets/entrypoint.env"
        uid = 100000
        gid = 109200
        perms = "0440"
      }
      # The certificate and private key for ES.
      # ES doesn't support PEM bundles with the cert and the key in the same file,
      # so we create two files.
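      # Note: the cert and key templates below should call Vault with identical
      # arguments, as consul-template deduplicates identical secret calls; both
      # files are then rendered from the same issued certificate and match.
      # The TTL is staggered per allocation (72h + 24h * alloc index) so the
      # nodes don't all renew their certificate at the same time.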
      template {
        data = <<_EOT
{{ with secret "[[ $c.vault.pki.path ]]/issue/server"
"common_name=[[ .instance ]][[ .consul.suffix ]].service.[[ .consul.domain ]]"
(printf "alt_names=es-%s.[[ .instance ]][[ .consul.suffix ]].service.[[ .consul.domain ]]" (env "NOMAD_ALLOC_INDEX"))
(printf "ip_sans=%s,127.0.0.1" (env "NOMAD_HOST_IP_transport"))
(printf "ttl=%dh" (env "NOMAD_ALLOC_INDEX" | parseInt | multiply 24 | add 72)) }}
{{ .Data.certificate }}
{{ end }}
_EOT
        destination = "secrets/es.crt"
        # ES monitors and reloads the cert every 5 seconds, so make it a noop for Nomad
        change_mode = "noop"
      }
      # The private key
      template {
        data = <<_EOT
{{ with secret "[[ $c.vault.pki.path ]]/issue/server"
"common_name=[[ .instance ]][[ .consul.suffix ]].service.[[ .consul.domain ]]"
(printf "alt_names=es-%s.[[ .instance ]][[ .consul.suffix ]].service.[[ .consul.domain ]]" (env "NOMAD_ALLOC_INDEX"))
(printf "ip_sans=%s,127.0.0.1" (env "NOMAD_HOST_IP_transport"))
(printf "ttl=%dh" (env "NOMAD_ALLOC_INDEX" | parseInt | multiply 24 | add 72)) }}
{{ .Data.private_key }}
{{ end }}
_EOT
        destination = "secrets/es.key"
        # ES monitors and reloads the key every 5 seconds, so make it a noop for Nomad
        change_mode = "noop"
        uid = 100000
        gid = 109200
        perms = "0440"
      }
      # The trusted CA to verify other nodes
      template {
        data = <<_EOT
{{ with secret "[[ $c.vault.pki.path ]]/cert/ca_chain" }}{{ .Data.ca_chain }}{{ end }}
_EOT
        destination = "secrets/ca.crt"
        change_mode = "noop"
      }
      # Data is stored in /data
      volume_mount {
        volume = "data"
        destination = "/data"
      }
      [[ template "common/resources" $c ]]
    }
    [[- if conv.ToBool $c.prometheus.enabled ]]
    [[- $e := merge .elasticsearch.exporter .elasticsearch . ]]
    # Prometheus exporter
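    # It runs as a poststart sidecar: started once the server task is running,
    # and kept alive for the whole lifetime of the allocation.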
task "exporter" {
driver = "[[ $e.nomad.driver ]]"
lifecycle {
hook = "poststart"
sidecar = true
}
config {
[[ template "common/image" $e ]]
pids_limit = 100
command = "elasticsearch_exporter"
args = [
"--web.listen-address=0.0.0.0:${NOMAD_ALLOC_PORT_metrics}",
"--web.config.file=/local/web_tls.yml"
]
}
[[ template "common/vault.policies" $e ]]
[[ template "common/file_env" $e ]]
[[ template "common/metrics_cert" $c ]]
      template {
        data = <<_EOT
tls_server_config:
  cert_file: /secrets/metrics.bundle.pem
  key_file: /secrets/metrics.bundle.pem
  client_auth_type: RequireAndVerifyClientCert
  client_ca_file: /local/monitoring.ca.pem
_EOT
        destination = "local/web_tls.yml"
      }
      [[ template "common/resources" $e ]]
    }
    [[- end ]]
  }
}