New bundle for an Elasticsearch cluster

This commit is contained in:
Daniel Berteaud 2024-01-25 14:52:55 +01:00
parent 76cf1770f4
commit cc3358883b
21 changed files with 741 additions and 0 deletions

4
bundles.yml Normal file
View File

@ -0,0 +1,4 @@
---
# Bundle dependencies : pull in the shared "common" bundle (job helpers,
# connect/volumes/resources templates, ...) referenced by this bundle
dependencies:
- url: ../common.git

View File

@ -0,0 +1,3 @@
# Consul service-defaults : declare the Elasticsearch API service as HTTP so
# L7 mesh features (path based intentions, request timeouts, ...) are available
Kind = "service-defaults"
Name = "[[ .instance ]][[ .consul.suffix ]]"
Protocol = "http"

View File

@ -0,0 +1,15 @@
# Consul service-intentions : allow the configured Traefik instance to reach
# any path of the Elasticsearch HTTP API through the service mesh
Kind = "service-intentions"
Name = "[[ .instance ]][[ .consul.suffix ]]"
Sources = [
{
# Traefik instance name, resolved from the merged bundle config
Name = "[[ (merge .elasticsearch.server .elasticsearch .).traefik.instance ]]"
Permissions = [
{
Action = "allow"
HTTP {
PathPrefix = "/"
}
}
]
},
]

View File

@ -0,0 +1,3 @@
# Consul service-resolver : raise the mesh request timeout, as some
# Elasticsearch operations (heavy searches, bulk indexing, ...) can be long running
Kind = "service-resolver"
Name = "[[ .instance ]][[ .consul.suffix ]]"
RequestTimeout = "60m"

213
elasticsearch.nomad.hcl Normal file
View File

@ -0,0 +1,213 @@
# Nomad job running the Elasticsearch cluster : a single "server" group with
# several allocations, each running one Elasticsearch node (plus an optional
# prometheus exporter sidecar). The API is only reachable through the Consul mesh.
job "[[ .instance ]]" {
[[ template "common/job_start" . ]]
group "server" {
# Merged config : server-specific settings win over generic elasticsearch ones
[[ $c := merge .elasticsearch.server .elasticsearch . ]]
count = [[ $c.count ]]
# Wait a few seconds between service deregistration from consul catalog and task kill
shutdown_delay = "6s"
network {
mode = "bridge"
# Dynamic host port for inter-node (transport) traffic
port "transport" {}
[[- if $c.prometheus.enabled ]]
port "metrics" {}
[[- end ]]
}
[[ template "common/volumes" $c ]]
# The main elasticsearch service. It'll only be available through the mesh
service {
name = "[[ .instance ]][[ .consul.suffix ]]"
port = 9200
[[ template "common/metrics_meta" $c ]]
[[ template "common/connect" $c ]]
# Use a script check instead of http so we can report ok for green, warning for yellow and critical for any other state
# NOTE(review): the unauthenticated curl relies on the anonymous "health" role
# configured in elasticsearch.yml ; also `==` in `[ ]` is a bashism — works with
# busybox sh but not strictly POSIX, verify against the image's sh
check {
name = "health"
type = "script"
command = "sh"
task = "server"
args = [
"-c",
"set -e; STATUS=$(curl localhost:9200/_cluster/health?local=true | jq -r .status); if [ \"$STATUS\" == \"green\" ]; then exit 0; elif [ \"$STATUS\" == \"yellow\" ]; then exit 1; else exit 2; fi"
]
interval = "30s"
timeout = "8s"
}
# TODO : add tags for Traefik if enabled
tags = [
"[[ .instance ]]-${NOMAD_ALLOC_INDEX}"
]
}
# The transport service is used for the different instances to find each other and form the cluster
service {
name = "[[ .instance ]]-transport[[ .consul.suffix ]]"
port = "transport"
meta {
# Publish the alloc index in service metadata
# (used by unicast_hosts.txt to exclude our own alloc from the seed list)
alloc = "${NOMAD_ALLOC_INDEX}"
}
}
# The main Elasticsearch task
task "server" {
driver = "[[ $c.nomad.driver ]]"
# Leader task : sidecar tasks are stopped when it exits
leader = true
# Give ES some time to shutdown
kill_timeout = "120s"
config {
image = "[[ $c.image ]]"
pids_limit = 1024
readonly_rootfs = true
volumes = [
# Entrypoint snippet creating the ES users (see the template below)
"secrets/entrypoint.env:/entrypoint.d/94-elasticsearch-users.env"
]
[[ template "common/tmpfs" dict "size" "2000000" "target" "/tmp" ]]
}
[[ template "common/vault.policies" $c ]]
env {
# Use /local/tmp as rootfs is read only
ES_TMPDIR = "/local/tmp"
TMPDIR = "/local/tmp"
# Config files are rendered into, and read from, the secrets dir
ES_PATH_CONF = "/secrets"
}
[[ template "common/file_env" $c ]]
# The main configuration file
template {
data =<<_EOT
[[ template "elasticsearch/elasticsearch.yml" $c ]]
_EOT
destination = "secrets/elasticsearch.yml"
}
# This is the list of nodes. It's updated when an instance is restarted, but elasticsearch
# already monitors and reloads it automatically, so make it a noop for Nomad
template {
data = <<_EOT
[[ template "elasticsearch/unicast_hosts.txt" $c ]]
_EOT
destination = "secrets/unicast_hosts.txt"
change_mode = "noop"
}
# Custom roles
template {
data = <<_EOT
[[ template "elasticsearch/roles.yml" $c ]]
_EOT
destination = "secrets/roles.yml"
}
# An entrypoint snippet to create users
# Note : created with .env extension so it's sourced in the current shell
# as it cannot be executed from /secrets which is mounted with noexec
template {
data = <<_EOT
[[ template "elasticsearch/entrypoint.env" $c ]]
_EOT
destination = "secrets/entrypoint.env"
uid = 100000
gid = 109200
perms = "0440"
}
# The certificate and private key for ES.
# ES doesn't support PEM bundles with both the cert and the key in the same file. So we create 2 files
# The TTL is staggered per alloc index (72h + 24h * index) so the nodes
# don't all renew their certificate at the same time
template {
data = <<_EOT
{{ with secret "[[ $c.vault.pki.path ]]/issue/server"
"common_name=[[ .instance ]].service.[[ .consul.domain ]]"
(printf "alt_names=es-%s.[[ .instance ]].service.[[ .consul.domain ]]" (env "NOMAD_ALLOC_INDEX"))
(printf "ip_sans=%s,127.0.0.1" (env "NOMAD_HOST_IP_transport"))
(printf "ttl=%dh" (env "NOMAD_ALLOC_INDEX" | parseInt | multiply 24 | add 72)) }}
{{ .Data.certificate }}
{{ end }}
_EOT
destination = "secrets/es.crt"
# ES monitors and reload cert every 5 sec, make it a noop on Nomad
change_mode = "noop"
}
# The private key
template {
data = <<_EOT
{{ with secret "[[ $c.vault.pki.path ]]/issue/server"
"common_name=[[ .instance ]].service.[[ .consul.domain ]]"
(printf "alt_names=es-%s.[[ .instance ]].service.[[ .consul.domain ]]" (env "NOMAD_ALLOC_INDEX"))
(printf "ip_sans=%s,127.0.0.1" (env "NOMAD_HOST_IP_transport"))
(printf "ttl=%dh" (env "NOMAD_ALLOC_INDEX" | parseInt | multiply 24 | add 72)) }}
{{ .Data.private_key }}
{{ end }}
_EOT
destination = "secrets/es.key"
# ES monitors and reload the key every 5 sec, make it a noop on Nomad
change_mode = "noop"
uid = 100000
gid = 109200
perms = "0440"
}
# The trusted CA to verify other nodes
template {
data =<<_EOT
{{ with secret "[[ $c.vault.pki.path ]]/cert/ca_chain" }}{{ .Data.ca_chain }}{{ end }}
_EOT
destination = "secrets/ca.crt"
change_mode = "noop"
}
# Data is stored in /data
volume_mount {
volume = "data"
destination = "/data"
}
[[ template "common/resources" $c ]]
}
[[- if $c.prometheus.enabled ]]
[[- $e := merge .elasticsearch.exporter .elasticsearch . ]]
# Prometheus exporter
task "exporter" {
driver = "[[ $e.nomad.driver ]]"
# Sidecar : started after the server task, restarted with it
lifecycle {
hook = "poststart"
sidecar = true
}
config {
image = "[[ $e.image ]]"
pids_limit = 100
readonly_rootfs = true
command = "elasticsearch_exporter"
args = [
# Only listen on localhost, metrics are exposed through the metrics proxy
"--web.listen-address=127.0.0.1:9114"
]
}
[[ template "common/vault.policies" $e ]]
[[ template "common/file_env" $e ]]
[[ template "common/resources" $e ]]
}
[[ template "common/task.metrics_proxy" $e ]]
[[- end ]]
}
}

View File

@ -0,0 +1,25 @@
# Build stage : download and verify the prometheus elasticsearch_exporter release
FROM [[ .docker.repo ]][[ .docker.base_images.alpine.image ]] AS builder
ARG ES_EXPORTER_VERSION=[[ .elasticsearch.exporter.version ]]
RUN set -eux &&\
    apk add --no-cache curl ca-certificates &&\
    cd /tmp &&\
    curl -sSLO https://github.com/prometheus-community/elasticsearch_exporter/releases/download/v${ES_EXPORTER_VERSION}/elasticsearch_exporter-${ES_EXPORTER_VERSION}.linux-amd64.tar.gz &&\
    curl -sSLO https://github.com/prometheus-community/elasticsearch_exporter/releases/download/v${ES_EXPORTER_VERSION}/sha256sums.txt &&\
    grep "elasticsearch_exporter-${ES_EXPORTER_VERSION}.linux-amd64.tar.gz" sha256sums.txt | sha256sum -c &&\
    tar xzf elasticsearch_exporter-${ES_EXPORTER_VERSION}.linux-amd64.tar.gz &&\
    mv elasticsearch_exporter-${ES_EXPORTER_VERSION}.linux-amd64/elasticsearch_exporter /

# Final stage : minimal alpine image with just the exporter binary
FROM [[ .docker.repo ]][[ .docker.base_images.alpine.image ]]
# MAINTAINER is deprecated, use the maintainer label instead
LABEL maintainer="[[ .docker.maintainer ]]"
RUN set -eux &&\
    apk --no-cache upgrade &&\
    apk add --no-cache ca-certificates
COPY --from=builder /elasticsearch_exporter /usr/local/bin/
EXPOSE 9114
USER 9114
CMD ["elasticsearch_exporter"]

View File

@ -0,0 +1,46 @@
# Build stage : download, verify and trim the Elasticsearch distribution
FROM [[ .docker.repo ]][[ .docker.base_images.alpine.image ]] AS builder
ARG ES_VERSION=[[ .elasticsearch.server.version ]]
RUN set -eux &&\
    apk add --no-cache ca-certificates curl tar &&\
    cd /tmp &&\
    curl -sSLO https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-${ES_VERSION}-linux-x86_64.tar.gz &&\
    curl -sSLO https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-${ES_VERSION}-linux-x86_64.tar.gz.sha512 &&\
    sha512sum -c elasticsearch-${ES_VERSION}-linux-x86_64.tar.gz.sha512 &&\
    tar xzf elasticsearch-${ES_VERSION}-linux-x86_64.tar.gz &&\
    mv elasticsearch-${ES_VERSION} /opt/elasticsearch &&\
    # Remove X-Pack ML as it's not used, and not working on Alpine \
    rm -rf /opt/elasticsearch/modules/x-pack-ml/ &&\
    # Remove the JDK, we have our own \
    rm -rf /opt/elasticsearch/jdk &&\
    sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' /opt/elasticsearch/bin/elasticsearch-env &&\
    mkdir -p /opt/elasticsearch/config/jvm.config.d &&\
    mkdir -p /opt/elasticsearch/.aws &&\
    touch /opt/elasticsearch/.aws/config &&\
    chown -R 9200:9200 /opt/elasticsearch/config &&\
    chown -R 9200:9200 /opt/elasticsearch/.aws

# Final stage : JRE base image with the trimmed ES distribution
FROM [[ .docker.repo ]][[ .docker.base_images.java17.image ]]
# MAINTAINER is deprecated, use the maintainer label instead
LABEL maintainer="[[ .docker.maintainer ]]"
ENV PATH=/opt/elasticsearch/bin:$PATH \
    ES_JAVA_HOME=/opt/java/openjdk \
    ES_TMPDIR=/tmp
COPY --from=builder /opt/elasticsearch/ /opt/elasticsearch/
# Use `upgrade` (not `update` : with --no-cache the refreshed index is discarded
# anyway), consistent with the exporter image, so security fixes are applied
RUN set -eux &&\
    apk --no-cache upgrade &&\
    apk --no-cache add openssl bash jq &&\
    addgroup -g 9200 elasticsearch &&\
    adduser --system --ingroup elasticsearch --disabled-password --uid 9200 elasticsearch --home /opt/elasticsearch --no-create-home --shell /sbin/nologin &&\
    mkdir /data &&\
    chown -R elasticsearch:elasticsearch /data
COPY root/ /
WORKDIR /opt/elasticsearch
USER elasticsearch
EXPOSE 9200 9300
CMD ["elasticsearch"]

View File

@ -0,0 +1,15 @@
#!/bin/sh
# Entrypoint snippet : when ES_PATH_CONF points somewhere other than the
# shipped config dir, seed it with any default config file not already present
# (files rendered by Nomad templates are kept as-is).
set -euo pipefail
[ -d "${ES_TMPDIR}" ] || mkdir -p "${ES_TMPDIR}"
# ${ES_PATH_CONF:-} keeps the test safe under `set -u` when the var is unset,
# and two [ ] tests joined with && replace the obsolescent `-a` operator
if [ -n "${ES_PATH_CONF:-}" ] && [ "${ES_PATH_CONF}" != "/opt/elasticsearch/config" ]; then
  [ -d "${ES_PATH_CONF}" ] || mkdir -p "${ES_PATH_CONF}"
  for FILE in /opt/elasticsearch/config/*; do
    BASE="$(basename "${FILE}")"
    if [ ! -e "${ES_PATH_CONF}/${BASE}" ]; then
      echo "Copy ${FILE} to ${ES_PATH_CONF}/${BASE}"
      # Quote every expansion so paths containing whitespace are handled
      cp -r "${FILE}" "${ES_PATH_CONF}/${BASE}"
    fi
  done
fi

View File

@ -0,0 +1,10 @@
#!/bin/sh
# Entrypoint snippet : build ES_JAVA_OPTS from the environment.
set -euo pipefail
[ -d "${ES_TMPDIR}" ] || mkdir -p "${ES_TMPDIR}"
# Pin both the initial AND max heap to JVM_XMX : the original only passed
# -Xms, leaving the max heap unset despite the variable's name, while
# Elasticsearch recommends Xms == Xmx
export ES_JAVA_OPTS="${JAVA_OPTS:-} -Xms${JVM_XMX} -Xmx${JVM_XMX} ${ES_JAVA_OPTS:-} -Des.cgroups.hierarchy.override=/"
echo "ES_JAVA_OPTS=${ES_JAVA_OPTS}"
# Elasticsearch uses ES_JAVA_OPTS, unset JAVA_OPTS to prevent warnings in logs
unset JAVA_OPTS

View File

@ -0,0 +1,3 @@
# Default config shipped in the image ; at runtime it is superseded by the
# templated config rendered into ES_PATH_CONF (/secrets)
cluster.name: "docker-cluster"
network.host: 0.0.0.0
# Data lives on the dedicated /data volume created in the Dockerfile
path.data: /data

View File

@ -0,0 +1,7 @@
## G1GC Configuration
14-19:-XX:+UseG1GC
## Log settings
-Xlog:disable
-Xlog:all=warning:stderr:utctime,level,tags
-Xlog:gc=debug:stderr:utctime

View File

@ -0,0 +1,159 @@
# Log4j2 config for a containerized Elasticsearch : every appender writes to
# the console so logs are collected by the task driver (no files on disk)
status = error
######## Server JSON ############################
# Main server log, JSON formatted
appender.rolling.type = Console
appender.rolling.name = rolling
appender.rolling.layout.type = ESJsonLayout
appender.rolling.layout.type_name = server
################################################
################################################
rootLogger.level = info
rootLogger.appenderRef.rolling.ref = rolling
######## Deprecation JSON #######################
# Deprecation warnings on their own console appender, rate limited
appender.deprecation_rolling.type = Console
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.layout.type = ESJsonLayout
appender.deprecation_rolling.layout.type_name = deprecation.elasticsearch
appender.deprecation_rolling.layout.esmessagefields=x-opaque-id,key,category,elasticsearch.elastic_product_origin
appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter
appender.header_warning.type = HeaderWarningAppender
appender.header_warning.name = header_warning
#################################################
#################################################
logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = WARN
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.appenderRef.header_warning.ref = header_warning
logger.deprecation.additivity = false
######## Search slowlog JSON ####################
# Slow search queries, JSON formatted on the console
appender.index_search_slowlog_rolling.type = Console
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.layout.type = ESJsonLayout
appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog
appender.index_search_slowlog_rolling.layout.esmessagefields=message,took,took_millis,total_hits,types,stats,search_type,total_shards,source,id
#################################################
#################################################
logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
logger.index_search_slowlog_rolling.additivity = false
######## Indexing slowlog JSON ##################
# Slow indexing operations, JSON formatted on the console
appender.index_indexing_slowlog_rolling.type = Console
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout
appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog
appender.index_indexing_slowlog_rolling.layout.esmessagefields=message,took,took_millis,doc_type,id,routing,source
#################################################
#################################################
logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
logger.index_indexing_slowlog.additivity = false
# Security audit trail : one JSON object per line built with a PatternLayout
# (the trailing backslashes are log4j2 property-file line continuations ;
# each %varsNotEmpty{...} emits its field only when the mapped value is set)
appender.audit_rolling.type = Console
appender.audit_rolling.name = audit_rolling
appender.audit_rolling.layout.type = PatternLayout
appender.audit_rolling.layout.pattern = {\
"type":"audit", \
"timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\
%varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
%varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
%varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
%varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\
%varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\
%varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\
%varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\
%varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\
%varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
%varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
%varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
%varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
%varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
%varsNotEmpty{, "user.roles":%map{user.roles}}\
%varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\
%varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\
%varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\
%varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\
%varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
%varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
%varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
%varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
%varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
%varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\
%varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\
%varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\
%varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\
%varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
%varsNotEmpty{, "indices":%map{indices}}\
%varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
%varsNotEmpty{, "trace.id":"%enc{%map{trace.id}}{JSON}"}\
%varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\
%varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
%varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
%varsNotEmpty{, "put":%map{put}}\
%varsNotEmpty{, "delete":%map{delete}}\
%varsNotEmpty{, "change":%map{change}}\
%varsNotEmpty{, "create":%map{create}}\
%varsNotEmpty{, "invalidate":%map{invalidate}}\
}%n
# "node.name" node name from the `elasticsearch.yml` settings
# "node.id" node id which should not change between cluster restarts
# "host.name" unresolved hostname of the local node
# "host.ip" the local bound ip (i.e. the ip listening for connections)
# "origin.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal)
# "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc.
# "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal"
# "user.name" the subject name as authenticated by a realm
# "user.run_by.name" the original authenticated subject name that is impersonating another one.
# "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as.
# "user.realm" the name of the realm that authenticated "user.name"
# "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name")
# "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from
# "user.roles" the roles array of the user; these are the roles that are granting privileges
# "apikey.id" this field is present if and only if the "authentication.type" is "api_key"
# "apikey.name" this field is present if and only if the "authentication.type" is "api_key"
# "authentication.token.name" this field is present if and only if the authenticating credential is a service account token
# "authentication.token.type" this field is present if and only if the authenticating credential is a service account token
# "event.type" informs about what internal system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change"
# "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node
# "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated
# "url.path" the URI component between the port and the query string; it is percent (URL) encoded
# "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded
# "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT
# "request.body" the content of the request body entity, JSON escaped
# "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request
# "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal)
# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal)
# "indices" the array of indices that the "action" is acting upon
# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header
# "trace_id" an identifier conveyed by the part of "traceparent" request header
# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array)
# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event
# "rule" name of the applied rule if the "origin.type" is "ip_filter"
# the "put", "delete", "change", "create", "invalidate" fields are only present
# when the "event.type" is "security_config_change" and contain the security config change (as an object) taking effect
# Route the security audit trail through the audit_rolling appender only
logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail
logger.xpack_security_audit_logfile.level = info
logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling
logger.xpack_security_audit_logfile.additivity = false
# Silence noisy XML signature / SAML decryption loggers
logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature
logger.xmlsig.level = error
logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter
logger.samlxml_decrypt.level = fatal
logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter
logger.saml2_decrypt.level = fatal

17
init/vault-pki Executable file
View File

@ -0,0 +1,17 @@
#!/bin/sh
# Init script : create the Vault PKI mount and configure the "server" role
# used by the Elasticsearch nodes to issue their node certificates
set -e
[[- $c := merge .elasticsearch.server .elasticsearch .]]
[[ template "common/vault.mkpki.sh.tpl" $c ]]
# NOTE(review): allowed_domains includes the consul suffix, while the job
# issues certs for the bare <instance>.service.<domain> name — this only
# matches when the suffix is empty ; verify when a non-empty suffix is used
vault write [[ $c.vault.pki.path ]]/roles/server \
allowed_domains="[[ .instance ]][[ .consul.suffix ]].service.[[ .consul.domain ]]" \
allow_bare_domains=true \
allow_subdomains=true \
allow_localhost=false \
allow_ip_sans=true \
server_flag=true \
client_flag=true \
allow_wildcard_certificates=false \
max_ttl=720h

1
prep.d/01-mv-conf.sh Executable file
View File

@ -0,0 +1 @@
[[ template "common/mv_conf.sh" dict "ctx" . "services" (dict "elasticsearch" .instance) ]]

10
prep.d/10-rand-secrets.sh Executable file
View File

@ -0,0 +1,10 @@
#!/bin/sh
# Prep script : make sure the random secrets this job needs exist in Vault.
set -euo pipefail
# Bootstrap password for the elastic user + password for the metrics exporter
[[ template "common/vault.rand_secrets" dict "ctx" . "keys" (coll.Slice "es_bootstrap_pwd" "exporter_pwd") ]]
# For each declared user without an explicit password, generate one in Vault
# (dashes in user names are mapped to underscores for the KV key name)
[[- range $k, $v := .elasticsearch.server.users ]]
[[- if not (has $v "password") ]]
[[ template "common/vault.rand_secrets" dict "ctx" $ "keys" (coll.Slice (printf "%s_pwd" ($k | regexp.Replace "\\-" "_"))) ]]
[[- end ]]
[[- end ]]

View File

@ -0,0 +1,54 @@
# Elasticsearch node configuration, rendered first by the bundle templating,
# then by consul-template inside the Nomad task
path:
data: /data
logs: /alloc/logs
cluster:
name: [[ .instance ]]
# Bootstrap the cluster with (up to) the first 3 allocations as initial masters
initial_master_nodes:
- [[ .instance ]]-0
[[- if gt .elasticsearch.server.count 1 ]]
- [[ .instance ]]-1
[[- end ]]
[[- if gt .elasticsearch.server.count 2 ]]
- [[ .instance ]]-2
[[- end ]]
node:
# Node name derives from the alloc index, matching initial_master_nodes above
name: [[ .instance ]]-{{ env "NOMAD_ALLOC_INDEX" }}
roles:
- master
- data
- ingest
- transform
http:
# The API only listens on localhost : it's reached through the mesh sidecar
port: 9200
host: 127.0.0.1
transport:
# Inter-node traffic binds the dynamic Nomad port and publishes the host
# IP/port so peers can reach it across nodes
port: {{ env "NOMAD_ALLOC_PORT_transport" }}
host: 0.0.0.0
publish_port: {{ env "NOMAD_HOST_PORT_transport" }}
publish_host: {{ env "NOMAD_HOST_IP_transport" }}
discovery:
# Seed hosts are read from the unicast_hosts.txt file (file-based discovery)
seed_providers: file
xpack:
watcher:
enabled: false
security:
enabled: true
authc:
# Unauthenticated requests get the restricted "health" role (defined in
# roles.yml) so the Nomad script check can query the cluster health
anonymous:
username: anonymous
roles: health
transport:
# mTLS between the nodes, with certificates issued by the Vault PKI
ssl:
enabled: True
verification_mode: full
client_authentication: required
key: /secrets/es.key
certificate: /secrets/es.crt
certificate_authorities: /secrets/ca.crt

24
templates/entrypoint.env Normal file
View File

@ -0,0 +1,24 @@
#!/bin/sh
# vim: syntax=sh
# Entrypoint snippet (sourced, not executed) : seed the elastic bootstrap
# password in the keystore, then create the exporter user and the users
# declared in the bundle variables with the file-based realm
set -euo pipefail
# ES is using ES_JAVA_HOME, and having JAVA_HOME set spits warnings in logs
unset JAVA_HOME
[[- $c := merge .elasticsearch.server .elasticsearch . ]]
echo "Adding elastic bootstrap password in the keystore"
[ -f config/elasticsearch.keystore ] || elasticsearch-keystore create
echo '{{ with secret "[[ $c.vault.kv.path ]]" }}{{ .Data.data.es_bootstrap_pwd }}{{ end }}' | elasticsearch-keystore add -x 'bootstrap.password'
{{ with secret "[[ $c.vault.kv.path ]]" -}}
echo "Creating exporter user"
(elasticsearch-users list | grep -qE '^exporter$') || elasticsearch-users useradd exporter -r monitor -p '{{ .Data.data.exporter_pwd }}'
{{- end }}
[[- range $k, $v := $c.users ]]
echo "Creating user [[ $k ]]"
# Password precedence : explicit value from the variables, otherwise fetched
# from the Vault KV (dashes in the user name mapped to underscores)
(elasticsearch-users list | grep -qE '^[[ $k ]]$') || elasticsearch-users useradd [[ $k ]] -p '[[ if has $v "password" ]][[ $v.password ]][[ else ]]{{ with secret "[[ $c.vault.kv.path ]]" }}{{ .Data.data.[[ $k | regexp.Replace "\\-" "_" ]]_pwd }}{{ end }}[[ end ]]' [[- if gt (len $v.roles) 0 ]] -r [[ join $v.roles "," ]][[ end ]]
[[- end ]]

17
templates/roles.yml Normal file
View File

@ -0,0 +1,17 @@
# Built-in roles shipped with the bundle
# monitor : read-only monitoring on the cluster and every index (used by the
# prometheus exporter user)
monitor:
cluster:
- monitor
indices:
- names:
- '*'
privileges:
- monitor
# health : only the cluster health API — granted to anonymous requests so the
# Nomad script check can run unauthenticated
health:
cluster:
- 'cluster:monitor/health'
# Custom roles declared in the bundle variables, rendered as-is
[[ range $k, $v := .elasticsearch.server.roles -]]
[[ $k ]]:
[[ $v | toYAML | indent 2 ]]
[[- end ]]

View File

@ -0,0 +1,5 @@
{{- /* Seed hosts for file-based discovery : list every sibling transport
endpoint from the Consul catalog, skipping our own allocation (matched
on the "alloc" service meta published by the transport service). */ -}}
{{ range $index, $instance := service "[[ .instance ]]-transport[[ .consul.suffix ]]" }}
{{- if not (eq (env "NOMAD_ALLOC_INDEX") (index $instance.ServiceMeta "alloc")) }}
{{ .Address }}:{{ .Port }}
{{- end }}
{{- end }}

104
variables.yml Normal file
View File

@ -0,0 +1,104 @@
---
# Name of this instance
instance: elasticsearch
# ES settings
elasticsearch:
# Vault policies (for the server and the exporter)
vault:
policies:
- '[[ .instance ]][[ .consul.suffix ]]'
# ES server settings
server:
# ES version
version: 8.12.0
# Docker image to use
image: '[[ .docker.repo ]]elasticsearch:[[ .elasticsearch.server.version ]]-2'
# Number of instances
count: 3
# Env var to set in the container
env: {}
# Vault PKI (mTLS between the different nodes)
vault:
pki:
path: '[[ .vault.prefix ]]pki/[[ .instance ]]'
ou: Elasticsearch Cluster
issuer: '[[ .vault.prefix ]]pki/root'
# Resource allocation
resources:
cpu: 300
memory: 2048
# Should Traefik expose Elasticsearch API ?
traefik:
enabled: false
# List of roles and users to create. Eg
# roles:
# my_role:
# order-processor:
# indices:
# - names:
# - order
# - devices
# privileges:
# - all
# # For users, the password attr is optional. If set, it can be a raw password (not recommended of course)
# # a consul-template snippet (which can fetch secrets from vault), or omitted. If omitted, the default is to fetch it from vault
# # at kv/service/elasticsearch with the key <user with - replaced with _>_pwd
# users:
# order-dc1:
# roles:
# - order-processor
# password: s3cr3t
# order-dc2:
# roles:
# - order-processor
# password: '{{ with secret "kv/service/order" }}{{ .Data.data.es_password }}{{ end }}'
# # No password specified : it'll be fetched from vault at kv/service/elasticsearch under the key order_dc3_pwd
# order-dc3:
# roles:
# - order-processor
#
roles: {}
users: {}
# Volumes for data persistence
volumes:
data:
type: csi
source: '[[ .instance ]]-data'
per_alloc: true
# Prometheus exporter settings
exporter:
# Version of the exporter
version: 1.7.0
# Docker image to use
image: '[[ .docker.repo ]]elasticsearch-exporter:[[ .elasticsearch.exporter.version ]]-2'
# Environment var to set in the container
env:
ES_USERNAME: exporter
ES_PASSWORD: '{{ with secret "[[ .vault.kv.path ]]" }}{{ .Data.data.exporter_pwd }}{{ end }}'
# Resource allocation
resources:
cpu: 50
memory: 64
# Prometheus settings
prometheus:
# Metrics URL, as seen from inside the alloc
metrics_url: http://127.0.0.1:9114/metrics

View File

@ -0,0 +1,6 @@
[[- $c := merge .elasticsearch.server .elasticsearch . ]]
# KV access for the job's secrets (shared helper)
[[ template "common/vault.kv_policy" $c ]]
# Allow the server task to issue its own TLS certificates from the PKI
path "[[ $c.vault.pki.path ]]/issue/server" {
capabilities = ["update"]
}