Rename all functions, cleanup on exit and fix exec

Daniel Berteaud 2023-09-29 14:36:18 +02:00
parent 679f082910
commit 3285bce1c7
1 changed file with 150 additions and 118 deletions

ctctl

@@ -1,7 +1,10 @@
#!/usr/bin/env bash
trap ctctl_clean INT
# Print the current environment
current_env(){
ctctl_current_env(){
if [ -z "${CTCTL_DOMAIN}" ]; then
echo "Unknown container domain"
kill -INT $$
@@ -14,7 +17,7 @@ current_env(){
echo "Namespace: ${CTCTL_ENV}"
}
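# Check that both a container domain and a namespace are set (prints 1 if so, 0 otherwise)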
check_env() {
ctctl_check_env() {
if [ -n "${CTCTL_DOMAIN}" -a -n "${CTCTL_ENV}" ]; then
echo 1
else
@@ -22,7 +25,7 @@ check_env() {
fi
}
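# Load the per-namespace configuration from ~/.ctctl/<domain>/<namespace>.conf when present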
load_config(){
ctctl_load_config(){
if [ -n "${CTCTL_DOMAIN}" -a -n "${CTCTL_ENV}" ]; then
# Load env configuration
if [ -e ~/.ctctl/${TARGET_DOM}/${CTCTL_ENV}.conf ]; then
@@ -41,13 +44,13 @@ load_config(){
}
# Switch to a target environment (either from no current, or from another current env)
switch_env(){
ctctl_switch_env(){
TARGET_DOM=$1
TARGET_NAMESPACE=$2
if [ -z "${TARGET_DOM}" ]; then
echo "Select the container install you want to work on"
TARGET_DOM=$(ls_env | fzf)
TARGET_DOM=$(ctctl_ls_env | fzf)
fi
if [ ! -e ~/.ctctl/${TARGET_DOM}/ctctl.conf ]; then
@@ -75,14 +78,14 @@ switch_env(){
fi
# Authenticate
auth_env
ctctl_auth_env
if [ -z "${TARGET_NAMESPACE}" ]; then
if [ $(ls_namespace | wc -w) -eq 1 ]; then
TARGET_NAMESPACE=$(ls_namespace)
if [ $(ctctl_ls_namespace | wc -w) -eq 1 ]; then
TARGET_NAMESPACE=$(ctctl_ls_namespace)
else
echo "Select the namespace you are working in"
TARGET_NAMESPACE=$(ls_namespace | fzf)
TARGET_NAMESPACE=$(ctctl_ls_namespace | fzf)
fi
fi
export CTCTL_ENV=${TARGET_NAMESPACE}
@@ -93,7 +96,7 @@ switch_env(){
}
# Check if we have a valid token for vault
check_vault_token(){
ctctl_check_vault_token(){
vault token lookup > /dev/null 2>&1
if vault token lookup > /dev/null 2>&1; then
echo 1
@@ -103,7 +106,7 @@ check_vault_token(){
}
# Check if we have a valid token for consul
check_consul_token(){
ctctl_check_consul_token(){
CONSUL_TOKEN_VALID=0
if [ -n "${CONSUL_HTTP_TOKEN}" ]; then
consul acl token read -self > /dev/null 2>&1
@@ -118,7 +121,7 @@ check_consul_token(){
}
# Check if we have a valid token for nomad
check_nomad_token(){
ctctl_check_nomad_token(){
if [ -n "${NOMAD_TOKEN}" ]; then
nomad acl token self > /dev/null 2>&1
if [ $? == 0 ]; then
@@ -132,20 +135,20 @@ check_nomad_token(){
}
# Auth on vault, consul and nomad on the current env
auth_env(){
ctctl_auth_env(){
if [ -z "${CTCTL_DOMAIN}" ]; then
echo "Unknown environment"
kill -INT $$
fi
NEED_LOGIN=1
if [ "$(check_vault_token)" != "1" ]; then
if [ "$(ctctl_check_vault_token)" != "1" ]; then
if [ -e ~/.vault-token ]; then
# We might have a valid token in ~/.vault-token while an expired one exported in
# VAULT_TOKEN takes precedence, so load the token from the file and check again.
# If it's still invalid, unset VAULT_TOKEN
export VAULT_TOKEN=$(cat ~/.vault-token)
if [ "$(check_vault_token)" != "1" ]; then
if [ "$(ctctl_check_vault_token)" != "1" ]; then
unset VAULT_TOKEN
else
NEED_LOGIN=0
@@ -192,7 +195,7 @@ auth_env(){
consul-template -config ~/.ctctl/${CTCTL_DOMAIN}/nomad/consul-template.hcl -once
# Check if we have a valid nomad token already
if [ "$(check_nomad_token)" != "1" ]; then
if [ "$(ctctl_check_nomad_token)" != "1" ]; then
echo "Fecthing a Nomad token from vault"
NOMAD_CREDS=$(vault read -format=json ${VAULT_PREFIX:-}nomad/creds/${NOMAD_ROLE})
export NOMAD_TOKEN=$(echo -n ${NOMAD_CREDS} | jq -r .data.secret_id)
@@ -203,7 +206,7 @@ auth_env(){
vault lease renew ${NOMAD_LEASE} >/dev/null
fi
# Check if we have a valid consul token already
if [ "$(check_consul_token)" != "1" ]; then
if [ "$(ctctl_check_consul_token)" != "1" ]; then
echo "Fetching a Consul token from vault"
CONSUL_CREDS=$(vault read -format=json ${VAULT_PREFIX:-}consul/creds/${CONSUL_ROLE})
export CONSUL_HTTP_TOKEN=$(echo -n ${CONSUL_CREDS} | jq -r .data.token)
@@ -214,10 +217,10 @@ auth_env(){
vault lease renew ${CONSUL_LEASE} >/dev/null
fi
load_config
ctctl_load_config
}
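# Renew the vault token and the nomad/consul leases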
renew_leases(){
ctctl_renew_leases(){
# Renew vault token
[ -n "${VAULT_TOKEN}" ] && vault token renew >/dev/null
[ -n "${NOMAD_LEASE}" ] && vault lease renew ${NOMAD_LEASE} >/dev/null
@@ -225,7 +228,7 @@ renew_leases(){
}
# Logout from the current env
logout_env(){
ctctl_logout_env(){
if [ -z "${CTCTL_DOMAIN}" ]; then
echo "Unknown environment"
kill -INT $$
@@ -239,32 +242,32 @@ logout_env(){
}
# List available env
ls_env(){
ctctl_ls_env(){
find ~/.ctctl/ -name ctctl.conf | xargs dirname | xargs basename -a
}
# List available namespaces
ls_namespace(){
ctctl_ls_namespace(){
nomad namespace list -json | jq -r ".[] | .Name"
}
# List buildable Docker images
ls_build_docker_images(){
ctctl_ls_build_docker_images(){
(for JOB in $(find . -maxdepth 1 \( -name \*.nomad -o -name \*.nomad.hcl \)); do
nomad run -output $JOB | jq '.Job.TaskGroups' | jq '.[] | .Tasks' | jq -r '.[] | .Config.image' 2>/dev/null
done) | grep -E "${CTCTL_DOCKER_BUILD_REPO_REGEX:-docker-repo.ehtrace.com}" | sort -u
}
# Load policies for vault, Consul and Nomad
load_policies(){
if [ "$(check_env)" = "0" ]; then
ctctl_load_policies(){
if [ "$(ctctl_check_env)" = "0" ]; then
echo "Not currently in a valid env. Run ctctl (with no argument) and select your env first"
kill -INT $$
fi
for DIR in ./output .; do
if [ -d "${DIR}/vault/policies" ]; then
if [ "$(check_vault_token)" != "1" ]; then
if [ "$(ctctl_check_vault_token)" != "1" ]; then
echo "No valid vault token. You have to authenticate first"
kill -INT $$
fi
@@ -278,7 +281,7 @@ load_policies(){
fi
PNAME=$(basename ${PFILE} .hcl)
echo "Loading vault policy ${PNAME}"
replace_conf_var ${PFILE} | vault policy write ${PNAME} -
ctctl_replace_conf_var ${PFILE} | vault policy write ${PNAME} -
done
fi
if [ -d "${DIR}/consul/policies" ]; then
@@ -300,15 +303,15 @@ load_policies(){
# so we need to detect if the policy already exists
if [ "$(echo ${CONSUL_CUR_POLICIES} | jq -r '.[] | select(.Name=='\"${PNAME}\"') | .Name')" == "${PNAME}" ]; then
echo "Updating consul policy ${PNAME}"
replace_conf_var ${PFILE} | consul acl policy update -name=${PNAME} -rules=-
ctctl_replace_conf_var ${PFILE} | consul acl policy update -name=${PNAME} -rules=-
else
echo "Adding new consul policy ${PNAME}"
replace_conf_var ${PFILE} | consul acl policy create -name=${PNAME} -rules=-
ctctl_replace_conf_var ${PFILE} | consul acl policy create -name=${PNAME} -rules=-
fi
done
fi
if [ -d "${DIR}/nomad/policies" ]; then
if [ "$(check_nomad_token)" != "1" ]; then
if [ "$(ctctl_check_nomad_token)" != "1" ]; then
echo "No valid nomad token. You have to authenticate first"
kill -INT $$
fi
@@ -322,18 +325,18 @@ load_policies(){
continue
fi
echo "Loading Nomad policy ${PNAME}"
replace_conf_var ${PFILE} | nomad acl policy apply ${PNAME} -
ctctl_replace_conf_var ${PFILE} | nomad acl policy apply ${PNAME} -
done
fi
done
}
# Load consul config
load_consul_conf(){
ctctl_load_consul_conf(){
for DIR in ./output .; do
if [ -d "${DIR}/consul/config" ]; then
if [ "$(check_consul_token)" != "1" ]; then
if [ "$(ctctl_check_consul_token)" != "1" ]; then
echo "No valid consul token. You have to authenticate first"
kill -INT $$
fi
@@ -349,7 +352,7 @@ load_consul_conf(){
fi
echo "Loading consul conf from ${FILE}"
TEMP=$(mktemp)
replace_conf_var ${FILE} > ${TEMP}
ctctl_replace_conf_var ${FILE} > ${TEMP}
consul config write ${TEMP}
rm -f ${TEMP}
done
@@ -368,7 +371,7 @@ load_consul_conf(){
fi
echo "Loading consul conf from ${FILE}"
TEMP=$(mktemp)
replace_conf_var ${FILE} > ${TEMP}
ctctl_replace_conf_var ${FILE} > ${TEMP}
consul config write ${TEMP}
rm -f ${TEMP}
done
@@ -379,8 +382,8 @@ load_consul_conf(){
}
# Build all images for the current project
build_required_images(){
for DOCKER_IMAGE in $(ls_build_docker_images); do
ctctl_build_required_images(){
for DOCKER_IMAGE in $(ctctl_ls_build_docker_images); do
if ! docker manifest inspect ${DOCKER_IMAGE} > /dev/null 2>&1; then
build_image ${DOCKER_IMAGE}
else
@@ -390,15 +393,15 @@ build_required_images(){
}
# Build selected images
build_selected_images(){
ctctl_build_selected_images(){
local NO_CACHE=$1
for DOCKER_IMAGE in $(ls_build_docker_images | fzf -m); do
for DOCKER_IMAGE in $(ctctl_ls_build_docker_images | fzf -m); do
build_image "${DOCKER_IMAGE}" ${NO_CACHE}
done
}
# Build a single image
build_image(){
ctctl_build_image(){
local DOCKER_IMAGE=$1
local NO_CACHE=$2
export DOCKER_BUILDKIT=1
@@ -432,7 +435,7 @@ build_image(){
}
# Run all executable in the render.d directory
handle_render_scripts(){
ctctl_handle_render_scripts(){
for DIR in ./output ./; do
if [ -d "${DIR}/render.d" ]; then
for H in $(find ${DIR}/render.d -type f -o -type l | sort); do
@@ -448,7 +451,7 @@ handle_render_scripts(){
}
# Run all executable in the prep.d directory
handle_prep_scripts(){
ctctl_handle_prep_scripts(){
for DIR in ./output ./; do
if [ -d "${DIR}/prep.d" ]; then
for H in $(find ${DIR}/prep.d -type f -o -type l | sort); do
@@ -463,7 +466,7 @@ handle_prep_scripts(){
done
}
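# Add a bundle as a git submodule (args: name, url, branch, destination dir)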
add_submodule(){
ctctl_add_submodule(){
local NAME=$1
local URL=$2
local BRANCH=$3
@@ -481,7 +484,7 @@ add_submodule(){
}
# Update ctctl bundles with git
update_submodules(){
ctctl_update_submodules(){
if [ -e "bundles.yml" ]; then
for BUNDLE in $(yq e -o=j -I=0 '.bundles[]' bundles.yml); do
local URL=$(echo ${BUNDLE} | jq -r .url)
@@ -492,7 +495,7 @@ update_submodules(){
if [ "${BRANCH}" = "null" ]; then
BRANCH=master
fi
add_submodule ${NAME} ${URL} ${BRANCH} bundles
ctctl_add_submodule ${NAME} ${URL} ${BRANCH} bundles
if [ -e "bundles/${NAME}/bundles.yml" ]; then
for DEP in $(yq e -o=j -I=0 '.dependencies[]' bundles/${NAME}/bundles.yml); do
local DEP_URL=$(echo ${DEP} | jq -r .url)
@@ -508,7 +511,7 @@ update_submodules(){
if echo ${DEP_URL} | grep -qE '^\.\./'; then
DEP_URL=$(dirname ${URL})/$(echo ${DEP_URL} | sed -E 's|^\.\./||')
fi
add_submodule ${DEP_NAME} ${DEP_URL} ${DEP_BRANCH} bundles
ctctl_add_submodule ${DEP_NAME} ${DEP_URL} ${DEP_BRANCH} bundles
done
fi
done
@@ -516,10 +519,10 @@ update_submodules(){
}
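# Example bundles.yml as consumed above (hypothetical names and URLs):
#   bundles:
#     - name: common
#       url: https://git.example.org/ct/common.git
#       branch: master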
# Render templates using gomplate (or levant for backward compat)
render_templates(){
ctctl_render_templates(){
# If a bundles.yml file exist, use the new gomplate rendering method
if [ -e bundles.yml ]; then
if [ -e "bundles.yml" ]; then
mkdir -p bundles
CONFIG=$(mktemp -t tmp.XXXXXXXXX.yml)
@@ -535,7 +538,7 @@ render_templates(){
echo "Working on the ${NAME} bundle"
if [ ! -d bundles/${NAME} ]; then
update_submodules
ctctl_update_submodules
fi
# Use [[ and ]] so it won't clash with consul-template fragments
@@ -632,7 +635,7 @@ render_templates(){
find ./ -maxdepth 1 -type f \( -name \*nomad.hcl -o -name \*.nomad \) -exec nomad fmt {} \;
# Run prep.d scripts
handle_render_scripts
ctctl_handle_render_scripts
# And now delete the config
rm -f ${CONFIG}
@@ -640,9 +643,9 @@ render_templates(){
else
# backward compatible, levant based rendering
MERGED_CONF=$(mktemp tmp.XXXXXXXX.yml)
get_merged_conf > ${MERGED_CONF}
ctctl_get_merged_conf > ${MERGED_CONF}
handle_render_scripts ${MERGED_CONF}
ctctl_handle_render_scripts ${MERGED_CONF}
for TEMPLATE in $(find . -type f -name \*.tpl ! -path "*templates/*"); do
local DIR=$(dirname ${TEMPLATE})
@@ -659,13 +662,13 @@ render_templates(){
}
# Print Consul and Nomad tokens (not vault, for security reasons)
print_tokens(){
if [ "$(check_nomad_token)" == "1" ]; then
ctctl_print_tokens(){
if [ "$(ctctl_check_nomad_token)" == "1" ]; then
echo "Nomad token: ${NOMAD_TOKEN}"
else
echo "No valid Nomad token, you should auth with ctctl auth"
fi
if [ "$(check_consul_token)" == "1" ]; then
if [ "$(ctctl_check_consul_token)" == "1" ]; then
echo "Consul token: ${CONSUL_HTTP_TOKEN}"
else
echo "No valid Consul token, you should auth with ctctl auth"
@@ -673,7 +676,7 @@ print_tokens(){
}
# Follow current jobs logs
loki_logs(){
ctctl_loki_logs(){
# Remove the first arg passed to ctctl (the subcommand)
shift
local SELECTOR
@@ -697,7 +700,7 @@ loki_logs(){
${LOGCLI_CMD} $@
else
# Exclude connect-proxy logs as it's often not wanted
SELECTOR='{job=~"'$(ls_jobs | sed -zE 's/\n/|/g' | sed -E 's/\s+//')'", task!~"connect-proxy-.+|tls-proxy|metrics-proxy"}'
SELECTOR='{job=~"'$(ctctl_ls_jobs | sed -zE 's/\n/|/g' | sed -E 's/\s+//')'", task!~"connect-proxy-.+|tls-proxy|metrics-proxy"}'
echo "Running ${LOGCLI_CMD} $@ ${SELECTOR}"
${LOGCLI_CMD} $@ "${SELECTOR}"
fi
@@ -707,7 +710,7 @@ loki_logs(){
### Helpers ###
# Merge the configuration files for the current env and return the result (as string)
get_merged_conf() {
ctctl_get_merged_conf() {
CONF_FILES=""
if [ -e "./vars/defaults.yml" ]; then
CONF_FILES="./vars/defaults.yml"
@@ -731,9 +734,9 @@ get_merged_conf() {
# Replace ${local.conf.foo} or ${foo} with the value of foo from the various merged configuration files
# This is used to have policies (vault, consul, nomad) and config (consul intentions etc.) with variables
replace_conf_var() {
ctctl_replace_conf_var() {
MERGED_CONF=$(mktemp)
get_merged_conf > $MERGED_CONF
ctctl_get_merged_conf > $MERGED_CONF
RES=$(cat $1 | \
# Replace ${local.conf.foo} or ${foo} with the value of foo from the various merged configuration files \
# This is used to have policies (vault, consul, nomad) and config (consul intentions etc.) with variables \
@@ -746,47 +749,58 @@ replace_conf_var() {
}
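# Example (hypothetical key): with "instance: prod" in the merged conf, a policy line
#   path "kv/data/${local.conf.instance}/*"
# is rendered as
#   path "kv/data/prod/*"
# before being piped to vault/consul/nomad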
# Get a value from the conf
get_conf(){
get_merged_conf | yq ".$1"
ctctl_get_conf(){
ctctl_get_merged_conf | yq ".$1"
}
# Return a space separated list of jobs in the current dir
ls_jobs(){
ctctl_ls_jobs(){
local JOBS=""
for JOBFILE in $(find . -maxdepth 1 \( -name \*.nomad -o -name \*.nomad.hcl \)); do
echo $(nomad run -output ${JOBFILE} | jq -r '.Job.Name')
done
unset JOB JOBFILE
}
# Return a list of allocations for the given job
ls_alloc_of_job(){
ctctl_ls_alloc_of_job(){
local JOB=$1
local IFS=$'\n'
for ALLOC in $(nomad alloc status -json | jq -c ".[] | select(.JobID==\"${JOB}\") | select(.ClientStatus==\"running\")"); do
local ID=$(echo ${ALLOC} | jq -r .ID)
local GROUP=$(echo ${ALLOC} | jq -r .TaskGroup)
local ALLOC_INDEX=$(echo ${ALLOC} | jq -r .Name | sed -E "s/.*\[([0-9]+)\].*/\1/")
local HOST=$(echo ${ALLOC} | jq -r .NodeName)
local ID="$(echo ${ALLOC} | jq -r .ID)"
local GROUP="$(echo ${ALLOC} | jq -r .TaskGroup)"
local ALLOC_INDEX="$(echo ${ALLOC} | jq -r .Name | sed -E "s/.*\[([0-9]+)\].*/\1/")"
local HOST="$(echo ${ALLOC} | jq -r .NodeName)"
echo "${ID} (Task group ${GROUP}, allocation index ${ALLOC_INDEX} on host ${HOST}"
done
unset JOB ID GROUP ALLOC_INDEX HOST
}
# Return a list of tasks for the given allocation
ls_tasks_of_alloc(){
ctctl_ls_tasks_of_alloc(){
local ALLOC=$1
local IFS=$'\n'
for TASK in $(nomad alloc status -json ${ALLOC} | jq -r '.TaskResources | keys[]'); do
echo ${TASK}
for TASK in $(nomad alloc status -json "${ALLOC}" | jq -r '.TaskResources | keys[]'); do
echo "${TASK}"
done
unset TASK ALLOC
}
# Exec a command in a container
exec_ct(){
ctctl_exec_ct(){
local IFS=$'\n'
local CMD=$1
ALLOC=$((for JOB in $(ls_jobs); do ls_alloc_of_job ${JOB}; done) | fzf -m)
ALLOCS=$(for JOB in $(ctctl_ls_jobs); do ctctl_ls_alloc_of_job ${JOB}; done)
if [ $(echo "${ALLOCS}" | wc -l) -eq 1 ]; then
ALLOC="${ALLOCS}"
else
ALLOC=$(echo "${ALLOCS}" | fzf)
fi
ALLOC=$(echo ${ALLOC} | sed -E 's/^([^ \(]+).*/\1/')
TASK=$(ls_tasks_of_alloc ${ALLOC})
# Only keep the UUID of the target alloc
ALLOC=$(echo ${ALLOC} | sed -E 's/^([^ \(]+).*/\1/')
TASKS=$(ctctl_ls_tasks_of_alloc "${ALLOC}")
if [ $(echo "${TASKS}" | wc -l) -eq 1 ]; then
TASK=${TASKS}
else
@@ -794,104 +808,122 @@ exec_ct(){
fi
echo "Running nomad alloc exec -task ${TASK} ${ALLOC} ${CMD}"
nomad alloc exec -task ${TASK} ${ALLOC} ${CMD}
unset TASKS TASK ALLOCS ALLOC CMD
}
# Enter a container by execing sh
# This is just a shortcut for exec sh
enter_ct(){
exec_ct sh
ctctl_enter_ct(){
ctctl_exec_ct sh
}
# Follow logs of a task
alloc_logs(){
ctctl_alloc_logs(){
local IFS=$'\n'
ALLOC=$((for JOB in $(ls_jobs); do ls_alloc_of_job ${JOB}; done) | fzf -m)
ALLOCS=$(for JOB in $(ctctl_ls_jobs); do ctctl_ls_alloc_of_job ${JOB}; done)
if [ $(echo "${ALLOCS}" | wc -l) -eq 1 ]; then
ALLOC="${ALLOCS}"
else
ALLOC=$(echo "${ALLOCS}" | fzf)
fi
ALLOC=$(echo ${ALLOC} | sed -E 's/^([^ \(]+).*/\1/')
TASK=$(ls_tasks_of_alloc ${ALLOC})
TASKS=$(ctctl_ls_tasks_of_alloc ${ALLOC})
if [ $(echo "${TASKS}" | wc -l) -eq 1 ]; then
TASK=${TASKS}
else
TASK=$(echo "${TASKS}" | fzf -m)
fi
echo "Running nomad alloc logs -f ${ALLOC} ${TASK}"
nomad alloc logs -f ${ALLOC} ${TASK}
echo "Running nomad alloc logs -f -tail -n 50 ${ALLOC} ${TASK}"
nomad alloc logs -f -tail -n 50 ${ALLOC} ${TASK}
unset ALLOCS ALLOC TASKS TASK
}
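# Cleanup handler: run on SIGINT (see the trap at the top) and once at the end of the script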
ctctl_clean(){
# Clean up by unsetting all ctctl_ functions
for FUNC in $(declare -F | grep -E '^declare -f ctctl_' | sed -E 's/^declare -f //'); do
unset -f ${FUNC}
done
# Remove trap on SIGINT
trap - INT
}
export FZF_DEFAULT_OPTS=${FZF_DEFAULT_OPTS:-"--height=~10% --cycle --bind 'space:toggle' --marker='*'"}
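# Dispatch on the subcommand passed as the first argument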
case $1 in
current)
current_env
renew_leases
ctctl_current_env
ctctl_renew_leases
;;
auth)
auth_env
ctctl_auth_env
;;
disconnect)
logout_env
ctctl_logout_env
;;
ls|list)
ls_env
renew_leases
ctctl_ls_env
ctctl_renew_leases
;;
render)
render_templates
renew_leases
ctctl_render_templates
ctctl_renew_leases
;;
fetch)
update_submodules
render_templates
handle_prep_scripts
renew_leases
ctctl_update_submodules
ctctl_render_templates
ctctl_handle_prep_scripts
ctctl_renew_leases
;;
prep)
update_submodules
render_templates
handle_prep_scripts
load_policies
load_consul_conf
build_required_images
renew_leases
ctctl_update_submodules
ctctl_render_templates
ctctl_handle_prep_scripts
ctctl_load_policies
ctctl_load_consul_conf
ctctl_build_required_images
ctctl_renew_leases
;;
build)
build_selected_images
renew_leases
ctctl_build_selected_images
ctctl_renew_leases
;;
build-no-cache)
build_selected_images "no-cache"
renew_leases
ctctl_build_selected_images "no-cache"
ctctl_renew_leases
;;
tokens)
print_tokens
renew_leases
ctctl_print_tokens
ctctl_renew_leases
;;
logs)
shift
alloc_logs "$@"
renew_leases
ctctl_alloc_logs "$@"
ctctl_renew_leases
;;
loki)
loki_logs "$@"
renew_leases
ctctl_loki_logs "$@"
ctctl_renew_leases
;;
conf)
get_merged_conf
renew_leases
ctctl_get_merged_conf
ctctl_renew_leases
;;
exec)
shift
exec_ct "$@"
renew_leases
ctctl_exec_ct "$@"
ctctl_renew_leases
;;
sh)
enter_ct
renew_leases
ctctl_enter_ct
ctctl_renew_leases
;;
switch)
shift
switch_env "$@"
ctctl_switch_env "$@"
;;
*)
switch_env "$@"
ctctl_switch_env "$@"
;;
esac
ctctl_clean