#!/usr/bin/env bash
#
# Diagnose K10 repositories of different types, or connect to them for manual
# inspection/management, by launching k10tools pods in the K10 namespace of
# the current cluster. Run with invalid arguments to see full usage.

set -o nounset
set -o errexit

# COLOR CONSTANTS (ANSI escape sequences rendered by printf)
readonly GREEN='\033[0;32m'
readonly LIGHT_BLUE='\033[1;34m'
readonly RED='\033[0;31m'
readonly NC='\033[0m'

# CLI tools this script shells out to.
# NOTE(review): this list is never checked (e.g. via `command -v`) in the
# visible source — presumably intended for a preflight check; verify.
readonly -a REQUIRED_TOOLS=(
    kubectl
    helm
)

script=$(basename "$0")   # quoted so a path containing spaces is handled
# default values for different loggers (overridable via -L name=value)
k10_log_level="info"
kopia_log_level="info"
kopia_file_log_level="debug"

# Print full usage to stdout. ${script} is interpolated into the heredoc.
usage() {
    cat <<EOF
Usage: ${script} -r RepoType -o Operation ...

This script is used to diagnose K10 repositories of different types, or
to manually connect to such repositories to inspect or manage their content.

The script uses kubectl to launch a diagnostic pod in the K10 namespace
of the current cluster.  In the case of a manual connection, a second pod is
left executing with the repository runtime environment configured within.
The invoker is expected to use kubectl to run the appropriate interactive
tool in the second pod and terminate it when done.

Application repository usage:
  ${script} -r application -o diagnose -a ApplicationName -p Profile [-n K10Namespace]
  ${script} -r application -o connect -a ApplicationName -p Profile [-b [-w WorkloadName] [-P PVCName]* [-D PVCName]* [-T] [-V VolumeSnapshotClass]] [-n K10Namespace]
  ${script} -r application -o upgrade_begin -a ApplicationName -p Profile [-n K10Namespace]
  ${script} -r application -o upgrade_rollback -a ApplicationName -p Profile [-n K10Namespace]

Collection repository usage:
  ${script} -r collections -o diagnose -p Profile -l PolicyName [-u PolicyNamespace] [-n K10Namespace]
  ${script} -r collections -o connect -p Profile -l PolicyName [-u PolicyNamespace] [-n K10Namespace]
  ${script} -r collections -o upgrade_begin -p Profile -l PolicyName [-u PolicyNamespace] [-n K10Namespace]
  ${script} -r collections -o upgrade_rollback -p Profile -l PolicyName [-u PolicyNamespace] [-n K10Namespace]

Inventory usage:
  ${script} -r inventory [-p Profile] [-R RepositoryName] [-F json|table] [-O] [-n K10Namespace]

Cleanup usage:
  ${script} -r cleanup [-p Profile] [-R RepositoryName] [-S SnapshotIDPrefix] [-X] [-Z] [-A MinAge] [-n K10Namespace]
  -X              Dry-run: show what would be deleted/reported without acting (default in Makefile)
  -S SnapshotID   Target a single orphan by snapshot ID prefix
  -Z              DANGEROUS: also delete orphans blocked by raw catalog references
  -A MinAge       Minimum age for orphan deletion (default: 168h = 7d)

Disaster recovery repository usage:
  ${script} -r disaster_recovery -o diagnose -p Profile [-n K10Namespace]
  ${script} -r disaster_recovery -o connect -p Profile [-n K10Namespace]
  ${script} -r disaster_recovery -o upgrade_begin -p Profile [-n K10Namespace]
  ${script} -r disaster_recovery -o upgrade_rollback -p Profile [-n K10Namespace]

In all cases flags that override built in defaults may be specified as needed.

Flags:
-a ApplicationName     The application name is the name of the workload namespace. Single workload applications
                       often use their namespace name for the workload object too, but if this is not the case
                       then specify the workload name in addition.
-b                     Block mode application repository
-c ComponentTag        Specify a separate image tag for datamover/kanister-tools components
                       (default: the resolved k10tools tag). Only used with -r inventory/cleanup.
-D PVCName             Specify a block volume mode PVC in the K10 namespace to be mounted as a block device in the
                       datamover pod. Optional, repeat for multiple, in the same order as the PVCs in the -P flag.
                       If specified then the PVCs identified by -P need not exist.
-i ImageRepo           Specify the image repo (default: "gcr.io/kasten-images")
-l PolicyName          Specify the name of a K10 policy
-n K10Namespace        Specify the K10 namespace (default: "kasten-io")
-o Operation           Specify an operation: diagnose, connect, upgrade_begin, upgrade_rollback
-p Profile             Specify the name of a K10 profile
-P PVCName             Specify an application namespaced PVC. Repeat for multiple.
-r RepoType            Specify the repository type: application, collections, disaster_recovery
-t ImageTag            Specify the image tag (default: latest "kasten/k10" in the local helm repo)
-T                     Specify if the PVCs are targets. Default is to consider them as sources.
-u PolicyNamespace     Specify the policy namespace. Default is the K10Namespace
-V VolumeSnapshotClass Specify a volume snapshot class name.
-w WorkloadName        Specify the name of the workload if different from the application name.
-L LogLevel            Specify tools log levels. Possible loggers to set:
                       k10_log_level(default "info") | kopia_log_level(default "info") | kopia_file_log_level(default "debug")
-F Format              Output format for inventory: table or json (default: table). Only used with -r inventory.
-O                     Show only orphaned snapshots. Only used with -r inventory.
-R RepositoryName      Filter inventory to a specific repository by name. Only used with -r inventory/cleanup.
-S SnapshotIDPrefix    Target a single orphaned snapshot by ID prefix. Only used with -r cleanup.
-X                     Dry-run mode: show what would be deleted without deleting. Only used with -r cleanup.
-Z                     DANGEROUS: delete orphans blocked by raw catalog references. Only with -r cleanup.
-A MinAge              Minimum age for orphan deletion (default: 168h = 7d). Only used with -r cleanup.
EOF
}

print_heading() {
    # Print a message in light blue. '%b' renders the color escape sequences
    # while '%s' keeps the message literal, so a message containing '%' (or
    # other printf directives) can no longer corrupt the output.
    printf '%b%s%b\n' "${LIGHT_BLUE}" "$1" "${NC}"
}

print_error(){
    # Print an error message in red to stderr, dump usage, and abort the
    # script. '%s' keeps the message literal so '%' in the message is safe.
    printf '%b%s%b\n' "${RED}" "$1" "${NC}" >&2
    usage >&2
    exit 1
}

print_success(){
    # Print a message in green. '%s' keeps the (possibly multi-line) message
    # literal so it cannot be misread as a printf format string.
    printf '%b%s%b\n' "${GREEN}" "$1" "${NC}"
}

set_k10_tools_version() {
    # Resolve the k10tools image tag into the global K10_VER.
    # $1 (optional): explicit tag; when empty, take the latest kasten/k10
    # chart version from the local helm repo.
    print_heading "Fetching K10 Tools Image tag"
    local -r custom_tag=${1:-""}
    if [[ ${custom_tag} == "" ]]; then
      # Declaration split from assignment so the pipeline's failure is not
      # masked by the exit status of 'local'.
      local latest_k10_ver
      latest_k10_ver=$(helm search repo kasten/k10 -o yaml | head -1 | awk '{print $3}')
      K10_VER=${latest_k10_ver}
    else
      K10_VER=${custom_tag}
    fi
    print_success "K10 Tools image tag: ${K10_VER}"
}

validate_repo_type() {
    # Validate the -r repo type and enforce the cross-flag constraints that
    # depend on it: block mode is application-only, and -P/-D/-T only make
    # sense in block mode (-D additionally requires -P).
    local -r repo_type=${1?"unspecified parameter 1: repo_type "}
    case ${repo_type} in
        application|disaster_recovery|collections|inventory|cleanup) ;;
        *) print_error "Unsupported repo type: ${repo_type}" ;;
    esac
    if [[ -n "${block_mode}" && ${repo_type} != "application" ]]; then
        print_error "Block mode is only supported for the 'application' repo type"
    fi
    if [[ -n "${pvc_names}" && -z "${block_mode}" ]]; then
        print_error "-P is only supported with block mode"
    fi
    if [[ -n "${device_names}" && -z "${block_mode}" ]]; then
        print_error "-D is only supported with block mode"
    fi
    if [[ -n "${devices_are_targets}" && -z "${block_mode}" ]]; then
        print_error "-T is only supported with block mode"
    fi
    if [[ -n "${device_names}" && -z "${pvc_names}" ]]; then
        print_error "-D must be used with -P"
    fi

    return 0
}

validate_and_set_log_level() {
    # Accept a "name=value" pair where name is one of the three known
    # loggers, and assign value to the like-named global; reject anything
    # else with an error.
    local -r spec=$1
    case ${spec} in
        k10_log_level=*|kopia_log_level=*|kopia_file_log_level=*)
            local logger=${spec%%=*}
            local value=${spec#*=}
            declare -g "${logger}=${value}"
            print_success "$logger set to $value"
            ;;
        *)
            print_error "Invalid log level parameter value. Must match 'k10_log_level=*' or 'kopia_log_level=*' or 'kopia_file_log_level=*'."
            ;;
    esac
}

create_k10tools_pod() {
    # Generate a pod spec file and create the k10tools diagnostic pod in the
    # K10 namespace, then block until it reports phase Running.
    # Globals read: k10_namespace, IMAGE_REPO, K10_VER, POD_NAME,
    # POD_SPEC_FILE, k10_log_level, kopia_log_level, kopia_file_log_level.
    print_heading "Creating K10Tools pod"
    local has_volumes=0
    local have_k10_features_cm=0
    # Probe for the optional k10-features ConfigMap; errexit is suspended so
    # its absence does not abort the script.
    set +e
    kubectl -n  ${k10_namespace} get cm k10-features >/dev/null 2>&1
    if [[ $? == 0 ]]; then
        have_k10_features_cm=1
        has_volumes=1
    fi
    set -e
    # Base pod spec: an idle bash container (tail -f /dev/null) carrying the
    # datamover/kanister image references and log levels in its environment.
    cat > "${POD_SPEC_FILE}" << EOF
apiVersion: v1
kind: Pod
metadata:
  name: ${POD_NAME}
  labels:
    release: k10
  namespace: ${k10_namespace}
spec:
  containers:
  - args:
    - -c
    - tail -f /dev/null
    command:
    - /bin/bash
    image: '${IMAGE_REPO}/k10tools:${K10_VER}'
    imagePullPolicy: Always
    name: repo-checker
    securityContext:
      runAsUser: 0
    env:
    - name: DATA_MOVER_IMAGE
      value: '${IMAGE_REPO}/datamover:${K10_VER}'
    - name: KANISTER_TOOLS
      value: '${IMAGE_REPO}/kanister-tools:${K10_VER}'
    - name: LOG_LEVEL
      value: ${k10_log_level}
    - name: DATA_STORE_LOG_LEVEL
      value: ${kopia_log_level}
    - name: DATA_STORE_FILE_LOG_LEVEL
      value: ${kopia_file_log_level}
EOF
    # Append the volumeMounts section only when at least one mount exists.
    if [[ $has_volumes == 1 ]]; then
        cat >> "${POD_SPEC_FILE}" << EOF
    volumeMounts:
EOF
    fi
    if [[ $have_k10_features_cm == 1 ]]; then
        cat >> "${POD_SPEC_FILE}" << EOF
    - mountPath: /mnt/k10-features
      name: k10-features
EOF
    fi
    cat >> "${POD_SPEC_FILE}" << EOF
  serviceAccountName: executor-svc
  imagePullSecrets:
  - name: k10-ecr
EOF
    # Mirror the mount sections above with the matching pod-level volumes.
    if [[ $has_volumes == 1 ]]; then
        cat >> "${POD_SPEC_FILE}" << EOF
  volumes:
EOF
    fi
    if [[ $have_k10_features_cm == 1 ]]; then
        cat >> "${POD_SPEC_FILE}" << EOF
  - configMap:
      defaultMode: 420
      name: k10-features
    name: k10-features
EOF
    fi

    kubectl create -f "${POD_SPEC_FILE}"
    sleep 5
    # NOTE(review): this wait loop has no timeout — an unschedulable pod
    # makes the script poll forever; consider bounding the wait.
    while true;
    do
        status=$(kubectl get pod -n ${k10_namespace} ${POD_NAME} -o 'jsonpath={.status.phase}')
        if [[ ${status} == "Running" ]]; then
            print_success "Created K10 tools pod"
            break
        else
            echo "Waiting for K10 tools pod to be Running, current status: ${status}"
            sleep 1
        fi
    done
}

delete_k10tools_pod() {
    # EXIT-trap handler: tear down the diagnostic pod and its spec file.
    print_heading "Deleting K10 Tools Pod"
    kubectl delete pod "${POD_NAME}" -n "${k10_namespace}"
    # -f so the trap does not fail if the spec file was already removed;
    # -- protects against a file name starting with '-'.
    rm -f -- "${POD_SPEC_FILE}"
}

inspect_application_repository() {
    # Run the requested operation against an application repository via the
    # k10tools pod. Globals required: operation, application, profile_name.
    if [[ "${operation}" == "" ]]; then
        print_error "operation must be provided"
    fi
    if [[ "${application}" == "" ]]; then
        print_error "application must be provided"
    fi
    if [[ "${profile_name}" == "" ]]; then
        print_error "profile_name must be provided"
    fi
    case ${operation} in
        diagnose)
            if [[ "${block_mode}" != "" ]]; then
                print_error "block mode is only supported with the 'connect' operation"
            fi
            print_heading "Running diagnosis on the ${application} application repository"
            kubectl exec -i ${POD_NAME} -n ${k10_namespace} -- /k10tools repository diagnose application -a ${application} -p ${profile_name} -n ${k10_namespace} --image-registry ${IMAGE_REPO} --image-tag ${K10_VER}
            # Bundle the kopia debug logs inside the pod, then copy the
            # archive out to a timestamped file in the current directory.
            # (Previously the destination was a bogus "$(unknown)" command
            # substitution and the computed filename was never used.)
            datetime=$(date -u +"%Y_%m_%d_%H_%M")
            filename="kopia-diagnose-${datetime}.tar.gz"
            kubectl exec ${POD_NAME} -n ${k10_namespace} -- tar -zcf /tmp/kopia-diagnose.tar.gz /tmp/kopia-debug-logs
            kubectl exec ${POD_NAME} -n ${k10_namespace} -- rm -rf /tmp/kopia-debug-logs
            kubectl cp -n ${k10_namespace} ${POD_NAME}:/tmp/kopia-diagnose.tar.gz "${filename}"
            ;;
        connect)
            print_heading "Establishing connection to the ${application} application repository"
            kubectl exec -i ${POD_NAME} -n ${k10_namespace} -- /k10tools repository connect application -a ${application} -p ${profile_name} -n ${k10_namespace} --image-registry ${IMAGE_REPO} --image-tag ${K10_VER} ${block_mode} ${workload_name} ${volume_snapshot_class} ${pvc_names} ${device_names} ${devices_are_targets}
            if [[ "${block_mode}" == "" ]]; then
                print_success "Successfully connected to kopia repository.

You may now setup this environment variable before running any commands:
'export KOPIA_CONFIG_PATH=/tmp/kopia-repository.config'

The config file has been generated at the path specified to this environment variable above."
            fi
            ;;
        upgrade_begin)
            print_heading "Upgrading for the ${application} application repository"
            kubectl exec -i ${POD_NAME} -n ${k10_namespace} -- /k10tools repository upgrade application begin -a ${application} -p ${profile_name} -n ${k10_namespace} --image-registry ${IMAGE_REPO} --image-tag ${K10_VER}
            ;;
        upgrade_rollback)
            print_heading "Rolling-back upgrade attempt for the ${application} application repository"
            kubectl exec -i ${POD_NAME} -n ${k10_namespace} -- /k10tools repository upgrade application rollback -a ${application} -p ${profile_name} -n ${k10_namespace} --image-registry ${IMAGE_REPO} --image-tag ${K10_VER}
            ;;
        *)
            print_error "Invalid operation ${operation}. Supported: diagnose, connect, upgrade_begin, upgrade_rollback"
            ;;
    esac
}

inspect_dr_repository() {
    # Run the requested operation against the K10 disaster-recovery
    # repository via the k10tools pod. Globals required: operation,
    # profile_name.
    if [[ "${operation}" == "" ]]; then
        print_error "operation must be provided"
    fi
    if [[ "${profile_name}" == "" ]]; then
        print_error "profile_name must be provided"
    fi
    case ${operation} in
        diagnose)
            print_heading "Running diagnosis on the K10 DR repository"
            kubectl exec -i ${POD_NAME} -n ${k10_namespace} -- /k10tools repository diagnose disaster-recovery -p ${profile_name} -n ${k10_namespace} --image-registry ${IMAGE_REPO} --image-tag ${K10_VER}
            # Bundle the kopia debug logs inside the pod, then copy the
            # archive out to a timestamped file in the current directory.
            # (Previously the destination was a bogus "$(unknown)" command
            # substitution and the computed filename was never used.)
            datetime=$(date -u +"%Y_%m_%d_%H_%M")
            filename="kopia-diagnose-${datetime}.tar.gz"
            kubectl exec ${POD_NAME} -n ${k10_namespace} -- tar -zcf /tmp/kopia-diagnose.tar.gz /tmp/kopia-debug-logs
            kubectl exec ${POD_NAME} -n ${k10_namespace} -- rm -rf /tmp/kopia-debug-logs
            kubectl cp -n ${k10_namespace} ${POD_NAME}:/tmp/kopia-diagnose.tar.gz "${filename}"
            ;;
        connect)
            print_heading "Establishing connection to the K10 DR repository"
            kubectl exec -i ${POD_NAME} -n ${k10_namespace} -- /k10tools repository connect disaster-recovery -p ${profile_name} -n ${k10_namespace} --image-registry ${IMAGE_REPO} --image-tag ${K10_VER}
            print_success "Successfully connected to kopia repository.

You may now setup this environment variable before running any commands:
'export KOPIA_CONFIG_PATH=/tmp/kopia-repository.config'

The config file has been generated at the path specified to this environment variable above."
            ;;
        upgrade_begin)
            # Heading fixed: previously said "${application} application
            # repository", which is wrong (and usually empty) for DR.
            print_heading "Upgrading the K10 DR repository"
            kubectl exec -i ${POD_NAME} -n ${k10_namespace} -- /k10tools repository upgrade disaster-recovery begin -p ${profile_name} -n ${k10_namespace} --image-registry ${IMAGE_REPO} --image-tag ${K10_VER}
            ;;
        upgrade_rollback)
            print_heading "Rolling-back upgrade attempt for the K10 DR repository"
            kubectl exec -i ${POD_NAME} -n ${k10_namespace} -- /k10tools repository upgrade disaster-recovery rollback -p ${profile_name} -n ${k10_namespace} --image-registry ${IMAGE_REPO} --image-tag ${K10_VER}
            ;;
        *)
            print_error "Invalid operation ${operation}. Supported: diagnose, connect, upgrade_begin, upgrade_rollback"
            ;;
    esac
}

inspect_collections_repository() {
    # Run the requested operation against a collections (policy) repository
    # via the k10tools pod. Globals required: operation, policy_name,
    # profile_name; policy_namespace defaults to the K10 namespace.
    if [[ "${operation}" == "" ]]; then
        print_error "operation must be provided"
    fi
    if [[ "${policy_name}" == "" ]]; then
        print_error "policy_name must be provided"
    fi
    if [[ "${policy_namespace}" == "" ]]; then
        policy_namespace=${k10_namespace}
    fi
    if [[ "${profile_name}" == "" ]]; then
        print_error "profile_name must be provided"
    fi
    case ${operation} in
        diagnose)
            print_heading "Running diagnosis on the ${policy_name} policy repository"
            kubectl exec -i ${POD_NAME} -n ${k10_namespace} -- /k10tools repository diagnose collections -a ${policy_name} -u ${policy_namespace} -p ${profile_name} -n ${k10_namespace} --image-registry ${IMAGE_REPO} --image-tag ${K10_VER}
            # Bundle the kopia debug logs inside the pod, then copy the
            # archive out to a timestamped file in the current directory.
            # (Previously the destination was a bogus "$(unknown)" command
            # substitution and the computed filename was never used.)
            datetime=$(date -u +"%Y_%m_%d_%H_%M")
            filename="kopia-diagnose-${datetime}.tar.gz"
            kubectl exec ${POD_NAME} -n ${k10_namespace} -- tar -zcf /tmp/kopia-diagnose.tar.gz /tmp/kopia-debug-logs
            kubectl exec ${POD_NAME} -n ${k10_namespace} -- rm -rf /tmp/kopia-debug-logs
            kubectl cp -n ${k10_namespace} ${POD_NAME}:/tmp/kopia-diagnose.tar.gz "${filename}"
            ;;
        connect)
            print_heading "Establishing connection to the ${policy_name} policy repository"
            kubectl exec -i ${POD_NAME} -n ${k10_namespace} -- /k10tools repository connect collections -a ${policy_name} -u ${policy_namespace} -p ${profile_name} -n ${k10_namespace} --image-registry ${IMAGE_REPO} --image-tag ${K10_VER}
            print_success "Successfully connected to kopia repository.

You may now setup this environment variable before running any commands:
'export KOPIA_CONFIG_PATH=/tmp/kopia-repository.config'

The config file has been generated at the path specified to this environment variable above."
            ;;
        upgrade_begin)
            # Heading fixed: previously referenced "${application}", which is
            # wrong (and usually empty) for the collections repo type.
            print_heading "Upgrading the ${policy_name} policy repository"
            kubectl exec -i ${POD_NAME} -n ${k10_namespace} -- /k10tools repository upgrade collections begin -a ${policy_name} -u ${policy_namespace} -p ${profile_name} -n ${k10_namespace} --image-registry ${IMAGE_REPO} --image-tag ${K10_VER}
            ;;
        upgrade_rollback)
            print_heading "Rolling-back upgrade attempt for the ${policy_name} policy repository"
            kubectl exec -i ${POD_NAME} -n ${k10_namespace} -- /k10tools repository upgrade collections rollback -a ${policy_name} -u ${policy_namespace} -p ${profile_name} -n ${k10_namespace} --image-registry ${IMAGE_REPO} --image-tag ${K10_VER}
            ;;
        *)
            print_error "Invalid operation ${operation}. Supported: diagnose, connect, upgrade_begin, upgrade_rollback"
            ;;
    esac
}

run_inventory() {
    # Run 'k10tools repository inventory' in the diagnostic pod, forwarding
    # only the filters the user supplied (-p/-R/-F/-O).
    print_heading "Running repository inventory"
    local component_ver=${COMPONENT_VER:-${K10_VER}}
    # Build the command as an argv array rather than a whitespace-joined
    # string, so arguments are passed through without re-splitting.
    local -a cmd=(/k10tools repository inventory -n "${k10_namespace}" --image-registry "${IMAGE_REPO}" --image-tag "${component_ver}")
    if [[ "${profile_name}" != "" ]]; then
        cmd+=(-p "${profile_name}")
    fi
    if [[ "${inventory_repository}" != "" ]]; then
        cmd+=(-r "${inventory_repository}")
    fi
    if [[ "${inventory_output}" != "" ]]; then
        cmd+=(-o "${inventory_output}")
    fi
    if [[ "${inventory_orphaned_only}" != "" ]]; then
        cmd+=(--orphaned-only)
    fi
    kubectl exec -i "${POD_NAME}" -n "${k10_namespace}" -- "${cmd[@]}"
}

run_cleanup() {
    # Run 'k10tools repository cleanup' in the diagnostic pod, forwarding
    # only the options the user supplied (-p/-R/-S/-X/-Z/-A).
    print_heading "Cleaning up orphaned snapshots"
    local component_ver=${COMPONENT_VER:-${K10_VER}}
    # Build the command as an argv array rather than a whitespace-joined
    # string, so arguments are passed through without re-splitting.
    local -a cmd=(/k10tools repository cleanup -n "${k10_namespace}" --image-registry "${IMAGE_REPO}" --image-tag "${component_ver}")
    if [[ "${profile_name}" != "" ]]; then
        cmd+=(-p "${profile_name}")
    fi
    if [[ "${inventory_repository}" != "" ]]; then
        cmd+=(-r "${inventory_repository}")
    fi
    if [[ "${cleanup_snapshot_id}" != "" ]]; then
        cmd+=(--snapshot-id "${cleanup_snapshot_id}")
    fi
    if [[ "${cleanup_dry_run}" != "" ]]; then
        cmd+=(--dry-run)
    fi
    if [[ "${cleanup_allow_raw_refs}" != "" ]]; then
        cmd+=(--danger-allow-raw-refs)
    fi
    if [[ "${cleanup_min_age}" != "" ]]; then
        cmd+=(--min-age "${cleanup_min_age}")
    fi
    kubectl exec -i "${POD_NAME}" -n "${k10_namespace}" -- "${cmd[@]}"
}

# Unique pod name per invocation so concurrent runs do not collide.
readonly POD_NAME="k10tools-${RANDOM}"
# Generated pod spec is written here and removed by the cleanup trap.
readonly POD_SPEC_FILE="repo-checker.yaml"
K10_VER=""   # resolved later by set_k10_tools_version

# Flag value defaults — populated by the getopts loop below. Required under
# 'set -o nounset' so unset flags expand safely.
repo_type=""
profile_name=""
application=""
policy_name=""
policy_namespace=""
k10_namespace=""
custom_tag=""
custom_component_tag=""
custom_repo=""   # NOTE(review): never read in the visible source — dead?
operation=""
# The block-mode values below hold pre-formed flag fragments ("-b",
# "-P name", ...) that are later word-split into the k10tools command line.
block_mode=""
pvc_names=""
volume_snapshot_class=""
workload_name=""
device_names=""
devices_are_targets=""
inventory_repository=""
inventory_output=""
inventory_orphaned_only=""
cleanup_snapshot_id=""
cleanup_dry_run=""
cleanup_allow_raw_refs=""
cleanup_min_age=""

# Parse command-line flags. Most values are stored verbatim; the repeatable
# flags (-P/-D) accumulate "-P value"/"-D value" fragments that are later
# word-split into the k10tools command line, so values must not contain
# whitespace.
while getopts "r:p:o:a:l:u:n:t:c:i:bP:V:w:D:TL:R:F:OS:XZA:" opt; do
    case "${opt}" in
        r)
            repo_type="${OPTARG}"
            ;;
        p)
            profile_name="${OPTARG}"
            ;;
        o)
            operation="${OPTARG}"
            ;;
        a)
            application="${OPTARG}"
            ;;
        l)
            policy_name="${OPTARG}"
            ;;
        u)
            policy_namespace="${OPTARG}"
            ;;
        n)
            k10_namespace="${OPTARG}"
            ;;
        # Image overrides: -t k10tools tag, -c component tag, -i registry.
        t)
            custom_tag="${OPTARG}"
            ;;
        c)
            custom_component_tag="${OPTARG}"
            ;;
        i)
            custom_image="${OPTARG}"
            ;;
        # Block-mode flags (application repo type only; see validate_repo_type).
        b)
            block_mode="-b"
            ;;
        P)
            pvc_names="${pvc_names} -P ${OPTARG}"
            ;;
        V)
            volume_snapshot_class="-V ${OPTARG}"
            ;;
        w)
            workload_name="-w ${OPTARG}"
            ;;
        D)
            device_names="${device_names} -D ${OPTARG}"
            ;;
        T)
            devices_are_targets="-T"
            ;;
        # -L validates "name=value" and assigns the matching logger global.
        L)
            validate_and_set_log_level "${OPTARG}"
            ;;
        # Inventory/cleanup flags.
        R)
            inventory_repository="${OPTARG}"
            ;;
        F)
            inventory_output="${OPTARG}"
            ;;
        O)
            inventory_orphaned_only="true"
            ;;
        S)
            cleanup_snapshot_id="${OPTARG}"
            ;;
        X)
            cleanup_dry_run="true"
            ;;
        Z)
            cleanup_allow_raw_refs="true"
            ;;
        A)
            cleanup_min_age="${OPTARG}"
            ;;
        \?|\:)
            print_error "Unknown flag or value missing"
            ;;
    esac
done

# Resolve the image registry: -i flag wins, else the public Kasten registry.
IMAGE_REPO=${custom_image:-"gcr.io/kasten-images"}

if [[ ${k10_namespace} == "" ]]; then
    k10_namespace="kasten-io"
fi

# COMPONENT_VER defaults to K10_VER but can be overridden with -c to use a
# different tag for kanister-tools/datamover when only k10tools was rebuilt.
COMPONENT_VER=""

echo "Repo type: ${repo_type}"

# Aborts (via print_error) on an unknown type or invalid flag combination.
validate_repo_type "${repo_type}"

# Main dispatch: for each repo type, resolve the image tag, echo the
# effective parameters, create the diagnostic pod, arrange for its cleanup
# on exit, and run the type-specific handler.
# NOTE(review): the EXIT trap is installed only after create_k10tools_pod
# succeeds — a failure mid-creation leaves the spec file (and possibly the
# pod) behind; verify that is acceptable.
case ${repo_type} in
    application)
        set_k10_tools_version ${custom_tag}
        echo "Profile: ${profile_name}"
        echo "Application Name: ${application}"
        echo "Operation: ${operation}"
        echo "K10 Namespace: ${k10_namespace}"
        create_k10tools_pod
        trap delete_k10tools_pod EXIT
        inspect_application_repository
        ;;
    disaster_recovery)
        set_k10_tools_version ${custom_tag}
        echo "Profile: ${profile_name}"
        echo "Operation: ${operation}"
        echo "K10 Namespace: ${k10_namespace}"
        create_k10tools_pod
        trap delete_k10tools_pod EXIT
        inspect_dr_repository
        ;;
    collections)
        set_k10_tools_version ${custom_tag}
        echo "Profile: ${profile_name}"
        echo "Policy Name: ${policy_name}"
        echo "Policy Namespace: ${policy_namespace}"
        echo "Operation: ${operation}"
        echo "K10 Namespace: ${k10_namespace}"
        create_k10tools_pod
        trap delete_k10tools_pod EXIT
        inspect_collections_repository
        ;;
    # inventory/cleanup additionally honor -c for component image tags.
    inventory)
        set_k10_tools_version ${custom_tag}
        COMPONENT_VER=${custom_component_tag:-${K10_VER}}
        echo "Profile: ${profile_name}"
        echo "Repository Filter: ${inventory_repository}"
        echo "Output Format: ${inventory_output:-table}"
        echo "Orphaned Only: ${inventory_orphaned_only:-false}"
        echo "K10 Namespace: ${k10_namespace}"
        create_k10tools_pod
        trap delete_k10tools_pod EXIT
        run_inventory
        ;;
    cleanup)
        set_k10_tools_version ${custom_tag}
        COMPONENT_VER=${custom_component_tag:-${K10_VER}}
        echo "Profile: ${profile_name}"
        echo "Repository Filter: ${inventory_repository}"
        echo "Snapshot ID Filter: ${cleanup_snapshot_id:-<all>}"
        echo "Dry Run: ${cleanup_dry_run:-false}"
        echo "Allow Raw Refs: ${cleanup_allow_raw_refs:-false}"
        echo "Min Age: ${cleanup_min_age:-168h (default 7d)}"
        echo "K10 Namespace: ${k10_namespace}"
        create_k10tools_pod
        trap delete_k10tools_pod EXIT
        run_cleanup
        ;;
esac
